Commit 223ffb6a3d05 for kernel
commit 223ffb6a3d0582522455e83ccf7ad2d3a753e039
Author: Eduard Zingerman <eddyz87@gmail.com>
Date: Fri Mar 6 16:02:48 2026 -0800
selftests/bpf: add reproducer for spurious precision propagation through calls
Add a test for the scenario described in the previous commit:
an iterator loop with two paths where one ties r2/r7 via
shared scalar id and skips a call, while the other goes
through the call. Precision marks from the linked registers
get spuriously propagated to the call path via
propagate_precision(), hitting "backtracking call unexpected
regs" in backtrack_insn().
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20260306-linked-regs-and-propagate-precision-v1-2-18e859be570d@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
diff --git a/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c b/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
index 2ef346c827c2..7bf7dbfd237d 100644
--- a/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
+++ b/tools/testing/selftests/bpf/progs/verifier_linked_scalars.c
@@ -363,4 +363,68 @@ void alu32_negative_offset(void)
__sink(path[0]);
}
+/*
+ * Never invoked at runtime.  NOTE(review): presumably exists so the
+ * iterator kfuncs are referenced from C and their symbols resolve for
+ * the __imm() uses in the asm block below — confirm against the other
+ * iterator tests in this file.
+ */
+void dummy_calls(void)
+{
+ bpf_iter_num_new(0, 0, 0);
+ bpf_iter_num_next(0);
+ bpf_iter_num_destroy(0);
+}
+
+/*
+ * Reproducer for spurious precision propagation through calls (see the
+ * commit message): inside an iterator loop, one path ties r2/r7 via a
+ * shared scalar id and skips a call, while the other path goes through
+ * bpf_get_prandom_u32().  Precision marks on the linked registers were
+ * spuriously propagated to the call path via propagate_precision(),
+ * hitting "backtracking call unexpected regs" in backtrack_insn().
+ * BPF_F_TEST_STATE_FREQ makes verifier checkpoints frequent enough to
+ * place one at the merge point (label 3) where the cache hit happens.
+ */
+SEC("socket")
+__success
+__flag(BPF_F_TEST_STATE_FREQ)
+int spurious_precision_marks(void *ctx)
+{
+ struct bpf_iter_num iter;
+
+ asm volatile(
+ "r1 = %[iter];"
+ "r2 = 0;"
+ "r3 = 10;"
+ "call %[bpf_iter_num_new];"
+ "1:"
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_next];"
+ "if r0 == 0 goto 4f;"
+ "r7 = *(u32 *)(r0 + 0);"
+ "r8 = *(u32 *)(r0 + 0);"
+ /* This jump can't be predicted and does not change r7 or r8 state. */
+ "if r7 > r8 goto 2f;"
+ /* Branch explored first ties r2 and r7 as having the same id. */
+ "r2 = r7;"
+ "goto 3f;"
+ "2:"
+ /* Branch explored second does not tie r2 and r7 but has a function call. */
+ "call %[bpf_get_prandom_u32];"
+ "3:"
+ /*
+ * A checkpoint.
+ * When first branch is explored, this would inject linked registers
+ * r2 and r7 into the jump history.
+ * When second branch is explored, this would be a cache hit point,
+ * triggering propagate_precision().
+ */
+ "if r7 <= 42 goto +0;"
+ /*
+ * Mark r7 as precise using an if condition that is always true
+ * (r7 was loaded as a u32, so it always fits in 32 bits).
+ * When reached via the second branch, this triggered a bug in the backtrack_insn()
+ * because r2 (tied to r7) was propagated as precise to a call.
+ */
+ "if r7 <= 0xffffFFFF goto +0;"
+ "goto 1b;"
+ "4:"
+ "r1 = %[iter];"
+ "call %[bpf_iter_num_destroy];"
+ :
+ : __imm_ptr(iter),
+ __imm(bpf_iter_num_new),
+ __imm(bpf_iter_num_next),
+ __imm(bpf_iter_num_destroy),
+ __imm(bpf_get_prandom_u32)
+ : __clobber_common, "r7", "r8"
+ );
+
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";