The prog_test that's added depends on Clang/LLVM features added by
Yonghong in commit 286daafd6512 (was https://reviews.llvm.org/D72184).

Note the use of a define called ENABLE_ATOMICS_TESTS: this is used to:

 - Avoid breaking the build for people on old versions of Clang
 - Avoid needing separate lists of test objects for no_alu32, where
   atomics are not supported even if Clang has the feature.

The atomics_test.o BPF object is built unconditionally both for
test_progs and test_progs-no_alu32. For test_progs, if Clang supports
atomics, ENABLE_ATOMICS_TESTS is defined, so it includes the proper
test code. Otherwise, progs and global vars are defined anyway, as
stubs; this means that the skeleton user code still builds.

The atomics_test.o userspace object is built once and used for both
test_progs and test_progs-no_alu32. A variable called skip_tests is
defined in the BPF object's data section, which tells the userspace
object whether to skip the atomics test.

Signed-off-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20210114181751.768687-11-jackmanb@google.com
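The stub pattern described above looks roughly like this (a minimal
sketch, not the actual selftest source; aside from skip_tests and
ENABLE_ATOMICS_TESTS, the global and prog names are illustrative):

	#include <linux/bpf.h>
	#include <stdbool.h>
	#include <bpf/bpf_helpers.h>

	/* When Clang lacks BPF atomics support, ENABLE_ATOMICS_TESTS is not
	 * defined: the global and the prog still exist, so the generated
	 * skeleton (and hence the userspace test) still builds, but the prog
	 * body is an empty stub and skip_tests tells userspace to skip.
	 */
	#ifdef ENABLE_ATOMICS_TESTS
	bool skip_tests __attribute((__section__(".data"))) = false;
	#else
	bool skip_tests = true;
	#endif

	__u64 xor_value = 0x110; /* illustrative global, not from the patch */

	SEC("fentry/bpf_fentry_test1")
	int do_xor(const void *ctx)
	{
	#ifdef ENABLE_ATOMICS_TESTS
		__sync_fetch_and_xor(&xor_value, 0x011);
	#endif
		return 0;
	}

	char _license[] SEC("license") = "GPL";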
{
	"BPF_ATOMIC XOR without fetch",
	.insns = {
		/* val = 0x110; */
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
		/* atomic_xor(&val, 0x011); */
		BPF_MOV64_IMM(BPF_REG_1, 0x011),
		BPF_ATOMIC_OP(BPF_DW, BPF_XOR, BPF_REG_10, BPF_REG_1, -8),
		/* if (val != 0x101) exit(2); */
		BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0x101, 2),
		BPF_MOV64_IMM(BPF_REG_0, 2),
		BPF_EXIT_INSN(),
		/* r1 should not be clobbered, no BPF_FETCH flag */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x011, 1),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
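In C terms, the test above encodes roughly the following (a sketch
written as plain userspace C using the __sync builtins that Clang
compiles to BPF atomics, for illustration only):

	#include <assert.h>
	#include <stdint.h>

	/* "XOR without fetch": memory is updated atomically, the old value
	 * is not read back, and the source operand must not be clobbered.
	 */
	int main(void)
	{
		uint64_t val = 0x110;
		uint64_t src = 0x011;

		(void)__sync_fetch_and_xor(&val, src); /* result unused: no BPF_FETCH */

		assert(val == 0x101); /* 0x110 ^ 0x011 == 0x101 */
		assert(src == 0x011); /* the r1 analogue survives the operation */
		return 0;
	}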
{
	"BPF_ATOMIC XOR with fetch",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 123),
		/* val = 0x110; */
		BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0x110),
		/* old = atomic_fetch_xor(&val, 0x011); */
		BPF_MOV64_IMM(BPF_REG_1, 0x011),
		BPF_ATOMIC_OP(BPF_DW, BPF_XOR | BPF_FETCH, BPF_REG_10, BPF_REG_1, -8),
		/* if (old != 0x110) exit(3); */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
		BPF_MOV64_IMM(BPF_REG_0, 3),
		BPF_EXIT_INSN(),
		/* if (val != 0x101) exit(2); */
		BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x101, 2),
		BPF_MOV64_IMM(BPF_REG_0, 2),
		BPF_EXIT_INSN(),
		/* Check R0 wasn't clobbered (for fear of x86 JIT bug) */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 123, 2),
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_EXIT_INSN(),
		/* exit(0); */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},
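The fetch variant above differs only in that the pre-operation value
comes back in the source register, i.e. roughly (same illustrative
userspace-C style as before):

	#include <assert.h>
	#include <stdint.h>

	/* "XOR with fetch": BPF_FETCH makes the source register receive the
	 * value that memory held before the atomic update.
	 */
	int main(void)
	{
		uint64_t val = 0x110;
		uint64_t old = __sync_fetch_and_xor(&val, 0x011);

		assert(old == 0x110); /* old value lands where r1 was */
		assert(val == 0x101); /* memory updated as in the non-fetch case */
		return 0;
	}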
{
	"BPF_ATOMIC XOR with fetch 32bit",
	.insns = {
		/* r0 = (s64) -1 */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 1),
		/* val = 0x110; */
		BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0x110),
		/* old = atomic_fetch_xor(&val, 0x011); */
		BPF_MOV32_IMM(BPF_REG_1, 0x011),
		BPF_ATOMIC_OP(BPF_W, BPF_XOR | BPF_FETCH, BPF_REG_10, BPF_REG_1, -4),
		/* if (old != 0x110) exit(3); */
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x110, 2),
		BPF_MOV32_IMM(BPF_REG_0, 3),
		BPF_EXIT_INSN(),
		/* if (val != 0x101) exit(2); */
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -4),
		BPF_JMP32_IMM(BPF_JEQ, BPF_REG_1, 0x101, 2),
		BPF_MOV32_IMM(BPF_REG_0, 2),
		BPF_EXIT_INSN(),
		/* Check R0 wasn't clobbered (for fear of x86 JIT bug)
		 * It should be -1 so add 1 to get exit code.
		 */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
},