u32 ax_reg = bpf_to_ppc(BPF_REG_AX);
u32 tmp_reg = bpf_to_ppc(TMP_REG);
u32 size = BPF_SIZE(code);
+ u32 save_reg, ret_reg;
s16 off = insn[i].off;
s32 imm = insn[i].imm;
bool func_addr_fixed;
* BPF_STX ATOMIC (atomic ops)
*/
case BPF_STX | BPF_ATOMIC | BPF_W:
+ save_reg = _R0;
+ ret_reg = src_reg;
+
bpf_set_seen_register(ctx, tmp_reg);
bpf_set_seen_register(ctx, ax_reg);
case BPF_XOR | BPF_FETCH:
EMIT(PPC_RAW_XOR(_R0, _R0, src_reg));
break;
+ case BPF_CMPXCHG:
+ /*
+ * Return old value in BPF_REG_0 for BPF_CMPXCHG &
+ * in src_reg for other cases.
+ */
+ ret_reg = bpf_to_ppc(BPF_REG_0);
+
+ /* Compare with old value in BPF_REG_0 */
+ EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), _R0));
+ /* Don't set if different from old value */
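+ /*
+ * Branch target (ctx->idx + 3) * 4 is just past the stwcx. and the
+ * retry branch emitted below, i.e. the mr that copies the old value
+ * into ret_reg.
+ */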
+ PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
+ fallthrough;
+ case BPF_XCHG:
+ save_reg = src_reg;
+ break;
default:
pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
code, i);
return -EOPNOTSUPP;
}

/* store new value */
- EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
+ EMIT(PPC_RAW_STWCX(save_reg, tmp_reg, dst_reg));
/* we're done if this succeeded */
PPC_BCC_SHORT(COND_NE, tmp_idx);
/* For the BPF_FETCH variant, get old data into src_reg */
if (imm & BPF_FETCH) {
- EMIT(PPC_RAW_MR(src_reg, ax_reg));
+ EMIT(PPC_RAW_MR(ret_reg, ax_reg));
if (!fp->aux->verifier_zext)
- EMIT(PPC_RAW_LI(src_reg_h, 0));
+ EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
}
break;
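
Taken together, this keeps the existing larx/stcx. retry loop (the bne after the stwcx. branches back to tmp_idx, the start of the loop, when the reservation is lost) and only changes which register is stored (save_reg) and which register receives the old value (ret_reg: BPF_REG_0 for BPF_CMPXCHG, src_reg otherwise).

As a rough illustration of the BPF side, a fragment like the one below is expected (with a recent clang and -mcpu=v3, as in the kernel's atomics selftests) to lower to BPF_ATOMIC | BPF_CMPXCHG | BPF_W and BPF_ATOMIC | BPF_XCHG | BPF_W and so exercise this path; the program, section and variable names are made up for the example:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical 32-bit word in global (.bss map backed) storage. */
static __u32 lockw;

SEC("xdp")
int cmpxchg_demo(struct xdp_md *ctx)
{
	/* Expected to lower to BPF_ATOMIC | BPF_CMPXCHG | BPF_W: the old
	 * value is returned in BPF_REG_0, which is what ret_reg selects
	 * above. */
	__u32 old = __sync_val_compare_and_swap(&lockw, 0, 1);

	/* Expected to lower to BPF_ATOMIC | BPF_XCHG | BPF_W: the old
	 * value comes back in src_reg. */
	__u32 prev = __sync_lock_test_and_set(&lockw, old);

	return prev ? XDP_PASS : XDP_DROP;
}

char _license[] SEC("license") = "GPL";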