author     David S. Miller <davem@davemloft.net>   2018-05-07 23:35:08 -0400
committer  David S. Miller <davem@davemloft.net>   2018-05-07 23:35:08 -0400
commit     01adc4851a8090b46c7a5ed9cfc4b97e65abfbf4 (patch)
tree       2ae02593d7139962648dff203f3f9701e34ccbc3 /arch
parent     18b338f5f9539512e76fd9ebd4c6ca1a0e159e2b (diff)
parent     e94fa1d93117e7f1eb783dc9cae6c70650944449 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Minor conflict, a CHECK was placed into an if() statement in
net-next, whilst a newline was added to that CHECK call in 'net'.
Thanks to Daniel for the merge resolution.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/net/bpf_jit_32.c             |   77
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c         |   65
-rw-r--r--  arch/mips/net/ebpf_jit.c              |  104
-rw-r--r--  arch/powerpc/net/Makefile             |    2
-rw-r--r--  arch/powerpc/net/bpf_jit64.h          |   37
-rw-r--r--  arch/powerpc/net/bpf_jit_asm64.S      |  180
-rw-r--r--  arch/powerpc/net/bpf_jit_comp64.c     |  109
-rw-r--r--  arch/s390/net/Makefile                |    2
-rw-r--r--  arch/s390/net/bpf_jit.S               |  116
-rw-r--r--  arch/s390/net/bpf_jit.h               |   20
-rw-r--r--  arch/s390/net/bpf_jit_comp.c          |  127
-rw-r--r--  arch/sparc/net/Makefile               |    5
-rw-r--r--  arch/sparc/net/bpf_jit_64.h           |   29
-rw-r--r--  arch/sparc/net/bpf_jit_asm_64.S       |  162
-rw-r--r--  arch/sparc/net/bpf_jit_comp_64.c      |   79
-rw-r--r--  arch/x86/Kconfig                      |    2
-rw-r--r--  arch/x86/include/asm/nospec-branch.h  |   30
-rw-r--r--  arch/x86/net/Makefile                 |    7
-rw-r--r--  arch/x86/net/bpf_jit.S                |  154
-rw-r--r--  arch/x86/net/bpf_jit_comp.c           |  343
-rw-r--r--  arch/x86/net/bpf_jit_comp32.c         | 2419
21 files changed, 2602 insertions(+), 1467 deletions(-)
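For context on the hunks below: until this merge, every architecture's eBPF JIT carried hand-written support for the classic BPF_LD | BPF_ABS and BPF_LD | BPF_IND packet loads, and all of those cases are deleted here. A minimal userspace sketch of the semantics those cases implemented, reconstructed from the comments in the deleted hunks (the pkt struct and function name are illustrative stand-ins, not kernel API; negative ancillary offsets, which several of the deleted helpers handle specially, are simplified to out-of-bounds here):

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    struct pkt { const uint8_t *data; uint32_t len; };  /* stand-in for sk_buff */

    /* BPF_LD | BPF_ABS | BPF_W: R0 = ntohl(*(u32 *)(skb->data + imm));
     * BPF_IND adds the src register to the offset first. On an
     * out-of-bounds load the JITed code branches to the epilogue,
     * returning 0 from the program. */
    static uint32_t ld_abs_w(const struct pkt *skb, int32_t off)
    {
        uint32_t v;

        if (off < 0 || (uint32_t)off + sizeof(v) > skb->len)
            return 0;                     /* epilogue path */
        memcpy(&v, skb->data + off, sizeof(v));
        return ntohl(v);                  /* network to host byte order */
    }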
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index b5030e1a41d8..82689b999257 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1452,83 +1452,6 @@ exit:
emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code));
break;
- /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
- case BPF_LD | BPF_ABS | BPF_W:
- case BPF_LD | BPF_ABS | BPF_H:
- case BPF_LD | BPF_ABS | BPF_B:
- /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
- case BPF_LD | BPF_IND | BPF_W:
- case BPF_LD | BPF_IND | BPF_H:
- case BPF_LD | BPF_IND | BPF_B:
- {
- const u8 r4 = bpf2a32[BPF_REG_6][1]; /* r4 = ptr to sk_buff */
- const u8 r0 = bpf2a32[BPF_REG_0][1]; /*r0: struct sk_buff *skb*/
- /* rtn value */
- const u8 r1 = bpf2a32[BPF_REG_0][0]; /* r1: int k */
- const u8 r2 = bpf2a32[BPF_REG_1][1]; /* r2: unsigned int size */
- const u8 r3 = bpf2a32[BPF_REG_1][0]; /* r3: void *buffer */
- const u8 r6 = bpf2a32[TMP_REG_1][1]; /* r6: void *(*func)(..) */
- int size;
-
- /* Setting up first argument */
- emit(ARM_MOV_R(r0, r4), ctx);
-
- /* Setting up second argument */
- emit_a32_mov_i(r1, imm, false, ctx);
- if (BPF_MODE(code) == BPF_IND)
- emit_a32_alu_r(r1, src_lo, false, sstk, ctx,
- false, false, BPF_ADD);
-
- /* Setting up third argument */
- switch (BPF_SIZE(code)) {
- case BPF_W:
- size = 4;
- break;
- case BPF_H:
- size = 2;
- break;
- case BPF_B:
- size = 1;
- break;
- default:
- return -EINVAL;
- }
- emit_a32_mov_i(r2, size, false, ctx);
-
- /* Setting up fourth argument */
- emit(ARM_ADD_I(r3, ARM_SP, imm8m(SKB_BUFFER)), ctx);
-
- /* Setting up function pointer to call */
- emit_a32_mov_i(r6, (unsigned int)bpf_load_pointer, false, ctx);
- emit_blx_r(r6, ctx);
-
- emit(ARM_EOR_R(r1, r1, r1), ctx);
- /* Check if return address is NULL or not.
- * if NULL then jump to epilogue
- * else continue to load the value from retn address
- */
- emit(ARM_CMP_I(r0, 0), ctx);
- jmp_offset = epilogue_offset(ctx);
- check_imm24(jmp_offset);
- _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx);
-
- /* Load value from the address */
- switch (BPF_SIZE(code)) {
- case BPF_W:
- emit(ARM_LDR_I(r0, r0, 0), ctx);
- emit_rev32(r0, r0, ctx);
- break;
- case BPF_H:
- emit(ARM_LDRH_I(r0, r0, 0), ctx);
- emit_rev16(r0, r0, ctx);
- break;
- case BPF_B:
- emit(ARM_LDRB_I(r0, r0, 0), ctx);
- /* No need to reverse */
- break;
- }
- break;
- }
/* ST: *(size *)(dst + off) = imm */
case BPF_ST | BPF_MEM | BPF_W:
case BPF_ST | BPF_MEM | BPF_H:
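One subtlety in the deleted ARM hunk above: 32-bit ARM keeps each 64-bit eBPF register in a pair of core registers, so after the helper call the high half of R0 must be cleared, which the removed code did with EOR r1, r1, r1. A sketch of the effect, assuming the helper's 32-bit result sits in the low register of the pair:

    #include <stdint.h>

    static uint64_t zero_extend_r0(uint32_t low)
    {
        uint32_t high = 0;                /* EOR r1, r1, r1 */
        return ((uint64_t)high << 32) | low;
    }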
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index a93350451e8e..0b40c8fb0706 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -723,71 +723,6 @@ emit_cond_jmp:
emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
break;
- /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
- case BPF_LD | BPF_ABS | BPF_W:
- case BPF_LD | BPF_ABS | BPF_H:
- case BPF_LD | BPF_ABS | BPF_B:
- /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
- case BPF_LD | BPF_IND | BPF_W:
- case BPF_LD | BPF_IND | BPF_H:
- case BPF_LD | BPF_IND | BPF_B:
- {
- const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
- const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
- const u8 fp = bpf2a64[BPF_REG_FP];
- const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
- const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
- const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
- const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
- const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
- int size;
-
- emit(A64_MOV(1, r1, r6), ctx);
- emit_a64_mov_i(0, r2, imm, ctx);
- if (BPF_MODE(code) == BPF_IND)
- emit(A64_ADD(0, r2, r2, src), ctx);
- switch (BPF_SIZE(code)) {
- case BPF_W:
- size = 4;
- break;
- case BPF_H:
- size = 2;
- break;
- case BPF_B:
- size = 1;
- break;
- default:
- return -EINVAL;
- }
- emit_a64_mov_i64(r3, size, ctx);
- emit(A64_SUB_I(1, r4, fp, ctx->stack_size), ctx);
- emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
- emit(A64_BLR(r5), ctx);
- emit(A64_MOV(1, r0, A64_R(0)), ctx);
-
- jmp_offset = epilogue_offset(ctx);
- check_imm19(jmp_offset);
- emit(A64_CBZ(1, r0, jmp_offset), ctx);
- emit(A64_MOV(1, r5, r0), ctx);
- switch (BPF_SIZE(code)) {
- case BPF_W:
- emit(A64_LDR32(r0, r5, A64_ZR), ctx);
-#ifndef CONFIG_CPU_BIG_ENDIAN
- emit(A64_REV32(0, r0, r0), ctx);
-#endif
- break;
- case BPF_H:
- emit(A64_LDRH(r0, r5, A64_ZR), ctx);
-#ifndef CONFIG_CPU_BIG_ENDIAN
- emit(A64_REV16(0, r0, r0), ctx);
-#endif
- break;
- case BPF_B:
- emit(A64_LDRB(r0, r5, A64_ZR), ctx);
- break;
- }
- break;
- }
default:
pr_err_once("unknown opcode %02x\n", code);
return -EINVAL;
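The REV32/REV16 instructions in the deleted arm64 hunk are guarded by #ifndef CONFIG_CPU_BIG_ENDIAN because packet data is big-endian: a byte swap is needed exactly when the host is little-endian. The same logic in portable C, as a sketch (compiler byte-order macros and bswap builtins assumed available):

    #include <stdint.h>
    #include <string.h>

    static uint16_t load_be16(const uint8_t *p)
    {
        uint16_t v;

        memcpy(&v, p, sizeof(v));
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        v = __builtin_bswap16(v);         /* REV16 on arm64 */
    #endif
        return v;
    }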
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 3e2798bfea4f..7ba7df9c28fc 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -1267,110 +1267,6 @@ jeq_common:
return -EINVAL;
break;
- case BPF_LD | BPF_B | BPF_ABS:
- case BPF_LD | BPF_H | BPF_ABS:
- case BPF_LD | BPF_W | BPF_ABS:
- case BPF_LD | BPF_DW | BPF_ABS:
- ctx->flags |= EBPF_SAVE_RA;
-
- gen_imm_to_reg(insn, MIPS_R_A1, ctx);
- emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
-
- if (insn->imm < 0) {
- emit_const_to_reg(ctx, MIPS_R_T9, (u64)bpf_internal_load_pointer_neg_helper);
- } else {
- emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
- emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
- }
- goto ld_skb_common;
-
- case BPF_LD | BPF_B | BPF_IND:
- case BPF_LD | BPF_H | BPF_IND:
- case BPF_LD | BPF_W | BPF_IND:
- case BPF_LD | BPF_DW | BPF_IND:
- ctx->flags |= EBPF_SAVE_RA;
- src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
- if (src < 0)
- return src;
- ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
- if (ts == REG_32BIT_ZERO_EX) {
- /* sign extend */
- emit_instr(ctx, sll, MIPS_R_A1, src, 0);
- src = MIPS_R_A1;
- }
- if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
- emit_instr(ctx, daddiu, MIPS_R_A1, src, insn->imm);
- } else {
- gen_imm_to_reg(insn, MIPS_R_AT, ctx);
- emit_instr(ctx, daddu, MIPS_R_A1, MIPS_R_AT, src);
- }
- /* truncate to 32-bit int */
- emit_instr(ctx, sll, MIPS_R_A1, MIPS_R_A1, 0);
- emit_instr(ctx, daddiu, MIPS_R_A3, MIPS_R_SP, ctx->tmp_offset);
- emit_instr(ctx, slt, MIPS_R_AT, MIPS_R_A1, MIPS_R_ZERO);
-
- emit_const_to_reg(ctx, MIPS_R_T8, (u64)bpf_internal_load_pointer_neg_helper);
- emit_const_to_reg(ctx, MIPS_R_T9, (u64)ool_skb_header_pointer);
- emit_instr(ctx, addiu, MIPS_R_A2, MIPS_R_ZERO, size_to_len(insn));
- emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_T8, MIPS_R_AT);
-
-ld_skb_common:
- emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
- /* delay slot move */
- emit_instr(ctx, daddu, MIPS_R_A0, MIPS_R_S0, MIPS_R_ZERO);
-
- /* Check the error value */
- b_off = b_imm(exit_idx, ctx);
- if (is_bad_offset(b_off)) {
- target = j_target(ctx, exit_idx);
- if (target == (unsigned int)-1)
- return -E2BIG;
-
- if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
- ctx->offsets[this_idx] |= OFFSETS_B_CONV;
- ctx->long_b_conversion = 1;
- }
- emit_instr(ctx, bne, MIPS_R_V0, MIPS_R_ZERO, 4 * 3);
- emit_instr(ctx, nop);
- emit_instr(ctx, j, target);
- emit_instr(ctx, nop);
- } else {
- emit_instr(ctx, beq, MIPS_R_V0, MIPS_R_ZERO, b_off);
- emit_instr(ctx, nop);
- }
-
-#ifdef __BIG_ENDIAN
- need_swap = false;
-#else
- need_swap = true;
-#endif
- dst = MIPS_R_V0;
- switch (BPF_SIZE(insn->code)) {
- case BPF_B:
- emit_instr(ctx, lbu, dst, 0, MIPS_R_V0);
- break;
- case BPF_H:
- emit_instr(ctx, lhu, dst, 0, MIPS_R_V0);
- if (need_swap)
- emit_instr(ctx, wsbh, dst, dst);
- break;
- case BPF_W:
- emit_instr(ctx, lw, dst, 0, MIPS_R_V0);
- if (need_swap) {
- emit_instr(ctx, wsbh, dst, dst);
- emit_instr(ctx, rotr, dst, dst, 16);
- }
- break;
- case BPF_DW:
- emit_instr(ctx, ld, dst, 0, MIPS_R_V0);
- if (need_swap) {
- emit_instr(ctx, dsbh, dst, dst);
- emit_instr(ctx, dshd, dst, dst);
- }
- break;
- }
-
- break;
case BPF_ALU | BPF_END | BPF_FROM_BE:
case BPF_ALU | BPF_END | BPF_FROM_LE:
dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
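Unlike BPF_ABS, where the offset's sign is a compile-time constant, the deleted MIPS BPF_IND path only knows the effective offset at runtime, so it loads both helper addresses and selects one with the slt/movn pair. A sketch of that selection (a single four-argument signature is assumed here for illustration; the real bpf_internal_load_pointer_neg_helper and ool_skb_header_pointer prototypes differ):

    #include <stddef.h>

    typedef void *(*load_helper_t)(void *skb, int off, int len, void *buf);

    /* stubs standing in for the two kernel helpers named above */
    static void *neg_helper(void *skb, int off, int len, void *buf) { return NULL; }
    static void *hdr_helper(void *skb, int off, int len, void *buf) { return NULL; }

    /* emitted as: slt AT, A1, zero ; movn T9, T8, AT */
    static load_helper_t pick_helper(int off)
    {
        return off < 0 ? neg_helper : hdr_helper;
    }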
diff --git a/arch/powerpc/net/Makefile b/arch/powerpc/net/Makefile
index 02d369ca6a53..809f019d3cba 100644
--- a/arch/powerpc/net/Makefile
+++ b/arch/powerpc/net/Makefile
@@ -3,7 +3,7 @@
# Arch-specific network modules
#
ifeq ($(CONFIG_PPC64),y)
-obj-$(CONFIG_BPF_JIT) += bpf_jit_asm64.o bpf_jit_comp64.o
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp64.o
else
obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o
endif
diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
index 8bdef7ed28a8..3609be4692b3 100644
--- a/arch/powerpc/net/bpf_jit64.h
+++ b/arch/powerpc/net/bpf_jit64.h
@@ -20,7 +20,7 @@
* with our redzone usage.
*
* [ prev sp ] <-------------
- * [ nv gpr save area ] 8*8 |
+ * [ nv gpr save area ] 6*8 |
* [ tail_call_cnt ] 8 |
* [ local_tmp_var ] 8 |
* fp (r31) --> [ ebpf stack space ] upto 512 |
@@ -28,8 +28,8 @@
* sp (r1) ---> [ stack pointer ] --------------
*/
-/* for gpr non volatile registers BPG_REG_6 to 10, plus skb cache registers */
-#define BPF_PPC_STACK_SAVE (8*8)
+/* for gpr non volatile registers BPG_REG_6 to 10 */
+#define BPF_PPC_STACK_SAVE (6*8)
/* for bpf JIT code internal usage */
#define BPF_PPC_STACK_LOCALS 16
/* stack frame excluding BPF stack, ensure this is quadword aligned */
@@ -39,10 +39,8 @@
#ifndef __ASSEMBLY__
/* BPF register usage */
-#define SKB_HLEN_REG (MAX_BPF_JIT_REG + 0)
-#define SKB_DATA_REG (MAX_BPF_JIT_REG + 1)
-#define TMP_REG_1 (MAX_BPF_JIT_REG + 2)
-#define TMP_REG_2 (MAX_BPF_JIT_REG + 3)
+#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
+#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
/* BPF to ppc register mappings */
static const int b2p[] = {
@@ -63,40 +61,23 @@ static const int b2p[] = {
[BPF_REG_FP] = 31,
/* eBPF jit internal registers */
[BPF_REG_AX] = 2,
- [SKB_HLEN_REG] = 25,
- [SKB_DATA_REG] = 26,
[TMP_REG_1] = 9,
[TMP_REG_2] = 10
};
-/* PPC NVR range -- update this if we ever use NVRs below r24 */
-#define BPF_PPC_NVR_MIN 24
-
-/* Assembly helpers */
-#define DECLARE_LOAD_FUNC(func) u64 func(u64 r3, u64 r4); \
- u64 func##_negative_offset(u64 r3, u64 r4); \
- u64 func##_positive_offset(u64 r3, u64 r4);
-
-DECLARE_LOAD_FUNC(sk_load_word);
-DECLARE_LOAD_FUNC(sk_load_half);
-DECLARE_LOAD_FUNC(sk_load_byte);
-
-#define CHOOSE_LOAD_FUNC(imm, func) \
- (imm < 0 ? \
- (imm >= SKF_LL_OFF ? func##_negative_offset : func) : \
- func##_positive_offset)
+/* PPC NVR range -- update this if we ever use NVRs below r27 */
+#define BPF_PPC_NVR_MIN 27
#define SEEN_FUNC 0x1000 /* might call external helpers */
#define SEEN_STACK 0x2000 /* uses BPF stack */
-#define SEEN_SKB 0x4000 /* uses sk_buff */
-#define SEEN_TAILCALL 0x8000 /* uses tail calls */
+#define SEEN_TAILCALL 0x4000 /* uses tail calls */
struct codegen_context {
/*
* This is used to track register usage as well
* as calls to external helpers.
* - register usage is tracked with corresponding
- * bits (r3-r10 and r25-r31)
+ * bits (r3-r10 and r27-r31)
* - rest of the bits can be used to track other
* things -- for now, we use bits 16 to 23
* encoded in SEEN_* macros above
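The CHOOSE_LOAD_FUNC macro deleted above picked one of three entry points at JIT time from the constant offset: non-negative offsets take the positive-offset fast entry, negative offsets at or above SKF_LL_OFF take the negative-offset entry, and anything lower falls through to the fully checked variant. A sketch of that decision (SKF_LL_OFF is -0x200000, per the comment in the s390 assembly deleted further down):

    #define SKF_LL_OFF (-0x200000)

    enum load_variant { LOAD_POS, LOAD_NEG, LOAD_GENERIC };

    static enum load_variant choose_load_func(int imm)
    {
        if (imm >= 0)
            return LOAD_POS;          /* func##_positive_offset */
        if (imm >= SKF_LL_OFF)
            return LOAD_NEG;          /* func##_negative_offset */
        return LOAD_GENERIC;          /* func: full runtime checks */
    }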
diff --git a/arch/powerpc/net/bpf_jit_asm64.S b/arch/powerpc/net/bpf_jit_asm64.S
deleted file mode 100644
index 7e4c51430b84..000000000000
--- a/arch/powerpc/net/bpf_jit_asm64.S
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * bpf_jit_asm64.S: Packet/header access helper functions
- * for PPC64 BPF compiler.
- *
- * Copyright 2016, Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
- * IBM Corporation
- *
- * Based on bpf_jit_asm.S by Matt Evans
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <asm/ppc_asm.h>
-#include <asm/ptrace.h>
-#include "bpf_jit64.h"
-
-/*
- * All of these routines are called directly from generated code,
- * with the below register usage:
- * r27 skb pointer (ctx)
- * r25 skb header length
- * r26 skb->data pointer
- * r4 offset
- *
- * Result is passed back in:
- * r8 data read in host endian format (accumulator)
- *
- * r9 is used as a temporary register
- */
-
-#define r_skb r27
-#define r_hlen r25
-#define r_data r26
-#define r_off r4
-#define r_val r8
-#define r_tmp r9
-
-_GLOBAL_TOC(sk_load_word)
- cmpdi r_off, 0
- blt bpf_slow_path_word_neg
- b sk_load_word_positive_offset
-
-_GLOBAL_TOC(sk_load_word_positive_offset)
- /* Are we accessing past headlen? */
- subi r_tmp, r_hlen, 4
- cmpd r_tmp, r_off
- blt bpf_slow_path_word
- /* Nope, just hitting the header. cr0 here is eq or gt! */
- LWZX_BE r_val, r_data, r_off
- blr /* Return success, cr0 != LT */
-
-_GLOBAL_TOC(sk_load_half)
- cmpdi r_off, 0
- blt bpf_slow_path_half_neg
- b sk_load_half_positive_offset
-
-_GLOBAL_TOC(sk_load_half_positive_offset)
- subi r_tmp, r_hlen, 2
- cmpd r_tmp, r_off
- blt bpf_slow_path_half
- LHZX_BE r_val, r_data, r_off
- blr
-
-_GLOBAL_TOC(sk_load_byte)
- cmpdi r_off, 0
- blt bpf_slow_path_byte_neg
- b sk_load_byte_positive_offset
-
-_GLOBAL_TOC(sk_load_byte_positive_offset)
- cmpd r_hlen, r_off
- ble bpf_slow_path_byte
- lbzx r_val, r_data, r_off
- blr
-
-/*
- * Call out to skb_copy_bits:
- * Allocate a new stack frame here to remain ABI-compliant in
- * stashing LR.
- */
-#define bpf_slow_path_common(SIZE) \
- mflr r0; \
- std r0, PPC_LR_STKOFF(r1); \
- stdu r1, -(STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS)(r1); \
- mr r3, r_skb; \
- /* r4 = r_off as passed */ \
- addi r5, r1, STACK_FRAME_MIN_SIZE; \
- li r6, SIZE; \
- bl skb_copy_bits; \
- nop; \
- /* save r5 */ \
- addi r5, r1, STACK_FRAME_MIN_SIZE; \
- /* r3 = 0 on success */ \
- addi r1, r1, STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_LOCALS; \
- ld r0, PPC_LR_STKOFF(r1); \
- mtlr r0; \
- cmpdi r3, 0; \
- blt bpf_error; /* cr0 = LT */
-
-bpf_slow_path_word:
- bpf_slow_path_common(4)
- /* Data value is on stack, and cr0 != LT */
- LWZX_BE r_val, 0, r5
- blr
-
-bpf_slow_path_half:
- bpf_slow_path_common(2)
- LHZX_BE r_val, 0, r5
- blr
-
-bpf_slow_path_byte:
- bpf_slow_path_common(1)
- lbzx r_val, 0, r5
- blr
-
-/*
- * Call out to bpf_internal_load_pointer_neg_helper
- */
-#define sk_negative_common(SIZE) \
- mflr r0; \
- std r0, PPC_LR_STKOFF(r1); \
- stdu r1, -STACK_FRAME_MIN_SIZE(r1); \
- mr r3, r_skb; \
- /* r4 = r_off, as passed */ \
- li r5, SIZE; \
- bl bpf_internal_load_pointer_neg_helper; \
- nop; \
- addi r1, r1, STACK_FRAME_MIN_SIZE; \
- ld r0, PPC_LR_STKOFF(r1); \
- mtlr r0; \
- /* R3 != 0 on success */ \
- cmpldi r3, 0; \
- beq bpf_error_slow; /* cr0 = EQ */
-
-bpf_slow_path_word_neg:
- lis r_tmp, -32 /* SKF_LL_OFF */
- cmpd r_off, r_tmp /* addr < SKF_* */
- blt bpf_error /* cr0 = LT */
- b sk_load_word_negative_offset
-
-_GLOBAL_TOC(sk_load_word_negative_offset)
- sk_negative_common(4)
- LWZX_BE r_val, 0, r3
- blr
-
-bpf_slow_path_half_neg:
- lis r_tmp, -32 /* SKF_LL_OFF */
- cmpd r_off, r_tmp /* addr < SKF_* */
- blt bpf_error /* cr0 = LT */
- b sk_load_half_negative_offset
-
-_GLOBAL_TOC(sk_load_half_negative_offset)
- sk_negative_common(2)
- LHZX_BE r_val, 0, r3
- blr
-
-bpf_slow_path_byte_neg:
- lis r_tmp, -32 /* SKF_LL_OFF */
- cmpd r_off, r_tmp /* addr < SKF_* */
- blt bpf_error /* cr0 = LT */
- b sk_load_byte_negative_offset
-
-_GLOBAL_TOC(sk_load_byte_negative_offset)
- sk_negative_common(1)
- lbzx r_val, 0, r3
- blr
-
-bpf_error_slow:
- /* fabricate a cr0 = lt */
- li r_tmp, -1
- cmpdi r_tmp, 0
-bpf_error:
- /*
- * Entered with cr0 = lt
- * Generated code will 'blt epilogue', returning 0.
- */
- li r_val, 0
- blr
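Every slow path in the file deleted above reduces to the same pattern: copy the bytes through skb_copy_bits() into a small stack buffer, then load the result big-endian. A hedged C rendering of the word-sized case (skb_copy_bits and be32_to_cpup are real kernel APIs; this wrapper is illustrative, not the removed code itself):

    #include <linux/errno.h>
    #include <linux/skbuff.h>

    static int bpf_slow_path_word(const struct sk_buff *skb, int off, u32 *val)
    {
        u8 buf[4];

        if (skb_copy_bits(skb, off, buf, sizeof(buf)) < 0)
            return -EFAULT;           /* the asm version signals cr0 = LT */
        *val = be32_to_cpup((__be32 *)buf);
        return 0;
    }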
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 0ef3d9580e98..1bdb1aff0619 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -59,7 +59,7 @@ static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
* [ prev sp ] <-------------
* [ ... ] |
* sp (r1) ---> [ stack pointer ] --------------
- * [ nv gpr save area ] 8*8
+ * [ nv gpr save area ] 6*8
* [ tail_call_cnt ] 8
* [ local_tmp_var ] 8
* [ unused red zone ] 208 bytes protected
@@ -88,21 +88,6 @@ static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
BUG();
}
-static void bpf_jit_emit_skb_loads(u32 *image, struct codegen_context *ctx)
-{
- /*
- * Load skb->len and skb->data_len
- * r3 points to skb
- */
- PPC_LWZ(b2p[SKB_HLEN_REG], 3, offsetof(struct sk_buff, len));
- PPC_LWZ(b2p[TMP_REG_1], 3, offsetof(struct sk_buff, data_len));
- /* header_len = len - data_len */
- PPC_SUB(b2p[SKB_HLEN_REG], b2p[SKB_HLEN_REG], b2p[TMP_REG_1]);
-
- /* skb->data pointer */
- PPC_BPF_LL(b2p[SKB_DATA_REG], 3, offsetof(struct sk_buff, data));
-}
-
static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
int i;
@@ -145,18 +130,6 @@ static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
if (bpf_is_seen_register(ctx, i))
PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
- /*
- * Save additional non-volatile regs if we cache skb
- * Also, setup skb data
- */
- if (ctx->seen & SEEN_SKB) {
- PPC_BPF_STL(b2p[SKB_HLEN_REG], 1,
- bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
- PPC_BPF_STL(b2p[SKB_DATA_REG], 1,
- bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
- bpf_jit_emit_skb_loads(image, ctx);
- }
-
/* Setup frame pointer to point to the bpf stack area */
if (bpf_is_seen_register(ctx, BPF_REG_FP))
PPC_ADDI(b2p[BPF_REG_FP], 1,
@@ -172,14 +145,6 @@ static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx
if (bpf_is_seen_register(ctx, i))
PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));
- /* Restore non-volatile registers used for skb cache */
- if (ctx->seen & SEEN_SKB) {
- PPC_BPF_LL(b2p[SKB_HLEN_REG], 1,
- bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
- PPC_BPF_LL(b2p[SKB_DATA_REG], 1,
- bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
- }
-
/* Tear down our stack frame */
if (bpf_has_stack_frame(ctx)) {
PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size);
@@ -753,23 +718,10 @@ emit_clear:
ctx->seen |= SEEN_FUNC;
func = (u8 *) __bpf_call_base + imm;
- /* Save skb pointer if we need to re-cache skb data */
- if ((ctx->seen & SEEN_SKB) &&
- bpf_helper_changes_pkt_data(func))
- PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));
-
bpf_jit_emit_func_call(image, ctx, (u64)func);
/* move return value from r3 to BPF_REG_0 */
PPC_MR(b2p[BPF_REG_0], 3);
-
- /* refresh skb cache */
- if ((ctx->seen & SEEN_SKB) &&
- bpf_helper_changes_pkt_data(func)) {
- /* reload skb pointer to r3 */
- PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
- bpf_jit_emit_skb_loads(image, ctx);
- }
break;
/*
@@ -887,65 +839,6 @@ cond_branch:
break;
/*
- * Loads from packet header/data
- * Assume 32-bit input value in imm and X (src_reg)
- */
-
- /* Absolute loads */
- case BPF_LD | BPF_W | BPF_ABS:
- func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_word);
- goto common_load_abs;
- case BPF_LD | BPF_H | BPF_ABS:
- func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_half);
- goto common_load_abs;
- case BPF_LD | BPF_B | BPF_ABS:
- func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_byte);
-common_load_abs:
- /*
- * Load from [imm]
- * Load into r4, which can just be passed onto
- * skb load helpers as the second parameter
- */
- PPC_LI32(4, imm);
- goto common_load;
-
- /* Indirect loads */
- case BPF_LD | BPF_W | BPF_IND:
- func = (u8 *)sk_load_word;
- goto common_load_ind;
- case BPF_LD | BPF_H | BPF_IND:
- func = (u8 *)sk_load_half;
- goto common_load_ind;
- case BPF_LD | BPF_B | BPF_IND:
- func = (u8 *)sk_load_byte;
-common_load_ind:
- /*
- * Load from [src_reg + imm]
- * Treat src_reg as a 32-bit value
- */
- PPC_EXTSW(4, src_reg);
- if (imm) {
- if (imm >= -32768 && imm < 32768)
- PPC_ADDI(4, 4, IMM_L(imm));
- else {
- PPC_LI32(b2p[TMP_REG_1], imm);
- PPC_ADD(4, 4, b2p[TMP_REG_1]);
- }
- }
-
-common_load:
- ctx->seen |= SEEN_SKB;
- ctx->seen |= SEEN_FUNC;
- bpf_jit_emit_func_call(image, ctx, (u64)func);
-
- /*
- * Helper returns 'lt' condition on error, and an
- * appropriate return value in BPF_REG_0
- */
- PPC_BCC(COND_LT, exit_addr);
- break;
-
- /*
* Tail call
*/
case BPF_JMP | BPF_TAIL_CALL:
diff --git a/arch/s390/net/Makefile b/arch/s390/net/Makefile
index e0d5f245e42b..d4663b4bf509 100644
--- a/arch/s390/net/Makefile
+++ b/arch/s390/net/Makefile
@@ -2,4 +2,4 @@
#
# Arch-specific network modules
#
-obj-$(CONFIG_BPF_JIT) += bpf_jit.o bpf_jit_comp.o
+obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
deleted file mode 100644
index 25bb4643c4f4..000000000000
--- a/arch/s390/net/bpf_jit.S
+++ /dev/null
@@ -1,116 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * BPF Jit compiler for s390, help functions.
- *
- * Copyright IBM Corp. 2012,2015
- *
- * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
- * Michael Holzheu <holzheu@linux.vnet.ibm.com>
- */
-
-#include <linux/linkage.h>
-#include "bpf_jit.h"
-
-/*
- * Calling convention:
- * registers %r7-%r10, %r11,%r13, and %r15 are call saved
- *
- * Input (64 bit):
- * %r3 (%b2) = offset into skb data
- * %r6 (%b5) = return address
- * %r7 (%b6) = skb pointer
- * %r12 = skb data pointer
- *
- * Output:
- * %r14= %b0 = return value (read skb value)
- *
- * Work registers: %r2,%r4,%r5,%r14
- *
- * skb_copy_bits takes 4 parameters:
- * %r2 = skb pointer
- * %r3 = offset into skb data
- * %r4 = pointer to temp buffer
- * %r5 = length to copy
- * Return value in %r2: 0 = ok
- *
- * bpf_internal_load_pointer_neg_helper takes 3 parameters:
- * %r2 = skb pointer
- * %r3 = offset into data
- * %r4 = length to copy
- * Return value in %r2: Pointer to data
- */
-
-#define SKF_MAX_NEG_OFF -0x200000 /* SKF_LL_OFF from filter.h */
-
-/*
- * Load SIZE bytes from SKB
- */
-#define sk_load_common(NAME, SIZE, LOAD) \
-ENTRY(sk_load_##NAME); \
- ltgr %r3,%r3; /* Is offset negative? */ \
- jl sk_load_##NAME##_slow_neg; \
-ENTRY(sk_load_##NAME##_pos); \
- aghi %r3,SIZE; /* Offset + SIZE */ \
- clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \
- jh sk_load_##NAME##_slow; \
- LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \
- b OFF_OK(%r6); /* Return */ \
- \
-sk_load_##NAME##_slow:; \
- lgr %r2,%r7; /* Arg1 = skb pointer */ \
- aghi %r3,-SIZE; /* Arg2 = offset */ \
- la %r4,STK_OFF_TMP(%r15); /* Arg3 = temp bufffer */ \
- lghi %r5,SIZE; /* Arg4 = size */ \
- brasl %r14,skb_copy_bits; /* Get data from skb */ \
- LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp bufffer */ \
- ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \
- br %r6; /* Return */
-
-sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */
-sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */
-
-/*
- * Load 1 byte from SKB (optimized version)
- */
- /* r14 = *(u8 *) (skb->data+offset) */
-ENTRY(sk_load_byte)
- ltgr %r3,%r3 # Is offset negative?
- jl sk_load_byte_slow_neg
-ENTRY(sk_load_byte_pos)
- clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen?
- jnl sk_load_byte_slow
- llgc %r14,0(%r3,%r12) # Get byte from skb
- b OFF_OK(%r6) # Return OK
-
-sk_load_byte_slow:
- lgr %r2,%r7 # Arg1 = skb pointer
- # Arg2 = offset
- la %r4,STK_OFF_TMP(%r15) # Arg3 = pointer to temp buffer
- lghi %r5,1 # Arg4 = size (1 byte)
- brasl %r14,skb_copy_bits # Get data from skb
- llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer
- ltgr %r2,%r2 # Set cc to (%r2 != 0)
- br %r6 # Return cc
-
-#define sk_negative_common(NAME, SIZE, LOAD) \
-sk_load_##NAME##_slow_neg:; \
- cgfi %r3,SKF_MAX_NEG_OFF; \
- jl bpf_error; \
- lgr %r2,%r7; /* Arg1 = skb pointer */ \
- /* Arg2 = offset */ \
- lghi %r4,SIZE; /* Arg3 = size */ \
- brasl %r14,bpf_internal_load_pointer_neg_helper; \
- ltgr %r2,%r2; \
- jz bpf_error; \
- LOAD %r14,0(%r2); /* Get data from pointer */ \
- xr %r3,%r3; /* Set cc to zero */ \
- br %r6; /* Return cc */
-
-sk_negative_common(word, 4, llgf)
-sk_negative_common(half, 2, llgh)
-sk_negative_common(byte, 1, llgc)
-
-bpf_error:
-# force a return 0 from jit handler
- ltgr %r15,%r15 # Set condition code
- br %r6
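The fast path in the deleted s390 helpers hinges on a single comparison, offset plus access size against the linear header length, where hlen = skb->len - skb->data_len (the same value the removed PPC64 prologue cached in r25). A sketch of that check:

    #include <stdbool.h>
    #include <stdint.h>

    /* "aghi %r3,SIZE ; clg %r3,STK_OFF_HLEN(%r15) ; jh slow" above */
    static bool in_linear_header(uint32_t off, uint32_t size, uint32_t hlen)
    {
        return off + size <= hlen;    /* fast path: read skb->data directly */
    }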
diff --git a/arch/s390/net/bpf_jit.h b/arch/s390/net/bpf_jit.h
index 5e1e5133132d..7822ea92e54a 100644
--- a/arch/s390/net/bpf_jit.h
+++ b/arch/s390/net/bpf_jit.h
@@ -16,9 +16,6 @@
#include <linux/filter.h>
#include <linux/types.h>
-extern u8 sk_load_word_pos[], sk_load_half_pos[], sk_load_byte_pos[];
-extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
-
#endif /* __ASSEMBLY__ */
/*
@@ -36,15 +33,6 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
* | | |
* | BPF stack | |
* | | |
- * +---------------+ |
- * | 8 byte skbp | |
- * R15+176 -> +---------------+ |
- * | 8 byte hlen | |
- * R15+168 -> +---------------+ |
- * | 4 byte align | |
- * +---------------+ |
- * | 4 byte temp | |
- * | for bpf_jit.S | |
* R15+160 -> +---------------+ |
* | new backchain | |
* R15+152 -> +---------------+ |
@@ -57,17 +45,11 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
* The stack size used by the BPF program ("BPF stack" above) is passed
* via "aux->stack_depth".
*/
-#define STK_SPACE_ADD (8 + 8 + 4 + 4 + 160)
+#define STK_SPACE_ADD (160)
#define STK_160_UNUSED (160 - 12 * 8)
#define STK_OFF (STK_SPACE_ADD - STK_160_UNUSED)
-#define STK_OFF_TMP 160 /* Offset of tmp buffer on stack */
-#define STK_OFF_HLEN 168 /* Offset of SKB header length on stack */
-#define STK_OFF_SKBP 176 /* Offset of SKB pointer on stack */
#define STK_OFF_R6 (160 - 11 * 8) /* Offset of r6 on stack */
#define STK_OFF_TCCNT (160 - 12 * 8) /* Offset of tail_call_cnt on stack */
-/* Offset to skip condition code check */
-#define OFF_OK 4
-
#endif /* __ARCH_S390_NET_BPF_JIT_H */
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 78a19c93b380..b020bea040b7 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -47,23 +47,21 @@ struct bpf_jit {
#define BPF_SIZE_MAX 0xffff /* Max size for program (16 bit branches) */
-#define SEEN_SKB 1 /* skb access */
-#define SEEN_MEM 2 /* use mem[] for temporary storage */
-#define SEEN_RET0 4 /* ret0_ip points to a valid return 0 */
-#define SEEN_LITERAL 8 /* code uses literals */
-#define SEEN_FUNC 16 /* calls C functions */
-#define SEEN_TAIL_CALL 32 /* code uses tail calls */
-#define SEEN_REG_AX 64 /* code uses constant blinding */
-#define SEEN_STACK (SEEN_FUNC | SEEN_MEM | SEEN_SKB)
+#define SEEN_MEM (1 << 0) /* use mem[] for temporary storage */
+#define SEEN_RET0 (1 << 1) /* ret0_ip points to a valid return 0 */
+#define SEEN_LITERAL (1 << 2) /* code uses literals */
+#define SEEN_FUNC (1 << 3) /* calls C functions */
+#define SEEN_TAIL_CALL (1 << 4) /* code uses tail calls */
+#define SEEN_REG_AX (1 << 5) /* code uses constant blinding */
+#define SEEN_STACK (SEEN_FUNC | SEEN_MEM)
/*
* s390 registers
*/
#define REG_W0 (MAX_BPF_JIT_REG + 0) /* Work register 1 (even) */
#define REG_W1 (MAX_BPF_JIT_REG + 1) /* Work register 2 (odd) */
-#define REG_SKB_DATA (MAX_BPF_JIT_REG + 2) /* SKB data register */
-#define REG_L (MAX_BPF_JIT_REG + 3) /* Literal pool register */
-#define REG_15 (MAX_BPF_JIT_REG + 4) /* Register 15 */
+#define REG_L (MAX_BPF_JIT_REG + 2) /* Literal pool register */
+#define REG_15 (MAX_BPF_JIT_REG + 3) /* Register 15 */
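With SEEN_SKB gone, the remaining s390 SEEN bits are repacked as explicit (1 << n) values and SEEN_STACK shrinks to SEEN_FUNC | SEEN_MEM. A small usage sketch, assuming a seen bitmask accumulated during code generation as in the hunk above (needs_stack_frame is a hypothetical name):

    #define SEEN_MEM   (1 << 0)   /* use mem[] for temporary storage */
    #define SEEN_FUNC  (1 << 3)   /* calls C functions */
    #define SEEN_STACK (SEEN_FUNC | SEEN_MEM)

    /* the prologue builds a full stack frame only when required */
    static int needs_stack_frame(unsigned int seen)
    {
        return (seen & SEEN_STACK) != 0;
    }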