author     David S. Miller <davem@davemloft.net>  2018-01-20 22:03:46 -0500
committer  David S. Miller <davem@davemloft.net>  2018-01-20 22:03:46 -0500
commit     ea9722e2650db8f0a0d9ef2e391c95285ef991cd (patch)
tree       c06d0fafd5d756733a329b726ad66986e471eecf
parent     8565d26bcb2ff6df646e946d2913fcf706d46b66 (diff)
parent     1391040b6570584c177a238eeac01930beabbaa4 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2018-01-19

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) bpf array map HW offload, from Jakub.

2) support for bpf_get_next_key() for LPM map, from Yonghong.

3) test_verifier now runs loaded programs, from Alexei.

4) xdp cpumap monitoring, from Jesper.

5) variety of tests, cleanups and small x64 JIT optimization, from Daniel.

6) user space can now retrieve HW JITed program, from Jiong.

Note there is a minor conflict between Russell's arm32 JIT fixes and
removal of bpf_jit_enable variable by Daniel which should be resolved
by keeping Russell's comment and removing that variable.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
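As background for item 6 (and for the jited_image/jited_len plumbing visible further down in offload.c and include/linux/bpf.h), the JIT image is read back from user space through the existing BPF_OBJ_GET_INFO_BY_FD command. The following is only a sketch of that retrieval path, not code from this series; it assumes 'prog_fd' refers to an already-loaded program and calls bpf(2) directly rather than going through the bpftool/libbpf helpers this series also touches.

/* Sketch (not part of this pull request): dump the (HW-)JITed image of a
 * loaded program via BPF_OBJ_GET_INFO_BY_FD.  Error handling abbreviated.
 */
#include <linux/bpf.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int dump_jited_image(int prog_fd)
{
	struct bpf_prog_info info = {};
	union bpf_attr attr = {};
	unsigned char *image;
	unsigned int i, len;

	/* First call: query sizes only, in particular jited_prog_len. */
	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = sizeof(info);
	attr.info.info = (__u64)(unsigned long)&info;
	if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
		return -1;

	len = info.jited_prog_len;
	if (!len)
		return -1;		/* not JITed or image not exposed */
	image = malloc(len);
	if (!image)
		return -1;

	/* Second call: point jited_prog_insns at our buffer to get the bytes. */
	memset(&info, 0, sizeof(info));
	info.jited_prog_len = len;
	info.jited_prog_insns = (__u64)(unsigned long)image;
	if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr))) {
		free(image);
		return -1;
	}

	for (i = 0; i < info.jited_prog_len; i++)
		printf("%02x%c", image[i], (i % 16 == 15) ? '\n' : ' ');
	printf("\n");
	free(image);
	return 0;
}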
-rw-r--r--  arch/arm/net/bpf_jit_32.c | 2
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c | 2
-rw-r--r--  arch/mips/net/bpf_jit.c | 2
-rw-r--r--  arch/mips/net/ebpf_jit.c | 2
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c | 2
-rw-r--r--  arch/powerpc/net/bpf_jit_comp64.c | 2
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 2
-rw-r--r--  arch/sparc/net/bpf_jit_comp_32.c | 2
-rw-r--r--  arch/sparc/net/bpf_jit_comp_64.c | 2
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 37
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/cmsg.c | 9
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/offload.c | 12
-rw-r--r--  drivers/net/netdevsim/bpf.c | 246
-rw-r--r--  drivers/net/netdevsim/netdevsim.h | 3
-rw-r--r--  include/linux/bpf.h | 4
-rw-r--r--  include/uapi/linux/bpf.h | 5
-rw-r--r--  include/uapi/linux/bpf_common.h | 7
-rw-r--r--  kernel/bpf/arraymap.c | 49
-rw-r--r--  kernel/bpf/core.c | 19
-rw-r--r--  kernel/bpf/lpm_trie.c | 95
-rw-r--r--  kernel/bpf/offload.c | 81
-rw-r--r--  kernel/bpf/syscall.c | 39
-rw-r--r--  kernel/bpf/verifier.c | 82
-rw-r--r--  kernel/trace/bpf_trace.c | 2
-rw-r--r--  lib/test_bpf.c | 104
-rw-r--r--  net/core/filter.c | 7
-rw-r--r--  net/core/sysctl_net_core.c | 60
-rw-r--r--  net/socket.c | 9
-rw-r--r--  samples/bpf/xdp2skb_meta_kern.c | 8
-rw-r--r--  samples/bpf/xdp_monitor_kern.c | 94
-rw-r--r--  samples/bpf/xdp_monitor_user.c | 416
-rw-r--r--  tools/bpf/bpf_jit_disasm.c | 7
-rw-r--r--  tools/bpf/bpftool/common.c | 72
-rw-r--r--  tools/bpf/bpftool/jit_disasm.c | 16
-rw-r--r--  tools/bpf/bpftool/main.h | 5
-rw-r--r--  tools/bpf/bpftool/map.c | 8
-rw-r--r--  tools/bpf/bpftool/prog.c | 12
-rw-r--r--  tools/include/uapi/linux/bpf.h | 15
-rw-r--r--  tools/testing/selftests/bpf/.gitignore | 7
-rw-r--r--  tools/testing/selftests/bpf/Makefile | 3
-rw-r--r--  tools/testing/selftests/bpf/sample_map_ret0.c | 34
-rw-r--r--  tools/testing/selftests/bpf/test_lpm_map.c | 122
-rwxr-xr-x  tools/testing/selftests/bpf/test_offload.py | 206
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 137
44 files changed, 1828 insertions(+), 222 deletions(-)
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index a5edea07b8d5..41e2feb0cf4f 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -25,8 +25,6 @@
#include "bpf_jit_32.h"
-int bpf_jit_enable __read_mostly;
-
/*
* eBPF prog stack layout:
*
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index cbfe890b0ee5..0775d5ab8ee9 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -31,8 +31,6 @@
#include "bpf_jit.h"
-int bpf_jit_enable __read_mostly;
-
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 44b925005dd3..4d8cb9bb8365 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -1207,8 +1207,6 @@ jmp_cmp:
return 0;
}
-int bpf_jit_enable __read_mostly;
-
void bpf_jit_compile(struct bpf_prog *fp)
{
struct jit_ctx ctx;
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 97069a1b6f43..4e347030ed2c 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -177,8 +177,6 @@ static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
(ctx->idx * 4) - 4;
}
-int bpf_jit_enable __read_mostly;
-
enum which_ebpf_reg {
src_reg,
src_reg_no_fp,
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index f9941b3b5770..872d1f6dd11e 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -18,8 +18,6 @@
#include "bpf_jit32.h"
-int bpf_jit_enable __read_mostly;
-
static inline void bpf_flush_icache(void *start, void *end)
{
smp_wmb();
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 6771c63b2bec..217a78e84865 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -21,8 +21,6 @@
#include "bpf_jit64.h"
-int bpf_jit_enable __read_mostly;
-
static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
memset32(area, BREAKPOINT_INSTRUCTION, size/4);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 1dfadbd126f3..e50188773ff3 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -28,8 +28,6 @@
#include <asm/set_memory.h>
#include "bpf_jit.h"
-int bpf_jit_enable __read_mostly;
-
struct bpf_jit {
u32 seen; /* Flags to remember seen eBPF instructions */
u32 seen_reg[16]; /* Array to remember which registers are used */
diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c
index 09e318eb34ee..3bd8ca95e521 100644
--- a/arch/sparc/net/bpf_jit_comp_32.c
+++ b/arch/sparc/net/bpf_jit_comp_32.c
@@ -11,8 +11,6 @@
#include "bpf_jit_32.h"
-int bpf_jit_enable __read_mostly;
-
static inline bool is_simm13(unsigned int value)
{
return value + 0x1000 < 0x2000;
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 635fdefd4ae2..50a24d7bd4c5 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -12,8 +12,6 @@
#include "bpf_jit_64.h"
-int bpf_jit_enable __read_mostly;
-
static inline bool is_simm13(unsigned int value)
{
return value + 0x1000 < 0x2000;
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 87f214fbe66e..5acee5139e28 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -15,8 +15,6 @@
#include <asm/set_memory.h>
#include <linux/bpf.h>
-int bpf_jit_enable __read_mostly;
-
/*
* assembly code in arch/x86/net/bpf_jit.S
*/
@@ -154,6 +152,11 @@ static bool is_ereg(u32 reg)
BIT(BPF_REG_AX));
}
+static bool is_axreg(u32 reg)
+{
+ return reg == BPF_REG_0;
+}
+
/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static u8 add_1mod(u8 byte, u32 reg)
{
@@ -447,16 +450,36 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
else if (is_ereg(dst_reg))
EMIT1(add_1mod(0x40, dst_reg));
+ /* b3 holds 'normal' opcode, b2 short form only valid
+ * in case dst is eax/rax.
+ */
switch (BPF_OP(insn->code)) {
- case BPF_ADD: b3 = 0xC0; break;
- case BPF_SUB: b3 = 0xE8; break;
- case BPF_AND: b3 = 0xE0; break;
- case BPF_OR: b3 = 0xC8; break;
- case BPF_XOR: b3 = 0xF0; break;
+ case BPF_ADD:
+ b3 = 0xC0;
+ b2 = 0x05;
+ break;
+ case BPF_SUB:
+ b3 = 0xE8;
+ b2 = 0x2D;
+ break;
+ case BPF_AND:
+ b3 = 0xE0;
+ b2 = 0x25;
+ break;
+ case BPF_OR:
+ b3 = 0xC8;
+ b2 = 0x0D;
+ break;
+ case BPF_XOR:
+ b3 = 0xF0;
+ b2 = 0x35;
+ break;
}
if (is_imm8(imm32))
EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
+ else if (is_axreg(dst_reg))
+ EMIT1_off32(b2, imm32);
else
EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
break;
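The b2 opcodes introduced above (0x05/0x2D/0x25/0x0D/0x35 for ADD/SUB/AND/OR/XOR) are the x86 "accumulator, imm32" short forms, valid only when the destination register is eax/rax, i.e. BPF_REG_0 after mapping; they drop the ModRM byte that the generic 0x81 encoding needs. A small illustrative comparison follows; it is not taken from the patch, the byte values simply follow the standard x86-64 encoding rules.

/* Illustration only: byte sequences for "add rax, 0x12345", an imm32 too
 * large for the imm8 (0x83) form.  The rax/eax short form (opcode 0x05,
 * the new 'b2' value for BPF_ADD) saves one byte per instruction.
 */
#include <stdio.h>

static const unsigned char add_rax_generic[] = {
	0x48, 0x81, 0xc0, 0x45, 0x23, 0x01, 0x00,	/* REX.W + 81 /0 id */
};
static const unsigned char add_rax_short[] = {
	0x48, 0x05, 0x45, 0x23, 0x01, 0x00,		/* REX.W + 05 id */
};

int main(void)
{
	printf("generic: %zu bytes, short form: %zu bytes\n",
	       sizeof(add_rax_generic), sizeof(add_rax_short));
	return 0;
}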
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 71e6586acc36..80d3aa0fc9d3 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -157,7 +157,14 @@ nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
int tag)
{
struct sk_buff *skb;
- int err;
+ int i, err;
+
+ for (i = 0; i < 50; i++) {
+ udelay(4);
+ skb = nfp_bpf_reply(bpf, tag);
+ if (skb)
+ return skb;
+ }
err = wait_event_interruptible_timeout(bpf->cmsg_wq,
skb = nfp_bpf_reply(bpf, tag),
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index e2859b2e9c6a..1a357aacc444 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -127,6 +127,7 @@ static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int stack_size;
unsigned int max_instr;
+ int err;
stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
if (prog->aux->stack_depth > stack_size) {
@@ -143,7 +144,14 @@ static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
if (!nfp_prog->prog)
return -ENOMEM;
- return nfp_bpf_jit(nfp_prog);
+ err = nfp_bpf_jit(nfp_prog);
+ if (err)
+ return err;
+
+ prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
+ prog->aux->offload->jited_image = nfp_prog->prog;
+
+ return 0;
}
static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
@@ -168,6 +176,8 @@ nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
static int
nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
+ if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
+ return -EINVAL;
return nfp_bpf_ctrl_del_entry(offmap, key);
}
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 5134d5c1306c..b3851bbefad3 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -17,6 +17,7 @@
#include <linux/bpf_verifier.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
+#include <linux/mutex.h>
#include <linux/rtnetlink.h>
#include <net/pkt_cls.h>
@@ -31,6 +32,19 @@ struct nsim_bpf_bound_prog {
struct list_head l;
};
+#define NSIM_BPF_MAX_KEYS 2
+
+struct nsim_bpf_bound_map {
+ struct netdevsim *ns;
+ struct bpf_offloaded_map *map;
+ struct mutex mutex;
+ struct nsim_map_entry {
+ void *key;
+ void *value;
+ } entry[NSIM_BPF_MAX_KEYS];
+ struct list_head l;
+};
+
static int nsim_debugfs_bpf_string_read(struct seq_file *file, void *data)
{
const char **str = file->private;
@@ -284,6 +298,224 @@ nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
return 0;
}
+static bool
+nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key)
+{
+ return e->key && !memcmp(key, e->key, map->key_size);
+}
+
+static int nsim_map_key_find(struct bpf_offloaded_map *offmap, void *key)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(nmap->entry); i++)
+ if (nsim_map_key_match(&offmap->map, &nmap->entry[i], key))
+ return i;
+
+ return -ENOENT;
+}
+
+static int
+nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+
+ nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER);
+ if (!nmap->entry[idx].key)
+ return -ENOMEM;
+ nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER);
+ if (!nmap->entry[idx].value) {
+ kfree(nmap->entry[idx].key);
+ nmap->entry[idx].key = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+nsim_map_get_next_key(struct bpf_offloaded_map *offmap,
+ void *key, void *next_key)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ int idx = -ENOENT;
+
+ mutex_lock(&nmap->mutex);
+
+ if (key)
+ idx = nsim_map_key_find(offmap, key);
+ if (idx == -ENOENT)
+ idx = 0;
+ else
+ idx++;
+
+ for (; idx < ARRAY_SIZE(nmap->entry); idx++) {
+ if (nmap->entry[idx].key) {
+ memcpy(next_key, nmap->entry[idx].key,
+ offmap->map.key_size);
+ break;
+ }
+ }
+
+ mutex_unlock(&nmap->mutex);
+
+ if (idx == ARRAY_SIZE(nmap->entry))
+ return -ENOENT;
+ return 0;
+}
+
+static int
+nsim_map_lookup_elem(struct bpf_offloaded_map *offmap, void *key, void *value)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ int idx;
+
+ mutex_lock(&nmap->mutex);
+
+ idx = nsim_map_key_find(offmap, key);
+ if (idx >= 0)
+ memcpy(value, nmap->entry[idx].value, offmap->map.value_size);
+
+ mutex_unlock(&nmap->mutex);
+
+ return idx < 0 ? idx : 0;
+}
+
+static int
+nsim_map_update_elem(struct bpf_offloaded_map *offmap,
+ void *key, void *value, u64 flags)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ int idx, err = 0;
+
+ mutex_lock(&nmap->mutex);
+
+ idx = nsim_map_key_find(offmap, key);
+ if (idx < 0 && flags == BPF_EXIST) {
+ err = idx;
+ goto exit_unlock;
+ }
+ if (idx >= 0 && flags == BPF_NOEXIST) {
+ err = -EEXIST;
+ goto exit_unlock;
+ }
+
+ if (idx < 0) {
+ for (idx = 0; idx < ARRAY_SIZE(nmap->entry); idx++)
+ if (!nmap->entry[idx].key)
+ break;
+ if (idx == ARRAY_SIZE(nmap->entry)) {
+ err = -E2BIG;
+ goto exit_unlock;
+ }
+
+ err = nsim_map_alloc_elem(offmap, idx);
+ if (err)
+ goto exit_unlock;
+ }
+
+ memcpy(nmap->entry[idx].key, key, offmap->map.key_size);
+ memcpy(nmap->entry[idx].value, value, offmap->map.value_size);
+exit_unlock:
+ mutex_unlock(&nmap->mutex);
+
+ return err;
+}
+
+static int nsim_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ int idx;
+
+ if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
+ return -EINVAL;
+
+ mutex_lock(&nmap->mutex);
+
+ idx = nsim_map_key_find(offmap, key);
+ if (idx >= 0) {
+ kfree(nmap->entry[idx].key);
+ kfree(nmap->entry[idx].value);
+ memset(&nmap->entry[idx], 0, sizeof(nmap->entry[idx]));
+ }
+
+ mutex_unlock(&nmap->mutex);
+
+ return idx < 0 ? idx : 0;
+}
+
+static const struct bpf_map_dev_ops nsim_bpf_map_ops = {
+ .map_get_next_key = nsim_map_get_next_key,
+ .map_lookup_elem = nsim_map_lookup_elem,
+ .map_update_elem = nsim_map_update_elem,
+ .map_delete_elem = nsim_map_delete_elem,
+};
+
+static int
+nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
+{
+ struct nsim_bpf_bound_map *nmap;
+ unsigned int i;
+ int err;
+
+ if (WARN_ON(offmap->map.map_type != BPF_MAP_TYPE_ARRAY &&
+ offmap->map.map_type != BPF_MAP_TYPE_HASH))
+ return -EINVAL;
+ if (offmap->map.max_entries > NSIM_BPF_MAX_KEYS)
+ return -ENOMEM;
+ if (offmap->map.map_flags)
+ return -EINVAL;
+
+ nmap = kzalloc(sizeof(*nmap), GFP_USER);
+ if (!nmap)
+ return -ENOMEM;
+
+ offmap->dev_priv = nmap;
+ nmap->ns = ns;
+ nmap->map = offmap;
+ mutex_init(&nmap->mutex);
+
+ if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) {
+ for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
+ u32 *key;
+
+ err = nsim_map_alloc_elem(offmap, i);
+ if (err)
+ goto err_free;
+ key = nmap->entry[i].key;
+ *key = i;
+ }
+ }
+
+ offmap->dev_ops = &nsim_bpf_map_ops;
+ list_add_tail(&nmap->l, &ns->bpf_bound_maps);
+
+ return 0;
+
+err_free:
+ while (--i) {
+ kfree(nmap->entry[i].key);
+ kfree(nmap->entry[i].value);
+ }
+ kfree(nmap);
+ return err;
+}
+
+static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
+ kfree(nmap->entry[i].key);
+ kfree(nmap->entry[i].value);
+ }
+ list_del_init(&nmap->l);
+ mutex_destroy(&nmap->mutex);
+ kfree(nmap);
+}
+
int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
struct netdevsim *ns = netdev_priv(dev);
@@ -328,6 +560,14 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
return err;
return nsim_xdp_set_prog(ns, bpf);
+ case BPF_OFFLOAD_MAP_ALLOC:
+ if (!ns->bpf_map_accept)
+ return -EOPNOTSUPP;
+
+ return nsim_bpf_map_alloc(ns, bpf->offmap);
+ case BPF_OFFLOAD_MAP_FREE:
+ nsim_bpf_map_free(bpf->offmap);
+ return 0;
default:
return -EINVAL;
}
@@ -336,6 +576,7 @@ int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
int nsim_bpf_init(struct netdevsim *ns)
{
INIT_LIST_HEAD(&ns->bpf_bound_progs);
+ INIT_LIST_HEAD(&ns->bpf_bound_maps);
debugfs_create_u32("bpf_offloaded_id", 0400, ns->ddir,
&ns->bpf_offloaded_id);
@@ -362,12 +603,17 @@ int nsim_bpf_init(struct netdevsim *ns)
debugfs_create_bool("bpf_xdpoffload_accept", 0600, ns->ddir,
&ns->bpf_xdpoffload_accept);
+ ns->bpf_map_accept = true;
+ debugfs_create_bool("bpf_map_accept", 0600, ns->ddir,
+ &ns->bpf_map_accept);
+
return 0;
}
void nsim_bpf_uninit(struct netdevsim *ns)
{
WARN_ON(!list_empty(&ns->bpf_bound_progs));
+ WARN_ON(!list_empty(&ns->bpf_bound_maps));
WARN_ON(ns->xdp_prog);
WARN_ON(ns->bpf_offloaded);
}
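For reference, nsim_map_get_next_key() above implements the kernel side of the usual BPF_MAP_GET_NEXT_KEY contract (a NULL or unknown key yields the first entry, -ENOENT ends the walk), the same interface item 2 of the pull request adds for LPM maps. Below is a hedged user-space sketch of the matching walk; 'map_fd' and the 'key'/'next_key' buffers are assumed to be provided by the caller, and it is not code from this series.

/* Sketch: walk all keys of a map from user space with BPF_MAP_GET_NEXT_KEY. */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int walk_map_keys(int map_fd, void *key, void *next_key, size_t key_size)
{
	union bpf_attr attr;
	int n = 0;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = 0;				/* NULL key: ask for the first entry */
	attr.next_key = (__u64)(unsigned long)next_key;

	while (!syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr))) {
		n++;				/* next_key now holds a valid key */
		memcpy(key, next_key, key_size);
		attr.key = (__u64)(unsigned long)key;
	}
	return n;				/* syscall returns -ENOENT at the end */
}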
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 32270de9395a..b80361200302 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -61,6 +61,9 @@ struct netdevsim {
bool bpf_tc_non_bound_accept;
bool bpf_xdpdrv_accept;
bool bpf_xdpoffload_accept;
+
+ bool bpf_map_accept;
+ struct list_head bpf_bound_maps;
};
extern struct dentry *nsim_ddir;
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5c2c104dc2c5..66df387106de 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -234,6 +234,8 @@ struct bpf_prog_offload {
struct list_head offloads;
bool dev_state;
const struct bpf_prog_offload_ops *dev_ops;
+ void *jited_image;
+ u32 jited_len;
};
struct bpf_prog_aux {
@@ -584,6 +586,8 @@ void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
struct bpf_prog *prog);
+int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
+
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
void *key, void *value, u64 flags);
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 7c2259e8bc54..406c19d6016b 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -17,7 +17,7 @@
#define BPF_ALU64 0x07 /* alu mode in double word width */
/* ld/ldx fields */
-#define BPF_DW 0x18 /* double word */
+#define BPF_DW 0x18 /* double word (64-bit) */
#define BPF_XADD 0xc0 /* exclusive add */
/* alu/jmp fields */
@@ -938,6 +938,9 @@ struct bpf_map_info {
__u32 max_entries;
__u32 map_flags;
char name[BPF_OBJ_NAME_LEN];
+ __u32 ifindex;
+ __u64 netns_dev;
+ __u64 netns_ino;
} __attribute__((aligned(8)));
/* User bpf_sock_ops struct to access socket values and specify request ops
diff --git a/include/uapi/linux/bpf_common.h b/include/uapi/linux/bpf_common.h
index 18be90725ab0..ee97668bdadb 100644
--- a/include/uapi/linux/bpf_common.h
+++ b/include/uapi/linux/bpf_common.h
@@ -15,9 +15,10 @@
/* ld/ldx fields */
#define BPF_SIZE(code) ((code) & 0x18)
-#define BPF_W 0x00
-#define BPF_H 0x08
-#define BPF_B 0x10
+#define BPF_W 0x00 /* 32-bit */
+#define BPF_H 0x08 /* 16-bit */
+#define BPF_B 0x10 /* 8-bit */
+/* eBPF BPF_DW 0x18 64-bit */
#define BPF_MODE(code) ((code) & 0xe0)
#define BPF_IMM 0x00
#define BPF_ABS 0x20
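The size bits annotated above can be checked mechanically with the field macros from this header; a tiny example follows (not part of the patch), decoding 0x79, the eBPF opcode for a 64-bit register load.

/* Illustration: decoding an eBPF opcode with the bpf_common.h field macros.
 * 0x79 encodes "dst = *(u64 *)(src + off)".
 */
#include <linux/bpf.h>
#include <assert.h>

int main(void)
{
	unsigned char code = 0x79;		/* BPF_LDX | BPF_MEM | BPF_DW */

	assert(BPF_CLASS(code) == BPF_LDX);	/* 0x01: register load  */
	assert(BPF_SIZE(code)  == BPF_DW);	/* 0x18: 64-bit access  */
	assert(BPF_MODE(code)  == BPF_MEM);	/* 0x60: memory operand */
	return 0;
}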
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index ab94d304a634..b1f66480135b 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -49,27 +49,35 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
}
/* Called from syscall */
-static struct bpf_map *array_map_alloc(union bpf_attr *attr)
+static int array_map_alloc_check(union bpf_attr *attr)
{
bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
int numa_node = bpf_map_attr_numa_node(attr);
- u32 elem_size, index_mask, max_entries;
- bool unpriv = !capable(CAP_SYS_ADMIN);
- struct bpf_array *array;
- u64 array_size, mask64;
/* check sanity of attributes */
if (attr->max_entries == 0 || attr->key_size != 4 ||
attr->value_size == 0 ||
attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
(percpu && numa_node != NUMA_NO_NODE))
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
if (attr->value_size > KMALLOC_MAX_SIZE)
/* if value_size is bigger, the user space won't be able to
* access the elements.
*/
- return ERR_PTR(-E2BIG);
+ return -E2BIG;
+
+ return 0;
+}
+
+static struct bpf_map *array_map_alloc(union bpf_attr *attr)
+{
+ bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
+ int numa_node = bpf_map_attr_numa_node(attr);
+ u32 elem_size, index_mask, max_entries;
+ bool unpriv = !capable(CAP_SYS_ADMIN);
+ struct bpf_array *array;
+ u64 array_size, mask64;
elem_size = round_up(attr->value_size, 8);
@@ -112,12 +120,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
array->map.unpriv_array = unpriv;
/* copy mandatory map attributes */
- array->map.map_type = attr->map_type;
- array->map.key_size = attr->key_size;
- array->map.value_size = attr->value_size;
- array->map.max_entries = attr->max_entries;
- array->map.map_flags = attr->map_flags;
- array->map.numa_node = numa_node;
+ bpf_map_init_from_attr(&array->map, attr);
array->elem_size = elem_size;
if (!percpu)
@@ -327,6 +330,7 @@ static void array_map_free(struct bpf_map *map)
}
const struct bpf_map_ops array_map_ops = {
+ .map_alloc_check = array_map_alloc_check,
.map_alloc = array_map_alloc,
.map_free = array_map_free,
.map_get_next_key = array_map_get_next_key,
@@ -337,6 +341,7 @@ const struct bpf_map_ops array_map_ops = {
};
const struct bpf_map_ops percpu_array_map_ops = {
+ .map_alloc_check = array_map_alloc_check,
.map_alloc = array_map_alloc,
.map_free = array_map_free,
.map_get_next_key = array_map_get_next_key,
@@ -345,12 +350,12 @@ const struct bpf_map_ops percpu_array_map_ops = {
.map_delete_elem = array_map_delete_elem,
};
-static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr)
+static int fd_array_map_alloc_check(union bpf_attr *attr)
{
/* only file descriptors can be stored in this type of map */
if (attr->value_size != sizeof(u32))
- return ERR_PTR(-EINVAL);
- return array_map_alloc(attr);
+ return -EINVAL;
+ return array_map_alloc_check(attr);
}
static void fd_array_map_free(struct bpf_map *map)
@@ -474,7 +479,8 @@ void bpf_fd_array_map_clear(struct bpf_map *map)
}
const struct bpf_map_ops prog_array_map_ops = {
- .map_alloc = fd_array_map_alloc,
+ .map_alloc_check = fd_array_map_alloc_check,
+ .map_alloc = array_map_alloc,
.map_free = fd_array_map_free,
.map_get_next_key = array_map_get_next_key,
.map_lookup_elem = fd_array_map_lookup_elem,
@@ -561,7 +567,8 @@ static void perf_event_fd_array_release(struct bpf_map *map,
}
const struct bpf_map_ops perf_event_array_map_ops = {
- .map_alloc = fd_array_map_alloc,
+ .map_alloc_check = fd_array_map_alloc_check,
+ .map_alloc = array_map_alloc,
.map_free = fd_array_map_free,
.map_get_next_key = array_map_get_next_key,
.map_lookup_elem = fd_array_map_lookup_elem,
@@ -592,7 +599,8 @@ static void cgroup_fd_array_free(struct bpf_map *map)
}
const struct bpf_map_ops cgroup_array_map_ops = {
- .map_alloc = fd_array_map_alloc,
+ .map_alloc_check = fd_array_map_alloc_check,
+ .map_alloc = array_map_alloc,
.map_free = cgroup_fd_array_free,
.map_get_next_key = array_map_get_next_key,
.map_lookup_elem = fd_array_map_lookup_elem,
@@ -610,7 +618,7 @@ static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
if (IS_ERR(inner_map_meta))
return inner_map_meta;
- map = fd_array_map_alloc(attr);
+ map = array_map_alloc(attr);
if (IS_ERR(map)) {
bpf_map_meta_free(inner_map_meta);
return map;
@@ -673,6 +681,7 @@ static u32 array_of_map_gen_lookup(struct bpf_map *map,
}