author		Daniel Borkmann <daniel@iogearbox.net>	2015-09-30 01:41:50 +0200
committer	David S. Miller <davem@davemloft.net>	2015-10-03 05:02:39 -0700
commit		a91263d520246b63c63e75ddfb072ee6a853fe15 (patch)
tree		17f158474a969614207febc06cae85cd17dd4ab8 /kernel/bpf/core.c
parent		bd8762bec95ed81d5b81390ff23c5f83345cb536 (diff)
ebpf: migrate bpf_prog's flags to bitfield
As we need to add further flags to the bpf_prog structure, let's migrate both bools to a bitfield representation. The size of the base structure (excluding insns) remains unchanged at 40 bytes.

Also add annotations for kmemcheck, so that it doesn't throw false positives. Even in case gcc would generate suboptimal code, the bitfield is not accessed in performance critical paths.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
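Since the diffstat below is limited to kernel/bpf/core.c, the header-side change is not visible here. A rough sketch of what the migrated structure plausibly looks like after the change (the field names jited and gpl_compatible, and the comments, are assumptions for illustration, not taken from this page):

struct bpf_prog {
	u16			pages;		/* number of allocated pages */
	kmemcheck_bitfield_begin(meta);
	u16			jited:1,	/* assumed: formerly a bool */
				gpl_compatible:1; /* assumed: formerly a bool */
	kmemcheck_bitfield_end(meta);
	/* ... remaining members unchanged ... */
};

The kmemcheck_bitfield_begin()/kmemcheck_bitfield_end() markers delimit the region that the kmemcheck_annotate_bitfield() calls in the hunks below refer to by the name 'meta'.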
Diffstat (limited to 'kernel/bpf/core.c')
-rw-r--r--	kernel/bpf/core.c	4
1 file changed, 4 insertions, 0 deletions
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 67c380cfa9ca..c8855c2a7a48 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -82,6 +82,8 @@ struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags)
 	if (fp == NULL)
 		return NULL;
 
+	kmemcheck_annotate_bitfield(fp, meta);
+
 	aux = kzalloc(sizeof(*aux), GFP_KERNEL | gfp_extra_flags);
 	if (aux == NULL) {
 		vfree(fp);
@@ -110,6 +112,8 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
 	if (fp != NULL) {
+		kmemcheck_annotate_bitfield(fp, meta);
+
 		memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
 		fp->pages = size / PAGE_SIZE;
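Both hunks place the annotation immediately after the (re)allocation and before any use of the flags: kmemcheck tracks the initialization state of memory, and read-modify-write stores to individual bits can touch bytes that were never fully written, so marking the 'meta' region as initialized up front is what keeps the checker from reporting false positives, as the commit message notes. A minimal self-contained sketch of the same pattern, using a hypothetical struct example rather than bpf_prog:

#include <linux/kmemcheck.h>
#include <linux/slab.h>

/* Hypothetical struct for illustration only. */
struct example {
	kmemcheck_bitfield_begin(flags);
	u16	a:1,
		b:1;
	kmemcheck_bitfield_end(flags);
};

static struct example *example_alloc(gfp_t gfp)
{
	struct example *e = kmalloc(sizeof(*e), gfp);

	if (e)
		/* Mark the bytes backing the 'flags' bitfield as initialized
		 * for kmemcheck, mirroring what the hunks above do for
		 * bpf_prog's 'meta' region.
		 */
		kmemcheck_annotate_bitfield(e, flags);
	return e;
}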