From 5327644614a18f5d0ff845844a4e9976210b3d8d Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 19 Feb 2020 22:26:35 -0800 Subject: libbpf: Relax check whether BTF is mandatory If a BPF program is using BTF-defined maps, BTF is required only for libbpf itself to process the map definitions. If after that BTF fails to be loaded into the kernel (e.g., if the kernel doesn't support BTF at all), this shouldn't prevent a valid BPF program from loading. The existing retry-without-BTF logic for creating maps will successfully create such maps without any problems. So the presence of a .maps section shouldn't make BTF required for the kernel. Update the check accordingly. Validated by ensuring that a simple BPF program with BTF-defined maps is still loaded on an old kernel without BTF support and that the map is correctly parsed and created. Fixes: abd29c931459 ("libbpf: allow specifying map definitions using BTF") Reported-by: Julia Kartseva Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200220062635.1497872-1-andriin@fb.com --- tools/lib/bpf/libbpf.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 514b1a524abb..0eb10b681413 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2280,9 +2280,7 @@ static void bpf_object__sanitize_btf_ext(struct bpf_object *obj) static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj) { - return obj->efile.btf_maps_shndx >= 0 || - obj->efile.st_ops_shndx >= 0 || - obj->nr_extern > 0; + return obj->efile.st_ops_shndx >= 0 || obj->nr_extern > 0; } static int bpf_object__init_btf(struct bpf_object *obj, -- cgit v1.2.3 From dd88aed92d017bed2a2c0023f22f0eef7cd29702 Mon Sep 17 00:00:00 2001 From: Eelco Chaudron Date: Thu, 20 Feb 2020 13:26:24 +0000 Subject: libbpf: Bump libbpf current version to v0.0.8 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A new development cycle starts; bump to v0.0.8. Signed-off-by: Eelco Chaudron Signed-off-by: Alexei Starovoitov Acked-by: Toke Høiland-Jørgensen Link: https://lore.kernel.org/bpf/158220518424.127661.8278643006567775528.stgit@xdp-tutorial --- tools/lib/bpf/libbpf.map | 3 +++ 1 file changed, 3 insertions(+) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index b035122142bb..45be19c9d752 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -235,3 +235,6 @@ LIBBPF_0.0.7 { btf__align_of; libbpf_find_kernel_btf; } LIBBPF_0.0.6; + +LIBBPF_0.0.8 { +} LIBBPF_0.0.7; -- cgit v1.2.3 From ff26ce5cd7680ebc9c5446cda013e2087decc60f Mon Sep 17 00:00:00 2001 From: Eelco Chaudron Date: Thu, 20 Feb 2020 13:26:35 +0000 Subject: libbpf: Add support for dynamic program attach target MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, when you want to attach a trace program to a bpf program, the section name needs to match the tracepoint/function semantics. However, the addition of the bpf_program__set_attach_target() API allows you to specify the tracepoint/function dynamically.
The call flow would look something like this: xdp_fd = bpf_prog_get_fd_by_id(id); trace_obj = bpf_object__open_file("func.o", NULL); prog = bpf_object__find_program_by_title(trace_obj, "fentry/myfunc"); bpf_program__set_expected_attach_type(prog, BPF_TRACE_FENTRY); bpf_program__set_attach_target(prog, xdp_fd, "xdpfilt_blk_all"); bpf_object__load(trace_obj) Signed-off-by: Eelco Chaudron Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: Toke Høiland-Jørgensen Link: https://lore.kernel.org/bpf/158220519486.127661.7964708960649051384.stgit@xdp-tutorial --- tools/lib/bpf/libbpf.c | 34 ++++++++++++++++++++++++++++++---- tools/lib/bpf/libbpf.h | 4 ++++ tools/lib/bpf/libbpf.map | 2 ++ 3 files changed, 36 insertions(+), 4 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 0eb10b681413..8f0c436d5880 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -4937,8 +4937,8 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver) { int err = 0, fd, i, btf_id; - if (prog->type == BPF_PROG_TYPE_TRACING || - prog->type == BPF_PROG_TYPE_EXT) { + if ((prog->type == BPF_PROG_TYPE_TRACING || + prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { btf_id = libbpf_find_attach_btf_id(prog); if (btf_id <= 0) return btf_id; @@ -6581,6 +6581,9 @@ static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name, else err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC); + if (err <= 0) + pr_warn("%s is not found in vmlinux BTF\n", name); + return err; } @@ -6653,8 +6656,6 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog) err = __find_vmlinux_btf_id(prog->obj->btf_vmlinux, name + section_defs[i].len, attach_type); - if (err <= 0) - pr_warn("%s is not found in vmlinux BTF\n", name); return err; } pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name); @@ -8130,6 +8131,31 @@ void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear) } } +int bpf_program__set_attach_target(struct bpf_program *prog, + int attach_prog_fd, + const char *attach_func_name) +{ + int btf_id; + + if (!prog || attach_prog_fd < 0 || !attach_func_name) + return -EINVAL; + + if (attach_prog_fd) + btf_id = libbpf_find_prog_btf_id(attach_func_name, + attach_prog_fd); + else + btf_id = __find_vmlinux_btf_id(prog->obj->btf_vmlinux, + attach_func_name, + prog->expected_attach_type); + + if (btf_id < 0) + return btf_id; + + prog->attach_btf_id = btf_id; + prog->attach_prog_fd = attach_prog_fd; + return 0; +} + int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz) { int err = 0, n, len, start, end = -1; diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 3fe12c9d1f92..02fc58a21a7f 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -334,6 +334,10 @@ LIBBPF_API void bpf_program__set_expected_attach_type(struct bpf_program *prog, enum bpf_attach_type type); +LIBBPF_API int +bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd, + const char *attach_func_name); + LIBBPF_API bool bpf_program__is_socket_filter(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_tracepoint(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_raw_tracepoint(const struct bpf_program *prog); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 45be19c9d752..7b014c8cdece 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -237,4 +237,6 @@ LIBBPF_0.0.7 { } LIBBPF_0.0.6; 
LIBBPF_0.0.8 { + global: + bpf_program__set_attach_target; } LIBBPF_0.0.7; -- cgit v1.2.3 From fd56e0058412fb542db0e9556f425747cf3f8366 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sat, 29 Feb 2020 15:11:10 -0800 Subject: libbpf: Fix use of PT_REGS_PARM macros with vmlinux.h Add detection of vmlinux.h to the bpf_tracing.h header for the PT_REGS macros. Currently, BPF applications have to define the __KERNEL__ symbol to use the correct definition of struct pt_regs on the x86 arch. This is due to different field names under internal kernel vs UAPI conditions. To make this more transparent for users, detect vmlinux.h by checking the __VMLINUX_H__ symbol. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200229231112.1240137-3-andriin@fb.com --- tools/lib/bpf/bpf_tracing.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index b0dafe8b4ebc..8376f22b0e36 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -49,7 +49,7 @@ #if defined(bpf_target_x86) -#ifdef __KERNEL__ +#if defined(__KERNEL__) || defined(__VMLINUX_H__) #define PT_REGS_PARM1(x) ((x)->di) #define PT_REGS_PARM2(x) ((x)->si) #define PT_REGS_PARM3(x) ((x)->dx) -- cgit v1.2.3 From df8ff35311c8d10d90b4604c02b32c361dc997aa Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Sat, 29 Feb 2020 15:11:12 -0800 Subject: libbpf: Merge selftests' bpf_trace_helpers.h into libbpf's bpf_tracing.h Move the BPF_PROG, BPF_KPROBE, and BPF_KRETPROBE macros into libbpf's bpf_tracing.h header to make them available to non-selftests users. Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200229231112.1240137-5-andriin@fb.com --- tools/lib/bpf/bpf_tracing.h | 118 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 118 insertions(+) (limited to 'tools/lib') diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index 8376f22b0e36..379d03b211ea 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -192,4 +192,122 @@ struct pt_regs; (void *)(PT_REGS_FP(ctx) + sizeof(ip))); }) #endif +#define ___bpf_concat(a, b) a ## b +#define ___bpf_apply(fn, n) ___bpf_concat(fn, n) +#define ___bpf_nth(_, _1, _2, _3, _4, _5, _6, _7, _8, _9, _a, _b, _c, N, ...) N +#define ___bpf_narg(...) \ + ___bpf_nth(_, ##__VA_ARGS__, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +#define ___bpf_empty(...) \ + ___bpf_nth(_, ##__VA_ARGS__, N, N, N, N, N, N, N, N, N, N, 0) + +#define ___bpf_ctx_cast0() ctx +#define ___bpf_ctx_cast1(x) ___bpf_ctx_cast0(), (void *)ctx[0] +#define ___bpf_ctx_cast2(x, args...) ___bpf_ctx_cast1(args), (void *)ctx[1] +#define ___bpf_ctx_cast3(x, args...) ___bpf_ctx_cast2(args), (void *)ctx[2] +#define ___bpf_ctx_cast4(x, args...) ___bpf_ctx_cast3(args), (void *)ctx[3] +#define ___bpf_ctx_cast5(x, args...) ___bpf_ctx_cast4(args), (void *)ctx[4] +#define ___bpf_ctx_cast6(x, args...) ___bpf_ctx_cast5(args), (void *)ctx[5] +#define ___bpf_ctx_cast7(x, args...) ___bpf_ctx_cast6(args), (void *)ctx[6] +#define ___bpf_ctx_cast8(x, args...) ___bpf_ctx_cast7(args), (void *)ctx[7] +#define ___bpf_ctx_cast9(x, args...) ___bpf_ctx_cast8(args), (void *)ctx[8] +#define ___bpf_ctx_cast10(x, args...) ___bpf_ctx_cast9(args), (void *)ctx[9] +#define ___bpf_ctx_cast11(x, args...) ___bpf_ctx_cast10(args), (void *)ctx[10] +#define ___bpf_ctx_cast12(x, args...) ___bpf_ctx_cast11(args), (void *)ctx[11] +#define ___bpf_ctx_cast(args...)
\ + ___bpf_apply(___bpf_ctx_cast, ___bpf_narg(args))(args) + +/* + * BPF_PROG is a convenience wrapper for generic tp_btf/fentry/fexit and + * similar kinds of BPF programs, that accept input arguments as a single + * pointer to untyped u64 array, where each u64 can actually be a typed + * pointer or integer of different size. Instead of requring user to write + * manual casts and work with array elements by index, BPF_PROG macro + * allows user to declare a list of named and typed input arguments in the + * same syntax as for normal C function. All the casting is hidden and + * performed transparently, while user code can just assume working with + * function arguments of specified type and name. + * + * Original raw context argument is preserved as well as 'ctx' argument. + * This is useful when using BPF helpers that expect original context + * as one of the parameters (e.g., for bpf_perf_event_output()). + */ +#define BPF_PROG(name, args...) \ +name(unsigned long long *ctx); \ +static __attribute__((always_inline)) typeof(name(0)) \ +____##name(unsigned long long *ctx, ##args); \ +typeof(name(0)) name(unsigned long long *ctx) \ +{ \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + return ____##name(___bpf_ctx_cast(args)); \ + _Pragma("GCC diagnostic pop") \ +} \ +static __attribute__((always_inline)) typeof(name(0)) \ +____##name(unsigned long long *ctx, ##args) + +struct pt_regs; + +#define ___bpf_kprobe_args0() ctx +#define ___bpf_kprobe_args1(x) \ + ___bpf_kprobe_args0(), (void *)PT_REGS_PARM1(ctx) +#define ___bpf_kprobe_args2(x, args...) \ + ___bpf_kprobe_args1(args), (void *)PT_REGS_PARM2(ctx) +#define ___bpf_kprobe_args3(x, args...) \ + ___bpf_kprobe_args2(args), (void *)PT_REGS_PARM3(ctx) +#define ___bpf_kprobe_args4(x, args...) \ + ___bpf_kprobe_args3(args), (void *)PT_REGS_PARM4(ctx) +#define ___bpf_kprobe_args5(x, args...) \ + ___bpf_kprobe_args4(args), (void *)PT_REGS_PARM5(ctx) +#define ___bpf_kprobe_args(args...) \ + ___bpf_apply(___bpf_kprobe_args, ___bpf_narg(args))(args) + +/* + * BPF_KPROBE serves the same purpose for kprobes as BPF_PROG for + * tp_btf/fentry/fexit BPF programs. It hides the underlying platform-specific + * low-level way of getting kprobe input arguments from struct pt_regs, and + * provides a familiar typed and named function arguments syntax and + * semantics of accessing kprobe input paremeters. + * + * Original struct pt_regs* context is preserved as 'ctx' argument. This might + * be necessary when using BPF helpers like bpf_perf_event_output(). + */ +#define BPF_KPROBE(name, args...) \ +name(struct pt_regs *ctx); \ +static __attribute__((always_inline)) typeof(name(0)) \ +____##name(struct pt_regs *ctx, ##args); \ +typeof(name(0)) name(struct pt_regs *ctx) \ +{ \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + return ____##name(___bpf_kprobe_args(args)); \ + _Pragma("GCC diagnostic pop") \ +} \ +static __attribute__((always_inline)) typeof(name(0)) \ +____##name(struct pt_regs *ctx, ##args) + +#define ___bpf_kretprobe_args0() ctx +#define ___bpf_kretprobe_args1(x) \ + ___bpf_kretprobe_args0(), (void *)PT_REGS_RET(ctx) +#define ___bpf_kretprobe_args(args...) 
\ + ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args) + +/* + * BPF_KRETPROBE is similar to BPF_KPROBE, except, it only provides optional + * return value (in addition to `struct pt_regs *ctx`), but no input + * arguments, because they will be clobbered by the time probed function + * returns. + */ +#define BPF_KRETPROBE(name, args...) \ +name(struct pt_regs *ctx); \ +static __attribute__((always_inline)) typeof(name(0)) \ +____##name(struct pt_regs *ctx, ##args); \ +typeof(name(0)) name(struct pt_regs *ctx) \ +{ \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + return ____##name(___bpf_kretprobe_args(args)); \ + _Pragma("GCC diagnostic pop") \ +} \ +static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args) + #endif -- cgit v1.2.3 From c016b68edc7a2adf3db0f11fb649797c1f9216ea Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 2 Mar 2020 20:31:58 -0800 Subject: libbpf: Add bpf_link pinning/unpinning With the bpf_link abstraction now explicitly supported by the kernel, add a pinning/unpinning API for links. Also allow creating (opening) a bpf_link from a BPF FS file. This API allows "ephemeral" FD-based BPF links (like raw tracepoint or fexit/freplace attachments) to survive user process exit by pinning them in a BPF FS, which is an important use case for long-running BPF programs. As part of this, expose the underlying FD for a bpf_link. While legacy bpf_links might not have an FD associated with them (which will be expressed as a bpf_link with fd=-1), the kernel's abstraction is based around FD-based usage, so match it closely. This, subsequently, allows a generic pinning/unpinning API for the generalized bpf_link. For some types of bpf_link the kernel might not support pinning, in which case bpf_link__pin() will return an error. With the FD being part of the generic bpf_link, also get rid of bpf_link_fd in favor of using the vanilla bpf_link.
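As a rough usage sketch (hypothetical program and pin path, most error handling elided), the new pinning API can be used to keep an attachment alive past process exit:

	struct bpf_link *link;
	int err;

	link = bpf_program__attach_trace(prog);
	err = libbpf_get_error(link);
	if (err)
		return err;
	/* pin the link in BPF FS so it outlives this process */
	err = bpf_link__pin(link, "/sys/fs/bpf/my_link");
	...
	/* later, possibly from another process, re-open and unpin it */
	link = bpf_link__open("/sys/fs/bpf/my_link");
	err = bpf_link__unpin(link);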
Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200303043159.323675-3-andriin@fb.com --- tools/lib/bpf/libbpf.c | 131 +++++++++++++++++++++++++++++++++++++---------- tools/lib/bpf/libbpf.h | 5 ++ tools/lib/bpf/libbpf.map | 5 ++ 3 files changed, 114 insertions(+), 27 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 996162801f7a..f8c4042e5855 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -6931,6 +6931,8 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, struct bpf_link { int (*detach)(struct bpf_link *link); int (*destroy)(struct bpf_link *link); + char *pin_path; /* NULL, if not pinned */ + int fd; /* hook FD, -1 if not applicable */ bool disconnected; }; @@ -6960,26 +6962,109 @@ int bpf_link__destroy(struct bpf_link *link) err = link->detach(link); if (link->destroy) link->destroy(link); + if (link->pin_path) + free(link->pin_path); free(link); return err; } -struct bpf_link_fd { - struct bpf_link link; /* has to be at the top of struct */ - int fd; /* hook FD */ -}; +int bpf_link__fd(const struct bpf_link *link) +{ + return link->fd; +} + +const char *bpf_link__pin_path(const struct bpf_link *link) +{ + return link->pin_path; +} + +static int bpf_link__detach_fd(struct bpf_link *link) +{ + return close(link->fd); +} + +struct bpf_link *bpf_link__open(const char *path) +{ + struct bpf_link *link; + int fd; + + fd = bpf_obj_get(path); + if (fd < 0) { + fd = -errno; + pr_warn("failed to open link at %s: %d\n", path, fd); + return ERR_PTR(fd); + } + + link = calloc(1, sizeof(*link)); + if (!link) { + close(fd); + return ERR_PTR(-ENOMEM); + } + link->detach = &bpf_link__detach_fd; + link->fd = fd; + + link->pin_path = strdup(path); + if (!link->pin_path) { + bpf_link__destroy(link); + return ERR_PTR(-ENOMEM); + } + + return link; +} + +int bpf_link__pin(struct bpf_link *link, const char *path) +{ + int err; + + if (link->pin_path) + return -EBUSY; + err = make_parent_dir(path); + if (err) + return err; + err = check_path(path); + if (err) + return err; + + link->pin_path = strdup(path); + if (!link->pin_path) + return -ENOMEM; + + if (bpf_obj_pin(link->fd, link->pin_path)) { + err = -errno; + zfree(&link->pin_path); + return err; + } + + pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path); + return 0; +} + +int bpf_link__unpin(struct bpf_link *link) +{ + int err; + + if (!link->pin_path) + return -EINVAL; + + err = unlink(link->pin_path); + if (err != 0) + return -errno; + + pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path); + zfree(&link->pin_path); + return 0; +} static int bpf_link__detach_perf_event(struct bpf_link *link) { - struct bpf_link_fd *l = (void *)link; int err; - err = ioctl(l->fd, PERF_EVENT_IOC_DISABLE, 0); + err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0); if (err) err = -errno; - close(l->fd); + close(link->fd); return err; } @@ -6987,7 +7072,7 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog, int pfd) { char errmsg[STRERR_BUFSIZE]; - struct bpf_link_fd *link; + struct bpf_link *link; int prog_fd, err; if (pfd < 0) { @@ -7005,7 +7090,7 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog, link = calloc(1, sizeof(*link)); if (!link) return ERR_PTR(-ENOMEM); - link->link.detach = &bpf_link__detach_perf_event; + link->detach = &bpf_link__detach_perf_event; link->fd = pfd; if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) { @@ -7024,7 +7109,7 @@ 
struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog, libbpf_strerror_r(err, errmsg, sizeof(errmsg))); return ERR_PTR(err); } - return (struct bpf_link *)link; + return link; } /* @@ -7312,18 +7397,11 @@ out: return link; } -static int bpf_link__detach_fd(struct bpf_link *link) -{ - struct bpf_link_fd *l = (void *)link; - - return close(l->fd); -} - struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog, const char *tp_name) { char errmsg[STRERR_BUFSIZE]; - struct bpf_link_fd *link; + struct bpf_link *link; int prog_fd, pfd; prog_fd = bpf_program__fd(prog); @@ -7336,7 +7414,7 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog, link = calloc(1, sizeof(*link)); if (!link) return ERR_PTR(-ENOMEM); - link->link.detach = &bpf_link__detach_fd; + link->detach = &bpf_link__detach_fd; pfd = bpf_raw_tracepoint_open(tp_name, prog_fd); if (pfd < 0) { @@ -7348,7 +7426,7 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog, return ERR_PTR(pfd); } link->fd = pfd; - return (struct bpf_link *)link; + return link; } static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec, @@ -7362,7 +7440,7 @@ static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec, struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog) { char errmsg[STRERR_BUFSIZE]; - struct bpf_link_fd *link; + struct bpf_link *link; int prog_fd, pfd; prog_fd = bpf_program__fd(prog); @@ -7375,7 +7453,7 @@ struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog) link = calloc(1, sizeof(*link)); if (!link) return ERR_PTR(-ENOMEM); - link->link.detach = &bpf_link__detach_fd; + link->detach = &bpf_link__detach_fd; pfd = bpf_raw_tracepoint_open(NULL, prog_fd); if (pfd < 0) { @@ -7409,10 +7487,9 @@ struct bpf_link *bpf_program__attach(struct bpf_program *prog) static int bpf_link__detach_struct_ops(struct bpf_link *link) { - struct bpf_link_fd *l = (void *)link; __u32 zero = 0; - if (bpf_map_delete_elem(l->fd, &zero)) + if (bpf_map_delete_elem(link->fd, &zero)) return -errno; return 0; @@ -7421,7 +7498,7 @@ static int bpf_link__detach_struct_ops(struct bpf_link *link) struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map) { struct bpf_struct_ops *st_ops; - struct bpf_link_fd *link; + struct bpf_link *link; __u32 i, zero = 0; int err; @@ -7453,10 +7530,10 @@ struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map) return ERR_PTR(err); } - link->link.detach = bpf_link__detach_struct_ops; + link->detach = bpf_link__detach_struct_ops; link->fd = map->fd; - return (struct bpf_link *)link; + return link; } enum bpf_perf_event_ret diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 02fc58a21a7f..d38d7a629417 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -219,6 +219,11 @@ LIBBPF_API void bpf_program__unload(struct bpf_program *prog); struct bpf_link; +LIBBPF_API struct bpf_link *bpf_link__open(const char *path); +LIBBPF_API int bpf_link__fd(const struct bpf_link *link); +LIBBPF_API const char *bpf_link__pin_path(const struct bpf_link *link); +LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path); +LIBBPF_API int bpf_link__unpin(struct bpf_link *link); LIBBPF_API void bpf_link__disconnect(struct bpf_link *link); LIBBPF_API int bpf_link__destroy(struct bpf_link *link); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 7b014c8cdece..5129283c0284 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -238,5 +238,10 @@ 
LIBBPF_0.0.7 { LIBBPF_0.0.8 { global: + bpf_link__fd; + bpf_link__open; + bpf_link__pin; + bpf_link__pin_path; + bpf_link__unpin; bpf_program__set_attach_target; } LIBBPF_0.0.7; -- cgit v1.2.3 From 320a36063e1441210106aa33997ad3770d4c86b4 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 3 Mar 2020 10:08:00 -0800 Subject: libbpf: Fix handling of optional field_name in btf_dump__emit_type_decl Internal functions, used by btf_dump__emit_type_decl(), assume field_name is never going to be NULL. Ensure it's always the case. Fixes: 9f81654eebe8 ("libbpf: Expose BTF-to-C type declaration emitting API") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20200303180800.3303471-1-andriin@fb.com --- tools/lib/bpf/btf_dump.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index bd09ed1710f1..dc451e4de5ad 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -1030,7 +1030,7 @@ int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id, if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts)) return -EINVAL; - fname = OPTS_GET(opts, field_name, NULL); + fname = OPTS_GET(opts, field_name, ""); lvl = OPTS_GET(opts, indent_level, 0); btf_dump_emit_type_decl(d, id, fname, lvl); return 0; -- cgit v1.2.3 From 7cb30aaab3f277aa88e20a008faf57e0fb1119ec Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 2 Mar 2020 16:32:32 -0800 Subject: libbpf: Assume unsigned values for BTF_KIND_ENUM Currently, the BTF_KIND_ENUM type doesn't record whether enum values should be interpreted as signed or unsigned. In Linux, most enums are unsigned, though, so interpreting them as unsigned matches the real world better. Change the btf_dump test case to test the maximum 32-bit value instead of a negative value.
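For example, with this change an enumerator whose 32-bit value is 0xffffffff is emitted by btf_dump as (illustrative type and enumerator names):

	enum e {
		BIG = 4294967295,
	};

where it previously would have been printed as BIG = -1.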
Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200303003233.3496043-3-andriin@fb.com --- tools/lib/bpf/btf_dump.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c index dc451e4de5ad..0c28ee82834b 100644 --- a/tools/lib/bpf/btf_dump.c +++ b/tools/lib/bpf/btf_dump.c @@ -916,13 +916,13 @@ static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id, /* enumerators share namespace with typedef idents */ dup_cnt = btf_dump_name_dups(d, d->ident_names, name); if (dup_cnt > 1) { - btf_dump_printf(d, "\n%s%s___%zu = %d,", + btf_dump_printf(d, "\n%s%s___%zu = %u,", pfx(lvl + 1), name, dup_cnt, - (__s32)v->val); + (__u32)v->val); } else { - btf_dump_printf(d, "\n%s%s = %d,", + btf_dump_printf(d, "\n%s%s = %u,", pfx(lvl + 1), name, - (__s32)v->val); + (__u32)v->val); } } btf_dump_printf(d, "\n%s}", pfx(lvl)); -- cgit v1.2.3 From aca228cd3387447d99d3ebaee3ebcc2b015a3e46 Mon Sep 17 00:00:00 2001 From: KP Singh Date: Wed, 4 Mar 2020 20:18:51 +0100 Subject: tools/libbpf: Add support for BPF_MODIFY_RETURN Signed-off-by: KP Singh Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Acked-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20200304191853.1529-6-kpsingh@chromium.org --- tools/lib/bpf/libbpf.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index f8c4042e5855..223be01dc466 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -6288,6 +6288,10 @@ static const struct bpf_sec_def section_defs[] = { .expected_attach_type = BPF_TRACE_FENTRY, .is_attach_btf = true, .attach_fn = attach_trace), + SEC_DEF("fmod_ret/", TRACING, + .expected_attach_type = BPF_MODIFY_RETURN, + .is_attach_btf = true, + .attach_fn = attach_trace), SEC_DEF("fexit/", TRACING, .expected_attach_type = BPF_TRACE_FEXIT, .is_attach_btf = true, .attach_fn = attach_trace), -- cgit v1.2.3 From b35f14f410416f06ec54d187dedc372405757290 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 12 Mar 2020 11:50:33 -0700 Subject: libbpf: Split BTF presence checks into libbpf- and kernel-specific parts The need for application BTF to be present differs between user-space libbpf and the kernel. Currently, BTF is mandatory in the kernel only when a BPF application is using STRUCT_OPS, while libbpf itself relies more heavily on the presence of BTF: - for BTF-defined maps; - for Kconfig externs; - for STRUCT_OPS as well. Thus, checks for the presence and validity of a bpf_object's BTF need to be performed separately, which is what this patch does.
Fixes: 5327644614a1 ("libbpf: Relax check whether BTF is mandatory") Reported-by: Michal Rostecki Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Cc: Quentin Monnet Link: https://lore.kernel.org/bpf/20200312185033.736911-1-andriin@fb.com --- tools/lib/bpf/libbpf.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 223be01dc466..1a787a2faf58 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2284,9 +2284,16 @@ static void bpf_object__sanitize_btf_ext(struct bpf_object *obj) } } -static bool bpf_object__is_btf_mandatory(const struct bpf_object *obj) +static bool libbpf_needs_btf(const struct bpf_object *obj) { - return obj->efile.st_ops_shndx >= 0 || obj->nr_extern > 0; + return obj->efile.btf_maps_shndx >= 0 || + obj->efile.st_ops_shndx >= 0 || + obj->nr_extern > 0; +} + +static bool kernel_needs_btf(const struct bpf_object *obj) +{ + return obj->efile.st_ops_shndx >= 0; } static int bpf_object__init_btf(struct bpf_object *obj, @@ -2322,7 +2329,7 @@ static int bpf_object__init_btf(struct bpf_object *obj, } } out: - if (err && bpf_object__is_btf_mandatory(obj)) { + if (err && libbpf_needs_btf(obj)) { pr_warn("BTF is required, but is missing or corrupted.\n"); return err; } @@ -2346,7 +2353,7 @@ static int bpf_object__finalize_btf(struct bpf_object *obj) btf_ext__free(obj->btf_ext); obj->btf_ext = NULL; - if (bpf_object__is_btf_mandatory(obj)) { + if (libbpf_needs_btf(obj)) { pr_warn("BTF is required, but is missing or corrupted.\n"); return -ENOENT; } @@ -2410,7 +2417,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) obj->btf_ext = NULL; } - if (bpf_object__is_btf_mandatory(obj)) + if (kernel_needs_btf(obj)) return err; } return 0; -- cgit v1.2.3 From d121e1d34b72c4975ff0340901d926c0aaf98174 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 13 Mar 2020 10:23:34 -0700 Subject: libbpf: Ignore incompatible types with matching name during CO-RE relocation When finding target type candidates, ignore forward declarations, functions, and other named types of incompatible kind. Not doing this can cause false errors. See [0] for one such case (due to struct pt_regs forward declaration). [0] https://github.com/iovisor/bcc/pull/2806#issuecomment-598543645 Fixes: ddc7c3042614 ("libbpf: implement BPF CO-RE offset relocation algorithm") Reported-by: Wenbo Zhang Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200313172336.1879637-3-andriin@fb.com --- tools/lib/bpf/libbpf.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 1a787a2faf58..085e41f9b68e 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -3873,6 +3873,10 @@ static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf, if (str_is_empty(targ_name)) continue; + t = skip_mods_and_typedefs(targ_btf, i, NULL); + if (!btf_is_composite(t) && !btf_is_array(t)) + continue; + targ_essent_len = bpf_core_essential_name_len(targ_name); if (targ_essent_len != local_essent_len) continue; -- cgit v1.2.3 From b8ebce86ffe655ac15a841bba2d645105ffe3d38 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 13 Mar 2020 10:23:35 -0700 Subject: libbpf: Provide CO-RE variants of PT_REGS macros Syscall raw tracepoints have struct pt_regs pointer as tracepoint's first argument. 
After that, reading any of pt_regs fields requires bpf_probe_read(), even for tp_btf programs. Due to that, PT_REGS_PARMx macros are not usable as is. This patch adds CO-RE variants of those macros that use BPF_CORE_READ() to read necessary fields. This provides relocatable architecture-agnostic pt_regs field accesses. Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20200313172336.1879637-4-andriin@fb.com --- tools/lib/bpf/bpf_tracing.h | 103 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 103 insertions(+) (limited to 'tools/lib') diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index 379d03b211ea..b0c9ae5c73b5 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -50,6 +50,7 @@ #if defined(bpf_target_x86) #if defined(__KERNEL__) || defined(__VMLINUX_H__) + #define PT_REGS_PARM1(x) ((x)->di) #define PT_REGS_PARM2(x) ((x)->si) #define PT_REGS_PARM3(x) ((x)->dx) @@ -60,7 +61,20 @@ #define PT_REGS_RC(x) ((x)->ax) #define PT_REGS_SP(x) ((x)->sp) #define PT_REGS_IP(x) ((x)->ip) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), di) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), si) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), dx) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), cx) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), sp) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), bp) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), ax) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), ip) + #else + #ifdef __i386__ /* i386 kernel is built with -mregparm=3 */ #define PT_REGS_PARM1(x) ((x)->eax) @@ -73,7 +87,20 @@ #define PT_REGS_RC(x) ((x)->eax) #define PT_REGS_SP(x) ((x)->esp) #define PT_REGS_IP(x) ((x)->eip) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), eax) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), edx) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), ecx) +#define PT_REGS_PARM4_CORE(x) 0 +#define PT_REGS_PARM5_CORE(x) 0 +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), esp) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), ebp) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), eax) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), esp) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), eip) + #else + #define PT_REGS_PARM1(x) ((x)->rdi) #define PT_REGS_PARM2(x) ((x)->rsi) #define PT_REGS_PARM3(x) ((x)->rdx) @@ -84,6 +111,18 @@ #define PT_REGS_RC(x) ((x)->rax) #define PT_REGS_SP(x) ((x)->rsp) #define PT_REGS_IP(x) ((x)->rip) + +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), rdi) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), rsi) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), rdx) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), rcx) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), r8) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), rsp) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), rbp) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), rax) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), rsp) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), rip) + #endif #endif @@ -104,6 +143,17 @@ struct pt_regs; #define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15]) #define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr) +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[3]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[4]) +#define 
PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[5]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[6]) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), grps[14]) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[11]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[2]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), gprs[15]) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_S390 *)(x), pdw.addr) + #elif defined(bpf_target_arm) #define PT_REGS_PARM1(x) ((x)->uregs[0]) @@ -117,6 +167,17 @@ struct pt_regs; #define PT_REGS_SP(x) ((x)->uregs[13]) #define PT_REGS_IP(x) ((x)->uregs[12]) +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), uregs[0]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), uregs[1]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), uregs[2]) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), uregs[3]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), uregs[4]) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), uregs[14]) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), uregs[11]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), uregs[0]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), uregs[13]) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), uregs[12]) + #elif defined(bpf_target_arm64) /* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */ @@ -134,6 +195,17 @@ struct pt_regs; #define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp) #define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc) +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[1]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[2]) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[3]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[4]) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[30]) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[29]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), regs[0]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), sp) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((PT_REGS_ARM64 *)(x), pc) + #elif defined(bpf_target_mips) #define PT_REGS_PARM1(x) ((x)->regs[4]) @@ -147,6 +219,17 @@ struct pt_regs; #define PT_REGS_SP(x) ((x)->regs[29]) #define PT_REGS_IP(x) ((x)->cp0_epc) +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), regs[4]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), regs[5]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), regs[6]) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), regs[7]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), regs[8]) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), regs[31]) +#define PT_REGS_FP_CORE(x) BPF_CORE_READ((x), regs[30]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), regs[1]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), regs[29]) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), cp0_epc) + #elif defined(bpf_target_powerpc) #define PT_REGS_PARM1(x) ((x)->gpr[3]) @@ -158,6 +241,15 @@ struct pt_regs; #define PT_REGS_SP(x) ((x)->sp) #define PT_REGS_IP(x) ((x)->nip) +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), gpr[3]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), gpr[4]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), gpr[5]) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), gpr[6]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), gpr[7]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), 
gpr[3]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), sp) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), nip) + #elif defined(bpf_target_sparc) #define PT_REGS_PARM1(x) ((x)->u_regs[UREG_I0]) @@ -169,11 +261,22 @@ struct pt_regs; #define PT_REGS_RC(x) ((x)->u_regs[UREG_I0]) #define PT_REGS_SP(x) ((x)->u_regs[UREG_FP]) +#define PT_REGS_PARM1_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0]) +#define PT_REGS_PARM2_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I1]) +#define PT_REGS_PARM3_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I2]) +#define PT_REGS_PARM4_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I3]) +#define PT_REGS_PARM5_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I4]) +#define PT_REGS_RET_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I7]) +#define PT_REGS_RC_CORE(x) BPF_CORE_READ((x), u_regs[UREG_I0]) +#define PT_REGS_SP_CORE(x) BPF_CORE_READ((x), u_regs[UREG_FP]) + /* Should this also be a bpf_target check for the sparc case? */ #if defined(__arch64__) #define PT_REGS_IP(x) ((x)->tpc) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), tpc) #else #define PT_REGS_IP(x) ((x)->pc) +#define PT_REGS_IP_CORE(x) BPF_CORE_READ((x), pc) #endif #endif -- cgit v1.2.3 From 483d7a30f538e2f8addd32aa9a3d2e94ae55fa65 Mon Sep 17 00:00:00 2001 From: Wenbo Zhang Date: Sun, 15 Mar 2020 04:32:52 -0400 Subject: bpf, libbpf: Fix ___bpf_kretprobe_args1(x) macro definition Use PT_REGS_RC instead of PT_REGS_RET to get ret correctly. Fixes: df8ff35311c8 ("libbpf: Merge selftests' bpf_trace_helpers.h into libbpf's bpf_tracing.h") Signed-off-by: Wenbo Zhang Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200315083252.22274-1-ethercflow@gmail.com --- tools/lib/bpf/bpf_tracing.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h index b0c9ae5c73b5..f3f3c3fb98cb 100644 --- a/tools/lib/bpf/bpf_tracing.h +++ b/tools/lib/bpf/bpf_tracing.h @@ -390,7 +390,7 @@ ____##name(struct pt_regs *ctx, ##args) #define ___bpf_kretprobe_args0() ctx #define ___bpf_kretprobe_args1(x) \ - ___bpf_kretprobe_args0(), (void *)PT_REGS_RET(ctx) + ___bpf_kretprobe_args0(), (void *)PT_REGS_RC(ctx) #define ___bpf_kretprobe_args(args...) \ ___bpf_apply(___bpf_kretprobe_args, ___bpf_narg(args))(args) -- cgit v1.2.3 From 9fc9aad99e5d654bd3ea48861bda57f03b118ca9 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Wed, 25 Mar 2020 12:36:55 +0100 Subject: libbpf: Remove unused parameter `def` to get_map_field_int Has been unused since commit ef99b02b23ef ("libbpf: capture value in BTF type info for BTF-defined map defs"). Signed-off-by: Tobias Klauser Signed-off-by: Daniel Borkmann Reviewed-by: Quentin Monnet Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200325113655.19341-1-tklauser@distanz.ch --- tools/lib/bpf/libbpf.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 085e41f9b68e..e9479ad9dd51 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1845,7 +1845,6 @@ resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id) * type definition, while using only sizeof(void *) space in ELF data section. 
*/ static bool get_map_field_int(const char *map_name, const struct btf *btf, - const struct btf_type *def, const struct btf_member *m, __u32 *res) { const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); @@ -1972,19 +1971,19 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj, return -EINVAL; } if (strcmp(name, "type") == 0) { - if (!get_map_field_int(map_name, obj->btf, def, m, + if (!get_map_field_int(map_name, obj->btf, m, &map->def.type)) return -EINVAL; pr_debug("map '%s': found type = %u.\n", map_name, map->def.type); } else if (strcmp(name, "max_entries") == 0) { - if (!get_map_field_int(map_name, obj->btf, def, m, + if (!get_map_field_int(map_name, obj->btf, m, &map->def.max_entries)) return -EINVAL; pr_debug("map '%s': found max_entries = %u.\n", map_name, map->def.max_entries); } else if (strcmp(name, "map_flags") == 0) { - if (!get_map_field_int(map_name, obj->btf, def, m, + if (!get_map_field_int(map_name, obj->btf, m, &map->def.map_flags)) return -EINVAL; pr_debug("map '%s': found map_flags = %u.\n", @@ -1992,8 +1991,7 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj, } else if (strcmp(name, "key_size") == 0) { __u32 sz; - if (!get_map_field_int(map_name, obj->btf, def, m, - &sz)) + if (!get_map_field_int(map_name, obj->btf, m, &sz)) return -EINVAL; pr_debug("map '%s': found key_size = %u.\n", map_name, sz); @@ -2035,8 +2033,7 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj, } else if (strcmp(name, "value_size") == 0) { __u32 sz; - if (!get_map_field_int(map_name, obj->btf, def, m, - &sz)) + if (!get_map_field_int(map_name, obj->btf, m, &sz)) return -EINVAL; pr_debug("map '%s': found value_size = %u.\n", map_name, sz); @@ -2079,8 +2076,7 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj, __u32 val; int err; - if (!get_map_field_int(map_name, obj->btf, def, m, - &val)) + if (!get_map_field_int(map_name, obj->btf, m, &val)) return -EINVAL; pr_debug("map '%s': found pinning = %u.\n", map_name, val); -- cgit v1.2.3 From 8395f320b407509819cc112f61a1de05780c8cba Mon Sep 17 00:00:00 2001 From: Stanislav Fomichev Date: Wed, 25 Mar 2020 12:55:21 -0700 Subject: libbpf: Don't allocate 16M for log buffer by default For each prog/btf load we allocate and free 16 megs of verifier buffer. On production systems this doesn't really make sense, because the programs/btf have gone through extensive testing and are (mostly) guaranteed to load successfully. Let's assume the successful case by default and skip buffer allocation on the first try. If there is an error, start with BPF_LOG_BUF_SIZE and double it on each ENOSPC iteration.
v3: * Return -ENOMEM when can't allocate log buffer (Andrii Nakryiko) v2: * Don't allocate the buffer at all on the first try (Andrii Nakryiko) Signed-off-by: Stanislav Fomichev Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200325195521.112210-1-sdf@google.com --- tools/lib/bpf/btf.c | 20 +++++++++++++++----- tools/lib/bpf/libbpf.c | 22 ++++++++++++++-------- 2 files changed, 29 insertions(+), 13 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 3d1c25fc97ae..bfef3d606b54 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -657,22 +657,32 @@ int btf__finalize_data(struct bpf_object *obj, struct btf *btf) int btf__load(struct btf *btf) { - __u32 log_buf_size = BPF_LOG_BUF_SIZE; + __u32 log_buf_size = 0; char *log_buf = NULL; int err = 0; if (btf->fd >= 0) return -EEXIST; - log_buf = malloc(log_buf_size); - if (!log_buf) - return -ENOMEM; +retry_load: + if (log_buf_size) { + log_buf = malloc(log_buf_size); + if (!log_buf) + return -ENOMEM; - *log_buf = 0; + *log_buf = 0; + } btf->fd = bpf_load_btf(btf->data, btf->data_size, log_buf, log_buf_size, false); if (btf->fd < 0) { + if (!log_buf || errno == ENOSPC) { + log_buf_size = max((__u32)BPF_LOG_BUF_SIZE, + log_buf_size << 1); + free(log_buf); + goto retry_load; + } + err = -errno; pr_warn("Error loading BTF: %s(%d)\n", strerror(errno), errno); if (*log_buf) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index e9479ad9dd51..62903302935e 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -4851,8 +4851,8 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, { struct bpf_load_program_attr load_attr; char *cp, errmsg[STRERR_BUFSIZE]; - int log_buf_size = BPF_LOG_BUF_SIZE; - char *log_buf; + size_t log_buf_size = 0; + char *log_buf = NULL; int btf_fd, ret; if (!insns || !insns_cnt) @@ -4892,22 +4892,28 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, load_attr.prog_flags = prog->prog_flags; retry_load: - log_buf = malloc(log_buf_size); - if (!log_buf) - pr_warn("Alloc log buffer for bpf loader error, continue without log\n"); + if (log_buf_size) { + log_buf = malloc(log_buf_size); + if (!log_buf) + return -ENOMEM; + + *log_buf = 0; + } ret = bpf_load_program_xattr(&load_attr, log_buf, log_buf_size); if (ret >= 0) { - if (load_attr.log_level) + if (log_buf && load_attr.log_level) pr_debug("verifier log:\n%s", log_buf); *pfd = ret; ret = 0; goto out; } - if (errno == ENOSPC) { - log_buf_size <<= 1; + if (!log_buf || errno == ENOSPC) { + log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, + log_buf_size << 1); + free(log_buf); goto retry_load; } -- cgit v1.2.3 From 291cfe365bd3d5002a9c49e9fca8f30136eebc28 Mon Sep 17 00:00:00 2001 From: Fletcher Dunn Date: Fri, 27 Mar 2020 03:24:07 +0000 Subject: libbpf, xsk: Init all ring members in xsk_umem__create and xsk_socket__create Fix a sharp edge in xsk_umem__create and xsk_socket__create. Almost all of the members of the ring buffer structs are initialized, but the "cached_xxx" variables are not all initialized. The caller is required to zero them. This is needlessly dangerous. The results if you don't do it can be very bad. For example, they can cause xsk_prod_nb_free and xsk_cons_nb_avail to return values greater than the size of the queue. xsk_ring_cons__peek can return an index that does not refer to an item that has been queued. 
I have confirmed that without this change, my program misbehaves unless I memset the ring buffers to zero before calling the function. Afterwards, my program works without (or with) the memset. Signed-off-by: Fletcher Dunn Signed-off-by: Daniel Borkmann Acked-by: Magnus Karlsson Link: https://lore.kernel.org/bpf/85f12913cde94b19bfcb598344701c38@valvesoftware.com --- tools/lib/bpf/xsk.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index 9807903f121e..f7f4efb70a4c 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -280,7 +280,11 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, fill->consumer = map + off.fr.consumer; fill->flags = map + off.fr.flags; fill->ring = map + off.fr.desc; - fill->cached_cons = umem->config.fill_size; + fill->cached_prod = *fill->producer; + /* cached_cons is "size" bigger than the real consumer pointer + * See xsk_prod_nb_free + */ + fill->cached_cons = *fill->consumer + umem->config.fill_size; map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd, @@ -297,6 +301,8 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, comp->consumer = map + off.cr.consumer; comp->flags = map + off.cr.flags; comp->ring = map + off.cr.desc; + comp->cached_prod = *comp->producer; + comp->cached_cons = *comp->consumer; *umem_ptr = umem; return 0; @@ -672,6 +678,8 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, rx->consumer = rx_map + off.rx.consumer; rx->flags = rx_map + off.rx.flags; rx->ring = rx_map + off.rx.desc; + rx->cached_prod = *rx->producer; + rx->cached_cons = *rx->consumer; } xsk->rx = rx; @@ -691,7 +699,11 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, tx->consumer = tx_map + off.tx.consumer; tx->flags = tx_map + off.tx.flags; tx->ring = tx_map + off.tx.desc; - tx->cached_cons = xsk->config.tx_size; + tx->cached_prod = *tx->producer; + /* cached_cons is r->size bigger than the real consumer pointer + * See xsk_prod_nb_free + */ + tx->cached_cons = *tx->consumer + xsk->config.tx_size; } xsk->tx = tx; -- cgit v1.2.3 From bd5ca3ef93cd8fb7e913f15eeb78e67a0d802274 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Wed, 25 Mar 2020 18:23:28 +0100 Subject: libbpf: Add function to set link XDP fd while specifying old program MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds a new function to set the XDP fd while specifying the FD of the program to replace, using the newly added IFLA_XDP_EXPECTED_FD netlink parameter. The new function uses the opts struct mechanism to be extendable in the future. 
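As a rough usage sketch (hypothetical ifindex and FD variables, error handling elided), atomically replacing one XDP program with another could look like:

	DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts,
			    .old_fd = old_prog_fd);
	int err;

	/* the kernel rejects the update if the currently attached
	 * program does not match old_prog_fd
	 */
	err = bpf_set_link_xdp_fd_opts(ifindex, new_prog_fd, 0, &opts);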
Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/158515700857.92963.7052131201257841700.stgit@toke.dk --- tools/lib/bpf/libbpf.h | 8 ++++++++ tools/lib/bpf/libbpf.map | 1 + tools/lib/bpf/netlink.c | 34 +++++++++++++++++++++++++++++++++- 3 files changed, 42 insertions(+), 1 deletion(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index d38d7a629417..bf7a35a9556d 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -444,7 +444,15 @@ struct xdp_link_info { __u8 attach_mode; }; +struct bpf_xdp_set_link_opts { + size_t sz; + __u32 old_fd; +}; +#define bpf_xdp_set_link_opts__last_field old_fd + LIBBPF_API int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags); +LIBBPF_API int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags, + const struct bpf_xdp_set_link_opts *opts); LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags); LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info, size_t info_size, __u32 flags); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 5129283c0284..dcc87db3ca8a 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -244,4 +244,5 @@ LIBBPF_0.0.8 { bpf_link__pin_path; bpf_link__unpin; bpf_program__set_attach_target; + bpf_set_link_xdp_fd_opts; } LIBBPF_0.0.7; diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c index 431bd25c6cdb..18b5319025e1 100644 --- a/tools/lib/bpf/netlink.c +++ b/tools/lib/bpf/netlink.c @@ -132,7 +132,8 @@ done: return ret; } -int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags) +static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd, + __u32 flags) { int sock, seq = 0, ret; struct nlattr *nla, *nla_xdp; @@ -178,6 +179,14 @@ int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags) nla->nla_len += nla_xdp->nla_len; } + if (flags & XDP_FLAGS_REPLACE) { + nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len); + nla_xdp->nla_type = IFLA_XDP_EXPECTED_FD; + nla_xdp->nla_len = NLA_HDRLEN + sizeof(old_fd); + memcpy((char *)nla_xdp + NLA_HDRLEN, &old_fd, sizeof(old_fd)); + nla->nla_len += nla_xdp->nla_len; + } + req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len); if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) { @@ -191,6 +200,29 @@ cleanup: return ret; } +int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags, + const struct bpf_xdp_set_link_opts *opts) +{ + int old_fd = -1; + + if (!OPTS_VALID(opts, bpf_xdp_set_link_opts)) + return -EINVAL; + + if (OPTS_HAS(opts, old_fd)) { + old_fd = OPTS_GET(opts, old_fd, -1); + flags |= XDP_FLAGS_REPLACE; + } + + return __bpf_set_link_xdp_fd_replace(ifindex, fd, + old_fd, + flags); +} + +int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags) +{ + return __bpf_set_link_xdp_fd_replace(ifindex, fd, 0, flags); +} + static int __dump_link_nlmsg(struct nlmsghdr *nlh, libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie) { -- cgit v1.2.3 From e2842be53d4f31962a9992eab39391cdf637fa2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Sun, 29 Mar 2020 15:22:52 +0200 Subject: libbpf: Add setter for initial value for internal maps MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For internal maps (most notably the maps backing global variables), libbpf uses an internal mmaped area to store the data after opening the object. This data is subsequently copied into the kernel map when the object is loaded. 
This adds a function to set a new value for that data, which can be used to change the data before it is loaded into the kernel. This is especially relevant for RODATA maps, since those are frozen on load. Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200329132253.232541-1-toke@redhat.com --- tools/lib/bpf/libbpf.c | 11 +++++++++++ tools/lib/bpf/libbpf.h | 2 ++ tools/lib/bpf/libbpf.map | 1 + 3 files changed, 14 insertions(+) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 62903302935e..7deab98720ee 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -6758,6 +6758,17 @@ void *bpf_map__priv(const struct bpf_map *map) return map ? map->priv : ERR_PTR(-EINVAL); } +int bpf_map__set_initial_value(struct bpf_map *map, + const void *data, size_t size) +{ + if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG || + size != map->def.value_size || map->fd >= 0) + return -EINVAL; + + memcpy(map->mmaped, data, size); + return 0; +} + bool bpf_map__is_offload_neutral(const struct bpf_map *map) { return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; } diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index bf7a35a9556d..958ae71c116e 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -407,6 +407,8 @@ typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *); LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv, bpf_map_clear_priv_t clear_priv); LIBBPF_API void *bpf_map__priv(const struct bpf_map *map); +LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map, + const void *data, size_t size); LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd); LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries); LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index dcc87db3ca8a..159826b36b38 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -243,6 +243,7 @@ LIBBPF_0.0.8 { bpf_link__pin; bpf_link__pin_path; bpf_link__unpin; + bpf_map__set_initial_value; bpf_program__set_attach_target; bpf_set_link_xdp_fd_opts; } LIBBPF_0.0.7; -- cgit v1.2.3 From fc611f47f2188ade2b48ff6902d5cce8baac0c58 Mon Sep 17 00:00:00 2001 From: KP Singh Date: Sun, 29 Mar 2020 01:43:49 +0100 Subject: bpf: Introduce BPF_PROG_TYPE_LSM Introduce types and configs for bpf programs that can be attached to LSM hooks. The programs can be enabled by the config option CONFIG_BPF_LSM.
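As a hedged sketch of what such a program might look like with the libbpf support added in the following patch (hypothetical hook and policy; BPF_PROG comes from the bpf_tracing.h header shown earlier, and the example assumes vmlinux.h/BTF for the kernel types):

	SEC("lsm/file_mprotect")
	int BPF_PROG(mprotect_audit, struct vm_area_struct *vma,
		     unsigned long reqprot, unsigned long prot, int ret)
	{
		/* ret is the return value from the previous BPF program
		 * in the chain (or 0 if none); returning a negative value
		 * denies the operation
		 */
		return ret;
	}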
Signed-off-by: KP Singh Signed-off-by: Daniel Borkmann Reviewed-by: Brendan Jackman Reviewed-by: Florent Revest Reviewed-by: Thomas Garnier Acked-by: Yonghong Song Acked-by: Andrii Nakryiko Acked-by: James Morris Link: https://lore.kernel.org/bpf/20200329004356.27286-2-kpsingh@chromium.org --- tools/lib/bpf/libbpf_probes.c | 1 + 1 file changed, 1 insertion(+) (limited to 'tools/lib') diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index b782ebef6ac9..2c92059c0c90 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -108,6 +108,7 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, case BPF_PROG_TYPE_TRACING: case BPF_PROG_TYPE_STRUCT_OPS: case BPF_PROG_TYPE_EXT: + case BPF_PROG_TYPE_LSM: default: break; } -- cgit v1.2.3 From 1e092a0318292637cde832868016f37e942eed24 Mon Sep 17 00:00:00 2001 From: KP Singh Date: Sun, 29 Mar 2020 01:43:54 +0100 Subject: tools/libbpf: Add support for BPF_PROG_TYPE_LSM Since BPF_PROG_TYPE_LSM uses the same attaching mechanism as BPF_PROG_TYPE_TRACING, the common logic is refactored into a static function bpf_program__attach_btf_id. A new API call bpf_program__attach_lsm is still added to avoid userspace conflicts if this ever changes in the future. Signed-off-by: KP Singh Signed-off-by: Daniel Borkmann Reviewed-by: Brendan Jackman Reviewed-by: Florent Revest Reviewed-by: James Morris Acked-by: Yonghong Song Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20200329004356.27286-7-kpsingh@chromium.org --- tools/lib/bpf/bpf.c | 3 ++- tools/lib/bpf/libbpf.c | 39 +++++++++++++++++++++++++++++++++++---- tools/lib/bpf/libbpf.h | 4 ++++ tools/lib/bpf/libbpf.map | 3 +++ 4 files changed, 44 insertions(+), 5 deletions(-) (limited to 'tools/lib') diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index c6dafe563176..73220176728d 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -235,7 +235,8 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, memset(&attr, 0, sizeof(attr)); attr.prog_type = load_attr->prog_type; attr.expected_attach_type = load_attr->expected_attach_type; - if (attr.prog_type == BPF_PROG_TYPE_STRUCT_OPS) { + if (attr.prog_type == BPF_PROG_TYPE_STRUCT_OPS || + attr.prog_type == BPF_PROG_TYPE_LSM) { attr.attach_btf_id = load_attr->attach_btf_id; } else if (attr.prog_type == BPF_PROG_TYPE_TRACING || attr.prog_type == BPF_PROG_TYPE_EXT) { diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 7deab98720ee..0638e717f502 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -2358,7 +2358,8 @@ static int bpf_object__finalize_btf(struct bpf_object *obj) static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog) { - if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) + if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || + prog->type == BPF_PROG_TYPE_LSM) return true; /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs @@ -4866,7 +4867,8 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, load_attr.insns = insns; load_attr.insns_cnt = insns_cnt; load_attr.license = license; - if (prog->type == BPF_PROG_TYPE_STRUCT_OPS) { + if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || + prog->type == BPF_PROG_TYPE_LSM) { load_attr.attach_btf_id = prog->attach_btf_id; } else if (prog->type == BPF_PROG_TYPE_TRACING || prog->type == BPF_PROG_TYPE_EXT) { @@ -4957,6 +4959,7 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver) int err = 0, fd, i, btf_id; if 
((prog->type == BPF_PROG_TYPE_TRACING || + prog->type == BPF_PROG_TYPE_LSM || prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { btf_id = libbpf_find_attach_btf_id(prog); if (btf_id <= 0) @@ -6196,6 +6199,7 @@ bool bpf_program__is_##NAME(const struct bpf_program *prog) \ } \ BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER); +BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM); BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE); BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS); BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT); @@ -6262,6 +6266,8 @@ static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec, struct bpf_program *prog); static struct bpf_link *attach_trace(const struct bpf_sec_def *sec, struct bpf_program *prog); +static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec, + struct bpf_program *prog); struct bpf_sec_def { const char *sec; @@ -6312,6 +6318,10 @@ static const struct bpf_sec_def section_defs[] = { SEC_DEF("freplace/", EXT, .is_attach_btf = true, .attach_fn = attach_trace), + SEC_DEF("lsm/", LSM, + .is_attach_btf = true, + .expected_attach_type = BPF_LSM_MAC, + .attach_fn = attach_lsm), BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP), BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), @@ -6574,6 +6584,7 @@ invalid_prog: } #define BTF_TRACE_PREFIX "btf_trace_" +#define BTF_LSM_PREFIX "bpf_lsm_" #define BTF_MAX_NAME_SIZE 128 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, @@ -6601,6 +6612,9 @@ static inline int __find_vmlinux_btf_id(struct btf *btf, const char *name, if (attach_type == BPF_TRACE_RAW_TP) err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name, BTF_KIND_TYPEDEF); + else if (attach_typ