-rw-r--r--  fs/eventpoll.c | 3
-rw-r--r--  fs/io_uring.c | 16
-rw-r--r--  include/linux/bpf.h | 1
-rw-r--r--  include/linux/net.h | 2
-rw-r--r--  include/trace/events/xdp.h | 12
-rw-r--r--  include/uapi/linux/bpf.h | 9
-rw-r--r--  kernel/bpf/hashtab.c | 6
-rw-r--r--  kernel/bpf/syscall.c | 5
-rw-r--r--  kernel/bpf/verifier.c | 3
-rw-r--r--  kernel/trace/bpf_trace.c | 2
-rw-r--r--  net/core/bpf_sk_storage.c | 1
-rw-r--r--  net/core/filter.c | 18
-rw-r--r--  net/core/netclassid_cgroup.c | 3
-rw-r--r--  net/core/netprio_cgroup.c | 3
-rw-r--r--  net/core/sock.c | 8
-rw-r--r--  net/socket.c | 27
-rw-r--r--  net/xdp/xsk.c | 4
-rw-r--r--  samples/bpf/xdpsock_user.c | 2
-rwxr-xr-x  scripts/bpf_helpers_doc.py | 4
-rw-r--r--  tools/include/uapi/linux/bpf.h | 9
-rw-r--r--  tools/lib/bpf/libbpf.c | 64
-rw-r--r--  tools/lib/bpf/libbpf.h | 1
-rw-r--r--  tools/lib/bpf/libbpf.map | 1
-rw-r--r--  tools/lib/bpf/ringbuf.c | 6
-rw-r--r--  tools/testing/selftests/bpf/.gitignore | 1
-rw-r--r--  tools/testing/selftests/bpf/Makefile | 52
-rwxr-xr-x  tools/testing/selftests/bpf/ima_setup.sh | 24
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_iter.c | 118
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/module_attach.c | 11
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_iter_bpf_sk_storage_helpers.c | 65
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_iter_task.c | 3
-rw-r--r--  tools/testing/selftests/bpf/progs/test_core_reloc_module.c | 8
-rw-r--r--  tools/testing/selftests/bpf/progs/test_module_attach.c | 11
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c | 10
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 31
-rwxr-xr-x  tools/testing/selftests/bpf/test_xsk.sh | 259
-rw-r--r--  tools/testing/selftests/bpf/verifier/unpriv.c | 5
-rw-r--r--  tools/testing/selftests/bpf/xdpxceiver.c | 1074
-rw-r--r--  tools/testing/selftests/bpf/xdpxceiver.h | 160
-rwxr-xr-x  tools/testing/selftests/bpf/xsk_prereqs.sh | 135
40 files changed, 2063 insertions, 114 deletions
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 73c346e503d7..19499b7bb82c 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -416,12 +416,11 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
unsigned int napi_id;
struct socket *sock;
struct sock *sk;
- int err;
if (!net_busy_loop_on())
return;
- sock = sock_from_file(epi->ffd.file, &err);
+ sock = sock_from_file(epi->ffd.file);
if (!sock)
return;
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a2a7c65a77aa..a97488c7b2cf 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -4356,9 +4356,9 @@ static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
unsigned flags;
int ret;
- sock = sock_from_file(req->file, &ret);
+ sock = sock_from_file(req->file);
if (unlikely(!sock))
- return ret;
+ return -ENOTSOCK;
if (req->async_data) {
kmsg = req->async_data;
@@ -4405,9 +4405,9 @@ static int io_send(struct io_kiocb *req, bool force_nonblock,
unsigned flags;
int ret;
- sock = sock_from_file(req->file, &ret);
+ sock = sock_from_file(req->file);
if (unlikely(!sock))
- return ret;
+ return -ENOTSOCK;
ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
if (unlikely(ret))
@@ -4585,9 +4585,9 @@ static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
unsigned flags;
int ret, cflags = 0;
- sock = sock_from_file(req->file, &ret);
+ sock = sock_from_file(req->file);
if (unlikely(!sock))
- return ret;
+ return -ENOTSOCK;
if (req->async_data) {
kmsg = req->async_data;
@@ -4648,9 +4648,9 @@ static int io_recv(struct io_kiocb *req, bool force_nonblock,
unsigned flags;
int ret, cflags = 0;
- sock = sock_from_file(req->file, &ret);
+ sock = sock_from_file(req->file);
if (unlikely(!sock))
- return ret;
+ return -ENOTSOCK;
if (req->flags & REQ_F_BUFFER_SELECT) {
kbuf = io_recv_buffer_select(req, !force_nonblock);
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index d05e75ed8c1b..07cb5d15e743 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1859,6 +1859,7 @@ extern const struct bpf_func_proto bpf_snprintf_btf_proto;
extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
+extern const struct bpf_func_proto bpf_sock_from_file_proto;
const struct bpf_func_proto *bpf_tracing_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
diff --git a/include/linux/net.h b/include/linux/net.h
index 0dcd51feef02..9e2324efc26a 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -240,7 +240,7 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg);
int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags);
struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname);
struct socket *sockfd_lookup(int fd, int *err);
-struct socket *sock_from_file(struct file *file, int *err);
+struct socket *sock_from_file(struct file *file);
#define sockfd_put(sock) fput(sock->file)
int net_ratelimit(void);
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index cd24e8a59529..76a97176ab81 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -145,17 +145,17 @@ DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
TP_ARGS(dev, xdp, tgt, err, map, index)
);
-#define _trace_xdp_redirect(dev, xdp, to) \
- trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to);
+#define _trace_xdp_redirect(dev, xdp, to) \
+ trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to)
-#define _trace_xdp_redirect_err(dev, xdp, to, err) \
- trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to);
+#define _trace_xdp_redirect_err(dev, xdp, to, err) \
+ trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to)
#define _trace_xdp_redirect_map(dev, xdp, to, map, index) \
- trace_xdp_redirect(dev, xdp, to, 0, map, index);
+ trace_xdp_redirect(dev, xdp, to, 0, map, index)
#define _trace_xdp_redirect_map_err(dev, xdp, to, map, index, err) \
- trace_xdp_redirect_err(dev, xdp, to, err, map, index);
+ trace_xdp_redirect_err(dev, xdp, to, err, map, index)
/* not used anymore, but kept around so as not to break old programs */
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 7e9931b6c735..77d7c1bb2923 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -3822,6 +3822,14 @@ union bpf_attr {
* The **hash_algo** is returned on success,
* **-EOPNOTSUP** if IMA is disabled or **-EINVAL** if
* invalid arguments are passed.
+ *
+ * struct socket *bpf_sock_from_file(struct file *file)
+ * Description
+ * If the given file represents a socket, returns the associated
+ * socket.
+ * Return
+ * A pointer to a struct socket on success or NULL if the file is
+ * not a socket.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -3986,6 +3994,7 @@ union bpf_attr {
FN(bprm_opts_set), \
FN(ktime_get_coarse_ns), \
FN(ima_inode_hash), \
+ FN(sock_from_file), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
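For context on the helper documented above, here is a minimal sketch (not part of this patch; the program name and the task_file iterator usage are illustrative assumptions) of a tracing program calling bpf_sock_from_file():

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

/* Illustrative sketch: walk every open file of every task and pick out
 * the ones that are sockets via the new bpf_sock_from_file() helper.
 */
SEC("iter/task_file")
int dump_socket_files(struct bpf_iter__task_file *ctx)
{
	struct file *file = ctx->file;
	struct socket *sock;

	if (!file)
		return 0;

	sock = bpf_sock_from_file(file);
	if (!sock)
		/* Not a socket: the helper returned NULL, as documented above */
		return 0;

	/* sock->sk can now be handed to socket-aware tracing helpers,
	 * e.g. bpf_sk_storage_get(), from this BPF_TRACE_ITER context.
	 */
	return 0;
}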
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index fe7a0733a63a..7e848200cd26 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -224,7 +224,7 @@ static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
- return (struct htab_elem *) (htab->elems + i * htab->elem_size);
+ return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size);
}
static void htab_free_elems(struct bpf_htab *htab)
@@ -280,7 +280,7 @@ static int prealloc_init(struct bpf_htab *htab)
if (!htab_is_percpu(htab) && !htab_is_lru(htab))
num_entries += num_possible_cpus();
- htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
+ htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
htab->map.numa_node);
if (!htab->elems)
return -ENOMEM;
@@ -1412,7 +1412,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
void __user *uvalues = u64_to_user_ptr(attr->batch.values);
void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
- void *ubatch = u64_to_user_ptr(attr->batch.in_batch);
+ void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
u32 batch, max_count, size, bucket_size;
struct htab_elem *node_to_free = NULL;
u64 elem_map_flags, map_flags;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0cd3cc2af9c1..287be337d5f6 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2121,8 +2121,11 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
if (IS_ERR(attach_btf))
return -EINVAL;
if (!btf_is_kernel(attach_btf)) {
+ /* attaching through specifying bpf_prog's BTF
+ * objects directly might be supported eventually
+ */
btf_put(attach_btf);
- return -EINVAL;
+ return -ENOTSUPP;
}
}
} else if (attr->attach_btf_id) {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d43b30c92c7d..17270b8404f1 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3767,7 +3767,8 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
goto mark;
if (state->stack[spi].slot_type[0] == STACK_SPILL &&
- state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
+ (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
+ env->allow_ptr_leaks)) {
__mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
for (j = 0; j < BPF_REG_SIZE; j++)
state->stack[spi].slot_type[j] = STACK_MISC;
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index fb2fbcbf6de6..4be771df5549 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1758,6 +1758,8 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sk_storage_get_tracing_proto;
case BPF_FUNC_sk_storage_delete:
return &bpf_sk_storage_delete_tracing_proto;
+ case BPF_FUNC_sock_from_file:
+ return &bpf_sock_from_file_proto;
#endif
case BPF_FUNC_seq_printf:
return prog->expected_attach_type == BPF_TRACE_ITER ?
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index a32037daa933..4edd033e899c 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -394,6 +394,7 @@ static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
* use the bpf_sk_storage_(get|delete) helper.
*/
switch (prog->expected_attach_type) {
+ case BPF_TRACE_ITER:
case BPF_TRACE_RAW_TP:
/* bpf_sk_storage has no trace point */
return true;
diff --git a/net/core/filter.c b/net/core/filter.c
index 77001a35768f..255aeee72402 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -10413,6 +10413,24 @@ const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = {
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6],
};
+BPF_CALL_1(bpf_sock_from_file, struct file *, file)
+{
+ return (unsigned long)sock_from_file(file);
+}
+
+BTF_ID_LIST(bpf_sock_from_file_btf_ids)
+BTF_ID(struct, socket)
+BTF_ID(struct, file)
+
+const struct bpf_func_proto bpf_sock_from_file_proto = {
+ .func = bpf_sock_from_file,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
+ .ret_btf_id = &bpf_sock_from_file_btf_ids[0],
+ .arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &bpf_sock_from_file_btf_ids[1],
+};
+
static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id)
{
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 41b24cd31562..b49c57d35a88 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -68,9 +68,8 @@ struct update_classid_context {
static int update_classid_sock(const void *v, struct file *file, unsigned n)
{
- int err;
struct update_classid_context *ctx = (void *)v;
- struct socket *sock = sock_from_file(file, &err);
+ struct socket *sock = sock_from_file(file);
if (sock) {
spin_lock(&cgroup_sk_update_lock);
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index 9bd4cab7d510..99a431c56f23 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -220,8 +220,7 @@ static ssize_t write_priomap(struct kernfs_open_file *of,
static int update_netprio(const void *v, struct file *file, unsigned n)
{
- int err;
- struct socket *sock = sock_from_file(file, &err);
+ struct socket *sock = sock_from_file(file);
if (sock) {
spin_lock(&cgroup_sk_update_lock);
sock_cgroup_set_prioidx(&sock->sk->sk_cgrp_data,
diff --git a/net/core/sock.c b/net/core/sock.c
index 4fd7e785f177..bbcd4b97eddd 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2827,14 +2827,8 @@ EXPORT_SYMBOL(sock_no_mmap);
void __receive_sock(struct file *file)
{
struct socket *sock;
- int error;
- /*
- * The resulting value of "error" is ignored here since we only
- * need to take action when the file is a socket and testing
- * "sock" for NULL is sufficient.
- */
- sock = sock_from_file(file, &error);
+ sock = sock_from_file(file);
if (sock) {
sock_update_netprioidx(&sock->sk->sk_cgrp_data);
sock_update_classid(&sock->sk->sk_cgrp_data);
diff --git a/net/socket.c b/net/socket.c
index bfef11ba35b8..9a240b45bdf3 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -445,17 +445,15 @@ static int sock_map_fd(struct socket *sock, int flags)
/**
* sock_from_file - Return the &socket bounded to @file.
* @file: file
- * @err: pointer to an error code return
*
- * On failure returns %NULL and assigns -ENOTSOCK to @err.
+ * On failure returns %NULL.
*/
-struct socket *sock_from_file(struct file *file, int *err)
+struct socket *sock_from_file(struct file *file)
{
if (file->f_op == &socket_file_ops)
return file->private_data; /* set in sock_map_fd */
- *err = -ENOTSOCK;
return NULL;
}
EXPORT_SYMBOL(sock_from_file);
@@ -484,9 +482,11 @@ struct socket *sockfd_lookup(int fd, int *err)
return NULL;
}
- sock = sock_from_file(file, err);
- if (!sock)
+ sock = sock_from_file(file);
+ if (!sock) {
+ *err = -ENOTSOCK;
fput(file);
+ }
return sock;
}
EXPORT_SYMBOL(sockfd_lookup);
@@ -498,11 +498,12 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
*err = -EBADF;
if (f.file) {
- sock = sock_from_file(f.file, err);
+ sock = sock_from_file(f.file);
if (likely(sock)) {
*fput_needed = f.flags & FDPUT_FPUT;
return sock;
}
+ *err = -ENOTSOCK;
fdput(f);
}
return NULL;
@@ -1693,9 +1694,11 @@ int __sys_accept4_file(struct file *file, unsigned file_flags,
if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
- sock = sock_from_file(file, &err);
- if (!sock)
+ sock = sock_from_file(file);
+ if (!sock) {
+ err = -ENOTSOCK;
goto out;
+ }
err = -ENFILE;
newsock = sock_alloc();
@@ -1818,9 +1821,11 @@ int __sys_connect_file(struct file *file, struct sockaddr_storage *address,
struct socket *sock;
int err;
- sock = sock_from_file(file, &err);
- if (!sock)
+ sock = sock_from_file(file);
+ if (!sock) {
+ err = -ENOTSOCK;
goto out;
+ }
err =
security_socket_connect(sock, (struct sockaddr *)address, addrlen);
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 310cfc68875a..ac4a317038f1 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -564,12 +564,12 @@ static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int fl
struct sock *sk = sock->sk;
struct xdp_sock *xs = xdp_sk(sk);
+ if (unlikely(!xsk_is_bound(xs)))
+ return -ENXIO;
if (unlikely(!(xs->dev->flags & IFF_UP)))
return -ENETDOWN;
if (unlikely(!xs->rx))
return -ENOBUFS;
- if (unlikely(!xsk_is_bound(xs)))
- return -ENXIO;
if (unlikely(need_wait))
return -EOPNOTSUPP;
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index 568f9815bb1b..db0cb73513a5 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -1275,6 +1275,8 @@ static void tx_only(struct xsk_socket_info *xsk, u32 *frame_nb, int batch_size)
while (xsk_ring_prod__reserve(&xsk->tx, batch_size, &idx) <
batch_size) {
complete_tx_only(xsk, batch_size);
+ if (benchmark_done)
+ return;
}
for (i = 0; i < batch_size; i++) {
diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py
index 8b829748d488..867ada23281c 100755
--- a/scripts/bpf_helpers_doc.py
+++ b/scripts/bpf_helpers_doc.py
@@ -437,6 +437,8 @@ class PrinterHelpers(Printer):
'struct path',
'struct btf_ptr',
'struct inode',
+ 'struct socket',
+ 'struct file',
]
known_types = {
'...',
@@ -482,6 +484,8 @@ class PrinterHelpers(Printer):
'struct path',
'struct btf_ptr',
'struct inode',
+ 'struct socket',
+ 'struct file',
}
mapped_types = {
'u8': '__u8',
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 7e9931b6c735..77d7c1bb2923 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -3822,6 +3822,14 @@ union bpf_attr {
* The **hash_algo** is returned on success,
* **-EOPNOTSUP** if IMA is disabled or **-EINVAL** if
* invalid arguments are passed.
+ *
+ * struct socket *bpf_sock_from_file(struct file *file)
+ * Description
+ * If the given file represents a socket, returns the associated
+ * socket.
+ * Return
+ * A pointer to a struct socket on success or NULL if the file is
+ * not a socket.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -3986,6 +3994,7 @@ union bpf_attr {
FN(bprm_opts_set), \
FN(ktime_get_coarse_ns), \
FN(ima_inode_hash), \
+ FN(sock_from_file), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 9be88a90a4aa..6ae748f6ea11 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -2518,7 +2518,7 @@ static int bpf_object__finalize_btf(struct bpf_object *obj)
return 0;
}
-static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog)
+static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
{
if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
prog->type == BPF_PROG_TYPE_LSM)
@@ -2533,37 +2533,43 @@ static inline bool libbpf_prog_needs_vmlinux_btf(struct bpf_program *prog)
return false;
}
-static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
+static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
{
- bool need_vmlinux_btf = false;
struct bpf_program *prog;
- int i, err;
+ int i;
/* CO-RE relocations need kernel BTF */
if (obj->btf_ext && obj->btf_ext->core_relo_info.len)
- need_vmlinux_btf = true;
+ return true;
/* Support for typed ksyms needs kernel BTF */
for (i = 0; i < obj->nr_extern; i++) {
const struct extern_desc *ext;
ext = &obj->externs[i];
- if (ext->type == EXT_KSYM && ext->ksym.type_id) {
- need_vmlinux_btf = true;
- break;
- }
+ if (ext->type == EXT_KSYM && ext->ksym.type_id)
+ return true;
}
bpf_object__for_each_program(prog, obj) {
if (!prog->load)
continue;
- if (libbpf_prog_needs_vmlinux_btf(prog)) {
- need_vmlinux_btf = true;
- break;
- }
+ if (prog_needs_vmlinux_btf(prog))
+ return true;
}
- if (!need_vmlinux_btf)
+ return false;
+}
+
+static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
+{
+ int err;
+
+ /* btf_vmlinux could be loaded earlier */
+ if (obj->btf_vmlinux)
+ return 0;
+
+ if (!force && !obj_needs_vmlinux_btf(obj))
return 0;
obj->btf_vmlinux = libbpf_find_kernel_btf();
@@ -7475,7 +7481,7 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
}
err = bpf_object__probe_loading(obj);
- err = err ? : bpf_object__load_vmlinux_btf(obj);
+ err = err ? : bpf_object__load_vmlinux_btf(obj, false);
err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
err = err ? : bpf_object__sanitize_and_load_btf(obj);
err = err ? : bpf_object__sanitize_maps(obj);
@@ -10870,23 +10876,33 @@ int bpf_program__set_attach_target(struct bpf_program *prog,
int attach_prog_fd,
const char *attach_func_name)
{
- int btf_id;
+ int btf_obj_fd = 0, btf_id = 0, err;
if (!prog || attach_prog_fd < 0 || !attach_func_name)
return -EINVAL;
- if (attach_prog_fd)
+ if (prog->obj->loaded)
+ return -EINVAL;
+
+ if (attach_prog_fd) {
btf_id = libbpf_find_prog_btf_id(attach_func_name,
attach_prog_fd);
- else
- btf_id = libbpf_find_vmlinux_btf_id(attach_func_name,
- prog->expected_attach_type);
-
- if (btf_id < 0)
- return btf_id;
+ if (btf_id < 0)
+ return btf_id;
+ } else {
+ /* load btf_vmlinux, if not yet */
+ err = bpf_object__load_vmlinux_btf(prog->obj, true);
+ if (err)
+ return err;
+ err = find_kernel_btf_id(prog->obj, attach_func_name,
+ prog->expected_attach_type,
+ &btf_obj_fd, &btf_id);
+ if (err)
+ return err;
+ }
prog->attach_btf_id = btf_id;
- prog->attach_btf_obj_fd = 0;
+ prog->attach_btf_obj_fd = btf_obj_fd;
prog->attach_prog_fd = attach_prog_fd;
return 0;
}
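A rough caller-side sketch (assumed, not taken from this series) of the ordering this change enforces: bpf_program__set_attach_target() has to run after open but before load, since resolving a vmlinux attach target may now load kernel BTF and the call is rejected once the object is loaded. Object file, program and function names below are placeholders.

#include <bpf/libbpf.h>

int set_target_then_load(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int err;

	obj = bpf_object__open("prog.bpf.o");            /* placeholder object */
	if (libbpf_get_error(obj))
		return -1;

	prog = bpf_object__find_program_by_name(obj, "my_fentry");
	if (!prog)
		return -1;

	/* attach_prog_fd == 0: resolve the name against kernel (vmlinux) BTF */
	err = bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
	if (err)
		return err;

	/* Load after the target is set; the reverse order now returns -EINVAL */
	return bpf_object__load(obj);
}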
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 6909ee81113a..3c35eb401931 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -536,6 +536,7 @@ LIBBPF_API int ring_buffer__add(struct ring_buffer *rb, int map_fd,
ring_buffer_sample_fn sample_cb, void *ctx);
LIBBPF_API int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms);
LIBBPF_API int ring_buffer__consume(struct ring_buffer *rb);
+LIBBPF_API int ring_buffer__epoll_fd(const struct ring_buffer *rb);
/* Perf buffer APIs */
struct perf_buffer;
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 7c4126542e2b..1c0fd2dd233a 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -346,6 +346,7 @@ LIBBPF_0.3.0 {
btf__parse_split;
btf__new_empty_split;
btf__new_split;
+ ring_buffer__epoll_fd;
xsk_setup_xdp_prog;
xsk_socket__update_xskmap;
} LIBBPF_0.2.0;
diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
index 98537ff2679e..8caaafe7e312 100644
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -282,3 +282,9 @@ int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
}
return cnt < 0 ? -errno : res;
}
+
+/* Get an fd that can be used to sleep until data is available in the ring(s) */
+int ring_buffer__epoll_fd(const struct ring_buffer *rb)
+{
+ return rb->epoll_fd;
+}
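A hypothetical caller-side sketch (not part of this patch) of what the new accessor enables, namely driving the ring buffer from an application-owned epoll loop instead of ring_buffer__poll():

#include <sys/epoll.h>
#include <bpf/libbpf.h>

/* Sketch: block on the ring buffer's own epoll fd, then drain it.
 * Assumes "rb" was created earlier with ring_buffer__new().
 */
static int wait_and_consume(struct ring_buffer *rb)
{
	struct epoll_event ev;
	int fd = ring_buffer__epoll_fd(rb);

	if (epoll_wait(fd, &ev, 1, -1 /* wait indefinitely */) <= 0)
		return -1;

	/* Process every sample currently available across all rings */
	return ring_buffer__consume(rb);
}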
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 752d8edddc66..f5b7ef93618c 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -36,3 +36,4 @@ test_cpp
/runqslower
/bench
*.ko
+xdpxceiver
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index ac25ba5d0d6c..8c33e999319a 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -19,7 +19,6 @@ ifneq ($(wildcard $(GENHDR)),)
endif
CLANG ?= clang
-LLC ?= llc
LLVM_OBJCOPY ?= llvm-objcopy
BPF_GCC ?= $(shell command -v bpf-gcc;)
SAN_CFLAGS ?=
@@ -46,7 +45,8 @@ endif
TEST_GEN_FILES =
TEST_FILES = test_lwt_ip_encap.o \
- test_tc_edt.o
+ test_tc_edt.o \
+ xsk_prereqs.sh
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
@@ -70,17 +70,17 @@ TEST_PROGS := test_kmod.sh \
test_bpftool_build.sh \
test_bpftool.sh \
test_bpftool_metadata.sh \
+ test_xsk.sh
TEST_PROGS_EXTENDED := with_addr.sh \
with_tunnels.sh \
- tcp_client.py \
- tcp_server.py \
test_xdp_vlan.sh
# Compile but not part of 'make run_tests'
TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \
flow_dissector_load test_flow_dissector test_tcp_check_syncookie_user \
- test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko
+ test_lirc_mode2_user xdping test_cpp runqslower bench bpf_testmod.ko \
+ xdpxceiver
TEST_CUSTOM_PROGS = urandom_read
@@ -115,6 +115,13 @@ INCLUDE_DIR := $(SCRATCH_DIR)/include
BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a
RESOLVE_BTFIDS := $(BUILD_DIR)/resolve_btfids/resolve_btfids
+VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
+ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
+ ../../../../vmlinux \
+ /sys/kernel/btf/vmlinux \
+ /boot/vmlinux-$(shell uname -r)
+VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+
# Define simple and short `make tes