author    Jesper Dangaard Brouer <brouer@redhat.com>  2017-10-16 12:19:34 +0200
committer David S. Miller <davem@davemloft.net>       2017-10-18 12:12:18 +0100
commit    9c270af37bb62e708e3e4415d653ce73e713df02 (patch)
tree      bd24384b686366f88605892c453cd1c68e3d858c /kernel/bpf/cpumap.c
parent    6710e1126934d8b4372b4d2f9ae1646cd3f151bf (diff)
bpf: XDP_REDIRECT enable use of cpumap
This patch connects cpumap to the xdp_do_redirect_map infrastructure. Still, no SKB allocation is done yet. The XDP frames are transferred to the other CPU, but they are simply refcnt decremented on the remote CPU. This served as a good benchmark for measuring the overhead of remote refcnt decrement. If the driver page recycle cache is not efficient, this exposes a bottleneck in the page allocator.

A shout-out to MST's ptr_ring, which is the secret behind this being so efficient at transferring memory pointers between CPUs without constantly bouncing cache-lines between CPUs.

V3: Handle !CONFIG_BPF_SYSCALL pointed out by kbuild test robot.

V4: Make Generic-XDP aware of cpumap type, but don't allow redirect yet,
    as the implementation requires a separate upstream discussion.

V5:
 - Fix a maybe-uninitialized warning pointed out by kbuild test robot.
 - Restrict bpf-prog side access to cpumap; open it up when use-cases appear.
 - Implement cpu_map_enqueue() as a simpler void pointer enqueue.

V6:
 - Allow cpumap type for usage in helper bpf_redirect_map; the general
   bpf-prog side restriction moved to an earlier patch.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
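For context, a minimal sketch of the BPF-program side that this patch enables: an XDP program redirecting frames into a cpumap through the bpf_redirect_map() helper. This is not part of the patch itself; the map name, max_entries, and the fixed target CPU are illustrative assumptions, and the BTF-style map definition follows a later libbpf convention than the samples of this era.

/* Sketch of an XDP program using a cpumap with bpf_redirect_map().
 * Illustrative only: "cpu_map", max_entries and the fixed target CPU
 * are assumptions, not taken from this patch.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(key_size, sizeof(__u32));	/* CPU index */
	__uint(value_size, sizeof(__u32));	/* per-CPU queue size */
	__uint(max_entries, 64);
} cpu_map SEC(".maps");

SEC("xdp")
int xdp_redirect_cpu(struct xdp_md *ctx)
{
	__u32 target_cpu = 1;	/* illustrative: always pick CPU 1 */

	/* Returns XDP_REDIRECT on success; after the program returns,
	 * the frame is enqueued to the remote CPU (see cpu_map_enqueue()
	 * in the diff below).
	 */
	return bpf_redirect_map(&cpu_map, target_cpu, 0);
}

char _license[] SEC("license") = "GPL";

User space would then populate the map with bpf_map_update_elem(), where the key is the destination CPU index and the value is the size of that CPU's ptr_ring queue.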
Diffstat (limited to 'kernel/bpf/cpumap.c')
-rw-r--r--   kernel/bpf/cpumap.c   22
1 file changed, 21 insertions, 1 deletion
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index e1e25ddba038..768da6a2c265 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -500,7 +500,7 @@ struct xdp_pkt {
 /* Runs under RCU-read-side, plus in softirq under NAPI protection.
  * Thus, safe percpu variable access.
  */
-int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_pkt *xdp_pkt)
+static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_pkt *xdp_pkt)
 {
 	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
 
@@ -520,6 +520,26 @@ int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_pkt *xdp_pkt)
 	return 0;
 }
 
+int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
+		    struct net_device *dev_rx)
+{
+	struct xdp_pkt *xdp_pkt;
+	int headroom;
+
+	/* For now this is just used as a void pointer to data_hard_start.
+	 * Followup patch will generalize this.
+	 */
+	xdp_pkt = xdp->data_hard_start;
+
+	/* Fake writing into xdp_pkt->data to measure overhead */
+	headroom = xdp->data - xdp->data_hard_start;
+	if (headroom < sizeof(*xdp_pkt))
+		xdp_pkt->data = xdp->data;
+
+	bq_enqueue(rcpu, xdp_pkt);
+	return 0;
+}
+
 void __cpu_map_insert_ctx(struct bpf_map *map, u32 bit)
 {
 	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
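
To show where the new cpu_map_enqueue() gets called from, below is a hypothetical, simplified sketch of the dispatch the xdp_do_redirect_map path performs for a BPF_MAP_TYPE_CPUMAP map. The function name example_tx_xdp_to_cpumap is invented for illustration; the real code lives in net/core/filter.c and is shaped differently. It assumes cpu_map_enqueue() and __cpu_map_lookup_elem() are visible via include/linux/bpf.h.

/* Kernel-internal sketch, not the actual filter.c code. */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/netdevice.h>

static int example_tx_xdp_to_cpumap(struct bpf_map *map, u32 index,
				    struct xdp_buff *xdp,
				    struct net_device *dev_rx)
{
	struct bpf_cpu_map_entry *rcpu;

	/* Resolve the map slot the program chose via bpf_redirect_map() */
	rcpu = __cpu_map_lookup_elem(map, index);
	if (!rcpu)
		return -EINVAL;

	/* Hand the frame to the remote CPU's per-CPU bulk queue, which
	 * flushes into that CPU's ptr_ring (see cpu_map_enqueue() above).
	 */
	return cpu_map_enqueue(rcpu, xdp, dev_rx);
}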