samples: bpf: Add BPF support for cpumap tracepoints
These are invoked in two places: when the XDP frame or SKB (for generic
XDP) is enqueued to the ptr_ring (cpumap_enqueue), and when the kthread
processes the frame after invoking the CPUMAP program for it (returning
stats for the batch).

We filter on the map_id using cpumap_map_id to avoid printing incorrect
stats when parallel sessions of xdp_redirect_cpu are running.
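
As a rough illustration (not part of this commit), userspace could drive this filter by writing the cpumap_map_id global through the BPF skeleton before attaching the sample programs; the skeleton header and type names below are hypothetical, and target_map_id would be the id of the cpumap being monitored (obtainable e.g. via bpf_obj_get_info_by_fd() on its fd):

/* Hypothetical sketch, not part of this patch: set the cpumap_map_id filter
 * from userspace via a libbpf skeleton. Skeleton header and struct names are
 * assumptions for illustration only.
 */
#include <bpf/libbpf.h>
#include "xdp_sample.skel.h"

static int attach_filtered(unsigned int target_map_id)
{
	struct xdp_sample *skel = xdp_sample__open_and_load();

	if (!skel)
		return -1;

	/* cpumap_map_id is zero-initialized, so it lands in .bss; the skeleton
	 * mmaps global data, so the filter can still be written after load.
	 */
	skel->bss->cpumap_map_id = target_map_id;

	if (xdp_sample__attach(skel)) {
		xdp_sample__destroy(skel);
		return -1;
	}
	return 0;
}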

Signed-off-by: Kumar Kartikeya Dwivedi <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
Link: https://lore.kernel.org/bpf/[email protected]
kkdwivedi authored and Alexei Starovoitov committed Aug 24, 2021
1 parent 82c4508 commit 0cf3c2f
Showing 1 changed file with 57 additions and 1 deletion: samples/bpf/xdp_sample.bpf.c
@@ -8,6 +8,8 @@

array_map rx_cnt SEC(".maps");
array_map redir_err_cnt SEC(".maps");
array_map cpumap_enqueue_cnt SEC(".maps");
array_map cpumap_kthread_cnt SEC(".maps");
array_map exception_cnt SEC(".maps");

const volatile int nr_cpus = 0;
@@ -19,6 +21,8 @@ const volatile int nr_cpus = 0;
const volatile int from_match[32] = {};
const volatile int to_match[32] = {};

int cpumap_map_id = 0;

/* Find if b is part of set a, but if a is empty set then evaluate to true */
#define IN_SET(a, b) \
	({						\
@@ -112,6 +116,59 @@ int BPF_PROG(tp_xdp_redirect_map, const struct net_device *dev,
	return xdp_redirect_collect_stat(dev->ifindex, err);
}

SEC("tp_btf/xdp_cpumap_enqueue")
int BPF_PROG(tp_xdp_cpumap_enqueue, int map_id, unsigned int processed,
unsigned int drops, int to_cpu)
{
u32 cpu = bpf_get_smp_processor_id();
struct datarec *rec;
u32 idx;

if (cpumap_map_id && cpumap_map_id != map_id)
return 0;

idx = to_cpu * nr_cpus + cpu;
rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &idx);
if (!rec)
return 0;
NO_TEAR_ADD(rec->processed, processed);
NO_TEAR_ADD(rec->dropped, drops);
/* Record bulk events, then userspace can calc average bulk size */
if (processed > 0)
NO_TEAR_INC(rec->issue);
/* Inception: It's possible to detect overload situations, via
* this tracepoint. This can be used for creating a feedback
* loop to XDP, which can take appropriate actions to mitigate
* this overload situation.
*/
return 0;
}

SEC("tp_btf/xdp_cpumap_kthread")
int BPF_PROG(tp_xdp_cpumap_kthread, int map_id, unsigned int processed,
unsigned int drops, int sched, struct xdp_cpumap_stats *xdp_stats)
{
struct datarec *rec;
u32 cpu;

if (cpumap_map_id && cpumap_map_id != map_id)
return 0;

cpu = bpf_get_smp_processor_id();
rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &cpu);
if (!rec)
return 0;
NO_TEAR_ADD(rec->processed, processed);
NO_TEAR_ADD(rec->dropped, drops);
NO_TEAR_ADD(rec->xdp_pass, xdp_stats->pass);
NO_TEAR_ADD(rec->xdp_drop, xdp_stats->drop);
NO_TEAR_ADD(rec->xdp_redirect, xdp_stats->redirect);
/* Count times kthread yielded CPU via schedule call */
if (sched)
NO_TEAR_INC(rec->issue);
return 0;
}

SEC("tp_btf/xdp_exception")
int BPF_PROG(tp_xdp_exception, const struct net_device *dev,
	     const struct bpf_prog *xdp, u32 act)
@@ -136,4 +193,3 @@ int BPF_PROG(tp_xdp_exception, const struct net_device *dev,

	return 0;
}
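
To make the cpumap_enqueue_cnt indexing concrete: the BPF program bumps rec->issue once per non-empty enqueue, so processed/issue gives the average bulk size per (to_cpu, enqueueing cpu) slot. A rough userspace sketch of reading that back follows; it is not part of this commit, and it assumes the map is a plain BPF array of struct datarec with nr_cpus * nr_cpus entries keyed as to_cpu * nr_cpus + cpu (the struct layout mirrors the field names used above but is otherwise an assumption):

/* Hypothetical sketch: dump cpumap enqueue stats from userspace and derive
 * the average enqueue bulk per destination CPU. Map type, entry count and
 * struct layout are assumptions, not taken from this commit.
 */
#include <stdio.h>
#include <linux/types.h>
#include <bpf/bpf.h>

struct datarec {
	__u64 processed;
	__u64 dropped;
	__u64 issue;
	__u64 xdp_pass;
	__u64 xdp_drop;
	__u64 xdp_redirect;
};

static void dump_enqueue_stats(int map_fd, int nr_cpus)
{
	for (int to_cpu = 0; to_cpu < nr_cpus; to_cpu++) {
		__u64 processed = 0, dropped = 0, bulks = 0;

		for (int cpu = 0; cpu < nr_cpus; cpu++) {
			struct datarec rec;
			__u32 idx = to_cpu * nr_cpus + cpu;

			if (bpf_map_lookup_elem(map_fd, &idx, &rec))
				continue;
			processed += rec.processed;
			dropped += rec.dropped;
			bulks += rec.issue;	/* one issue per non-empty enqueue */
		}
		if (bulks)
			printf("to_cpu:%d pkts:%llu drops:%llu avg_bulk:%.2f\n",
			       to_cpu, (unsigned long long)processed,
			       (unsigned long long)dropped,
			       (double)processed / bulks);
	}
}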
