Skip to content

Commit

Permalink
cpumap: Formalize map value as a named struct
Browse files Browse the repository at this point in the history
As it has been already done for devmap, introduce 'struct bpf_cpumap_val'
to formalize the expected values that can be passed in for a CPUMAP.
Update cpumap code to use the struct.

Signed-off-by: Lorenzo Bianconi <[email protected]>
Signed-off-by: Daniel Borkmann <[email protected]>
Acked-by: Jesper Dangaard Brouer <[email protected]>
Link: https://lore.kernel.org/bpf/754f950674665dae6139c061d28c1d982aaf4170.1594734381.git.lorenzo@kernel.org
  • Loading branch information
LorenzoBianconi authored and borkmann committed Jul 16, 2020
1 parent a4e76f1 commit 644bfe5
Show file tree
Hide file tree
Showing 3 changed files with 33 additions and 13 deletions.
9 changes: 9 additions & 0 deletions include/uapi/linux/bpf.h
Original file line number Diff line number Diff line change
Expand Up @@ -3849,6 +3849,15 @@ struct bpf_devmap_val {
} bpf_prog;
};

/* CPUMAP map-value layout
 *
 * The struct data-layout of map-value is a configuration interface
 * exposed to user space (UAPI), so it is a stable ABI contract:
 * existing members must keep their size and offset, and new members
 * can only be added to the end of this structure.
 */
struct bpf_cpumap_val {
__u32 qsize; /* queue size to remote target CPU; used as the ptr_ring
	      * capacity for the per-CPU kthread queue. 0 on update means
	      * delete the entry; values above 16384 are rejected.
	      */
};

enum sk_action {
SK_DROP = 0,
SK_PASS,
Expand Down
28 changes: 15 additions & 13 deletions kernel/bpf/cpumap.c
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,6 @@ struct xdp_bulk_queue {
struct bpf_cpu_map_entry {
u32 cpu; /* kthread CPU and map index */
int map_id; /* Back reference to map */
u32 qsize; /* Queue size placeholder for map lookup */

/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
struct xdp_bulk_queue __percpu *bulkq;
Expand All @@ -62,10 +61,13 @@ struct bpf_cpu_map_entry {
/* Queue with potential multi-producers, and single-consumer kthread */
struct ptr_ring *queue;
struct task_struct *kthread;
struct work_struct kthread_stop_wq;

struct bpf_cpumap_val value;

atomic_t refcnt; /* Control when this struct can be free'ed */
struct rcu_head rcu;

struct work_struct kthread_stop_wq;
};

struct bpf_cpu_map {
Expand Down Expand Up @@ -307,8 +309,8 @@ static int cpu_map_kthread_run(void *data)
return 0;
}

static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
int map_id)
static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_cpumap_val *value, u32 cpu, int map_id)
{
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
struct bpf_cpu_map_entry *rcpu;
Expand Down Expand Up @@ -338,13 +340,13 @@ static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
if (!rcpu->queue)
goto free_bulkq;

err = ptr_ring_init(rcpu->queue, qsize, gfp);
err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
if (err)
goto free_queue;

rcpu->cpu = cpu;
rcpu->map_id = map_id;
rcpu->qsize = qsize;
rcpu->value.qsize = value->qsize;

/* Setup kthread */
rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
Expand Down Expand Up @@ -437,31 +439,31 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
u64 map_flags)
{
struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
struct bpf_cpumap_val cpumap_value = {};
struct bpf_cpu_map_entry *rcpu;

/* Array index key correspond to CPU number */
u32 key_cpu = *(u32 *)key;
/* Value is the queue size */
u32 qsize = *(u32 *)value;

memcpy(&cpumap_value, value, map->value_size);

if (unlikely(map_flags > BPF_EXIST))
return -EINVAL;
if (unlikely(key_cpu >= cmap->map.max_entries))
return -E2BIG;
if (unlikely(map_flags == BPF_NOEXIST))
return -EEXIST;
if (unlikely(qsize > 16384)) /* sanity limit on qsize */
if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
return -EOVERFLOW;

/* Make sure CPU is a valid possible cpu */
if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
return -ENODEV;

if (qsize == 0) {
if (cpumap_value.qsize == 0) {
rcpu = NULL; /* Same as deleting */
} else {
/* Updating qsize cause re-allocation of bpf_cpu_map_entry */
rcpu = __cpu_map_entry_alloc(qsize, key_cpu, map->id);
rcpu = __cpu_map_entry_alloc(&cpumap_value, key_cpu, map->id);
if (!rcpu)
return -ENOMEM;
rcpu->cmap = cmap;
Expand Down Expand Up @@ -523,7 +525,7 @@ static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
struct bpf_cpu_map_entry *rcpu =
__cpu_map_lookup_elem(map, *(u32 *)key);

return rcpu ? &rcpu->qsize : NULL;
return rcpu ? &rcpu->value : NULL;
}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
Expand Down
9 changes: 9 additions & 0 deletions tools/include/uapi/linux/bpf.h
Original file line number Diff line number Diff line change
Expand Up @@ -3849,6 +3849,15 @@ struct bpf_devmap_val {
} bpf_prog;
};

/* CPUMAP map-value layout
 *
 * The struct data-layout of map-value is a configuration interface
 * (mirror of include/uapi/linux/bpf.h for tooling; keep in sync).
 * It is a stable ABI contract: existing members must keep their size
 * and offset, and new members can only be added to the end of this
 * structure.
 */
struct bpf_cpumap_val {
__u32 qsize; /* queue size to remote target CPU; used as the ptr_ring
	      * capacity for the per-CPU kthread queue. 0 on update means
	      * delete the entry; values above 16384 are rejected.
	      */
};

enum sk_action {
SK_DROP = 0,
SK_PASS,
Expand Down

0 comments on commit 644bfe5

Please sign in to comment.