bpf: prevent kprobe+bpf deadlocks
If a kprobe is placed within the update or delete hash map helpers,
which hold a bucket spin lock, and the triggered bpf program tries to
grab the spin lock for the same bucket on the same cpu, it will
deadlock. Fix it by extending the existing recursion prevention
mechanism.

Note that map_lookup and the other tracing helpers don't have this
problem, since they don't hold any locks and don't modify global data.
bpf_trace_printk has its own recursion check and is fine as well.

Signed-off-by: Alexei Starovoitov <[email protected]>
Acked-by: Daniel Borkmann <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
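
To make the failure mode described above concrete, here is a hypothetical
reproducer sketch in the samples/bpf style of this era. It is not part of
the commit: the attach point name is a placeholder for any function a
kprobe can hit while the hash map's bucket raw spinlock is held on the
current cpu, and the shared map setup is assumed.

/* Hypothetical reproducer sketch (illustrative names, not from this
 * commit). Userspace calls bpf(BPF_MAP_UPDATE_ELEM) on my_map; while
 * htab holds the bucket spin lock, the kprobe below fires and the
 * program updates the same map/bucket on the same cpu.
 */
#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") my_map = {
        .type = BPF_MAP_TYPE_HASH,
        .key_size = sizeof(unsigned int),
        .value_size = sizeof(unsigned long long),
        .max_entries = 1024,
};

/* placeholder attach point: any function reachable under the bucket lock */
SEC("kprobe/some_function_under_bucket_lock")
int bpf_prog(struct pt_regs *ctx)
{
        unsigned int key = 0;
        unsigned long long val = 1;

        /* before this fix: spins on the already-held bucket lock -> deadlock.
         * after this fix: bpf_prog_active is non-zero on this cpu, so the
         * program is never invoked in that context.
         */
        bpf_map_update_elem(&my_map, &key, &val, BPF_ANY);
        return 0;
}

char _license[] SEC("license") = "GPL";

Before this commit only programs invoked through trace_call_bpf() bumped
bpf_prog_active; the syscall-side map writers took the bucket lock without
any such guard.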
4ast authored and davem330 committed Mar 8, 2016
1 parent 8aba8b8 commit b121d1e
Showing 3 changed files with 16 additions and 2 deletions.
include/linux/bpf.h (3 additions, 0 deletions)
@@ -10,6 +10,7 @@
 #include <uapi/linux/bpf.h>
 #include <linux/workqueue.h>
 #include <linux/file.h>
+#include <linux/percpu.h>
 
 struct bpf_map;
 
@@ -163,6 +164,8 @@ bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *f
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
 
 #ifdef CONFIG_BPF_SYSCALL
+DECLARE_PER_CPU(int, bpf_prog_active);
+
 void bpf_register_prog_type(struct bpf_prog_type_list *tl);
 void bpf_register_map_type(struct bpf_map_type_list *tl);
 
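A side note on the per-cpu idiom used here, as a generic sketch with
hypothetical names (not part of the commit): DECLARE_PER_CPU() in a shared
header only declares the variable for every includer, while exactly one
DEFINE_PER_CPU() in a .c file (added to kernel/bpf/syscall.c below)
allocates the storage.

/* shared header, e.g. foo.h -- visible to all users */
DECLARE_PER_CPU(int, foo_active);

/* exactly one translation unit, e.g. foo.c -- allocates storage */
DEFINE_PER_CPU(int, foo_active);

/* any user, with preemption disabled so the task cannot migrate
 * between the paired per-cpu operations:
 */
preempt_disable();
__this_cpu_inc(foo_active);
/* ... critical work ... */
__this_cpu_dec(foo_active);
preempt_enable();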
kernel/bpf/syscall.c (13 additions, 0 deletions)
@@ -18,6 +18,8 @@
 #include <linux/filter.h>
 #include <linux/version.h>
 
+DEFINE_PER_CPU(int, bpf_prog_active);
+
 int sysctl_unprivileged_bpf_disabled __read_mostly;
 
 static LIST_HEAD(bpf_map_types);
@@ -347,6 +349,11 @@ static int map_update_elem(union bpf_attr *attr)
        if (copy_from_user(value, uvalue, value_size) != 0)
                goto free_value;
 
+       /* must increment bpf_prog_active to avoid kprobe+bpf triggering from
+        * inside bpf map update or delete otherwise deadlocks are possible
+        */
+       preempt_disable();
+       __this_cpu_inc(bpf_prog_active);
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
                err = bpf_percpu_hash_update(map, key, value, attr->flags);
        } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
@@ -356,6 +363,8 @@ static int map_update_elem(union bpf_attr *attr)
                err = map->ops->map_update_elem(map, key, value, attr->flags);
                rcu_read_unlock();
        }
+       __this_cpu_dec(bpf_prog_active);
+       preempt_enable();
 
 free_value:
        kfree(value);
@@ -394,9 +403,13 @@ static int map_delete_elem(union bpf_attr *attr)
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;
 
+       preempt_disable();
+       __this_cpu_inc(bpf_prog_active);
        rcu_read_lock();
        err = map->ops->map_delete_elem(map, key);
        rcu_read_unlock();
+       __this_cpu_dec(bpf_prog_active);
+       preempt_enable();
 
 free_key:
        kfree(key);
kernel/trace/bpf_trace.c (0 additions, 2 deletions)
@@ -13,8 +13,6 @@
 #include <linux/ctype.h>
 #include "trace.h"
 
-static DEFINE_PER_CPU(int, bpf_prog_active);
-
 /**
  * trace_call_bpf - invoke BPF program
  * @prog: BPF program
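
For reference, a sketch of how trace_call_bpf() consumes the now-shared
counter, reconstructed from kernel sources of roughly this vintage (v4.5);
exact details may differ from the tree this commit landed in:

unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
{
        unsigned int ret;

        if (in_nmi()) /* not supported yet */
                return 1;

        preempt_disable();

        if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
                /* some bpf program is already running on this cpu --
                 * possibly the map update/delete syscall path that now
                 * bumps the counter -- so don't call into another bpf
                 * program and don't record the kprobe event
                 */
                ret = 0;
                goto out;
        }

        rcu_read_lock();
        ret = BPF_PROG_RUN(prog, ctx);
        rcu_read_unlock();

 out:
        __this_cpu_dec(bpf_prog_active);
        preempt_enable();

        return ret;
}

Because map_update_elem() and map_delete_elem() above increment the same
per-cpu counter before taking any bucket lock, a kprobe firing while the
lock is held finds bpf_prog_active already non-zero, and the program is
skipped instead of deadlocking.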
