perf: Fix u16 overflows
Vince reported that it's possible to overflow the various size fields
and get weird results if you stick too many events in a group.

Put a lid on this by requiring that the fixed record size not exceed
16k. This still allows a fair number of events (a silly number, really)
and leaves plenty of room for callchains and stack dwarves, while also
avoiding overflow of the u16 variables.
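For a sense of the arithmetic: with PERF_FORMAT_GROUP and PERF_FORMAT_ID set,
each group member contributes 16 bytes to the read format, so a group of a few
thousand events already wraps the u16 size fields mentioned above. The
standalone C sketch below is illustrative only (not part of the commit); it
mirrors the shape of the kernel's read-size calculation shown in the diff, and
the group sizes it prints are made-up numbers.

/*
 * Illustrative sketch: same shape as the kernel's __perf_event_read_size(),
 * truncated to 16 bits to show how a u16-sized field wraps for huge groups.
 */
#include <stdio.h>
#include <stdint.h>
#include <linux/perf_event.h>

static uint16_t read_size_for_group(uint64_t read_format, int nr_siblings)
{
	int entry = sizeof(uint64_t);		/* value */
	int size = 0;
	int nr = 1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_ID)
		entry += sizeof(uint64_t);
	if (read_format & PERF_FORMAT_GROUP) {
		nr += nr_siblings;
		size += sizeof(uint64_t);
	}

	return (uint16_t)(size + entry * nr);	/* truncates like a u16 field */
}

int main(void)
{
	uint64_t fmt = PERF_FORMAT_GROUP | PERF_FORMAT_ID;

	/* 16 bytes per event: ~4096 group members already exceed 65535. */
	printf("4095 siblings -> %u bytes\n", read_size_for_group(fmt, 4095));
	printf("5000 siblings -> %u bytes\n", read_size_for_group(fmt, 5000));
	return 0;
}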

Reported-by: Vince Weaver <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]>
Peter Zijlstra authored and Ingo Molnar committed Sep 18, 2015
1 parent f55fc2a commit a723968
Showing 1 changed file with 40 additions and 10 deletions: kernel/events/core.c
@@ -1243,11 +1243,7 @@ static inline void perf_event__state_init(struct perf_event *event)
 					      PERF_EVENT_STATE_INACTIVE;
 }
 
-/*
- * Called at perf_event creation and when events are attached/detached from a
- * group.
- */
-static void perf_event__read_size(struct perf_event *event)
+static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
 {
 	int entry = sizeof(u64); /* value */
 	int size = 0;
@@ -1263,22 +1259,19 @@ static void perf_event__read_size(struct perf_event *event)
 		entry += sizeof(u64);
 
 	if (event->attr.read_format & PERF_FORMAT_GROUP) {
-		nr += event->group_leader->nr_siblings;
+		nr += nr_siblings;
 		size += sizeof(u64);
 	}
 
 	size += entry * nr;
 	event->read_size = size;
 }
 
-static void perf_event__header_size(struct perf_event *event)
+static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
 {
 	struct perf_sample_data *data;
-	u64 sample_type = event->attr.sample_type;
 	u16 size = 0;
 
-	perf_event__read_size(event);
-
 	if (sample_type & PERF_SAMPLE_IP)
 		size += sizeof(data->ip);
 
@@ -1303,6 +1296,17 @@ static void perf_event__header_size(struct perf_event *event)
 	event->header_size = size;
 }
 
+/*
+ * Called at perf_event creation and when events are attached/detached from a
+ * group.
+ */
+static void perf_event__header_size(struct perf_event *event)
+{
+	__perf_event_read_size(event,
+			       event->group_leader->nr_siblings);
+	__perf_event_header_size(event, event->attr.sample_type);
+}
+
 static void perf_event__id_header_size(struct perf_event *event)
 {
 	struct perf_sample_data *data;
@@ -1330,6 +1334,27 @@ static void perf_event__id_header_size(struct perf_event *event)
 	event->id_header_size = size;
 }
 
+static bool perf_event_validate_size(struct perf_event *event)
+{
+	/*
+	 * The values computed here will be over-written when we actually
+	 * attach the event.
+	 */
+	__perf_event_read_size(event, event->group_leader->nr_siblings + 1);
+	__perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
+	perf_event__id_header_size(event);
+
+	/*
+	 * Sum the lot; should not exceed the 64k limit we have on records.
+	 * Conservative limit to allow for callchains and other variable fields.
+	 */
+	if (event->read_size + event->header_size +
+	    event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
+		return false;
+
+	return true;
+}
+
 static void perf_group_attach(struct perf_event *event)
 {
 	struct perf_event *group_leader = event->group_leader, *pos;
@@ -8302,6 +8327,11 @@ SYSCALL_DEFINE5(perf_event_open,
 		mutex_lock(&ctx->mutex);
 	}
 
+	if (!perf_event_validate_size(event)) {
+		err = -E2BIG;
+		goto err_locked;
+	}
+
 	/*
 	 * Must be under the same ctx::mutex as perf_install_in_context(),
 	 * because we need to serialize with concurrent event creation.
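From userspace, the new perf_event_validate_size() check surfaces as an -E2BIG
error from perf_event_open() when attaching an event would push the group's
fixed record size past the 16k cap. A rough, hypothetical caller that would now
see the error (the loop bound and error handling are illustrative, not from the
commit; the open-file limit may need raising with ulimit -n before the size
check is reached):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>

/* Thin wrapper: glibc provides no perf_event_open() stub. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int leader, fd, i;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID;

	/* Group leader measuring the current task on any CPU. */
	leader = perf_event_open(&attr, 0, -1, -1, 0);
	if (leader < 0) {
		perror("perf_event_open (leader)");
		return 1;
	}

	/* Deliberately oversized group: keep adding siblings until refused. */
	for (i = 0; i < 2000; i++) {
		fd = perf_event_open(&attr, 0, -1, leader, 0);
		if (fd < 0) {
			if (errno == E2BIG)
				printf("group rejected after %d siblings (E2BIG)\n", i);
			else
				perror("perf_event_open (sibling)");
			break;
		}
	}
	return 0;
}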
