Skip to content

Commit

Permalink
scheduler: allow RUNNING tasks to be re-scheduled in the future.
Browse files Browse the repository at this point in the history
Because of a race condition between when the DONE interrupt
is sent to the host and when the previous IPC task's
state is updated to COMPLETED, new IPCs from the host
could end up not being scheduled. This leads to IPC
timeouts on the host.

In order to prevent this, this patch introduces a new task state
called PENDING, which is assigned to the task when it is picked
as the next task to be run. The state is then updated to RUNNING
when the task function is executed. This way, when an IPC task
comes in, a RUNNING task can be scheduled again and assigned
the PENDING state to ensure that it doesn't get missed.

Signed-off-by: Ranjani Sridharan <[email protected]>
  • Loading branch information
ranj063 committed Dec 13, 2018
1 parent dd98c47 commit f765638
Show file tree
Hide file tree
Showing 3 changed files with 37 additions and 12 deletions.
13 changes: 11 additions & 2 deletions src/arch/xtensa/include/arch/task.h
Original file line number Diff line number Diff line change
Expand Up @@ -149,6 +149,7 @@ static void _irq_task(void *arg)
struct list_item *clist;
struct task *task;
uint32_t flags;
int run_task = 0;

spin_lock_irq(&irq_task->lock, flags);

Expand All @@ -159,13 +160,21 @@ static void _irq_task(void *arg)
task = container_of(clist, struct task, irq_list);
list_item_del(clist);

if (task->func && task->state == TASK_STATE_PENDING) {
schedule_task_running(task);
run_task = 1;
} else {
run_task = 0;
}

/* run task without holding task lock */
spin_unlock_irq(&irq_task->lock, flags);

if (task->func && task->state == TASK_STATE_RUNNING)
if (run_task)
task->func(task->data);

schedule_task_complete(task);
spin_lock_irq(&irq_task->lock, flags);
schedule_task_complete(task);
}

spin_unlock_irq(&irq_task->lock, flags);
Expand Down
13 changes: 8 additions & 5 deletions src/include/sof/schedule.h
Original file line number Diff line number Diff line change
Expand Up @@ -46,11 +46,12 @@ struct sof;
/* task states */
#define TASK_STATE_INIT 0
#define TASK_STATE_QUEUED 1
#define TASK_STATE_RUNNING 2
#define TASK_STATE_PREEMPTED 3
#define TASK_STATE_COMPLETED 4
#define TASK_STATE_FREE 5
#define TASK_STATE_CANCEL 6
#define TASK_STATE_PENDING 2
#define TASK_STATE_RUNNING 3
#define TASK_STATE_PREEMPTED 4
#define TASK_STATE_COMPLETED 5
#define TASK_STATE_FREE 6
#define TASK_STATE_CANCEL 7

/* task priorities - values same as Linux processes, gives scope for future.*/
#define TASK_PRI_LOW 19
Expand Down Expand Up @@ -94,6 +95,8 @@ int schedule_task_cancel(struct task *task);

void schedule_task_complete(struct task *task);

void schedule_task_running(struct task *task);

static inline void schedule_task_init(struct task *task, void (*func)(void *),
void *data)
{
Expand Down
23 changes: 18 additions & 5 deletions src/lib/schedule.c
Original file line number Diff line number Diff line change
Expand Up @@ -207,7 +207,7 @@ static struct task *schedule_edf(void)

/* init task for running */
spin_lock_irq(&sch->lock, flags);
task->state = TASK_STATE_RUNNING;
task->state = TASK_STATE_PENDING;
list_item_del(&task->list);
spin_unlock_irq(&sch->lock, flags);

Expand Down Expand Up @@ -264,14 +264,14 @@ static int _schedule_task(struct task *task, uint64_t start, uint64_t deadline)

spin_lock_irq(&sch->lock, flags);

/* is task already running ? - not enough MIPS to complete ? */
if (task->state == TASK_STATE_RUNNING) {
trace_pipe("_schedule_task(), task already running");
/* is task already pending ? - not enough MIPS to complete ? */
if (task->state == TASK_STATE_PENDING) {
trace_pipe("_schedule_task(), task already pending");
spin_unlock_irq(&sch->lock, flags);
return 0;
}

/* is task already running ? - not enough MIPS to complete ? */
/* is task already queued ? - not enough MIPS to complete ? */
if (task->state == TASK_STATE_QUEUED) {
trace_pipe("_schedule_task(), task already queued");
spin_unlock_irq(&sch->lock, flags);
Expand Down Expand Up @@ -350,6 +350,19 @@ void schedule_task_complete(struct task *task)
wait_completed(&task->complete);
}

/* Update task state to running.
 *
 * Transitions a task from PENDING to RUNNING under the scheduler lock.
 * Called from the arch IRQ task handler immediately before the task's
 * function is invoked (see _irq_task above). Once a task is RUNNING it
 * can be re-scheduled by a new IPC — which puts it back into PENDING —
 * without the new request being lost.
 *
 * @param task Task whose state is moved to TASK_STATE_RUNNING.
 */
void schedule_task_running(struct task *task)
{
struct schedule_data *sch = *arch_schedule_get();
uint32_t flags;

tracev_pipe("schedule_task_running()");

/* hold the scheduler lock so the state change is atomic with
 * respect to concurrent scheduling from IRQ context
 */
spin_lock_irq(&sch->lock, flags);
task->state = TASK_STATE_RUNNING;
spin_unlock_irq(&sch->lock, flags);
}

static void scheduler_run(void *unused)
{
struct task *future_task;
Expand Down

0 comments on commit f765638

Please sign in to comment.