-
Notifications
You must be signed in to change notification settings - Fork 313
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
task: move task_complete ahead of task function run #704
Conversation
With the IPC task, the function will free the lock on IPC and allow a new IPC to come from the HOST. But the state of ipc_task may not be updated at this moment, and the newly arrived IPC will be dropped by the task scheduler add. Move the complete call ahead to avoid this situation. May need more attention when there are new users of task. Signed-off-by: Pan Xiuli <[email protected]>
@xiulipan this is only a workaround for now. We should keep in mind that this task only runs in the sch irq context and could be preempted by a higher priority interrupt. What would be a more viable solution is to clear busy bit to interrupt the host and unmask busy interrupt in a callback function that can be called after the task state is set to completed |
@ranj063 |
@xiulipan this change is breaking audio playback on the Yorp. oops...sorry i used the 16-bit dmic capture format with your branch. This patch is fine for now. |
@xiulipan @ranj063 We should refine the patch. FW won't recover from XRUN when I test #506; FW will be broken after XRUN. |
@xiulipan, we should be able to schedule a task in the future when it is currently running e.g. something like diff --git a/src/arch/xtensa/include/arch/task.h b/src/arch/xtensa/include/arch/task.h
index e12b443e..34d045aa 100644
--- a/src/arch/xtensa/include/arch/task.h
+++ b/src/arch/xtensa/include/arch/task.h
@@ -149,6 +149,7 @@ static void _irq_task(void *arg)
struct list_item *clist;
struct task *task;
uint32_t flags;
+ int run_task;
spin_lock_irq(&irq_task->lock, flags);
@@ -159,13 +160,20 @@ static void _irq_task(void *arg)
task = container_of(clist, struct task, irq_list);
list_item_del(clist);
+ if (task->func && task->state == TASK_STATE_RUNNING)
+ run_task = 1;
+ else
+ run_task = 0;
+
+ /* run task without holding task lock */
spin_unlock_irq(&irq_task->lock, flags);
- if (task->func && task->state == TASK_STATE_RUNNING)
+ if (run_task)
task->func(task->data);
- schedule_task_complete(task);
spin_lock_irq(&irq_task->lock, flags);
+
+ schedule_task_complete(task);
}
spin_unlock_irq(&irq_task->lock, flags);
diff --git a/src/include/sof/schedule.h b/src/include/sof/schedule.h
index 2e3526b4..12532df6 100644
--- a/src/include/sof/schedule.h
+++ b/src/include/sof/schedule.h
@@ -44,13 +44,14 @@ struct schedule_data;
struct sof;
/* task states */
-#define TASK_STATE_INIT 0
+#define TASK_STATE_INIT 0
#define TASK_STATE_QUEUED 1
-#define TASK_STATE_RUNNING 2
-#define TASK_STATE_PREEMPTED 3
-#define TASK_STATE_COMPLETED 4
-#define TASK_STATE_FREE 5
-#define TASK_STATE_CANCEL 6
+#define TASK_STATE_PENDING 2
+#define TASK_STATE_RUNNING 3
+#define TASK_STATE_PREEMPTED 4
+#define TASK_STATE_COMPLETED 5
+#define TASK_STATE_FREE 6
+#define TASK_STATE_CANCEL 7
/* task priorities - values same as Linux processes, gives scope for future.*/
#define TASK_PRI_LOW 19
diff --git a/src/lib/schedule.c b/src/lib/schedule.c
index ade69658..f7d13d9e 100644
--- a/src/lib/schedule.c
+++ b/src/lib/schedule.c
@@ -207,7 +207,7 @@ static struct task *schedule_edf(void)
/* init task for running */
spin_lock_irq(&sch->lock, flags);
- task->state = TASK_STATE_RUNNING;
+ task->state = TASK_STATE_PENDING;
list_item_del(&task->list);
spin_unlock_irq(&sch->lock, flags);
@@ -264,9 +264,9 @@ static int _schedule_task(struct task *task, uint64_t start, uint64_t deadline)
spin_lock_irq(&sch->lock, flags);
- /* is task already running ? - not enough MIPS to complete ? */
- if (task->state == TASK_STATE_RUNNING) {
- trace_pipe("_schedule_task(), task already running");
+ /* is task already pending ? - not enough MIPS to complete ? */
+ if (task->state == TASK_STATE_PENDING) {
+ trace_pipe("_schedule_task(), task already pending");
spin_unlock_irq(&sch->lock, flags);
return 0;
}
@@ -350,6 +350,19 @@ void schedule_task_complete(struct task *task)
wait_completed(&task->complete);
}
+/* Mark a task as running - takes the scheduler lock internally */
+void schedule_task_running(struct task *task)
+{
+ struct schedule_data *sch = *arch_schedule_get();
+ uint32_t flags;
+
+ tracev_pipe("schedule_task_running()");
+
+ spin_lock_irq(&sch->lock, flags);
+ task->state = TASK_STATE_RUNNING;
+ spin_unlock_irq(&sch->lock, flags);
+}
+
static void scheduler_run(void *unused)
{
struct task *future_task; |
@lgirdwood Otherwise we could not handle how many task is scheduled when that task is running. |
@lgirdwood when do you call schedule_task_running in the above diff? |
With the IPC task, the function will free the lock on IPC and allow a new
IPC to come from the HOST. But the state of ipc_task may not be updated at
this moment, and the newly arrived IPC will be dropped by the task
scheduler add. Move the complete call ahead to avoid this situation. May
need more attention when there are new users of task.
Signed-off-by: Pan Xiuli [email protected]