Skip to content

Commit 5e7b2cc

Browse files
arighi authored and gregkh committed
sched_ext: Fix stale direct dispatch state in ddsp_dsq_id
[ Upstream commit 7e0ffb7 ] @p->scx.ddsp_dsq_id can be left set (non-SCX_DSQ_INVALID) triggering a spurious warning in mark_direct_dispatch() when the next wakeup's ops.select_cpu() calls scx_bpf_dsq_insert(), such as: WARNING: kernel/sched/ext.c:1273 at scx_dsq_insert_commit+0xcd/0x140 The root cause is that ddsp_dsq_id was only cleared in dispatch_enqueue(), which is not reached in all paths that consume or cancel a direct dispatch verdict. Fix it by clearing it at the right places: - direct_dispatch(): cache the direct dispatch state in local variables and clear it before dispatch_enqueue() on the synchronous path. For the deferred path, the direct dispatch state must remain set until process_ddsp_deferred_locals() consumes them. - process_ddsp_deferred_locals(): cache the dispatch state in local variables and clear it before calling dispatch_to_local_dsq(), which may migrate the task to another rq. - do_enqueue_task(): clear the dispatch state on the enqueue path (local/global/bypass fallbacks), where the direct dispatch verdict is ignored. - dequeue_task_scx(): clear the dispatch state after dispatch_dequeue() to handle both the deferred dispatch cancellation and the holding_cpu race, covering all cases where a pending direct dispatch is cancelled. - scx_disable_task(): clear the direct dispatch state when transitioning a task out of the current scheduler. Waking tasks may have had the direct dispatch state set by the outgoing scheduler's ops.select_cpu() and then been queued on a wake_list via ttwu_queue_wakelist(), when SCX_OPS_ALLOW_QUEUED_WAKEUP is set. Such tasks are not on the runqueue and are not iterated by scx_bypass(), so their direct dispatch state won't be cleared. Without this clear, any subsequent SCX scheduler that tries to direct dispatch the task will trigger the WARN_ON_ONCE() in mark_direct_dispatch(). 
Fixes: 5b26f7b ("sched_ext: Allow SCX_DSQ_LOCAL_ON for direct dispatches") Cc: stable@vger.kernel.org # v6.12+ Cc: Daniel Hodges <hodgesd@meta.com> Cc: Patrick Somaru <patsomaru@meta.com> Signed-off-by: Andrea Righi <arighi@nvidia.com> Signed-off-by: Tejun Heo <tj@kernel.org> Signed-off-by: Sasha Levin <sashal@kernel.org> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent d5776a3 commit 5e7b2cc

1 file changed

Lines changed: 35 additions & 14 deletions

File tree

kernel/sched/ext.c

Lines changed: 35 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1025,15 +1025,6 @@ static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
10251025
dsq_mod_nr(dsq, 1);
10261026
p->scx.dsq = dsq;
10271027

1028-
/*
1029-
* scx.ddsp_dsq_id and scx.ddsp_enq_flags are only relevant on the
1030-
* direct dispatch path, but we clear them here because the direct
1031-
* dispatch verdict may be overridden on the enqueue path during e.g.
1032-
* bypass.
1033-
*/
1034-
p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
1035-
p->scx.ddsp_enq_flags = 0;
1036-
10371028
/*
10381029
* We're transitioning out of QUEUEING or DISPATCHING. store_release to
10391030
* match waiters' load_acquire.
@@ -1176,12 +1167,34 @@ static void mark_direct_dispatch(struct scx_sched *sch,
11761167
p->scx.ddsp_enq_flags = enq_flags;
11771168
}
11781169

1170+
/*
1171+
* Clear @p direct dispatch state when leaving the scheduler.
1172+
*
1173+
* Direct dispatch state must be cleared in the following cases:
1174+
* - direct_dispatch(): cleared on the synchronous enqueue path, deferred
1175+
* dispatch keeps the state until consumed
1176+
* - process_ddsp_deferred_locals(): cleared after consuming deferred state,
1177+
* - do_enqueue_task(): cleared on enqueue fallbacks where the dispatch
1178+
* verdict is ignored (local/global/bypass)
1179+
* - dequeue_task_scx(): cleared after dispatch_dequeue(), covering deferred
1180+
* cancellation and holding_cpu races
1181+
* - scx_disable_task(): cleared for queued wakeup tasks, which are excluded by
1182+
* the scx_bypass() loop, so that stale state is not reused by a subsequent
1183+
* scheduler instance
1184+
*/
1185+
static inline void clear_direct_dispatch(struct task_struct *p)
1186+
{
1187+
p->scx.ddsp_dsq_id = SCX_DSQ_INVALID;
1188+
p->scx.ddsp_enq_flags = 0;
1189+
}
1190+
11791191
static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
11801192
u64 enq_flags)
11811193
{
11821194
struct rq *rq = task_rq(p);
11831195
struct scx_dispatch_q *dsq =
11841196
find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
1197+
u64 ddsp_enq_flags;
11851198

11861199
touch_core_sched_dispatch(rq, p);
11871200

@@ -1222,8 +1235,10 @@ static void direct_dispatch(struct scx_sched *sch, struct task_struct *p,
12221235
return;
12231236
}
12241237

1225-
dispatch_enqueue(sch, dsq, p,
1226-
p->scx.ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
1238+
ddsp_enq_flags = p->scx.ddsp_enq_flags;
1239+
clear_direct_dispatch(p);
1240+
1241+
dispatch_enqueue(sch, dsq, p, ddsp_enq_flags | SCX_ENQ_CLEAR_OPSS);
12271242
}
12281243

12291244
static bool scx_rq_online(struct rq *rq)
@@ -1329,6 +1344,7 @@ static void do_enqueue_task(struct rq *rq, struct task_struct *p, u64 enq_flags,
13291344
*/
13301345
touch_core_sched(rq, p);
13311346
refill_task_slice_dfl(sch, p);
1347+
clear_direct_dispatch(p);
13321348
dispatch_enqueue(sch, dsq, p, enq_flags);
13331349
}
13341350

@@ -1496,6 +1512,7 @@ static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags
14961512
sub_nr_running(rq, 1);
14971513

14981514
dispatch_dequeue(rq, p);
1515+
clear_direct_dispatch(p);
14991516
return true;
15001517
}
15011518

@@ -2236,13 +2253,15 @@ static void process_ddsp_deferred_locals(struct rq *rq)
22362253
struct task_struct, scx.dsq_list.node))) {
22372254
struct scx_sched *sch = scx_root;
22382255
struct scx_dispatch_q *dsq;
2256+
u64 dsq_id = p->scx.ddsp_dsq_id;
2257+
u64 enq_flags = p->scx.ddsp_enq_flags;
22392258

22402259
list_del_init(&p->scx.dsq_list.node);
2260+
clear_direct_dispatch(p);
22412261

2242-
dsq = find_dsq_for_dispatch(sch, rq, p->scx.ddsp_dsq_id, p);
2262+
dsq = find_dsq_for_dispatch(sch, rq, dsq_id, p);
22432263
if (!WARN_ON_ONCE(dsq->id != SCX_DSQ_LOCAL))
2244-
dispatch_to_local_dsq(sch, rq, dsq, p,
2245-
p->scx.ddsp_enq_flags);
2264+
dispatch_to_local_dsq(sch, rq, dsq, p, enq_flags);
22462265
}
22472266
}
22482267

@@ -2881,6 +2900,8 @@ static void scx_disable_task(struct task_struct *p)
28812900
lockdep_assert_rq_held(rq);
28822901
WARN_ON_ONCE(scx_get_task_state(p) != SCX_TASK_ENABLED);
28832902

2903+
clear_direct_dispatch(p);
2904+
28842905
if (SCX_HAS_OP(sch, disable))
28852906
SCX_CALL_OP_TASK(sch, SCX_KF_REST, disable, rq, p);
28862907
scx_set_task_state(p, SCX_TASK_READY);

0 commit comments

Comments
 (0)