sched_ext: Use resched_cpu() instead of resched_curr() in bypass_lb_node()
On PREEMPT_RT kernels, scx_bypass_lb_timerfn() runs in the preemptible
per-CPU ktimer kthread context, which means the following scenario can
occur (shown for x86):
cpu1                                  cpu2
                                      ktimer kthread:
                                      ->scx_bypass_lb_timerfn
                                        ->bypass_lb_node
                                          ->for_each_cpu(cpu, resched_mask)
migration/1:                          preempted by migration/2:
multi_cpu_stop()                      multi_cpu_stop()
->take_cpu_down()
  ->__cpu_disable()
    ->set cpu1 offline
                                      ->rq1 = cpu_rq(cpu1)
                                      ->resched_curr(rq1)
                                        ->smp_send_reschedule(cpu1)
                                          ->native_smp_send_reschedule(cpu1)
                                            ->if (unlikely(cpu_is_offline(cpu))) {
                                                  WARN(1, "sched: Unexpected
                                                  reschedule of offline CPU#%d!\n", cpu);
                                                  return;
                                              }
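
On x86, the warning at the end of the trace is issued by
native_smp_send_reschedule() in arch/x86/kernel/smp.c, which refuses to
send an IPI to a CPU that is already marked offline. A simplified sketch
of that check (the exact IPI helper varies between kernel versions):

	static void native_smp_send_reschedule(int cpu)
	{
		/* An offline CPU can no longer handle the reschedule IPI. */
		if (unlikely(cpu_is_offline(cpu))) {
			WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
			return;
		}
		/* Otherwise kick the target CPU with the reschedule vector. */
		__apic_send_IPI(cpu, RESCHEDULE_VECTOR);
	}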
This commit therefore uses resched_cpu() instead of resched_curr() in
bypass_lb_node() to avoid sending IPIs to offline CPUs.
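
resched_cpu() avoids the race because it takes the target rq lock itself
and only kicks the CPU while it is still online. A simplified sketch of
the mainline helper in kernel/sched/core.c (locking helpers abbreviated;
consult the actual source for the current form):

	void resched_cpu(int cpu)
	{
		struct rq *rq = cpu_rq(cpu);
		unsigned long flags;

		raw_spin_rq_lock_irqsave(rq, flags);
		/* Skip CPUs that went offline; the local CPU is always safe. */
		if (cpu_online(cpu) || cpu == smp_processor_id())
			resched_curr(rq);
		raw_spin_rq_unlock_irqrestore(rq, flags);
	}

This also makes the open-coded raw_spin_rq_lock_irq()/resched_curr()/
raw_spin_rq_unlock_irq() sequence removed in the diff below redundant,
since resched_cpu() performs the same locking internally.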
Signed-off-by: Zqiang <qiang.zhang@linux.dev>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 12494e5e2a
commit ccaeeb585c
@@ -3956,13 +3956,8 @@ static void bypass_lb_node(struct scx_sched *sch, int node)
 			  nr_donor_target, nr_target);
 	}
 
-	for_each_cpu(cpu, resched_mask) {
-		struct rq *rq = cpu_rq(cpu);
-
-		raw_spin_rq_lock_irq(rq);
-		resched_curr(rq);
-		raw_spin_rq_unlock_irq(rq);
-	}
+	for_each_cpu(cpu, resched_mask)
+		resched_cpu(cpu);
 
 	for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
 		u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr);