sched_ext: Fixes for v6.19-rc3

- Fix uninitialized @ret on alloc_percpu() failure leading to ERR_PTR(0).

- Fix PREEMPT_RT warning when bypass load balancer sends IPI to offline
  CPU by using resched_cpu() instead of resched_curr().

- Fix comment referring to renamed function.

- Update scx_show_state.py for scx_root and scx_aborting changes.

-----BEGIN PGP SIGNATURE-----

iIQEABYKACwWIQTfIjM1kS57o3GsC/uxYfJx3gVYGQUCaVHQAw4cdGpAa2VybmVs
Lm9yZwAKCRCxYfJx3gVYGdJ2AP9nLMUa5Rw2hpcKCLvPjgkqe5fDpNteWrQB3ni9
bu28jQD/XGMwooJbATlDEgCtFqCH74QbddqUABJxBw4FE8qpUgw=
=hp6J
-----END PGP SIGNATURE-----

Merge tag 'sched_ext-for-6.19-rc3-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext

Pull sched_ext fixes from Tejun Heo:

 - Fix uninitialized @ret on alloc_percpu() failure leading to ERR_PTR(0)

 - Fix PREEMPT_RT warning when bypass load balancer sends IPI to offline
   CPU by using resched_cpu() instead of resched_curr()

 - Fix comment referring to renamed function

 - Update scx_show_state.py for scx_root and scx_aborting changes

* tag 'sched_ext-for-6.19-rc3-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext:
  tools/sched_ext: update scx_show_state.py for scx_aborting change
  tools/sched_ext: fix scx_show_state.py for scx_root change
  sched_ext: Use the resched_cpu() to replace resched_curr() in the bypass_lb_node()
  sched_ext: Fix some comments in ext.c
  sched_ext: fix uninitialized ret on alloc_percpu() failure
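For reference on the first fix: kernel error pointers encode a negative errno in the top 4095 values of the address space, and IS_ERR() only recognizes that range, so ERR_PTR(0) is indistinguishable from NULL and passes an IS_ERR() check as success. Below is a minimal userspace sketch of the bug pattern; ERR_PTR()/IS_ERR() are transcribed from include/linux/err.h, while foo_create() is a hypothetical stand-in for the scx allocation path, with the uninitialized stack slot modeled as 0:

#include <stdio.h>

/* Transcribed from include/linux/err.h */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* Hypothetical stand-in for the buggy path: "ret" is never assigned
 * before the goto, so ERR_PTR(ret) returns whatever was on the stack;
 * 0 models the common case, turning the error return into NULL. */
static void *foo_create(void)
{
	int ret = 0;		/* models the uninitialized stack slot */
	void *pcpu = NULL;	/* simulate alloc_percpu() failing */

	if (!pcpu)
		goto err;	/* the fix adds "ret = -ENOMEM;" here */
	return pcpu;
err:
	return ERR_PTR(ret);
}

int main(void)
{
	void *p = foo_create();

	/* Prints "IS_ERR(p) = 0": the failure masquerades as success. */
	printf("IS_ERR(p) = %d, p = %p\n", IS_ERR(p), p);
	return 0;
}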
commit 7839932417
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
@@ -1577,7 +1577,7 @@ static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags
 	 *
 	 * @p may go through multiple stopping <-> running transitions between
 	 * here and put_prev_task_scx() if task attribute changes occur while
-	 * balance_scx() leaves @rq unlocked. However, they don't contain any
+	 * balance_one() leaves @rq unlocked. However, they don't contain any
 	 * information meaningful to the BPF scheduler and can be suppressed by
 	 * skipping the callbacks if the task is !QUEUED.
 	 */
@@ -2372,7 +2372,7 @@ static void switch_class(struct rq *rq, struct task_struct *next)
 	 * preempted, and it regaining control of the CPU.
 	 *
 	 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
-	 * next time that balance_scx() is invoked.
+	 * next time that balance_one() is invoked.
 	 */
 	if (!rq->scx.cpu_released) {
 		if (SCX_HAS_OP(sch, cpu_release)) {
@@ -2478,7 +2478,7 @@ do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)
 	}
 
 	/*
-	 * If balance_scx() is telling us to keep running @prev, replenish slice
+	 * If balance_one() is telling us to keep running @prev, replenish slice
 	 * if necessary and keep running @prev. Otherwise, pop the first one
 	 * from the local DSQ.
 	 */
@@ -3956,13 +3956,8 @@ static void bypass_lb_node(struct scx_sched *sch, int node)
 			    nr_donor_target, nr_target);
 	}
 
-	for_each_cpu(cpu, resched_mask) {
-		struct rq *rq = cpu_rq(cpu);
-
-		raw_spin_rq_lock_irq(rq);
-		resched_curr(rq);
-		raw_spin_rq_unlock_irq(rq);
-	}
+	for_each_cpu(cpu, resched_mask)
+		resched_cpu(cpu);
 
 	for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
 		u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr);
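The dropped loop above locked each runqueue and called resched_curr() unconditionally, which can kick an offline CPU and trigger a PREEMPT_RT warning. resched_cpu() performs the same kick but only for online CPUs; roughly, paraphrasing resched_cpu() from kernel/sched/core.c (exact locking helper names vary across kernel versions):

void resched_cpu(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	raw_spin_rq_lock_irqsave(rq, flags);
	/* The cpu_online() check is what the open-coded loop lacked. */
	if (cpu_online(cpu) || cpu == smp_processor_id())
		resched_curr(rq);
	raw_spin_rq_unlock_irqrestore(rq, flags);
}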
@@ -4025,7 +4020,7 @@ static DEFINE_TIMER(scx_bypass_lb_timer, scx_bypass_lb_timerfn);
  *
  * - ops.dispatch() is ignored.
  *
- * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
+ * - balance_one() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
  *   can't be trusted. Whenever a tick triggers, the running task is rotated to
  *   the tail of the queue with core_sched_at touched.
  *
@@ -4783,8 +4778,10 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
 	}
 
 	sch->pcpu = alloc_percpu(struct scx_sched_pcpu);
-	if (!sch->pcpu)
+	if (!sch->pcpu) {
+		ret = -ENOMEM;
 		goto err_free_gdsqs;
+	}
 
 	sch->helper = kthread_run_worker(0, "sched_ext_helper");
 	if (IS_ERR(sch->helper)) {
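Why the missing assignment matters: this function returns ERR_PTR(ret) on the error path, and the caller only takes its error branch when IS_ERR() is true. A hypothetical sketch of the caller shape (not the actual ext.c code) shows how ERR_PTR(0), i.e. NULL, slips through:

	sch = scx_alloc_and_add_sched(ops);
	if (IS_ERR(sch)) {		/* false for ERR_PTR(0) == NULL */
		ret = PTR_ERR(sch);
		goto err_unlock;
	}
	/* ... sch is dereferenced as a valid scheduler from here on ... */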
@@ -6067,7 +6064,7 @@ __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
 		/*
 		 * A successfully consumed task can be dequeued before it starts
 		 * running while the CPU is trying to migrate other dispatched
-		 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
+		 * tasks. Bump nr_tasks to tell balance_one() to retry on empty
 		 * local DSQ.
 		 */
 		dspc->nr_tasks++;
diff --git a/tools/sched_ext/scx_show_state.py b/tools/sched_ext/scx_show_state.py
@@ -27,16 +27,18 @@ def read_static_key(name):
 def state_str(state):
     return prog['scx_enable_state_str'][state].string_().decode()
 
-ops = prog['scx_ops']
+root = prog['scx_root']
 enable_state = read_atomic("scx_enable_state_var")
 
-print(f'ops           : {ops.name.string_().decode()}')
+if root:
+    print(f'ops           : {root.ops.name.string_().decode()}')
+else:
+    print('ops           : ')
 print(f'enabled       : {read_static_key("__scx_enabled")}')
 print(f'switching_all : {read_int("scx_switching_all")}')
 print(f'switched_all  : {read_static_key("__scx_switched_all")}')
 print(f'enable_state  : {state_str(enable_state)} ({enable_state})')
 print(f'in_softlockup : {prog["scx_in_softlockup"].value_()}')
 print(f'breather_depth: {read_atomic("scx_breather_depth")}')
+print(f'aborting      : {prog["scx_aborting"].value_()}')
 print(f'bypass_depth  : {prog["scx_bypass_depth"].value_()}')
 print(f'nr_rejected   : {read_atomic("scx_nr_rejected")}')
 print(f'enable_seq    : {read_atomic("scx_enable_seq")}')