Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, synced 2026-01-11 17:10:13 +00:00

Compare commits: f8f9c1f4d0...7839932417 (11 commits)

Commits:

- 7839932417
- bba0b6a1c4
- a69eddfd17
- c9894e6f01
- f92ff79ba2
- ccaeeb585c
- 12494e5e2a
- b74fd80d7f
- 50fdb78b7c
- aa7d3a56a2
- b0101ccb5b
crypto/seqiv.c

@@ -50,6 +50,7 @@ static int seqiv_aead_encrypt(struct aead_request *req)
 	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
 	struct aead_request *subreq = aead_request_ctx(req);
 	crypto_completion_t compl;
+	bool unaligned_info;
 	void *data;
 	u8 *info;
 	unsigned int ivsize = 8;
@@ -68,8 +69,9 @@ static int seqiv_aead_encrypt(struct aead_request *req)
 		memcpy_sglist(req->dst, req->src,
 			      req->assoclen + req->cryptlen);
 
-	if (unlikely(!IS_ALIGNED((unsigned long)info,
-				 crypto_aead_alignmask(geniv) + 1))) {
+	unaligned_info = !IS_ALIGNED((unsigned long)info,
+				     crypto_aead_alignmask(geniv) + 1);
+	if (unlikely(unaligned_info)) {
 		info = kmemdup(req->iv, ivsize, req->base.flags &
 			       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
 			       GFP_ATOMIC);
@@ -89,7 +91,7 @@ static int seqiv_aead_encrypt(struct aead_request *req)
 	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
 
 	err = crypto_aead_encrypt(subreq);
-	if (unlikely(info != req->iv))
+	if (unlikely(unaligned_info))
 		seqiv_aead_encrypt_complete2(req, err);
 	return err;
 }
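The seqiv hunks above cache the IV-alignment test in a local flag so the post-encrypt cleanup path keys off the same condition that triggered the `kmemdup()` copy, instead of re-deriving it from a pointer comparison. Below is a minimal, userspace-only sketch of that pattern; the names (`iv_is_unaligned`, `process_iv`, the 8-byte alignment) are hypothetical stand-ins, not the kernel crypto API.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define IV_SIZE  8
#define IV_ALIGN 8 /* hypothetical alignment requirement */

/* Stand-in for the real transform: just prints the IV it was given. */
static void process_iv(const uint8_t *iv)
{
	for (int i = 0; i < IV_SIZE; i++)
		printf("%02x", iv[i]);
	putchar('\n');
}

static int encrypt_with_iv(uint8_t *iv)
{
	/* Compute the alignment test once and reuse the flag later,
	 * mirroring the unaligned_info flag added by the hunk. */
	bool iv_is_unaligned = ((uintptr_t)iv % IV_ALIGN) != 0;
	uint8_t *info = iv;

	if (iv_is_unaligned) {
		/* Analogue of kmemdup(): work on an aligned private copy. */
		info = malloc(IV_SIZE);
		if (!info)
			return -1;
		memcpy(info, iv, IV_SIZE);
	}

	process_iv(info);

	/* Cleanup keys off the cached flag, not a pointer comparison. */
	if (iv_is_unaligned)
		free(info);
	return 0;
}

int main(void)
{
	_Alignas(IV_ALIGN) uint8_t buf[IV_SIZE + 1] = { 0 };

	encrypt_with_iv(buf);     /* aligned start of the array */
	encrypt_with_iv(buf + 1); /* deliberately misaligned view */
	return 0;
}
```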
drivers/crypto/hisilicon/qm.c

@@ -991,7 +991,7 @@ static void qm_get_complete_eqe_num(struct hisi_qm *qm)
 		return;
 	poll_data = &qm->poll_data[cqn];
 
-	while (QM_EQE_PHASE(dw0) != qm->status.eqc_phase) {
+	do {
 		poll_data->qp_finish_id[eqe_num] = dw0 & QM_EQE_CQN_MASK;
 		eqe_num++;
 
@@ -1004,11 +1004,10 @@ static void qm_get_complete_eqe_num(struct hisi_qm *qm)
 			qm->status.eq_head++;
 		}
 
-		if (eqe_num == (eq_depth >> 1) - 1)
-			break;
-
 		dw0 = le32_to_cpu(eqe->dw0);
-	}
+		if (QM_EQE_PHASE(dw0) != qm->status.eqc_phase)
+			break;
+	} while (eqe_num < (eq_depth >> 1) - 1);
 
 	poll_data->eqe_num = eqe_num;
 	queue_work(qm->wq, &poll_data->work);
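The qm.c hunks turn the event-queue drain from a `while` loop with an in-body cap check into a `do { ... } while (count < cap)` loop that re-checks the phase right after re-reading the descriptor, so the cap and the phase test each appear exactly once. A rough userspace sketch of that loop shape follows; the toy ring, `PHASE()` macro, and half-depth cap are invented for illustration and only mirror the control flow, not the hisi_qm hardware interface.

```c
#include <stdint.h>
#include <stdio.h>

#define RING_DEPTH 8
#define PHASE(dw0) (((dw0) >> 16) & 1) /* hypothetical phase bit */
#define CQN(dw0)   ((dw0) & 0xffff)    /* hypothetical queue number */

/* Toy event ring: low 16 bits = queue number, bit 16 = phase. */
static const uint32_t ring[RING_DEPTH] = {
	0x10001, 0x10002, 0x10003, 0x10004, 0x00005, 0x00006, 0x00007, 0x00008,
};

int main(void)
{
	const uint32_t phase = 1; /* phase we expect for valid entries */
	unsigned int head = 0, eqe_num = 0;
	uint32_t dw0 = ring[head];

	if (PHASE(dw0) != phase)  /* nothing pending at all */
		return 0;

	/* do/while shape from the hunk: consume an entry, advance, re-read,
	 * then bail out on phase change or on hitting the half-depth cap. */
	do {
		printf("completion on queue %u\n", CQN(dw0));
		eqe_num++;
		head = (head + 1) % RING_DEPTH;

		dw0 = ring[head];
		if (PHASE(dw0) != phase)
			break;
	} while (eqe_num < (RING_DEPTH >> 1) - 1);

	printf("drained %u entries this pass\n", eqe_num);
	return 0;
}
```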
kernel/cgroup/cpuset.c

@@ -1668,7 +1668,14 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs,
 static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp)
 {
 	WARN_ON_ONCE(!is_remote_partition(cs));
-	WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus));
+	/*
+	 * When a CPU is offlined, top_cpuset may end up with no available CPUs,
+	 * which should clear subpartitions_cpus. We should not emit a warning for this
+	 * scenario: the hierarchy is updated from top to bottom, so subpartitions_cpus
+	 * may already be cleared when disabling the partition.
+	 */
+	WARN_ON_ONCE(!cpumask_subset(cs->effective_xcpus, subpartitions_cpus) &&
+		     !cpumask_empty(subpartitions_cpus));
 
 	spin_lock_irq(&callback_lock);
 	cs->remote_partition = false;
@@ -3976,8 +3983,9 @@ retry:
 	if (remote || (is_partition_valid(cs) && is_partition_valid(parent)))
 		compute_partition_effective_cpumask(cs, &new_cpus);
 
-	if (remote && cpumask_empty(&new_cpus) &&
-	    partition_is_populated(cs, NULL)) {
+	if (remote && (cpumask_empty(subpartitions_cpus) ||
+		       (cpumask_empty(&new_cpus) &&
+			partition_is_populated(cs, NULL)))) {
 		cs->prs_err = PERR_HOTPLUG;
 		remote_partition_disable(cs, tmp);
 		compute_effective_cpumask(&new_cpus, cs, parent);
@@ -3990,9 +3998,12 @@ retry:
 	 * 1) empty effective cpus but not valid empty partition.
 	 * 2) parent is invalid or doesn't grant any cpus to child
 	 *    partitions.
+	 * 3) subpartitions_cpus is empty.
 	 */
-	if (is_local_partition(cs) && (!is_partition_valid(parent) ||
-				       tasks_nocpu_error(parent, cs, &new_cpus)))
+	if (is_local_partition(cs) &&
+	    (!is_partition_valid(parent) ||
+	     tasks_nocpu_error(parent, cs, &new_cpus) ||
+	     cpumask_empty(subpartitions_cpus)))
 		partcmd = partcmd_invalidate;
 	/*
 	 * On the other hand, an invalid partition root may be transitioned
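The first cpuset hunk relaxes the sanity check: effective_xcpus only has to be a subset of subpartitions_cpus while subpartitions_cpus is still populated, because a top-down hotplug update may have emptied it before this partition is torn down. The tiny sketch below restates that "subset unless the reference set is already empty" predicate with plain 64-bit masks; `warn_once` and the mask values are purely illustrative, not the kernel cpumask API.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for WARN_ON_ONCE(). */
static void warn_once(bool cond, const char *what)
{
	if (cond)
		fprintf(stderr, "warning: %s\n", what);
}

/* Mirrors the relaxed check: only complain when the child mask escapes the
 * parent set *and* the parent set has not already been cleared by hotplug. */
static void check_partition_masks(uint64_t effective_xcpus,
				  uint64_t subpartitions_cpus)
{
	bool is_subset = (effective_xcpus & ~subpartitions_cpus) == 0;

	warn_once(!is_subset && subpartitions_cpus != 0,
		  "effective_xcpus not covered by subpartitions_cpus");
}

int main(void)
{
	check_partition_masks(0x0f, 0xff); /* subset: no warning */
	check_partition_masks(0x0f, 0x00); /* reference already empty: tolerated */
	check_partition_masks(0xf0, 0x0f); /* genuine mismatch: warns */
	return 0;
}
```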
kernel/sched/ext.c

@@ -1577,7 +1577,7 @@ static bool dequeue_task_scx(struct rq *rq, struct task_struct *p, int deq_flags
 	 *
 	 * @p may go through multiple stopping <-> running transitions between
 	 * here and put_prev_task_scx() if task attribute changes occur while
-	 * balance_scx() leaves @rq unlocked. However, they don't contain any
+	 * balance_one() leaves @rq unlocked. However, they don't contain any
 	 * information meaningful to the BPF scheduler and can be suppressed by
 	 * skipping the callbacks if the task is !QUEUED.
 	 */
@@ -2372,7 +2372,7 @@ static void switch_class(struct rq *rq, struct task_struct *next)
 	 * preempted, and it regaining control of the CPU.
 	 *
 	 * ->cpu_release() complements ->cpu_acquire(), which is emitted the
-	 * next time that balance_scx() is invoked.
+	 * next time that balance_one() is invoked.
 	 */
 	if (!rq->scx.cpu_released) {
 		if (SCX_HAS_OP(sch, cpu_release)) {
@@ -2478,7 +2478,7 @@ do_pick_task_scx(struct rq *rq, struct rq_flags *rf, bool force_scx)
 	}
 
 	/*
-	 * If balance_scx() is telling us to keep running @prev, replenish slice
+	 * If balance_one() is telling us to keep running @prev, replenish slice
 	 * if necessary and keep running @prev. Otherwise, pop the first one
 	 * from the local DSQ.
 	 */
@@ -3956,13 +3956,8 @@ static void bypass_lb_node(struct scx_sched *sch, int node)
 			  nr_donor_target, nr_target);
 	}
 
-	for_each_cpu(cpu, resched_mask) {
-		struct rq *rq = cpu_rq(cpu);
-
-		raw_spin_rq_lock_irq(rq);
-		resched_curr(rq);
-		raw_spin_rq_unlock_irq(rq);
-	}
+	for_each_cpu(cpu, resched_mask)
+		resched_cpu(cpu);
 
 	for_each_cpu_and(cpu, cpu_online_mask, node_mask) {
 		u32 nr = READ_ONCE(cpu_rq(cpu)->scx.bypass_dsq.nr);
@@ -4025,7 +4020,7 @@ static DEFINE_TIMER(scx_bypass_lb_timer, scx_bypass_lb_timerfn);
  *
  * - ops.dispatch() is ignored.
  *
- * - balance_scx() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
+ * - balance_one() does not set %SCX_RQ_BAL_KEEP on non-zero slice as slice
  *   can't be trusted. Whenever a tick triggers, the running task is rotated to
  *   the tail of the queue with core_sched_at touched.
  *
@@ -4783,8 +4778,10 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops)
 	}
 
 	sch->pcpu = alloc_percpu(struct scx_sched_pcpu);
-	if (!sch->pcpu)
+	if (!sch->pcpu) {
+		ret = -ENOMEM;
 		goto err_free_gdsqs;
+	}
 
 	sch->helper = kthread_run_worker(0, "sched_ext_helper");
 	if (IS_ERR(sch->helper)) {
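The alloc_percpu hunk above makes the allocation-failure branch set ret = -ENOMEM explicitly before jumping to the unwind label, instead of falling through with whatever value ret happened to hold. A condensed userspace sketch of that goto-unwind error-path pattern is below; the struct, field names, and helper are illustrative stand-ins, not the sched_ext code itself.

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct sched_stub {
	int *gdsqs; /* stand-in for earlier allocations */
	int *pcpu;  /* stand-in for the per-CPU state */
};

static int stub_init(struct sched_stub *s, int fail_pcpu)
{
	int ret;

	s->gdsqs = calloc(4, sizeof(*s->gdsqs));
	if (!s->gdsqs)
		return -ENOMEM;

	s->pcpu = fail_pcpu ? NULL : calloc(4, sizeof(*s->pcpu));
	if (!s->pcpu) {
		/* Set the error code explicitly before unwinding, so the
		 * caller sees -ENOMEM rather than a stale/zero ret. */
		ret = -ENOMEM;
		goto err_free_gdsqs;
	}

	return 0;

err_free_gdsqs:
	free(s->gdsqs);
	s->gdsqs = NULL;
	return ret;
}

int main(void)
{
	struct sched_stub s;

	printf("ok path  : %d\n", stub_init(&s, 0));
	free(s.gdsqs);
	free(s.pcpu);
	printf("fail path: %d\n", stub_init(&s, 1));
	return 0;
}
```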
@@ -6067,7 +6064,7 @@ __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id)
 		/*
 		 * A successfully consumed task can be dequeued before it starts
 		 * running while the CPU is trying to migrate other dispatched
-		 * tasks. Bump nr_tasks to tell balance_scx() to retry on empty
+		 * tasks. Bump nr_tasks to tell balance_one() to retry on empty
 		 * local DSQ.
 		 */
 		dspc->nr_tasks++;
tools/sched_ext/scx_show_state.py

@@ -27,16 +27,18 @@ def read_static_key(name):
 def state_str(state):
     return prog['scx_enable_state_str'][state].string_().decode()
 
-ops = prog['scx_ops']
+root = prog['scx_root']
 enable_state = read_atomic("scx_enable_state_var")
 
-print(f'ops           : {ops.name.string_().decode()}')
+if root:
+    print(f'ops           : {root.ops.name.string_().decode()}')
+else:
+    print('ops           : ')
 print(f'enabled       : {read_static_key("__scx_enabled")}')
 print(f'switching_all : {read_int("scx_switching_all")}')
 print(f'switched_all  : {read_static_key("__scx_switched_all")}')
 print(f'enable_state  : {state_str(enable_state)} ({enable_state})')
 print(f'in_softlockup : {prog["scx_in_softlockup"].value_()}')
 print(f'breather_depth: {read_atomic("scx_breather_depth")}')
 print(f'aborting      : {prog["scx_aborting"].value_()}')
 print(f'bypass_depth  : {prog["scx_bypass_depth"].value_()}')
 print(f'nr_rejected   : {read_atomic("scx_nr_rejected")}')
 print(f'enable_seq    : {read_atomic("scx_enable_seq")}')