Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-01-11 17:10:13 +00:00)

commit 9c80e44337
Merge branches 'rcu/misc-for-6.16', 'rcu/seq-counters-for-6.16' and 'rcu/torture-for-6.16' into rcu/for-next
@@ -5657,6 +5657,31 @@
                        are zero, rcutorture acts as if is interpreted
                        they are all non-zero.

        rcutorture.gpwrap_lag= [KNL]
                        Enable grace-period wrap lag testing. Setting
                        to false prevents the gpwrap lag test from
                        running. Default is true.

        rcutorture.gpwrap_lag_gps= [KNL]
                        Set the value for grace-period wrap lag during
                        active lag testing periods. This controls how many
                        grace periods differences we tolerate between
                        rdp and rnp's gp_seq before setting overflow flag.
                        The default is always set to 8.

        rcutorture.gpwrap_lag_cycle_mins= [KNL]
                        Set the total cycle duration for gpwrap lag
                        testing in minutes. This is the total time for
                        one complete cycle of active and inactive
                        testing periods. Default is 30 minutes.

        rcutorture.gpwrap_lag_active_mins= [KNL]
                        Set the duration for which gpwrap lag is active
                        within each cycle, in minutes. During this time,
                        the grace-period wrap lag will be set to the
                        value specified by gpwrap_lag_gps. Default is
                        5 minutes.

        rcutorture.irqreader= [KNL]
                        Run RCU readers from irq handlers, or, more
                        accurately, from a timer handler. Not all RCU
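The four rcutorture.gpwrap_lag* parameters above combine into a simple duty cycle: the lag is forced to gpwrap_lag_gps grace periods for gpwrap_lag_active_mins out of every gpwrap_lag_cycle_mins, and reset otherwise. The following stand-alone user-space sketch (not part of this patch; the numbers are just the documented defaults) illustrates that schedule:

    /*
     * Sketch of the gpwrap-lag duty cycle implied by the parameters above.
     * Not kernel code; values mirror the documented defaults.
     */
    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            const int gpwrap_lag_gps = 8;           /* documented default */
            const int gpwrap_lag_cycle_mins = 30;   /* full cycle length */
            const int gpwrap_lag_active_mins = 5;   /* active window per cycle */

            for (int minute = 0; minute < 2 * gpwrap_lag_cycle_mins; minute++) {
                    /* Active during the last gpwrap_lag_active_mins of each
                     * cycle, mirroring a timer that first waits out the
                     * inactive part of the cycle. */
                    bool active = (minute % gpwrap_lag_cycle_mins) >=
                                  (gpwrap_lag_cycle_mins - gpwrap_lag_active_mins);

                    printf("minute %2d: gpwrap lag = %d GPs\n",
                           minute, active ? gpwrap_lag_gps : 0);
            }
            return 0;
    }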
@@ -57,6 +57,9 @@
/* Low-order bit definition for polled grace-period APIs. */
#define RCU_GET_STATE_COMPLETED 0x1

/* A complete grace period count */
#define RCU_SEQ_GP (RCU_SEQ_STATE_MASK + 1)

extern int sysctl_sched_rt_runtime;

/*
@@ -157,12 +160,21 @@ static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred, but do not allow the
 * (ULONG_MAX / 2) safety-factor/guard-band.
 *
 * The token returned by get_state_synchronize_rcu_full() is based on
 * rcu_state.gp_seq but it is tested in poll_state_synchronize_rcu_full()
 * against the root rnp->gp_seq. Since rcu_seq_start() is first called
 * on rcu_state.gp_seq and only later reflected on the root rnp->gp_seq,
 * it is possible that rcu_seq_snap(rcu_state.gp_seq) returns 2 full grace
 * periods ahead of the root rnp->gp_seq. To prevent false-positives with the
 * full polling API that a wrap around instantly completed the GP, when nothing
 * like that happened, adjust for the 2 GPs in the ULONG_CMP_LT().
 */
static inline bool rcu_seq_done_exact(unsigned long *sp, unsigned long s)
{
        unsigned long cur_s = READ_ONCE(*sp);

        return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (3 * RCU_SEQ_STATE_MASK + 1));
        return ULONG_CMP_GE(cur_s, s) || ULONG_CMP_LT(cur_s, s - (2 * RCU_SEQ_GP));
}

/*
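The comment above relies on the gp_seq encoding: the low bits hold the grace-period phase and the remaining bits count grace periods, so one complete grace period advances the counter by RCU_SEQ_GP. A minimal user-space sketch of that arithmetic (not from this commit; it assumes RCU_SEQ_CTR_SHIFT is 2, as defined in kernel/rcu/rcu.h):

    /* Demonstrates the gp_seq encoding and the 2-GP guard band. */
    #include <stdio.h>

    #define RCU_SEQ_CTR_SHIFT       2
    #define RCU_SEQ_STATE_MASK      ((1UL << RCU_SEQ_CTR_SHIFT) - 1)
    #define RCU_SEQ_GP              (RCU_SEQ_STATE_MASK + 1)

    int main(void)
    {
            unsigned long gp_seq = 0;

            for (int gp = 0; gp < 3; gp++) {
                    gp_seq += RCU_SEQ_GP;   /* one completed grace period */
                    printf("after GP %d: gp_seq=%lu (GP count=%lu, phase=%lu)\n",
                           gp + 1, gp_seq, gp_seq >> RCU_SEQ_CTR_SHIFT,
                           gp_seq & RCU_SEQ_STATE_MASK);
            }
            printf("guard band used by rcu_seq_done_exact(): %lu counter units\n",
                   2 * RCU_SEQ_GP);
            return 0;
    }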
@@ -572,6 +584,8 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
                               unsigned long c_old,
                               unsigned long c);
void rcu_gp_set_torture_wait(int duration);
void rcu_set_gpwrap_lag(unsigned long lag);
int rcu_get_gpwrap_count(int cpu);
#else
static inline void rcutorture_get_gp_data(int *flags, unsigned long *gp_seq)
{
@@ -589,6 +603,8 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
        do { } while (0)
#endif
static inline void rcu_gp_set_torture_wait(int duration) { }
static inline void rcu_set_gpwrap_lag(unsigned long lag) { }
static inline int rcu_get_gpwrap_count(int cpu) { return 0; }
#endif
unsigned long long rcutorture_gather_gp_seqs(void);
void rcutorture_format_gp_seqs(unsigned long long seqs, char *cp, size_t len);

@@ -115,6 +115,10 @@ torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(bool, gpwrap_lag, true, "Enable grace-period wrap lag testing");
torture_param(int, gpwrap_lag_gps, 8, "Value to set for set_gpwrap_lag during an active testing period.");
torture_param(int, gpwrap_lag_cycle_mins, 30, "Total cycle duration for gpwrap lag testing (in minutes)");
torture_param(int, gpwrap_lag_active_mins, 5, "Duration for which gpwrap lag is active within each cycle (in minutes)");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, preempt_duration, 0, "Preemption duration (ms), zero to disable");
@@ -413,6 +417,8 @@ struct rcu_torture_ops {
        bool (*reader_blocked)(void);
        unsigned long long (*gather_gp_seqs)(void);
        void (*format_gp_seqs)(unsigned long long seqs, char *cp, size_t len);
        void (*set_gpwrap_lag)(unsigned long lag);
        int (*get_gpwrap_count)(int cpu);
        long cbflood_max;
        int irq_capable;
        int can_boost;
@@ -619,6 +625,8 @@ static struct rcu_torture_ops rcu_ops = {
                : NULL,
        .gather_gp_seqs = rcutorture_gather_gp_seqs,
        .format_gp_seqs = rcutorture_format_gp_seqs,
        .set_gpwrap_lag = rcu_set_gpwrap_lag,
        .get_gpwrap_count = rcu_get_gpwrap_count,
        .irq_capable = 1,
        .can_boost = IS_ENABLED(CONFIG_RCU_BOOST),
        .extendables = RCUTORTURE_MAX_EXTEND,
@ -2164,53 +2172,70 @@ rcutorture_loop_extend(int *readstate, bool insoftirq, struct torture_random_sta
|
||||
return &rtrsp[j];
|
||||
}
|
||||
|
||||
/*
|
||||
* Do one read-side critical section, returning false if there was
|
||||
* no data to read. Can be invoked both from process context and
|
||||
* from a timer handler.
|
||||
*/
|
||||
static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
|
||||
{
|
||||
bool checkpolling = !(torture_random(trsp) & 0xfff);
|
||||
struct rcu_torture_one_read_state {
|
||||
bool checkpolling;
|
||||
unsigned long cookie;
|
||||
struct rcu_gp_oldstate cookie_full;
|
||||
int i;
|
||||
unsigned long started;
|
||||
unsigned long completed;
|
||||
int newstate;
|
||||
struct rcu_torture *p;
|
||||
int pipe_count;
|
||||
bool preempted = false;
|
||||
int readstate = 0;
|
||||
struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
|
||||
struct rt_read_seg *rtrsp = &rtseg[0];
|
||||
struct rt_read_seg *rtrsp1;
|
||||
int readstate;
|
||||
struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS];
|
||||
struct rt_read_seg *rtrsp;
|
||||
unsigned long long ts;
|
||||
};
|
||||
|
||||
WARN_ON_ONCE(!rcu_is_watching());
|
||||
newstate = rcutorture_extend_mask(readstate, trsp);
|
||||
rcutorture_one_extend(&readstate, newstate, myid < 0, trsp, rtrsp++);
|
||||
if (checkpolling) {
|
||||
static void init_rcu_torture_one_read_state(struct rcu_torture_one_read_state *rtorsp,
|
||||
struct torture_random_state *trsp)
|
||||
{
|
||||
memset(rtorsp, 0, sizeof(*rtorsp));
|
||||
rtorsp->checkpolling = !(torture_random(trsp) & 0xfff);
|
||||
rtorsp->rtrsp = &rtorsp->rtseg[0];
|
||||
}
|
||||
|
||||
/*
|
||||
* Set up the first segment of a series of overlapping read-side
|
||||
* critical sections. The caller must have actually initiated the
|
||||
* outermost read-side critical section.
|
||||
*/
|
||||
static bool rcu_torture_one_read_start(struct rcu_torture_one_read_state *rtorsp,
|
||||
struct torture_random_state *trsp, long myid)
|
||||
{
|
||||
if (rtorsp->checkpolling) {
|
||||
if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
|
||||
cookie = cur_ops->get_gp_state();
|
||||
rtorsp->cookie = cur_ops->get_gp_state();
|
||||
if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
|
||||
cur_ops->get_gp_state_full(&cookie_full);
|
||||
cur_ops->get_gp_state_full(&rtorsp->cookie_full);
|
||||
}
|
||||
started = cur_ops->get_gp_seq();
|
||||
ts = rcu_trace_clock_local();
|
||||
p = rcu_dereference_check(rcu_torture_current,
|
||||
rtorsp->started = cur_ops->get_gp_seq();
|
||||
rtorsp->ts = rcu_trace_clock_local();
|
||||
rtorsp->p = rcu_dereference_check(rcu_torture_current,
|
||||
!cur_ops->readlock_held || cur_ops->readlock_held());
|
||||
if (p == NULL) {
|
||||
if (rtorsp->p == NULL) {
|
||||
/* Wait for rcu_torture_writer to get underway */
|
||||
rcutorture_one_extend(&readstate, 0, myid < 0, trsp, rtrsp);
|
||||
rcutorture_one_extend(&rtorsp->readstate, 0, myid < 0, trsp, rtorsp->rtrsp);
|
||||
return false;
|
||||
}
|
||||
if (p->rtort_mbtest == 0)
|
||||
if (rtorsp->p->rtort_mbtest == 0)
|
||||
atomic_inc(&n_rcu_torture_mberror);
|
||||
rcu_torture_reader_do_mbchk(myid, p, trsp);
|
||||
rtrsp = rcutorture_loop_extend(&readstate, myid < 0, trsp, rtrsp);
|
||||
rcu_torture_reader_do_mbchk(myid, rtorsp->p, trsp);
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Complete the last segment of a series of overlapping read-side
|
||||
* critical sections and check for errors.
|
||||
*/
|
||||
static void rcu_torture_one_read_end(struct rcu_torture_one_read_state *rtorsp,
|
||||
struct torture_random_state *trsp, long myid)
|
||||
{
|
||||
int i;
|
||||
unsigned long completed;
|
||||
int pipe_count;
|
||||
bool preempted = false;
|
||||
struct rt_read_seg *rtrsp1;
|
||||
|
||||
preempt_disable();
|
||||
pipe_count = READ_ONCE(p->rtort_pipe_count);
|
||||
pipe_count = READ_ONCE(rtorsp->p->rtort_pipe_count);
|
||||
if (pipe_count > RCU_TORTURE_PIPE_LEN) {
|
||||
// Should not happen in a correct RCU implementation,
|
||||
// happens quite often for torture_type=busted.
|
||||
@ -2218,28 +2243,28 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
|
||||
}
|
||||
completed = cur_ops->get_gp_seq();
|
||||
if (pipe_count > 1) {
|
||||
do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
|
||||
ts, started, completed);
|
||||
do_trace_rcu_torture_read(cur_ops->name, &rtorsp->p->rtort_rcu,
|
||||
rtorsp->ts, rtorsp->started, completed);
|
||||
rcu_ftrace_dump(DUMP_ALL);
|
||||
}
|
||||
__this_cpu_inc(rcu_torture_count[pipe_count]);
|
||||
completed = rcutorture_seq_diff(completed, started);
|
||||
completed = rcutorture_seq_diff(completed, rtorsp->started);
|
||||
if (completed > RCU_TORTURE_PIPE_LEN) {
|
||||
/* Should not happen, but... */
|
||||
completed = RCU_TORTURE_PIPE_LEN;
|
||||
}
|
||||
__this_cpu_inc(rcu_torture_batch[completed]);
|
||||
preempt_enable();
|
||||
if (checkpolling) {
|
||||
if (rtorsp->checkpolling) {
|
||||
if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
|
||||
WARN_ONCE(cur_ops->poll_gp_state(cookie),
|
||||
WARN_ONCE(cur_ops->poll_gp_state(rtorsp->cookie),
|
||||
"%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
|
||||
__func__,
|
||||
rcu_torture_writer_state_getname(),
|
||||
rcu_torture_writer_state,
|
||||
cookie, cur_ops->get_gp_state());
|
||||
rtorsp->cookie, cur_ops->get_gp_state());
|
||||
if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
|
||||
WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
|
||||
WARN_ONCE(cur_ops->poll_gp_state_full(&rtorsp->cookie_full),
|
||||
"%s: Cookie check 6 failed %s(%d) online %*pbl\n",
|
||||
__func__,
|
||||
rcu_torture_writer_state_getname(),
|
||||
@ -2248,21 +2273,42 @@ static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
|
||||
}
|
||||
if (cur_ops->reader_blocked)
|
||||
preempted = cur_ops->reader_blocked();
|
||||
rcutorture_one_extend(&readstate, 0, myid < 0, trsp, rtrsp);
|
||||
WARN_ON_ONCE(readstate);
|
||||
rcutorture_one_extend(&rtorsp->readstate, 0, myid < 0, trsp, rtorsp->rtrsp);
|
||||
WARN_ON_ONCE(rtorsp->readstate);
|
||||
// This next splat is expected behavior if leakpointer, especially
|
||||
// for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
|
||||
WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
|
||||
WARN_ON_ONCE(leakpointer && READ_ONCE(rtorsp->p->rtort_pipe_count) > 1);
|
||||
|
||||
/* If error or close call, record the sequence of reader protections. */
|
||||
if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
|
||||
i = 0;
|
||||
for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
|
||||
for (rtrsp1 = &rtorsp->rtseg[0]; rtrsp1 < rtorsp->rtrsp; rtrsp1++)
|
||||
err_segs[i++] = *rtrsp1;
|
||||
rt_read_nsegs = i;
|
||||
rt_read_preempted = preempted;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Do one read-side critical section, returning false if there was
|
||||
* no data to read. Can be invoked both from process context and
|
||||
* from a timer handler.
|
||||
*/
|
||||
static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
|
||||
{
|
||||
int newstate;
|
||||
struct rcu_torture_one_read_state rtors;
|
||||
|
||||
WARN_ON_ONCE(!rcu_is_watching());
|
||||
init_rcu_torture_one_read_state(&rtors, trsp);
|
||||
newstate = rcutorture_extend_mask(rtors.readstate, trsp);
|
||||
rcutorture_one_extend(&rtors.readstate, newstate, myid < 0, trsp, rtors.rtrsp++);
|
||||
if (!rcu_torture_one_read_start(&rtors, trsp, myid)) {
|
||||
rcutorture_one_extend(&rtors.readstate, 0, myid < 0, trsp, rtors.rtrsp);
|
||||
return false;
|
||||
}
|
||||
rtors.rtrsp = rcutorture_loop_extend(&rtors.readstate, myid < 0, trsp, rtors.rtrsp);
|
||||
rcu_torture_one_read_end(&rtors, trsp, myid);
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -2307,7 +2353,7 @@ rcu_torture_reader(void *arg)
|
||||
set_user_nice(current, MAX_NICE);
|
||||
if (irqreader && cur_ops->irq_capable)
|
||||
timer_setup_on_stack(&t, rcu_torture_timer, 0);
|
||||
tick_dep_set_task(current, TICK_DEP_BIT_RCU);
|
||||
tick_dep_set_task(current, TICK_DEP_BIT_RCU); // CPU bound, so need tick.
|
||||
do {
|
||||
if (irqreader && cur_ops->irq_capable) {
|
||||
if (!timer_pending(&t))
|
||||
@ -2394,6 +2440,7 @@ rcu_torture_stats_print(void)
|
||||
int i;
|
||||
long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
|
||||
long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
|
||||
long n_gpwraps = 0;
|
||||
struct rcu_torture *rtcp;
|
||||
static unsigned long rtcv_snap = ULONG_MAX;
|
||||
static bool splatted;
|
||||
@ -2404,6 +2451,8 @@ rcu_torture_stats_print(void)
|
||||
pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
|
||||
batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
|
||||
}
|
||||
if (cur_ops->get_gpwrap_count)
|
||||
n_gpwraps += cur_ops->get_gpwrap_count(cpu);
|
||||
}
|
||||
for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) {
|
||||
if (pipesummary[i] != 0)
|
||||
@ -2435,8 +2484,9 @@ rcu_torture_stats_print(void)
|
||||
data_race(n_barrier_attempts),
|
||||
data_race(n_rcu_torture_barrier_error));
|
||||
pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
|
||||
pr_cont("nocb-toggles: %ld:%ld\n",
|
||||
pr_cont("nocb-toggles: %ld:%ld ",
|
||||
atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
|
||||
pr_cont("gpwraps: %ld\n", n_gpwraps);
|
||||
|
||||
pr_alert("%s%s ", torture_type, TORTURE_FLAG);
|
||||
if (atomic_read(&n_rcu_torture_mberror) ||
|
||||
@ -3036,7 +3086,7 @@ static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
|
||||
cver = READ_ONCE(rcu_torture_current_version);
|
||||
gps = cur_ops->get_gp_seq();
|
||||
rfp->rcu_launder_gp_seq_start = gps;
|
||||
tick_dep_set_task(current, TICK_DEP_BIT_RCU);
|
||||
tick_dep_set_task(current, TICK_DEP_BIT_RCU); // CPU bound, so need tick.
|
||||
while (time_before(jiffies, stopat) &&
|
||||
!shutdown_time_arrived() &&
|
||||
!READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
|
||||
@@ -3607,6 +3657,57 @@ static int rcu_torture_preempt(void *unused)

static enum cpuhp_state rcutor_hp;

static struct hrtimer gpwrap_lag_timer;
static bool gpwrap_lag_active;

/* Timer handler for toggling RCU grace-period sequence overflow test lag value */
static enum hrtimer_restart rcu_gpwrap_lag_timer(struct hrtimer *timer)
{
        ktime_t next_delay;

        if (gpwrap_lag_active) {
                pr_alert("rcu-torture: Disabling gpwrap lag (value=0)\n");
                cur_ops->set_gpwrap_lag(0);
                gpwrap_lag_active = false;
                next_delay = ktime_set((gpwrap_lag_cycle_mins - gpwrap_lag_active_mins) * 60, 0);
        } else {
                pr_alert("rcu-torture: Enabling gpwrap lag (value=%d)\n", gpwrap_lag_gps);
                cur_ops->set_gpwrap_lag(gpwrap_lag_gps);
                gpwrap_lag_active = true;
                next_delay = ktime_set(gpwrap_lag_active_mins * 60, 0);
        }

        if (torture_must_stop_irq())
                return HRTIMER_NORESTART;

        hrtimer_forward_now(timer, next_delay);
        return HRTIMER_RESTART;
}

static int rcu_gpwrap_lag_init(void)
{
        if (!gpwrap_lag)
                return 0;

        if (gpwrap_lag_cycle_mins <= 0 || gpwrap_lag_active_mins <= 0) {
                pr_alert("rcu-torture: lag timing parameters must be positive\n");
                return -EINVAL;
        }

        hrtimer_setup(&gpwrap_lag_timer, rcu_gpwrap_lag_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        gpwrap_lag_active = false;
        hrtimer_start(&gpwrap_lag_timer,
                      ktime_set((gpwrap_lag_cycle_mins - gpwrap_lag_active_mins) * 60, 0), HRTIMER_MODE_REL);

        return 0;
}

static void rcu_gpwrap_lag_cleanup(void)
{
        hrtimer_cancel(&gpwrap_lag_timer);
        cur_ops->set_gpwrap_lag(0);
        gpwrap_lag_active = false;
}
static void
rcu_torture_cleanup(void)
{
@@ -3776,6 +3877,9 @@ rcu_torture_cleanup(void)
        torture_cleanup_end();
        if (cur_ops->gp_slow_unregister)
                cur_ops->gp_slow_unregister(NULL);

        if (gpwrap_lag && cur_ops->set_gpwrap_lag)
                rcu_gpwrap_lag_cleanup();
}

static void rcu_torture_leak_cb(struct rcu_head *rhp)
@@ -4272,9 +4376,17 @@ rcu_torture_init(void)
        }
        if (object_debug)
                rcu_test_debug_objects();
        torture_init_end();

        if (cur_ops->gp_slow_register && !WARN_ON_ONCE(!cur_ops->gp_slow_unregister))
                cur_ops->gp_slow_register(&rcu_fwd_cb_nodelay);

        if (gpwrap_lag && cur_ops->set_gpwrap_lag) {
                firsterr = rcu_gpwrap_lag_init();
                if (torture_init_error(firsterr))
                        goto unwind;
        }

        torture_init_end();
        return 0;

unwind:
@@ -1589,7 +1589,7 @@ EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
        if (cookie != SRCU_GET_STATE_COMPLETED &&
            !rcu_seq_done(&ssp->srcu_sup->srcu_gp_seq, cookie))
            !rcu_seq_done_exact(&ssp->srcu_sup->srcu_gp_seq, cookie))
                return false;
        // Ensure that the end of the SRCU grace period happens before
        // any subsequent code that the caller might execute.
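For context, the polled SRCU grace-period API whose completion check is tightened above is typically used to avoid blocking while waiting for readers. An illustrative kernel-style sketch only (not from this commit; "my_srcu", "my_cookie", and the helper names are made up for the example):

    #include <linux/srcu.h>

    DEFINE_STATIC_SRCU(my_srcu);

    static unsigned long my_cookie;

    static void my_start_grace_period(void)
    {
            /* Snapshot the point we must wait past; does not block. */
            my_cookie = start_poll_synchronize_srcu(&my_srcu);
    }

    static bool my_try_reclaim(void)
    {
            /* Non-blocking check: has a full SRCU grace period elapsed? */
            if (!poll_state_synchronize_srcu(&my_srcu, my_cookie))
                    return false;   /* readers may still hold references */

            /* Safe to free whatever was unlinked before the snapshot. */
            return true;
    }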
@@ -80,6 +80,15 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *);
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_data) = {
        .gpwrap = true,
};

int rcu_get_gpwrap_count(int cpu)
{
        struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

        return READ_ONCE(rdp->gpwrap_count);
}
EXPORT_SYMBOL_GPL(rcu_get_gpwrap_count);

static struct rcu_state rcu_state = {
        .level = { &rcu_state.node[0] },
        .gp_state = RCU_GP_IDLE,
@@ -757,6 +766,25 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
        smp_store_release(per_cpu_ptr(&rcu_data.rcu_urgent_qs, cpu), true);
}

static unsigned long seq_gpwrap_lag = ULONG_MAX / 4;

/**
 * rcu_set_gpwrap_lag - Set RCU GP sequence overflow lag value.
 * @lag_gps: Set overflow lag to this many grace period worth of counters
 * which is used by rcutorture to quickly force a gpwrap situation.
 * @lag_gps = 0 means we reset it back to the boot-time value.
 */
void rcu_set_gpwrap_lag(unsigned long lag_gps)
{
        unsigned long lag_seq_count;

        lag_seq_count = (lag_gps == 0)
                        ? ULONG_MAX / 4
                        : lag_gps << RCU_SEQ_CTR_SHIFT;
        WRITE_ONCE(seq_gpwrap_lag, lag_seq_count);
}
EXPORT_SYMBOL_GPL(rcu_set_gpwrap_lag);

/*
 * When trying to report a quiescent state on behalf of some other CPU,
 * it is our responsibility to check for and handle potential overflow
@@ -767,9 +795,11 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
{
        raw_lockdep_assert_held_rcu_node(rnp);
        if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + ULONG_MAX / 4,
                         rnp->gp_seq))
        if (ULONG_CMP_LT(rcu_seq_current(&rdp->gp_seq) + seq_gpwrap_lag,
                         rnp->gp_seq)) {
                WRITE_ONCE(rdp->gpwrap, true);
                WRITE_ONCE(rdp->gpwrap_count, READ_ONCE(rdp->gpwrap_count) + 1);
        }
        if (ULONG_CMP_LT(rdp->rcu_iw_gp_seq + ULONG_MAX / 4, rnp->gp_seq))
                rdp->rcu_iw_gp_seq = rnp->gp_seq + ULONG_MAX / 4;
}
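The link between rcu_set_gpwrap_lag() and rcu_gpnum_ovf() above is purely arithmetic: lag_gps grace periods are converted to sequence-counter units, and a CPU whose gp_seq has fallen further behind its rcu_node's gp_seq than that lag is flagged as wrapped. A user-space sketch of that arithmetic (not kernel code; it assumes RCU_SEQ_CTR_SHIFT is 2 and uses a simplified wrap-safe compare in place of the kernel's ULONG_CMP_LT macro):

    #include <stdbool.h>
    #include <stdio.h>

    #define RCU_SEQ_CTR_SHIFT 2
    /* Simplified stand-in for the kernel's wrap-safe comparison. */
    #define ULONG_CMP_LT(a, b) ((long)((a) - (b)) < 0)

    int main(void)
    {
            unsigned long lag_gps = 8;      /* rcutorture.gpwrap_lag_gps default */
            unsigned long seq_gpwrap_lag = lag_gps << RCU_SEQ_CTR_SHIFT;
            unsigned long rnp_gp_seq = 1000;        /* made-up node counter */

            for (unsigned long behind = 6; behind <= 10; behind++) {
                    unsigned long rdp_gp_seq = rnp_gp_seq - (behind << RCU_SEQ_CTR_SHIFT);
                    bool gpwrap = ULONG_CMP_LT(rdp_gp_seq + seq_gpwrap_lag, rnp_gp_seq);

                    printf("CPU %lu GPs behind: gpwrap=%d\n", behind, gpwrap);
            }
            return 0;
    }

With the default of 8, only a CPU more than 8 grace periods behind trips the gpwrap path, matching the documentation for rcutorture.gpwrap_lag_gps.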
@@ -1770,6 +1800,7 @@ static noinline_for_stack bool rcu_gp_init(void)
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root();
        bool start_new_poll;
        unsigned long old_gp_seq;

        WRITE_ONCE(rcu_state.gp_activity, jiffies);
        raw_spin_lock_irq_rcu_node(rnp);
@@ -1797,7 +1828,12 @@ static noinline_for_stack bool rcu_gp_init(void)
         */
        start_new_poll = rcu_sr_normal_gp_init();
        /* Record GP times before starting GP, hence rcu_seq_start(). */
        old_gp_seq = rcu_state.gp_seq;
        rcu_seq_start(&rcu_state.gp_seq);
        /* Ensure that rcu_seq_done_exact() guardband doesn't give false positives. */
        WARN_ON_ONCE(IS_ENABLED(CONFIG_PROVE_RCU) &&
                     rcu_seq_done_exact(&old_gp_seq, rcu_seq_snap(&rcu_state.gp_seq)));

        ASSERT_EXCLUSIVE_WRITER(rcu_state.gp_seq);
        trace_rcu_grace_period(rcu_state.name, rcu_state.gp_seq, TPS("start"));
        rcu_poll_gp_seq_start(&rcu_state.gp_seq_polled_snap);
@@ -183,6 +183,7 @@ struct rcu_data {
        bool            core_needs_qs;  /* Core waits for quiescent state. */
        bool            beenonline;     /* CPU online at least once. */
        bool            gpwrap;         /* Possible ->gp_seq wrap. */
        unsigned int    gpwrap_count;   /* Count of GP sequence wrap. */
        bool            cpu_started;    /* RCU watching this onlining CPU. */
        struct rcu_node *mynode;        /* This CPU's leaf of hierarchy */
        unsigned long   grpmask;        /* Mask to apply to leaf qsmask. */
@@ -839,6 +839,8 @@ our %deprecated_apis = (
        "kunmap"                        => "kunmap_local",
        "kmap_atomic"                   => "kmap_local_page",
        "kunmap_atomic"                 => "kunmap_local",
        "srcu_read_lock_lite"           => "srcu_read_lock_fast",
        "srcu_read_unlock_lite"         => "srcu_read_unlock_fast",
);

#Create a search pattern for all these strings to speed up a loop below
@@ -10,7 +10,7 @@
#
# Authors: Paul E. McKenney <paulmck@kernel.org>

grep -E 'Badness|WARNING:|Warn|BUG|===========|BUG: KCSAN:|Call Trace:|Oops:|detected stalls on CPUs/tasks:|self-detected stall on CPU|Stall ended before state dump start|\?\?\? Writer stall state|rcu_.*kthread starved for|!!!' |
grep -E 'Badness|WARNING:|Warn|BUG|===========|BUG: KCSAN:|Call Trace:|Call trace:|Oops:|detected stalls on CPUs/tasks:|self-detected stall on CPU|Stall ended before state dump start|\?\?\? Writer stall state|rcu_.*kthread starved for|!!!' |
grep -v 'ODEBUG: ' |
grep -v 'This means that this is a DEBUG kernel and it is' |
grep -v 'Warning: unable to open an initial console' |
@@ -73,7 +73,7 @@ config_override_param "$config_dir/CFcommon.$(uname -m)" KcList \
cp $T/KcList $resdir/ConfigFragment

base_resdir=`echo $resdir | sed -e 's/\.[0-9]\+$//'`
if test "$base_resdir" != "$resdir" && test -f $base_resdir/bzImage && test -f $base_resdir/vmlinux
if test "$base_resdir" != "$resdir" && (test -f $base_resdir/bzImage || test -f $base_resdir/Image) && test -f $base_resdir/vmlinux
then
        # Rerunning previous test, so use that test's kernel.
        QEMU="`identify_qemu $base_resdir/vmlinux`"
@@ -148,7 +148,7 @@ then
        summary="$summary KCSAN: $n_kcsan"
    fi
fi
n_calltrace=`grep -c 'Call Trace:' $file`
n_calltrace=`grep -Ec 'Call Trace:|Call trace:' $file`
if test "$n_calltrace" -ne 0
then
        summary="$summary Call Traces: $n_calltrace"
@ -39,8 +39,9 @@ do
|
||||
shift
|
||||
done
|
||||
|
||||
err=
|
||||
nerrs=0
|
||||
|
||||
# Test lockdep's handling of deadlocks.
|
||||
for d in 0 1
|
||||
do
|
||||
for t in 0 1 2
|
||||
@ -52,6 +53,12 @@ do
|
||||
tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 5s --configs "SRCU-P" --kconfig "CONFIG_FORCE_NEED_SRCU_NMI_SAFE=y" --bootargs "rcutorture.test_srcu_lockdep=$val rcutorture.reader_flavor=0x2" --trust-make --datestamp "$ds/$val" > "$T/kvm.sh.out" 2>&1
|
||||
ret=$?
|
||||
mv "$T/kvm.sh.out" "$RCUTORTURE/res/$ds/$val"
|
||||
if ! grep -q '^CONFIG_PROVE_LOCKING=y' .config
|
||||
then
|
||||
echo "rcu_torture_init_srcu_lockdep:Error: CONFIG_PROVE_LOCKING disabled in rcutorture SRCU-P scenario"
|
||||
nerrs=$((nerrs+1))
|
||||
err=1
|
||||
fi
|
||||
if test "$d" -ne 0 && test "$ret" -eq 0
|
||||
then
|
||||
err=1
|
||||
@ -71,6 +78,39 @@ do
|
||||
done
|
||||
done
|
||||
done
|
||||
|
||||
# Test lockdep-enabled testing of mixed SRCU readers.
|
||||
for val in 0x1 0xf
|
||||
do
|
||||
err=
|
||||
tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration 5s --configs "SRCU-P" --kconfig "CONFIG_FORCE_NEED_SRCU_NMI_SAFE=y" --bootargs "rcutorture.reader_flavor=$val" --trust-make --datestamp "$ds/$val" > "$T/kvm.sh.out" 2>&1
|
||||
ret=$?
|
||||
mv "$T/kvm.sh.out" "$RCUTORTURE/res/$ds/$val"
|
||||
if ! grep -q '^CONFIG_PROVE_LOCKING=y' .config
|
||||
then
|
||||
echo "rcu_torture_init_srcu_lockdep:Error: CONFIG_PROVE_LOCKING disabled in rcutorture SRCU-P scenario"
|
||||
nerrs=$((nerrs+1))
|
||||
err=1
|
||||
fi
|
||||
if test "$val" -eq 0xf && test "$ret" -eq 0
|
||||
then
|
||||
err=1
|
||||
echo -n Unexpected success for > "$RCUTORTURE/res/$ds/$val/kvm.sh.err"
|
||||
fi
|
||||
if test "$val" -eq 0x1 && test "$ret" -ne 0
|
||||
then
|
||||
err=1
|
||||
echo -n Unexpected failure for > "$RCUTORTURE/res/$ds/$val/kvm.sh.err"
|
||||
fi
|
||||
if test -n "$err"
|
||||
then
|
||||
grep "rcu_torture_init_srcu_lockdep: test_srcu_lockdep = " "$RCUTORTURE/res/$ds/$val/SRCU-P/console.log" | sed -e 's/^.*rcu_torture_init_srcu_lockdep://' >> "$RCUTORTURE/res/$ds/$val/kvm.sh.err"
|
||||
cat "$RCUTORTURE/res/$ds/$val/kvm.sh.err"
|
||||
nerrs=$((nerrs+1))
|
||||
fi
|
||||
done
|
||||
|
||||
# Set up exit code.
|
||||
if test "$nerrs" -ne 0
|
||||
then
|
||||
exit 1
|
||||
|
||||
@ -51,12 +51,15 @@ do_scftorture=yes
|
||||
do_rcuscale=yes
|
||||
do_refscale=yes
|
||||
do_kvfree=yes
|
||||
do_normal=yes
|
||||
explicit_normal=no
|
||||
do_kasan=yes
|
||||
do_kcsan=no
|
||||
do_clocksourcewd=yes
|
||||
do_rt=yes
|
||||
do_rcutasksflavors=yes
|
||||
do_srcu_lockdep=yes
|
||||
do_rcu_rust=no
|
||||
|
||||
# doyesno - Helper function for yes/no arguments
|
||||
function doyesno () {
|
||||
@ -87,6 +90,7 @@ usage () {
|
||||
echo " --do-rcutorture / --do-no-rcutorture / --no-rcutorture"
|
||||
echo " --do-refscale / --do-no-refscale / --no-refscale"
|
||||
echo " --do-rt / --do-no-rt / --no-rt"
|
||||
echo " --do-rcu-rust / --do-no-rcu-rust / --no-rcu-rust"
|
||||
echo " --do-scftorture / --do-no-scftorture / --no-scftorture"
|
||||
echo " --do-srcu-lockdep / --do-no-srcu-lockdep / --no-srcu-lockdep"
|
||||
echo " --duration [ <minutes> | <hours>h | <days>d ]"
|
||||
@ -128,6 +132,8 @@ do
|
||||
do_refscale=yes
|
||||
do_rt=yes
|
||||
do_kvfree=yes
|
||||
do_normal=yes
|
||||
explicit_normal=no
|
||||
do_kasan=yes
|
||||
do_kcsan=yes
|
||||
do_clocksourcewd=yes
|
||||
@ -161,11 +167,17 @@ do
|
||||
do_refscale=no
|
||||
do_rt=no
|
||||
do_kvfree=no
|
||||
do_normal=no
|
||||
explicit_normal=no
|
||||
do_kasan=no
|
||||
do_kcsan=no
|
||||
do_clocksourcewd=no
|
||||
do_srcu_lockdep=no
|
||||
;;
|
||||
--do-normal|--do-no-normal|--no-normal)
|
||||
do_normal=`doyesno "$1" --do-normal`
|
||||
explicit_normal=yes
|
||||
;;
|
||||
--do-rcuscale|--do-no-rcuscale|--no-rcuscale)
|
||||
do_rcuscale=`doyesno "$1" --do-rcuscale`
|
||||
;;
|
||||
@ -181,6 +193,9 @@ do
|
||||
--do-rt|--do-no-rt|--no-rt)
|
||||
do_rt=`doyesno "$1" --do-rt`
|
||||
;;
|
||||
--do-rcu-rust|--do-no-rcu-rust|--no-rcu-rust)
|
||||
do_rcu_rust=`doyesno "$1" --do-rcu-rust`
|
||||
;;
|
||||
--do-scftorture|--do-no-scftorture|--no-scftorture)
|
||||
do_scftorture=`doyesno "$1" --do-scftorture`
|
||||
;;
|
||||
@ -242,6 +257,17 @@ trap 'rm -rf $T' 0 2
|
||||
echo " --- " $scriptname $args | tee -a $T/log
|
||||
echo " --- Results directory: " $ds | tee -a $T/log
|
||||
|
||||
if test "$do_normal" = "no" && test "$do_kasan" = "no" && test "$do_kcsan" = "no"
|
||||
then
|
||||
# Match old scripts so that "--do-none --do-rcutorture" does
|
||||
# normal rcutorture testing, but no KASAN or KCSAN testing.
|
||||
if test $explicit_normal = yes
|
||||
then
|
||||
echo " --- Everything disabled, so explicit --do-normal overridden" | tee -a $T/log
|
||||
fi
|
||||
do_normal=yes
|
||||
fi
|
||||
|
||||
# Calculate rcutorture defaults and apportion time
|
||||
if test -z "$configs_rcutorture"
|
||||
then
|
||||
@ -332,9 +358,12 @@ function torture_set {
|
||||
local kcsan_kmake_tag=
|
||||
local flavor=$1
|
||||
shift
|
||||
curflavor=$flavor
|
||||
torture_one "$@"
|
||||
mv $T/last-resdir $T/last-resdir-nodebug || :
|
||||
if test "$do_normal" = "yes"
|
||||
then
|
||||
curflavor=$flavor
|
||||
torture_one "$@"
|
||||
mv $T/last-resdir $T/last-resdir-nodebug || :
|
||||
fi
|
||||
if test "$do_kasan" = "yes"
|
||||
then
|
||||
curflavor=${flavor}-kasan
|
||||
@ -448,13 +477,57 @@ fi
|
||||
|
||||
if test "$do_rt" = "yes"
|
||||
then
|
||||
# With all post-boot grace periods forced to normal.
|
||||
torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 rcupdate.rcu_normal=1"
|
||||
torture_set "rcurttorture" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "TREE03" --trust-make
|
||||
# In both runs, disable testing of RCU priority boosting because
|
||||
# -rt doesn't like its interaction with testing of callback
|
||||
# flooding.
|
||||
|
||||
# With all post-boot grace periods forced to normal (default for PREEMPT_RT).
|
||||
torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 rcutorture.test_boost=0 rcutorture.preempt_duration=0"
|
||||
torture_set "rcurttorture" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "TREE03" --kconfig "CONFIG_PREEMPT_RT=y CONFIG_EXPERT=y CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_IDLE=y CONFIG_RCU_NOCB_CPU=y" --trust-make
|
||||
|
||||
# With all post-boot grace periods forced to expedited.
|
||||
torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 rcupdate.rcu_expedited=1"
|
||||
torture_set "rcurttorture-exp" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "TREE03" --trust-make
|
||||
torture_bootargs="rcupdate.rcu_cpu_stall_suppress_at_boot=1 torture.disable_onoff_at_boot rcupdate.rcu_task_stall_timeout=30000 rcutorture.test_boost=0 rcupdate.rcu_normal_after_boot=0 rcupdate.rcu_expedited=1 rcutorture.preempt_duration=0"
|
||||
torture_set "rcurttorture-exp" tools/testing/selftests/rcutorture/bin/kvm.sh --allcpus --duration "$duration_rcutorture" --configs "TREE03" --kconfig "CONFIG_PREEMPT_RT=y CONFIG_EXPERT=y CONFIG_HZ_PERIODIC=n CONFIG_NO_HZ_FULL=y CONFIG_RCU_NOCB_CPU=y" --trust-make
|
||||
fi
|
||||
|
||||
if test "$do_rcu_rust" = "yes"
|
||||
then
|
||||
echo " --- do-rcu-rust:" Start `date` | tee -a $T/log
|
||||
rrdir="tools/testing/selftests/rcutorture/res/$ds/results-rcu-rust"
|
||||
mkdir -p "$rrdir"
|
||||
echo " --- make LLVM=1 rustavailable " | tee -a $rrdir/log > $rrdir/rustavailable.out
|
||||
make LLVM=1 rustavailable > $T/rustavailable.out 2>&1
|
||||
retcode=$?
|
||||
echo $retcode > $rrdir/rustavailable.exitcode
|
||||
cat $T/rustavailable.out | tee -a $rrdir/log >> $rrdir/rustavailable.out 2>&1
|
||||
buildphase=rustavailable
|
||||
if test "$retcode" -eq 0
|
||||
then
|
||||
echo " --- Running 'make mrproper' in order to run kunit." | tee -a $rrdir/log > $rrdir/mrproper.out
|
||||
make mrproper > $rrdir/mrproper.out 2>&1
|
||||
retcode=$?
|
||||
echo $retcode > $rrdir/mrproper.exitcode
|
||||
buildphase=mrproper
|
||||
fi
|
||||
if test "$retcode" -eq 0
|
||||
then
|
||||
echo " --- Running rust_doctests_kernel." | tee -a $rrdir/log > $rrdir/rust_doctests_kernel.out
|
||||
./tools/testing/kunit/kunit.py run --make_options LLVM=1 --make_options CLIPPY=1 --arch arm64 --kconfig_add CONFIG_SMP=y --kconfig_add CONFIG_WERROR=y --kconfig_add CONFIG_RUST=y rust_doctests_kernel >> $rrdir/rust_doctests_kernel.out 2>&1
|
||||
# @@@ Remove "--arch arm64" in order to test on native architecture?
|
||||
# @@@ Analyze $rrdir/rust_doctests_kernel.out contents?
|
||||
retcode=$?
|
||||
echo $retcode > $rrdir/rust_doctests_kernel.exitcode
|
||||
buildphase=rust_doctests_kernel
|
||||
fi
|
||||
if test "$retcode" -eq 0
|
||||
then
|
||||
echo "rcu-rust($retcode)" $rrdir >> $T/successes
|
||||
echo Success >> $rrdir/log
|
||||
else
|
||||
echo "rcu-rust($retcode)" $rrdir >> $T/failures
|
||||
echo " --- rcu-rust Test summary:" >> $rrdir/log
|
||||
echo " --- Summary: Exit code $retcode from $buildphase, see $rrdir/$buildphase.out" >> $rrdir/log
|
||||
fi
|
||||
fi
|
||||
|
||||
if test "$do_srcu_lockdep" = "yes"
|
||||
|
||||
@@ -8,8 +8,6 @@ CONFIG_NO_HZ_IDLE=y
CONFIG_NO_HZ_FULL=n
CONFIG_RCU_TRACE=y
CONFIG_HOTPLUG_CPU=y
CONFIG_MAXSMP=y
CONFIG_CPUMASK_OFFSTACK=y
CONFIG_RCU_NOCB_CPU=y
CONFIG_DEBUG_LOCK_ALLOC=n
CONFIG_RCU_BOOST=n
@@ -1,4 +1,4 @@
maxcpus=8 nr_cpus=43
maxcpus=8 nr_cpus=17
rcutree.gp_preinit_delay=3
rcutree.gp_init_delay=3
rcutree.gp_cleanup_delay=3