mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-01-11 17:10:13 +00:00
tracing fixes for v6.19:
- Remove useless assignment of soft_mode variable The function __ftrace_event_enable_disable() sets "soft_mode" in one of the branch paths but doesn't use it after that. Remove the setting of that variable. - Add a cond_resched() in ring_buffer_resize() The resize function that allocates all the pages for the ring buffer was causing a soft lockup on PREEMPT_NONE configs when allocating large buffers on machines with many CPUs. Hopefully this is the last cond_resched() needed to be added as PREEMPT_LAZY becomes the norm in the future. - Make ftrace_graph_ent depth field signed The "depth" field of struct ftrace_graph_ent was converted from "int" to "unsigned long" for alignment reasons to work with being embedded in other structures. The conversion from a signed to unsigned caused integrity checks to always pass as they were comparing "depth" to less than zero. Make the field signed long. - Add recursion protection to stack trace events An infinite recursion was triggered by a stack trace event calling RCU which internally called rcu_read_unlock_special(), which triggered an event that was also doing stacktraces which caused it to trigger the same RCU lock that called rcu_read_unlock_special() again. Update the trace_test_and_set_recursion() to add a set of context checks for events to use, and have the stack trace event use that for recursion protection. - Make the variable ftrace_dump_on_oops static The cleanup of sysctl that moved all the updates to the files that use them moved the reference of ftrace_dump_on_oops to where it is used. It is no longer used outside of the trace.c file. Make it static. 
-----BEGIN PGP SIGNATURE----- iIoEABYKADIWIQRRSw7ePDh/lE+zeZMp5XQQmuv6qgUCaV/2wRQccm9zdGVkdEBn b29kbWlzLm9yZwAKCRAp5XQQmuv6qmMqAQD+LyAOb7bKlgFjwRABjszg1yDhJPb0 gQGSNPchQyq/7gD8Cu3/ze5UxrNV8cNNsbAPu0/xEg4eyozbRiP/VjzZ4gU= =uLUP -----END PGP SIGNATURE----- Merge tag 'trace-v6.19-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace Pull tracing fixes from Steven Rostedt: - Remove useless assignment of soft_mode variable The function __ftrace_event_enable_disable() sets "soft_mode" in one of the branch paths but doesn't use it after that. Remove the setting of that variable. - Add a cond_resched() in ring_buffer_resize() The resize function that allocates all the pages for the ring buffer was causing a soft lockup on PREEMPT_NONE configs when allocating large buffers on machines with many CPUs. Hopefully this is the last cond_resched() needed to be added as PREEMPT_LAZY becomes the norm in the future. - Make ftrace_graph_ent depth field signed The "depth" field of struct ftrace_graph_ent was converted from "int" to "unsigned long" for alignment reasons to work with being embedded in other structures. The conversion from a signed to unsigned caused integrity checks to always pass as they were comparing "depth" to less than zero. Make the field signed long. - Add recursion protection to stack trace events An infinite recursion was triggered by a stack trace event calling RCU which internally called rcu_read_unlock_special(), which triggered an event that was also doing stacktraces which caused it to trigger the same RCU lock that called rcu_read_unlock_special() again. Update the trace_test_and_set_recursion() to add a set of context checks for events to use, and have the stack trace event use that for recursion protection. - Make the variable ftrace_dump_on_oops static The cleanup of sysctl that moved all the updates to the files that use them moved the reference of ftrace_dump_on_oops to where it is used. 
It is no longer used outside of the trace.c file. Make it static. * tag 'trace-v6.19-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace: trace: ftrace_dump_on_oops[] is not exported, make it static tracing: Add recursion protection in kernel stack trace recording ftrace: Make ftrace_graph_ent depth field signed ring-buffer: Avoid softlockup in ring_buffer_resize() during memory free tracing: Drop unneeded assignment to soft_mode
This commit is contained in:
commit
5572ad8fdd
@ -1167,7 +1167,7 @@ static inline void ftrace_init(void) { }
|
||||
*/
|
||||
struct ftrace_graph_ent {
|
||||
unsigned long func; /* Current function */
|
||||
unsigned long depth;
|
||||
long depth; /* signed to check for less than zero */
|
||||
} __packed;
|
||||
|
||||
/*
|
||||
|
||||
@ -34,6 +34,13 @@ enum {
|
||||
TRACE_INTERNAL_SIRQ_BIT,
|
||||
TRACE_INTERNAL_TRANSITION_BIT,
|
||||
|
||||
/* Internal event use recursion bits */
|
||||
TRACE_INTERNAL_EVENT_BIT,
|
||||
TRACE_INTERNAL_EVENT_NMI_BIT,
|
||||
TRACE_INTERNAL_EVENT_IRQ_BIT,
|
||||
TRACE_INTERNAL_EVENT_SIRQ_BIT,
|
||||
TRACE_INTERNAL_EVENT_TRANSITION_BIT,
|
||||
|
||||
TRACE_BRANCH_BIT,
|
||||
/*
|
||||
* Abuse of the trace_recursion.
|
||||
@ -58,6 +65,8 @@ enum {
|
||||
|
||||
#define TRACE_LIST_START TRACE_INTERNAL_BIT
|
||||
|
||||
#define TRACE_EVENT_START TRACE_INTERNAL_EVENT_BIT
|
||||
|
||||
#define TRACE_CONTEXT_MASK ((1 << (TRACE_LIST_START + TRACE_CONTEXT_BITS)) - 1)
|
||||
|
||||
/*
|
||||
|
||||
@ -3137,6 +3137,8 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
|
||||
list) {
|
||||
list_del_init(&bpage->list);
|
||||
free_buffer_page(bpage);
|
||||
|
||||
cond_resched();
|
||||
}
|
||||
}
|
||||
out_err_unlock:
|
||||
|
||||
@ -138,7 +138,7 @@ cpumask_var_t __read_mostly tracing_buffer_mask;
|
||||
* by commas.
|
||||
*/
|
||||
/* Set to string format zero to disable by default */
|
||||
char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0";
|
||||
static char ftrace_dump_on_oops[MAX_TRACER_SIZE] = "0";
|
||||
|
||||
/* When set, tracing will stop when a WARN*() is hit */
|
||||
static int __disable_trace_on_warning;
|
||||
@ -3012,6 +3012,11 @@ static void __ftrace_trace_stack(struct trace_array *tr,
|
||||
struct ftrace_stack *fstack;
|
||||
struct stack_entry *entry;
|
||||
int stackidx;
|
||||
int bit;
|
||||
|
||||
bit = trace_test_and_set_recursion(_THIS_IP_, _RET_IP_, TRACE_EVENT_START);
|
||||
if (bit < 0)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Add one, for this function and the call to save_stack_trace()
|
||||
@ -3080,6 +3085,7 @@ static void __ftrace_trace_stack(struct trace_array *tr,
|
||||
/* Again, don't let gcc optimize things here */
|
||||
barrier();
|
||||
__this_cpu_dec(ftrace_stack_reserve);
|
||||
trace_clear_recursion(bit);
|
||||
}
|
||||
|
||||
static inline void ftrace_trace_stack(struct trace_array *tr,
|
||||
|
||||
@ -826,16 +826,15 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
|
||||
* When soft_disable is set and enable is set, we want to
|
||||
* register the tracepoint for the event, but leave the event
|
||||
* as is. That means, if the event was already enabled, we do
|
||||
* nothing (but set soft_mode). If the event is disabled, we
|
||||
* set SOFT_DISABLED before enabling the event tracepoint, so
|
||||
* it still seems to be disabled.
|
||||
* nothing. If the event is disabled, we set SOFT_DISABLED
|
||||
* before enabling the event tracepoint, so it still seems
|
||||
* to be disabled.
|
||||
*/
|
||||
if (!soft_disable)
|
||||
clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
|
||||
else {
|
||||
if (atomic_inc_return(&file->sm_ref) > 1)
|
||||
break;
|
||||
soft_mode = true;
|
||||
/* Enable use of trace_buffered_event */
|
||||
trace_buffered_event_enable();
|
||||
}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user