Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-01-11 17:10:13 +00:00
Compare commits
15 Commits
9d9c1cfec0...4a298a43f5
4a298a43f5
cba09e3ed0
db0130185e
edbe407235
bdae29d651
d36067d6ea
b5e51ef787
9415f749d3
c94291914b
7dbc0d40d8
55026a9670
89acaa5537
c418d8b4d7
0143928651
92546f6b52
@@ -109,10 +109,6 @@ irq_domain maintains a radix tree mapping from hwirq numbers to Linux IRQs. When a hw
 If the hwirq number can be very large, the tree map is a good choice, since it does
 not need to allocate a table as large as the largest hwirq number. The disadvantage
 is that the hwirq-to-IRQ-number lookup depends on how many entries are in the table.
 
-irq_domain_add_tree() and irq_domain_create_tree() are functionally equivalent, except
-for the first argument: the former accepts an Open Firmware specific 'struct device_node',
-while the latter accepts a more general, abstract 'struct fwnode_handle'.
-
 Very few drivers should need this mapping.
 
 No Map
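For orientation, tree domains are created through irq_domain_create_tree(), which this series keeps while removing the irq_domain_add_tree() wrapper (see the irqdomain.h hunk below). A minimal driver-side sketch: irq_domain_create_tree(), dev_fwnode() and irq_domain_xlate_onecell() are real kernel APIs, while my_eic_map(), my_eic_ops and my_eic_probe() are hypothetical names used only for illustration.

static int my_eic_map(struct irq_domain *d, unsigned int virq,
		      irq_hw_number_t hwirq)
{
	/* Associate the chip and handler with this virq here. */
	return 0;
}

static const struct irq_domain_ops my_eic_ops = {
	.map	= my_eic_map,
	.xlate	= irq_domain_xlate_onecell,
};

static int my_eic_probe(struct platform_device *pdev)
{
	/* Tree-mapped domain: no table sized by the largest hwirq number. */
	struct irq_domain *d = irq_domain_create_tree(dev_fwnode(&pdev->dev),
						      &my_eic_ops, NULL);

	return d ? 0 : -ENOMEM;
}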
@@ -656,14 +656,11 @@ static int amd_uncore_df_event_init(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	int ret = amd_uncore_event_init(event);
 
-	if (ret || pmu_version < 2)
-		return ret;
-
 	hwc->config = event->attr.config &
		      (pmu_version >= 2 ? AMD64_PERFMON_V2_RAW_EVENT_MASK_NB :
					  AMD64_RAW_EVENT_MASK_NB);
 
-	return 0;
+	return ret;
 }
 
 static int amd_uncore_df_add(struct perf_event *event, int flags)
@@ -3378,6 +3378,9 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 
 		if (!test_bit(bit, cpuc->active_mask))
			continue;
+		/* Event may have already been cleared: */
+		if (!event)
+			continue;
 
		/*
		 * There may be unprocessed PEBS records in the PEBS buffer,
@@ -170,7 +170,7 @@ static int mchp_eic_domain_alloc(struct irq_domain *domain, unsigned int virq,
 
 	ret = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type);
 	if (ret || hwirq >= MCHP_EIC_NIRQ)
-		return ret;
+		return ret ?: -EINVAL;
 
 	switch (type) {
 	case IRQ_TYPE_EDGE_RISING:
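The new return uses GCC's binary conditional extension: a ?: b yields a when a is nonzero, otherwise b, and unlike a ? a : b it evaluates a only once. Here it preserves a real translation error in ret but converts the "translation succeeded, hwirq out of range" case into -EINVAL instead of returning 0. A standalone userspace sketch of the semantics (values made up):

#include <stdio.h>

#define MY_EINVAL 22	/* stand-in for the kernel's EINVAL */

static int check(int ret)
{
	return ret ?: -MY_EINVAL;	/* ret if nonzero, else -MY_EINVAL */
}

int main(void)
{
	printf("%d\n", check(-5));	/* -5: existing error preserved */
	printf("%d\n", check(0));	/* -22: success but bad hwirq -> -EINVAL */
	return 0;
}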
@@ -730,22 +730,6 @@ static inline void msi_device_domain_free_wired(struct irq_domain *domain, unsig
 }
 #endif
 
-static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
-						     const struct irq_domain_ops *ops,
-						     void *host_data)
-{
-	struct irq_domain_info info = {
-		.fwnode		= of_fwnode_handle(of_node),
-		.hwirq_max	= ~0U,
-		.ops		= ops,
-		.host_data	= host_data,
-	};
-	struct irq_domain *d;
-
-	d = irq_domain_instantiate(&info);
-	return IS_ERR(d) ? NULL : d;
-}
-
 static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
						       unsigned int size,
						       const struct irq_domain_ops *ops,
@@ -596,7 +596,7 @@ static __always_inline void rseq_exit_to_user_mode_legacy(void)
 
 void __rseq_debug_syscall_return(struct pt_regs *regs);
 
-static inline void rseq_debug_syscall_return(struct pt_regs *regs)
+static __always_inline void rseq_debug_syscall_return(struct pt_regs *regs)
 {
	if (static_branch_unlikely(&rseq_debug_enabled))
		__rseq_debug_syscall_return(regs);
25	kernel/cpu.c
@@ -249,6 +249,14 @@ err:
	return ret;
 }
 
+/*
+ * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
+ */
+static bool cpuhp_is_atomic_state(enum cpuhp_state state)
+{
+	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
+}
+
 #ifdef CONFIG_SMP
 static bool cpuhp_is_ap_state(enum cpuhp_state state)
 {

@@ -271,14 +279,6 @@ static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
	complete(done);
 }
 
-/*
- * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
- */
-static bool cpuhp_is_atomic_state(enum cpuhp_state state)
-{
-	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
-}
-
 /* Synchronization state management */
 enum cpuhp_sync_state {
	SYNC_STATE_DEAD,
@@ -2364,7 +2364,14 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
 #else
-	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+	if (cpuhp_is_atomic_state(state)) {
+		guard(irqsave)();
+		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+		/* STARTING/DYING must not fail! */
+		WARN_ON_ONCE(ret);
+	} else {
+		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+	}
 #endif
	BUG_ON(ret && !bringup);
	return ret;
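guard(irqsave)() is the scope-based helper from linux/cleanup.h: it performs local_irq_save() immediately and arranges local_irq_restore() to run when the enclosing scope ends, so the callback executes with IRQs disabled and the flags are restored on every exit path. A sketch of the open-coded equivalent of the new branch (illustration only, not the committed code):

	unsigned long flags;

	local_irq_save(flags);
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
	/* STARTING/DYING must not fail! */
	WARN_ON_ONCE(ret);
	local_irq_restore(flags);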
@@ -2317,8 +2317,6 @@ out:
		perf_event__header_size(leader);
 }
 
-static void sync_child_event(struct perf_event *child_event);
-
 static void perf_child_detach(struct perf_event *event)
 {
	struct perf_event *parent_event = event->parent;
@@ -2337,7 +2335,6 @@ static void perf_child_detach(struct perf_event *event)
	 * lockdep_assert_held(&parent_event->child_mutex);
	 */
 
-	sync_child_event(event);
	list_del_init(&event->child_list);
 }
 
@@ -4588,6 +4585,7 @@ out:
 static void perf_remove_from_owner(struct perf_event *event);
 static void perf_event_exit_event(struct perf_event *event,
				  struct perf_event_context *ctx,
+				  struct task_struct *task,
				  bool revoke);
 
 /*
@@ -4615,7 +4613,7 @@ static void perf_event_remove_on_exec(struct perf_event_context *ctx)
 
		modified = true;
 
-		perf_event_exit_event(event, ctx, false);
+		perf_event_exit_event(event, ctx, ctx->task, false);
	}
 
	raw_spin_lock_irqsave(&ctx->lock, flags);
@@ -12518,7 +12516,7 @@ static void __pmu_detach_event(struct pmu *pmu, struct perf_event *event,
	/*
	 * De-schedule the event and mark it REVOKED.
	 */
-	perf_event_exit_event(event, ctx, true);
+	perf_event_exit_event(event, ctx, ctx->task, true);
 
	/*
	 * All _free_event() bits that rely on event->pmu:
@@ -14075,14 +14073,13 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
 }
 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
 
-static void sync_child_event(struct perf_event *child_event)
+static void sync_child_event(struct perf_event *child_event,
+			     struct task_struct *task)
 {
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;
 
	if (child_event->attr.inherit_stat) {
-		struct task_struct *task = child_event->ctx->task;
-
		if (task && task != TASK_TOMBSTONE)
			perf_event_read_event(child_event, task);
	}
@@ -14101,7 +14098,9 @@ static void sync_child_event(struct perf_event *child_event)
 
 static void
 perf_event_exit_event(struct perf_event *event,
-		      struct perf_event_context *ctx, bool revoke)
+		      struct perf_event_context *ctx,
+		      struct task_struct *task,
+		      bool revoke)
 {
	struct perf_event *parent_event = event->parent;
	unsigned long detach_flags = DETACH_EXIT;
@@ -14124,6 +14123,9 @@ perf_event_exit_event(struct perf_event *event,
		mutex_lock(&parent_event->child_mutex);
+		/* PERF_ATTACH_ITRACE might be set concurrently */
+		attach_state = READ_ONCE(event->attach_state);
+
		if (attach_state & PERF_ATTACH_CHILD)
			sync_child_event(event, task);
	}
 
	if (revoke)
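The point of the READ_ONCE() snapshot is that event->attach_state can be modified concurrently; copying it once and testing the local value keeps the decision self-consistent, whereas repeated plain reads could each observe a different value. A generic sketch of the pattern (obj, FLAG_A and handle_a() are hypothetical names, not from this series):

	/* Snapshot a concurrently-written field once, then reason about the copy. */
	unsigned int snap = READ_ONCE(obj->flags);

	if (snap & FLAG_A)
		handle_a(obj);	/* decided on the snapshot, not on a re-read */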
@@ -14215,7 +14217,7 @@ static void perf_event_exit_task_context(struct task_struct *task, bool exit)
	perf_event_task(task, ctx, 0);
 
	list_for_each_entry_safe(child_event, next, &ctx->event_list, event_entry)
-		perf_event_exit_event(child_event, ctx, false);
+		perf_event_exit_event(child_event, ctx, exit ? task : NULL, false);
 
	mutex_unlock(&ctx->mutex);
 
@@ -79,7 +79,7 @@ struct uprobe {
	 * The generic code assumes that it has two members of unknown type
	 * owned by the arch-specific code:
	 *
-	 * insn - copy_insn() saves the original instruction here for
+	 * insn - copy_insn() saves the original instruction here for
	 *	  arch_uprobe_analyze_insn().
	 *
	 *	ixol - potentially modified instruction to execute out of
@@ -107,8 +107,8 @@ static LIST_HEAD(delayed_uprobe_list);
  * allocated.
  */
 struct xol_area {
-	wait_queue_head_t wq;		/* if all slots are busy */
-	unsigned long *bitmap;		/* 0 = free slot */
+	wait_queue_head_t wq;		/* if all slots are busy */
+	unsigned long *bitmap;		/* 0 = free slot */
 
	struct page *page;
	/*
@@ -116,7 +116,7 @@ struct xol_area {
	 * itself. The probed process or a naughty kernel module could make
	 * the vma go away, and we must handle that reasonably gracefully.
	 */
-	unsigned long vaddr;		/* Page(s) of instruction slots */
+	unsigned long vaddr;		/* Page(s) of instruction slots */
 };
 
 static void uprobe_warn(struct task_struct *t, const char *msg)
@@ -2470,6 +2470,9 @@ int setup_percpu_irq(unsigned int irq, struct irqaction *act)
	if (retval < 0)
		return retval;
 
+	if (!act->affinity)
+		act->affinity = cpu_online_mask;
+
	retval = __setup_irq(irq, desc, act);
 
	if (retval)
@@ -173,6 +173,9 @@ struct bug_entry *find_bug(unsigned long bugaddr)
	return module_find_bug(bugaddr);
 }
 
+__diag_push();
+__diag_ignore(GCC, all, "-Wsuggest-attribute=format",
+	      "Not a valid __printf() conversion candidate.");
 static void __warn_printf(const char *fmt, struct pt_regs *regs)
 {
	if (!fmt)
@@ -192,6 +195,7 @@ static void __warn_printf(const char *fmt, struct pt_regs *regs)
 
	printk("%s", fmt);
 }
+__diag_pop();
 
 static enum bug_trap_type __report_bug(struct bug_entry *bug, unsigned long bugaddr, struct pt_regs *regs)
 {
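__diag_push(), __diag_ignore() and __diag_pop() are the kernel's portable wrappers around the compiler's diagnostic pragmas: save the diagnostic state, suppress one named warning for the code in between, then restore. For a GCC build the net effect is roughly the following (a sketch of the underlying pragmas, not the macros' exact expansion):

	#pragma GCC diagnostic push				/* __diag_push() */
	#pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
	/* ... __warn_printf() compiles without the warning here ... */
	#pragma GCC diagnostic pop				/* __diag_pop()  */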
@@ -262,7 +266,7 @@ enum bug_trap_type report_bug_entry(struct bug_entry *bug, struct pt_regs *regs)
	bool rcu = false;
 
	rcu = warn_rcu_enter();
-	ret = __report_bug(bug, 0, regs);
+	ret = __report_bug(bug, bug_addr(bug), regs);
	warn_rcu_exit(rcu);
 
	return ret;