Joe Lawrence b137312fbf powerpc64/modules: replace stub allocation sentinel with an explicit counter
The logic for allocating ppc64_stub_entry trampolines in the .stubs
section relies on an inline sentinel, where a NULL .funcdata member
indicates an available slot.

While preceding commits fixed the initialization bugs that led to ftrace
stub corruption, the sentinel-based approach remains fragile: it depends
on an implicit convention between subsystems modifying different
struct types in the same memory area.

Replace the sentinel with an explicit counter, module->arch.stub_count.
Instead of iterating through memory to find a NULL marker, the module
loader uses this counter as the index of the next free slot.

This simplifies the allocation code, hardens it against future changes
to stub structures, and removes the need for an extra relocation slot
previously reserved to terminate the sentinel search.

Signed-off-by: Joe Lawrence <joe.lawrence@redhat.com>
Acked-by: Naveen N Rao (AMD) <naveen@kernel.org>
Signed-off-by: Madhavan Srinivasan <maddy@linux.ibm.com>
Link: https://patch.msgid.link/20250912142740.3581368-4-joe.lawrence@redhat.com
2025-09-15 16:40:52 +05:30
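In code terms, the lookup stops probing stub slots for a NULL .funcdata and
instead scans only the first stub_count entries, with the counter doubling as
the index of the next free slot. A simplified sketch of the resulting shape,
modeled on stub_for_addr() in arch/powerpc/kernel/module_64.c (abridged and
illustrative, not the verbatim kernel code):

/* Sketch only: counter-based stub allocation, per the commit above. */
static unsigned long stub_for_addr(const Elf64_Shdr *sechdrs, unsigned long addr,
				   struct module *me, const char *name)
{
	struct ppc64_stub_entry *stubs =
		(void *)sechdrs[me->arch.stubs_section].sh_addr;
	unsigned int i, num_stubs =
		sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);

	/* Reuse an existing stub for this target if one was already built. */
	for (i = 0; i < me->arch.stub_count; i++) {
		if (stub_func_addr(stubs[i].funcdata) == func_addr(addr))
			return (unsigned long)&stubs[i];
	}

	/* No sentinel scan: the counter itself names the next free slot. */
	if (WARN_ON(i >= num_stubs))
		return 0;

	if (!create_stub(sechdrs, &stubs[i], addr, me, name))
		return 0;

	me->arch.stub_count++;
	return (unsigned long)&stubs[i];
}

Because the counter, not a NULL entry, terminates the search, the section no
longer needs an empty trailing slot as a terminator, which is the extra
reserved relocation slot the message refers to.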

arch/powerpc/include/asm/module.h (93 lines, 2.4 KiB, C)

/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_MODULE_H
#define _ASM_POWERPC_MODULE_H
#ifdef __KERNEL__

#include <linux/list.h>
#include <asm/bug.h>
#include <asm-generic/module.h>

#ifndef __powerpc64__
/*
 * Thanks to Paul M for explaining this.
 *
 * PPC can only do rel jumps += 32MB, and often the kernel and other
 * modules are further away than this.  So, we jump to a table of
 * trampolines attached to the module (the Procedure Linkage Table)
 * whenever that happens.
 */

struct ppc_plt_entry {
	/* 16 byte jump instruction sequence (4 instructions) */
	unsigned int jump[4];
};
#endif /* __powerpc64__ */
struct mod_arch_specific {
#ifdef __powerpc64__
	unsigned int stubs_section;	/* Index of stubs section in module */
	unsigned int stub_count;	/* Number of stubs used */
#ifdef CONFIG_PPC_KERNEL_PCREL
	unsigned int got_section;	/* What section is the GOT? */
	unsigned int pcpu_section;	/* .data..percpu section */
#else
	unsigned int toc_section;	/* What section is the TOC? */
	bool toc_fixed;			/* Have we fixed up .TOC.? */
#endif
#ifdef CONFIG_PPC64_ELF_ABI_V1
	/* For module function descriptor dereference */
	unsigned long start_opd;
	unsigned long end_opd;
#endif
#else /* powerpc64 */
	/* Indices of PLT sections within module. */
	unsigned int core_plt_section;
	unsigned int init_plt_section;
#endif /* powerpc64 */

#ifdef CONFIG_DYNAMIC_FTRACE
	unsigned long tramp;
	unsigned long tramp_regs;
#ifdef CONFIG_PPC_FTRACE_OUT_OF_LINE
	struct ftrace_ool_stub *ool_stubs;
	unsigned int ool_stub_count;
	unsigned int ool_stub_index;
#endif
#endif
};
/*
 * Select ELF headers.
 * Make empty sections for module_frob_arch_sections to expand.
 */
#ifdef __powerpc64__
#    ifdef MODULE
	asm(".section .stubs,\"ax\",@nobits; .align 3; .previous");
#        ifdef CONFIG_PPC_KERNEL_PCREL
	asm(".section .mygot,\"a\",@nobits; .align 3; .previous");
#        endif
#    endif
#else
#    ifdef MODULE
	asm(".section .plt,\"ax\",@nobits; .align 3; .previous");
	asm(".section .init.plt,\"ax\",@nobits; .align 3; .previous");
#    endif /* MODULE */
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
int module_trampoline_target(struct module *mod, unsigned long trampoline,
			     unsigned long *target);
int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs);
#else
static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
{
	return 0;
}
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MODULE_H */
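For context on the 32-bit ppc_plt_entry above: each trampoline is a
four-instruction sequence that loads the far target into a scratch register
and branches through the count register, sidestepping the 32MB relative-branch
limit. An illustrative fill routine follows; the kernel's real version is
do_plt_call() in arch/powerpc/kernel/module_32.c and is built from the
PPC_RAW_*() helpers, so the raw encodings here are shown only to make the
sequence concrete:

/* Illustrative sketch, not the kernel's code. */
static void fill_plt_entry(struct ppc_plt_entry *entry, unsigned long target)
{
	entry->jump[0] = 0x3d800000 | (((target + 0x8000) >> 16) & 0xffff); /* lis   r12,target@ha */
	entry->jump[1] = 0x398c0000 | (target & 0xffff);                    /* addi  r12,r12,target@l */
	entry->jump[2] = 0x7d8903a6;                                        /* mtctr r12 */
	entry->jump[3] = 0x4e800420;                                        /* bctr */
}

The relocation code then redirects the out-of-range branch to the entry's
first instruction, which is always reachable because the .plt section is laid
out alongside the module itself.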