mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-01-11 09:00:12 +00:00
LoongArch: BPF: Enhance the bpf_arch_text_poke() function
Enhance the bpf_arch_text_poke() function to enable accurate location of BPF program entry points. When modifying the entry point of a BPF program, skip the "move t0, ra" instruction to ensure the correct logic and copy of the jump address.

Cc: stable@vger.kernel.org
Fixes: 677e6123e3d2 ("LoongArch: BPF: Disable trampoline for kernel module function trace")
Signed-off-by: Chenghao Duan <duanchenghao@kylinos.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
This commit is contained in:
parent
26138762d9
commit
73721d8676
@@ -1309,15 +1309,30 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
|
||||
{
|
||||
int ret;
|
||||
bool is_call;
|
||||
unsigned long size = 0;
|
||||
unsigned long offset = 0;
|
||||
void *image = NULL;
|
||||
char namebuf[KSYM_NAME_LEN];
|
||||
u32 old_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
|
||||
u32 new_insns[LOONGARCH_LONG_JUMP_NINSNS] = {[0 ... 4] = INSN_NOP};
|
||||
|
||||
/* Only poking bpf text is supported. Since kernel function entry
|
||||
* is set up by ftrace, we rely on ftrace to poke kernel functions.
|
||||
*/
|
||||
if (!is_bpf_text_address((unsigned long)ip))
|
||||
if (!__bpf_address_lookup((unsigned long)ip, &size, &offset, namebuf))
|
||||
return -ENOTSUPP;
|
||||
|
||||
image = ip - offset;
|
||||
|
||||
/* zero offset means we're poking bpf prog entry */
|
||||
if (offset == 0) {
|
||||
/* skip to the nop instruction in bpf prog entry:
|
||||
* move t0, ra
|
||||
* nop
|
||||
*/
|
||||
ip = image + LOONGARCH_INSN_SIZE;
|
||||
}
|
||||
|
||||
is_call = old_t == BPF_MOD_CALL;
|
||||
ret = emit_jump_or_nops(old_addr, ip, old_insns, is_call);
|
||||
if (ret)
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user