Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Synced 2026-01-11 17:10:13 +00:00

Compare commits: 25 commits, 1b907d0507...b57b17e88b
Commits in this range:

b57b17e88b
4eeee6636a
5dd2020f33
a406b8b424
8e8e46a603
1d375d6546
7b6b13d329
2425c9e002
9ddd2b8d1a
4ebf9216e7
f48012f161
7111afe8fb
add2802440
a2ccf46333
affef66b65
71945968d8
21eb2bfe27
80c7889de7
166b0110d1
644b6025bc
65083333d3
deebe5f607
1f92a844c3
322948c319
a6bdc082ad
@@ -136,6 +136,7 @@ config LOONGARCH
     select HAVE_PERF_EVENTS
     select HAVE_PERF_REGS
     select HAVE_PERF_USER_STACK_DUMP
+    select HAVE_PREEMPT_DYNAMIC_KEY
     select HAVE_REGS_AND_STACK_ACCESS_API
     select HAVE_RETHOOK
     select HAVE_RSEQ
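If the single added line is indeed HAVE_PREEMPT_DYNAMIC_KEY (my inference from the +1 line count and its position in the sorted list), it opts LoongArch into boot-time-selectable preemption built on static keys. A simplified sketch of the generic pattern this enables, modeled on the scheduler's static-key variant in kernel/sched/core.c (illustrative, not LoongArch code):

/* Simplified sketch of the static-key flavor of PREEMPT_DYNAMIC
 * (modeled on kernel/sched/core.c; illustrative only): */
DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);

static __always_inline int dynamic_cond_resched(void)
{
    /* Flipped at boot by "preempt=" instead of patching call sites. */
    if (!static_branch_unlikely(&sk_dynamic_cond_resched))
        return 0;
    return __cond_resched();
}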
@@ -68,6 +68,8 @@ LDFLAGS_vmlinux += -static -n -nostdlib
 ifdef CONFIG_AS_HAS_EXPLICIT_RELOCS
 cflags-y += $(call cc-option,-mexplicit-relocs)
 KBUILD_CFLAGS_KERNEL += $(call cc-option,-mdirect-extern-access)
+KBUILD_AFLAGS_MODULE += $(call cc-option,-fno-direct-access-external-data)
+KBUILD_CFLAGS_MODULE += $(call cc-option,-fno-direct-access-external-data)
 KBUILD_AFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
 KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) $(call cc-option,-Wa$(comma)-mno-relax)
 else
@@ -36,19 +36,19 @@
 static inline void arch_atomic_##op(int i, atomic_t *v) \
 { \
     __asm__ __volatile__( \
-    "am"#asm_op"_db.w" " $zero, %1, %0 \n" \
+    "am"#asm_op".w" " $zero, %1, %0 \n" \
     : "+ZB" (v->counter) \
     : "r" (I) \
     : "memory"); \
 }
 
-#define ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
-static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
+#define ATOMIC_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
+static inline int arch_atomic_##op##_return##suffix(int i, atomic_t *v) \
 { \
     int result; \
 \
     __asm__ __volatile__( \
-    "am"#asm_op"_db.w" " %1, %2, %0 \n" \
+    "am"#asm_op#mb".w" " %1, %2, %0 \n" \
     : "+ZB" (v->counter), "=&r" (result) \
     : "r" (I) \
     : "memory"); \
@@ -56,13 +56,13 @@ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
     return result c_op I; \
 }
 
-#define ATOMIC_FETCH_OP(op, I, asm_op) \
-static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+#define ATOMIC_FETCH_OP(op, I, asm_op, mb, suffix) \
+static inline int arch_atomic_fetch_##op##suffix(int i, atomic_t *v) \
 { \
     int result; \
 \
     __asm__ __volatile__( \
-    "am"#asm_op"_db.w" " %1, %2, %0 \n" \
+    "am"#asm_op#mb".w" " %1, %2, %0 \n" \
     : "+ZB" (v->counter), "=&r" (result) \
     : "r" (I) \
     : "memory"); \
@@ -72,29 +72,53 @@ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
 
 #define ATOMIC_OPS(op, I, asm_op, c_op) \
     ATOMIC_OP(op, I, asm_op) \
-    ATOMIC_OP_RETURN(op, I, asm_op, c_op) \
-    ATOMIC_FETCH_OP(op, I, asm_op)
+    ATOMIC_OP_RETURN(op, I, asm_op, c_op, _db, ) \
+    ATOMIC_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
+    ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
+    ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
 
 ATOMIC_OPS(add, i, add, +)
 ATOMIC_OPS(sub, -i, add, +)
 
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_add_return_acquire arch_atomic_add_return
+#define arch_atomic_add_return_release arch_atomic_add_return
 #define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return arch_atomic_sub_return
+#define arch_atomic_sub_return_acquire arch_atomic_sub_return
+#define arch_atomic_sub_return_release arch_atomic_sub_return
 #define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_add_acquire arch_atomic_fetch_add
+#define arch_atomic_fetch_add_release arch_atomic_fetch_add
 #define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_acquire arch_atomic_fetch_sub
+#define arch_atomic_fetch_sub_release arch_atomic_fetch_sub
 #define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
 
 #undef ATOMIC_OPS
 
 #define ATOMIC_OPS(op, I, asm_op) \
     ATOMIC_OP(op, I, asm_op) \
-    ATOMIC_FETCH_OP(op, I, asm_op)
+    ATOMIC_FETCH_OP(op, I, asm_op, _db, ) \
+    ATOMIC_FETCH_OP(op, I, asm_op, , _relaxed)
 
 ATOMIC_OPS(and, i, and)
 ATOMIC_OPS(or, i, or)
 ATOMIC_OPS(xor, i, xor)
 
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_and_acquire arch_atomic_fetch_and
+#define arch_atomic_fetch_and_release arch_atomic_fetch_and
 #define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_or_acquire arch_atomic_fetch_or
+#define arch_atomic_fetch_or_release arch_atomic_fetch_or
 #define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_acquire arch_atomic_fetch_xor
+#define arch_atomic_fetch_xor_release arch_atomic_fetch_xor
 #define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
 
 #undef ATOMIC_OPS
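These atomic hunks are presumably arch/loongarch/include/asm/atomic.h. The new mb/suffix macro parameters generate two flavors from one template: an empty suffix paired with _db (a fully ordered AMO) and a _relaxed suffix paired with a plain AMO, while the void ops drop the barrier entirely since they promise no ordering. As a rough illustration, hand-expanding ATOMIC_OP_RETURN(add, i, add, +, _db, ) gives approximately (not verbatim preprocessor output):

/* Hand-expanded sketch of ATOMIC_OP_RETURN(add, i, add, +, _db, ): */
static inline int arch_atomic_add_return(int i, atomic_t *v)
{
    int result;

    __asm__ __volatile__(
    "amadd_db.w %1, %2, %0 \n"          /* fully ordered AMO */
    : "+ZB" (v->counter), "=&r" (result)
    : "r" (i)
    : "memory");

    return result + i;  /* the AMO returns the old value; add i for the new one */
}

ATOMIC_OP_RETURN(add, i, add, +, , _relaxed) is identical except that it is named arch_atomic_add_return_relaxed and uses plain "amadd.w".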
@@ -172,18 +196,18 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
 static inline void arch_atomic64_##op(long i, atomic64_t *v) \
 { \
     __asm__ __volatile__( \
-    "am"#asm_op"_db.d " " $zero, %1, %0 \n" \
+    "am"#asm_op".d " " $zero, %1, %0 \n" \
     : "+ZB" (v->counter) \
     : "r" (I) \
     : "memory"); \
 }
 
-#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
-static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v) \
+#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op, mb, suffix) \
+static inline long arch_atomic64_##op##_return##suffix(long i, atomic64_t *v) \
 { \
     long result; \
     __asm__ __volatile__( \
-    "am"#asm_op"_db.d " " %1, %2, %0 \n" \
+    "am"#asm_op#mb".d " " %1, %2, %0 \n" \
     : "+ZB" (v->counter), "=&r" (result) \
     : "r" (I) \
     : "memory"); \
@@ -191,13 +215,13 @@ static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v) \
     return result c_op I; \
 }
 
-#define ATOMIC64_FETCH_OP(op, I, asm_op) \
-static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \
+#define ATOMIC64_FETCH_OP(op, I, asm_op, mb, suffix) \
+static inline long arch_atomic64_fetch_##op##suffix(long i, atomic64_t *v) \
 { \
     long result; \
 \
     __asm__ __volatile__( \
-    "am"#asm_op"_db.d " " %1, %2, %0 \n" \
+    "am"#asm_op#mb".d " " %1, %2, %0 \n" \
     : "+ZB" (v->counter), "=&r" (result) \
     : "r" (I) \
     : "memory"); \
@@ -207,29 +231,53 @@ static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \
 
 #define ATOMIC64_OPS(op, I, asm_op, c_op) \
     ATOMIC64_OP(op, I, asm_op) \
-    ATOMIC64_OP_RETURN(op, I, asm_op, c_op) \
-    ATOMIC64_FETCH_OP(op, I, asm_op)
+    ATOMIC64_OP_RETURN(op, I, asm_op, c_op, _db, ) \
+    ATOMIC64_OP_RETURN(op, I, asm_op, c_op, , _relaxed) \
+    ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
+    ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
 
 ATOMIC64_OPS(add, i, add, +)
 ATOMIC64_OPS(sub, -i, add, +)
 
+#define arch_atomic64_add_return arch_atomic64_add_return
+#define arch_atomic64_add_return_acquire arch_atomic64_add_return
+#define arch_atomic64_add_return_release arch_atomic64_add_return
 #define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+#define arch_atomic64_sub_return_acquire arch_atomic64_sub_return
+#define arch_atomic64_sub_return_release arch_atomic64_sub_return
 #define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_acquire arch_atomic64_fetch_add
+#define arch_atomic64_fetch_add_release arch_atomic64_fetch_add
 #define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_acquire arch_atomic64_fetch_sub
+#define arch_atomic64_fetch_sub_release arch_atomic64_fetch_sub
 #define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
 
 #undef ATOMIC64_OPS
 
 #define ATOMIC64_OPS(op, I, asm_op) \
     ATOMIC64_OP(op, I, asm_op) \
-    ATOMIC64_FETCH_OP(op, I, asm_op)
+    ATOMIC64_FETCH_OP(op, I, asm_op, _db, ) \
+    ATOMIC64_FETCH_OP(op, I, asm_op, , _relaxed)
 
 ATOMIC64_OPS(and, i, and)
 ATOMIC64_OPS(or, i, or)
 ATOMIC64_OPS(xor, i, xor)
 
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_acquire arch_atomic64_fetch_and
+#define arch_atomic64_fetch_and_release arch_atomic64_fetch_and
 #define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_acquire arch_atomic64_fetch_or
+#define arch_atomic64_fetch_or_release arch_atomic64_fetch_or
 #define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_acquire arch_atomic64_fetch_xor
+#define arch_atomic64_fetch_xor_release arch_atomic64_fetch_xor
 #define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
 
 #undef ATOMIC64_OPS
@@ -65,6 +65,8 @@ enum reg2_op {
 revbd_op = 0x0f,
 revh2w_op = 0x10,
 revhd_op = 0x11,
+extwh_op = 0x16,
+extwb_op = 0x17,
 iocsrrdb_op = 0x19200,
 iocsrrdh_op = 0x19201,
 iocsrrdw_op = 0x19202,
@@ -572,6 +574,8 @@ static inline void emit_##NAME(union loongarch_instruction *insn, \
 DEF_EMIT_REG2_FORMAT(revb2h, revb2h_op)
 DEF_EMIT_REG2_FORMAT(revb2w, revb2w_op)
 DEF_EMIT_REG2_FORMAT(revbd, revbd_op)
+DEF_EMIT_REG2_FORMAT(extwh, extwh_op)
+DEF_EMIT_REG2_FORMAT(extwb, extwb_op)
 
 #define DEF_EMIT_REG2I5_FORMAT(NAME, OP) \
 static inline void emit_##NAME(union loongarch_instruction *insn, \
@@ -623,6 +627,9 @@ DEF_EMIT_REG2I12_FORMAT(lu52id, lu52id_op)
 DEF_EMIT_REG2I12_FORMAT(andi, andi_op)
 DEF_EMIT_REG2I12_FORMAT(ori, ori_op)
 DEF_EMIT_REG2I12_FORMAT(xori, xori_op)
+DEF_EMIT_REG2I12_FORMAT(ldb, ldb_op)
+DEF_EMIT_REG2I12_FORMAT(ldh, ldh_op)
+DEF_EMIT_REG2I12_FORMAT(ldw, ldw_op)
 DEF_EMIT_REG2I12_FORMAT(ldbu, ldbu_op)
 DEF_EMIT_REG2I12_FORMAT(ldhu, ldhu_op)
 DEF_EMIT_REG2I12_FORMAT(ldwu, ldwu_op)
@@ -701,9 +708,12 @@ static inline void emit_##NAME(union loongarch_instruction *insn, \
     insn->reg3_format.rk = rk; \
 }
 
+DEF_EMIT_REG3_FORMAT(addw, addw_op)
 DEF_EMIT_REG3_FORMAT(addd, addd_op)
 DEF_EMIT_REG3_FORMAT(subd, subd_op)
 DEF_EMIT_REG3_FORMAT(muld, muld_op)
+DEF_EMIT_REG3_FORMAT(divd, divd_op)
+DEF_EMIT_REG3_FORMAT(modd, modd_op)
 DEF_EMIT_REG3_FORMAT(divdu, divdu_op)
 DEF_EMIT_REG3_FORMAT(moddu, moddu_op)
 DEF_EMIT_REG3_FORMAT(and, and_op)
@@ -715,6 +725,9 @@ DEF_EMIT_REG3_FORMAT(srlw, srlw_op)
 DEF_EMIT_REG3_FORMAT(srld, srld_op)
 DEF_EMIT_REG3_FORMAT(sraw, sraw_op)
 DEF_EMIT_REG3_FORMAT(srad, srad_op)
+DEF_EMIT_REG3_FORMAT(ldxb, ldxb_op)
+DEF_EMIT_REG3_FORMAT(ldxh, ldxh_op)
+DEF_EMIT_REG3_FORMAT(ldxw, ldxw_op)
 DEF_EMIT_REG3_FORMAT(ldxbu, ldxbu_op)
 DEF_EMIT_REG3_FORMAT(ldxhu, ldxhu_op)
 DEF_EMIT_REG3_FORMAT(ldxwu, ldxwu_op)
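These hunks look like arch/loongarch/include/asm/inst.h additions backing the BPF JIT changes further down: ext.w.b/ext.w.h (sign-extend byte/halfword), the signed loads ld.b/ld.h/ld.w and their indexed ldx.* forms, add.w, and signed div.d/mod.d. For reference, the two new REG2 opcodes behave roughly like C casts (illustrative model on my part, not kernel code):

/* Illustrative C model of the two new sign-extension opcodes: */
static inline long ext_w_b(long rj) { return (signed char)rj; } /* ext.w.b */
static inline long ext_w_h(long rj) { return (short)rj; }       /* ext.w.h */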
@@ -32,7 +32,7 @@ static inline void set_my_cpu_offset(unsigned long off)
 #define __my_cpu_offset __my_cpu_offset
 
 #define PERCPU_OP(op, asm_op, c_op) \
-static inline unsigned long __percpu_##op(void *ptr, \
+static __always_inline unsigned long __percpu_##op(void *ptr, \
             unsigned long val, int size) \
 { \
     unsigned long ret; \
@@ -63,7 +63,7 @@ PERCPU_OP(and, and, &)
 PERCPU_OP(or, or, |)
 #undef PERCPU_OP
 
-static inline unsigned long __percpu_read(void *ptr, int size)
+static __always_inline unsigned long __percpu_read(void *ptr, int size)
 {
     unsigned long ret;
 
@@ -100,7 +100,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
     return ret;
 }
 
-static inline void __percpu_write(void *ptr, unsigned long val, int size)
+static __always_inline void __percpu_write(void *ptr, unsigned long val, int size)
 {
     switch (size) {
     case 1:
@@ -132,8 +132,8 @@ static inline void __percpu_write(void *ptr, unsigned long val, int size)
     }
 }
 
-static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
-                int size)
+static __always_inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
+                int size)
 {
     switch (size) {
     case 1:
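This hunk is presumably arch/loongarch/include/asm/percpu.h. Marking the size-dispatched helpers __always_inline guarantees the switch (size) folds to a single access even when the compiler would otherwise decline to inline (for example under instrumentation or low optimization levels); that rationale is my reading, not text from the diff. A minimal stand-alone sketch of the pattern being annotated:

#include <stdint.h>

#define __always_inline inline __attribute__((__always_inline__))

/* Stand-alone sketch (not the kernel helper): size is a compile-time
 * constant at every call site, so once the function is inlined the
 * switch disappears and a single load remains. */
static __always_inline uintptr_t pcpu_read(void *ptr, int size)
{
    switch (size) {
    case 1: return *(volatile uint8_t  *)ptr;
    case 2: return *(volatile uint16_t *)ptr;
    case 4: return *(volatile uint32_t *)ptr;
    default: return *(volatile uintptr_t *)ptr;
    }
}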
@@ -504,8 +504,9 @@ asmlinkage void start_secondary(void)
     unsigned int cpu;
 
     sync_counter();
-    cpu = smp_processor_id();
+    cpu = raw_smp_processor_id();
     set_my_cpu_offset(per_cpu_offset(cpu));
+    rcutree_report_cpu_starting(cpu);
 
     cpu_probe();
     constant_clockevent_init();
@@ -411,7 +411,11 @@ static int add_exception_handler(const struct bpf_insn *insn,
     off_t offset;
     struct exception_table_entry *ex;
 
-    if (!ctx->image || !ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM)
+    if (!ctx->image || !ctx->prog->aux->extable)
+        return 0;
+
+    if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
+        BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
         return 0;
 
     if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries))
@@ -450,7 +454,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 {
     u8 tm = -1;
     u64 func_addr;
-    bool func_addr_fixed;
+    bool func_addr_fixed, sign_extend;
     int i = insn - ctx->prog->insnsi;
     int ret, jmp_offset;
     const u8 code = insn->code;
@@ -468,8 +472,23 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
     /* dst = src */
     case BPF_ALU | BPF_MOV | BPF_X:
     case BPF_ALU64 | BPF_MOV | BPF_X:
-        move_reg(ctx, dst, src);
-        emit_zext_32(ctx, dst, is32);
+        switch (off) {
+        case 0:
+            move_reg(ctx, dst, src);
+            emit_zext_32(ctx, dst, is32);
+            break;
+        case 8:
+            move_reg(ctx, t1, src);
+            emit_insn(ctx, extwb, dst, t1);
+            break;
+        case 16:
+            move_reg(ctx, t1, src);
+            emit_insn(ctx, extwh, dst, t1);
+            break;
+        case 32:
+            emit_insn(ctx, addw, dst, src, LOONGARCH_GPR_ZERO);
+            break;
+        }
         break;
 
     /* dst = imm */
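This implements BPF's sign-extending register move (the cpuv4 "movsx" encoding), where a non-zero insn->off selects the source width. Conceptually (a hypothetical helper, not JIT output):

#include <stdint.h>

typedef int8_t s8; typedef int16_t s16; typedef int32_t s32;
typedef uint64_t u64;

/* Illustrative semantics of the sign-extending move: */
static u64 bpf_movsx(u64 src, int off)
{
    switch (off) {
    case 8:  return (u64)(int64_t)(s8)src;  /* via ext.w.b */
    case 16: return (u64)(int64_t)(s16)src; /* via ext.w.h */
    case 32: return (u64)(int64_t)(s32)src; /* via add.w dst, src, $zero */
    default: return src;                    /* off == 0: classic MOV */
    }
}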
@@ -534,39 +553,71 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
     /* dst = dst / src */
     case BPF_ALU | BPF_DIV | BPF_X:
     case BPF_ALU64 | BPF_DIV | BPF_X:
-        emit_zext_32(ctx, dst, is32);
-        move_reg(ctx, t1, src);
-        emit_zext_32(ctx, t1, is32);
-        emit_insn(ctx, divdu, dst, dst, t1);
-        emit_zext_32(ctx, dst, is32);
+        if (!off) {
+            emit_zext_32(ctx, dst, is32);
+            move_reg(ctx, t1, src);
+            emit_zext_32(ctx, t1, is32);
+            emit_insn(ctx, divdu, dst, dst, t1);
+            emit_zext_32(ctx, dst, is32);
+        } else {
+            emit_sext_32(ctx, dst, is32);
+            move_reg(ctx, t1, src);
+            emit_sext_32(ctx, t1, is32);
+            emit_insn(ctx, divd, dst, dst, t1);
+            emit_sext_32(ctx, dst, is32);
+        }
         break;
 
     /* dst = dst / imm */
     case BPF_ALU | BPF_DIV | BPF_K:
     case BPF_ALU64 | BPF_DIV | BPF_K:
-        move_imm(ctx, t1, imm, is32);
-        emit_zext_32(ctx, dst, is32);
-        emit_insn(ctx, divdu, dst, dst, t1);
-        emit_zext_32(ctx, dst, is32);
+        if (!off) {
+            move_imm(ctx, t1, imm, is32);
+            emit_zext_32(ctx, dst, is32);
+            emit_insn(ctx, divdu, dst, dst, t1);
+            emit_zext_32(ctx, dst, is32);
+        } else {
+            move_imm(ctx, t1, imm, false);
+            emit_sext_32(ctx, t1, is32);
+            emit_sext_32(ctx, dst, is32);
+            emit_insn(ctx, divd, dst, dst, t1);
+            emit_sext_32(ctx, dst, is32);
+        }
         break;
 
     /* dst = dst % src */
     case BPF_ALU | BPF_MOD | BPF_X:
     case BPF_ALU64 | BPF_MOD | BPF_X:
-        emit_zext_32(ctx, dst, is32);
-        move_reg(ctx, t1, src);
-        emit_zext_32(ctx, t1, is32);
-        emit_insn(ctx, moddu, dst, dst, t1);
-        emit_zext_32(ctx, dst, is32);
+        if (!off) {
+            emit_zext_32(ctx, dst, is32);
+            move_reg(ctx, t1, src);
+            emit_zext_32(ctx, t1, is32);
+            emit_insn(ctx, moddu, dst, dst, t1);
+            emit_zext_32(ctx, dst, is32);
+        } else {
+            emit_sext_32(ctx, dst, is32);
+            move_reg(ctx, t1, src);
+            emit_sext_32(ctx, t1, is32);
+            emit_insn(ctx, modd, dst, dst, t1);
+            emit_sext_32(ctx, dst, is32);
+        }
         break;
 
     /* dst = dst % imm */
     case BPF_ALU | BPF_MOD | BPF_K:
     case BPF_ALU64 | BPF_MOD | BPF_K:
-        move_imm(ctx, t1, imm, is32);
-        emit_zext_32(ctx, dst, is32);
-        emit_insn(ctx, moddu, dst, dst, t1);
-        emit_zext_32(ctx, dst, is32);
+        if (!off) {
+            move_imm(ctx, t1, imm, is32);
+            emit_zext_32(ctx, dst, is32);
+            emit_insn(ctx, moddu, dst, dst, t1);
+            emit_zext_32(ctx, dst, is32);
+        } else {
+            move_imm(ctx, t1, imm, false);
+            emit_sext_32(ctx, t1, is32);
+            emit_sext_32(ctx, dst, is32);
+            emit_insn(ctx, modd, dst, dst, t1);
+            emit_sext_32(ctx, dst, is32);
+        }
         break;
 
     /* dst = -dst */
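BPF cpuv4 encodes signed division and modulo as BPF_DIV/BPF_MOD with a non-zero off, so the JIT now switches between the unsigned and signed LoongArch instructions and between zero and sign extension of 32-bit inputs. In effect (illustrative helper, 64-bit division shown):

#include <stdint.h>

/* Illustrative semantics; off != 0 selects the signed form: */
static uint64_t bpf_div64(uint64_t dst, uint64_t src, int off)
{
    if (src == 0)
        return 0;              /* BPF defines x / 0 == 0 */
    if (!off)
        return dst / src;                        /* div.du */
    return (uint64_t)((int64_t)dst / (int64_t)src); /* div.d */
}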
@@ -712,6 +763,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
         break;
 
     case BPF_ALU | BPF_END | BPF_FROM_BE:
+    case BPF_ALU64 | BPF_END | BPF_FROM_LE:
         switch (imm) {
         case 16:
             emit_insn(ctx, revb2h, dst, dst);
@@ -828,7 +880,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 
     /* PC += off */
     case BPF_JMP | BPF_JA:
-        jmp_offset = bpf2la_offset(i, off, ctx);
+    case BPF_JMP32 | BPF_JA:
+        if (BPF_CLASS(code) == BPF_JMP)
+            jmp_offset = bpf2la_offset(i, off, ctx);
+        else
+            jmp_offset = bpf2la_offset(i, imm, ctx);
         if (emit_uncond_jmp(ctx, jmp_offset) < 0)
             goto toofar;
         break;
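Two more cpuv4 additions here: BPF_ALU64 | BPF_END | BPF_FROM_LE is the unconditional byte-swap (bswap) encoding, which reuses the existing FROM_BE code path, and BPF_JMP32 | BPF_JA ("gotol") is a long jump whose displacement lives in the 32-bit imm field rather than the 16-bit off field. Reduced to its essence (hypothetical helper):

#include <stdint.h>

/* Classic JA carries its displacement in off; gotol carries it in imm: */
static int32_t ja_displacement(int is_jmp32, int16_t off, int32_t imm)
{
    return is_jmp32 ? imm : off;
}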
@@ -879,31 +935,56 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
     case BPF_LDX | BPF_PROBE_MEM | BPF_W:
     case BPF_LDX | BPF_PROBE_MEM | BPF_H:
     case BPF_LDX | BPF_PROBE_MEM | BPF_B:
+    /* dst_reg = (s64)*(signed size *)(src_reg + off) */
+    case BPF_LDX | BPF_MEMSX | BPF_B:
+    case BPF_LDX | BPF_MEMSX | BPF_H:
+    case BPF_LDX | BPF_MEMSX | BPF_W:
+    case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
+    case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
+    case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
+        sign_extend = BPF_MODE(insn->code) == BPF_MEMSX ||
+                  BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
         switch (BPF_SIZE(code)) {
         case BPF_B:
             if (is_signed_imm12(off)) {
-                emit_insn(ctx, ldbu, dst, src, off);
+                if (sign_extend)
+                    emit_insn(ctx, ldb, dst, src, off);
+                else
+                    emit_insn(ctx, ldbu, dst, src, off);
             } else {
                 move_imm(ctx, t1, off, is32);
-                emit_insn(ctx, ldxbu, dst, src, t1);
+                if (sign_extend)
+                    emit_insn(ctx, ldxb, dst, src, t1);
+                else
+                    emit_insn(ctx, ldxbu, dst, src, t1);
             }
             break;
         case BPF_H:
             if (is_signed_imm12(off)) {
-                emit_insn(ctx, ldhu, dst, src, off);
+                if (sign_extend)
+                    emit_insn(ctx, ldh, dst, src, off);
+                else
+                    emit_insn(ctx, ldhu, dst, src, off);
             } else {
                 move_imm(ctx, t1, off, is32);
-                emit_insn(ctx, ldxhu, dst, src, t1);
+                if (sign_extend)
+                    emit_insn(ctx, ldxh, dst, src, t1);
+                else
+                    emit_insn(ctx, ldxhu, dst, src, t1);
             }
             break;
         case BPF_W:
             if (is_signed_imm12(off)) {
-                emit_insn(ctx, ldwu, dst, src, off);
-            } else if (is_signed_imm14(off)) {
-                emit_insn(ctx, ldptrw, dst, src, off);
+                if (sign_extend)
+                    emit_insn(ctx, ldw, dst, src, off);
+                else
+                    emit_insn(ctx, ldwu, dst, src, off);
             } else {
                 move_imm(ctx, t1, off, is32);
-                emit_insn(ctx, ldxwu, dst, src, t1);
+                if (sign_extend)
+                    emit_insn(ctx, ldxw, dst, src, t1);
+                else
+                    emit_insn(ctx, ldxwu, dst, src, t1);
             }
             break;
         case BPF_DW:
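The load rework implements BPF_MEMSX / BPF_PROBE_MEMSX, i.e. dst = (s64)*(signed size *)(src + off): for each width the JIT now picks the signed load (ld.b/ld.h/ld.w, or the indexed ldx.* forms) instead of the zero-extending one, and the BPF_W path also drops the old ldptr.w shortcut. Illustrative semantics (hypothetical helper, not JIT code; width is the access size in bytes):

#include <stdint.h>

static uint64_t bpf_load(const void *addr, int width, int sign_extend)
{
    switch (width) {
    case 1: return sign_extend ? (uint64_t)(int64_t)*(const int8_t *)addr
                   : *(const uint8_t *)addr;
    case 2: return sign_extend ? (uint64_t)(int64_t)*(const int16_t *)addr
                   : *(const uint16_t *)addr;
    case 4: return sign_extend ? (uint64_t)(int64_t)*(const int32_t *)addr
                   : *(const uint32_t *)addr;
    default: return *(const uint64_t *)addr;   /* BPF_DW: no extension */
    }
}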
@@ -475,13 +475,13 @@
  * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
 #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
 #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
+#define PFN_START_BIT (63-ASM_PFN_PTE_SHIFT+(63-58)-PAGE_ADD_SHIFT)
 
 /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
 .macro convert_for_tlb_insert20 pte,tmp
 #ifdef CONFIG_HUGETLB_PAGE
     copy \pte,\tmp
-    extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
-            64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+    extrd,u \tmp,PFN_START_BIT,PFN_START_BIT+1,\pte
 
     depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
             (63-58)+PAGE_ADD_SHIFT,\pte
@@ -489,8 +489,7 @@
     depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
             (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
 #else /* Huge pages disabled */
-    extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
-            64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+    extrd,u \pte,PFN_START_BIT,PFN_START_BIT+1,\pte
     depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
             (63-58)+PAGE_ADD_SHIFT,\pte
 #endif
@@ -70,9 +70,8 @@ $bss_loop:
     stw,ma %arg2,4(%r1)
     stw,ma %arg3,4(%r1)
 
-#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
-    /* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
-     * and halt kernel if we detect a PA1.x CPU. */
+#if defined(CONFIG_PA20)
+    /* check for 64-bit capable CPU as required by current kernel */
     ldi 32,%r10
     mtctl %r10,%cr11
     .level 2.0
@@ -8,12 +8,7 @@ static inline pgprot_t pgprot_framebuffer(pgprot_t prot,
                        unsigned long vm_start, unsigned long vm_end,
                        unsigned long offset)
 {
-    /*
-     * PowerPC's implementation of phys_mem_access_prot() does
-     * not use the file argument. Set it to NULL in preparation
-     * of later updates to the interface.
-     */
-    return phys_mem_access_prot(NULL, PHYS_PFN(offset), vm_end - vm_start, prot);
+    return __phys_mem_access_prot(PHYS_PFN(offset), vm_end - vm_start, prot);
 }
 #define pgprot_framebuffer pgprot_framebuffer
 
@@ -10,7 +10,7 @@
 #include <linux/export.h>
 
 struct pt_regs;
-struct pci_bus;
+struct pci_bus;
 struct device_node;
 struct iommu_table;
 struct rtc_time;
@@ -78,8 +78,8 @@ struct machdep_calls {
     unsigned char (*nvram_read_val)(int addr);
     void (*nvram_write_val)(int addr, unsigned char val);
     ssize_t (*nvram_write)(char *buf, size_t count, loff_t *index);
-    ssize_t (*nvram_read)(char *buf, size_t count, loff_t *index);
-    ssize_t (*nvram_size)(void);
+    ssize_t (*nvram_read)(char *buf, size_t count, loff_t *index);
+    ssize_t (*nvram_size)(void);
     void (*nvram_sync)(void);
 
     /* Exception handlers */
@@ -102,12 +102,11 @@ struct machdep_calls {
      */
     long (*feature_call)(unsigned int feature, ...);
 
-    /* Get legacy PCI/IDE interrupt mapping */
+    /* Get legacy PCI/IDE interrupt mapping */
     int (*pci_get_legacy_ide_irq)(struct pci_dev *dev, int channel);
 
 
     /* Get access protection for /dev/mem */
-    pgprot_t (*phys_mem_access_prot)(struct file *file,
-                     unsigned long pfn,
+    pgprot_t (*phys_mem_access_prot)(unsigned long pfn,
                      unsigned long size,
                      pgprot_t vma_prot);
@@ -105,9 +105,7 @@ extern void of_scan_pci_bridge(struct pci_dev *dev);
 extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
 extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
 
-struct file;
-extern pgprot_t pci_phys_mem_access_prot(struct file *file,
-                     unsigned long pfn,
+extern pgprot_t pci_phys_mem_access_prot(unsigned long pfn,
                      unsigned long size,
                      pgprot_t prot);
 
@@ -120,9 +120,15 @@ static inline void mark_initmem_nx(void) { }
 int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
               pte_t *ptep, pte_t entry, int dirty);
 
+pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
+                pgprot_t vma_prot);
+
 struct file;
-pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-              unsigned long size, pgprot_t vma_prot);
+static inline pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                unsigned long size, pgprot_t vma_prot)
+{
+    return __phys_mem_access_prot(pfn, size, vma_prot);
+}
 #define __HAVE_PHYS_MEM_ACCESS_PROT
 
 void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
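The powerpc hunks in this range all serve one refactor: the struct file argument to the arch's phys_mem_access_prot machinery was unused, so __phys_mem_access_prot() becomes the file-less core while the old signature survives only as a static inline shim, because generic code behind the __HAVE_PHYS_MEM_ACCESS_PROT contract still passes a file pointer. A hedged sketch of that caller side, modeled on the /dev/mem mmap path in drivers/char/mem.c (simplified):

/* Sketch of the generic caller that motivates keeping the old
 * signature (modeled on drivers/char/mem.c; simplified): */
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
    vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                         vma->vm_end - vma->vm_start,
                         vma->vm_page_prot);
    return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                   vma->vm_end - vma->vm_start,
                   vma->vm_page_prot);
}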
@@ -521,8 +521,7 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma)
  * PCI device, it tries to find the PCI device first and calls the
  * above routine
  */
-pgprot_t pci_phys_mem_access_prot(struct file *file,
-                  unsigned long pfn,
+pgprot_t pci_phys_mem_access_prot(unsigned long pfn,
                   unsigned long size,
                   pgprot_t prot)
 {
@@ -752,6 +752,8 @@ static int ppc_rtas_tone_volume_show(struct seq_file *m, void *v)
 
 /**
  * ppc_rtas_rmo_buf_show() - Describe RTAS-addressable region for user space.
+ * @m: seq_file output target.
+ * @v: Unused.
  *
  * Base + size description of a range of RTAS-addressable memory set
  * aside for user space to use as work area(s) for certain RTAS
@@ -35,18 +35,18 @@ unsigned long long memory_limit;
 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
 
-pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-              unsigned long size, pgprot_t vma_prot)
+pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
+                pgprot_t vma_prot)
 {
     if (ppc_md.phys_mem_access_prot)
-        return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
+        return ppc_md.phys_mem_access_prot(pfn, size, vma_prot);
 
     if (!page_is_ram(pfn))
         vma_prot = pgprot_noncached(vma_prot);
 
     return vma_prot;
 }
-EXPORT_SYMBOL(phys_mem_access_prot);
+EXPORT_SYMBOL(__phys_mem_access_prot);
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 static DEFINE_MUTEX(linear_mapping_mutex);
@@ -184,6 +184,7 @@ machine_arch_initcall(pseries, rtas_work_area_allocator_init);
 
 /**
  * rtas_work_area_reserve_arena() - Reserve memory suitable for RTAS work areas.
+ * @limit: Upper limit for memblock allocation.
  */
 void __init rtas_work_area_reserve_arena(const phys_addr_t limit)
 {
@@ -392,7 +392,7 @@ static struct parisc_driver parport_driver __refdata = {
     .remove = __exit_p(parport_remove_chip),
 };
 
-int parport_gsc_init(void)
+static int parport_gsc_init(void)
 {
     return register_parisc_driver(&parport_driver);
 }
@@ -7,7 +7,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
      (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
-     defined(__TARGET_ARCH_s390)) && __clang_major__ >= 18
+     defined(__TARGET_ARCH_s390) || defined(__TARGET_ARCH_loongarch)) && \
+     __clang_major__ >= 18
 const volatile int skip = 0;
 #else
 const volatile int skip = 1;
@@ -6,7 +6,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
      (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
-     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
+     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+     defined(__TARGET_ARCH_loongarch)) && \
      __clang_major__ >= 18
 
 SEC("socket")
@@ -6,7 +6,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
      (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
-     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
+     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+     defined(__TARGET_ARCH_loongarch)) && \
      __clang_major__ >= 18
 
 SEC("socket")
@@ -6,7 +6,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
      (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
-     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
+     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+     defined(__TARGET_ARCH_loongarch)) && \
      __clang_major__ >= 18
 
 SEC("socket")
@@ -6,7 +6,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
      (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
-     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
+     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+     defined(__TARGET_ARCH_loongarch)) && \
      __clang_major__ >= 18
 
 SEC("socket")
@@ -6,7 +6,8 @@
 
 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
      (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
-     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
+     defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
+     defined(__TARGET_ARCH_loongarch)) && \
      __clang_major__ >= 18
 
 SEC("socket")
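Finally, the BPF selftest guards: each of these programs computes a compile-time skip flag so the runner only executes the cpuv4 instruction tests where both the target architecture and Clang 18 or newer can emit the new encodings; LoongArch now joins that set. The pattern reduces to (illustrative, arch list abbreviated):

#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
     defined(__TARGET_ARCH_loongarch) /* ... */) && __clang_major__ >= 18
const volatile int skip = 0;    /* run the cpuv4 tests */
#else
const volatile int skip = 1;    /* arch or compiler cannot emit them */
#endif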