RISC-V: KVM: Convert kvm_riscv_vcpu_sbi_forward() into extension handler
All uses of kvm_riscv_vcpu_sbi_forward() also update retdata->uexit, so to further reduce code duplication, move the retdata->uexit assignment into kvm_riscv_vcpu_sbi_forward() and convert it into an SBI extension handler.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Link: https://lore.kernel.org/r/20251017155925.361560-2-apatel@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>
(commit e2f3e2d37b, parent ac3fd01e4c)
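Before the diff, a minimal, self-contained sketch of the pattern this patch introduces. Everything below is a toy model, not kernel code: the struct and function names merely mirror the ones appearing in the hunks (kvm_vcpu_sbi_return, kvm_vcpu_sbi_extension), the types are stubs, and the extension ID range is illustrative. The point it demonstrates is that once the forward helper itself sets retdata->uexit and returns 0, extension tables can point .handler straight at it and call sites collapse to a single return.

/*
 * Toy model of the refactoring (compiles standalone with a C compiler).
 * Stub types only; names mirror, but are not, the kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

struct kvm_vcpu { int dummy; };         /* stub */
struct kvm_run  { int dummy; };         /* stub */

struct kvm_vcpu_sbi_return {
        long err_val;
        bool uexit;                     /* "exit to userspace" flag */
};

struct kvm_vcpu_sbi_extension {
        unsigned long extid_start;
        unsigned long extid_end;
        int (*handler)(struct kvm_vcpu *vcpu, struct kvm_run *run,
                       struct kvm_vcpu_sbi_return *retdata);
};

/* The converted helper: forwards the call and flags the userspace exit
 * itself, instead of every caller doing forward() + retdata->uexit = true. */
static int sbi_forward_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
                               struct kvm_vcpu_sbi_return *retdata)
{
        /* (the real handler fills run->riscv_sbi.* here; omitted in the toy) */
        retdata->uexit = true;
        return 0;
}

/* An always-forwarded extension now just references the common handler. */
static const struct kvm_vcpu_sbi_extension ext_experimental = {
        .extid_start = 0x08000000,      /* illustrative ID range */
        .extid_end   = 0x08FFFFFF,
        .handler     = sbi_forward_handler,
};

int main(void)
{
        struct kvm_vcpu vcpu = { 0 };
        struct kvm_run run = { 0 };
        struct kvm_vcpu_sbi_return retdata = { 0 };

        /* Dispatch as an SBI ecall path would: run the handler, then
         * inspect uexit to decide whether to bounce out to userspace. */
        int ret = ext_experimental.handler(&vcpu, &run, &retdata);
        printf("ret=%d uexit=%d\n", ret, retdata.uexit);
        return 0;
}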
@@ -69,7 +69,9 @@ struct kvm_vcpu_sbi_extension {
 			     unsigned long reg_size, const void *reg_val);
 };
 
-void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_riscv_vcpu_sbi_forward_handler(struct kvm_vcpu *vcpu,
+				       struct kvm_run *run,
+				       struct kvm_vcpu_sbi_return *retdata);
 void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
 				     struct kvm_run *run,
 				     u32 type, u64 flags);
@@ -120,7 +120,9 @@ static bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
 	return sext && scontext->ext_status[sext->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
 }
 
-void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
+int kvm_riscv_vcpu_sbi_forward_handler(struct kvm_vcpu *vcpu,
+				       struct kvm_run *run,
+				       struct kvm_vcpu_sbi_return *retdata)
 {
 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
 
@@ -137,6 +139,8 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	run->riscv_sbi.args[5] = cp->a5;
 	run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
 	run->riscv_sbi.ret[1] = 0;
+	retdata->uexit = true;
+	return 0;
 }
 
 void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
@@ -41,8 +41,7 @@ static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 			 * For experimental/vendor extensions
 			 * forward it to the userspace
 			 */
-			kvm_riscv_vcpu_sbi_forward(vcpu, run);
-			retdata->uexit = true;
+			return kvm_riscv_vcpu_sbi_forward_handler(vcpu, run, retdata);
 		} else {
 			sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a0);
 			*out_val = sbi_ext && sbi_ext->probe ?
@@ -72,27 +71,14 @@ const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base = {
 	.handler = kvm_sbi_ext_base_handler,
 };
 
-static int kvm_sbi_ext_forward_handler(struct kvm_vcpu *vcpu,
-				       struct kvm_run *run,
-				       struct kvm_vcpu_sbi_return *retdata)
-{
-	/*
-	 * Both SBI experimental and vendor extensions are
-	 * unconditionally forwarded to userspace.
-	 */
-	kvm_riscv_vcpu_sbi_forward(vcpu, run);
-	retdata->uexit = true;
-	return 0;
-}
-
 const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental = {
 	.extid_start = SBI_EXT_EXPERIMENTAL_START,
 	.extid_end = SBI_EXT_EXPERIMENTAL_END,
-	.handler = kvm_sbi_ext_forward_handler,
+	.handler = kvm_riscv_vcpu_sbi_forward_handler,
 };
 
 const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor = {
 	.extid_start = SBI_EXT_VENDOR_START,
 	.extid_end = SBI_EXT_VENDOR_END,
-	.handler = kvm_sbi_ext_forward_handler,
+	.handler = kvm_riscv_vcpu_sbi_forward_handler,
 };
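With the hunk above, an extension that should always be forwarded no longer needs its own trampoline handler. As a hedged sketch (not part of the patch), a hypothetical additional always-forwarded extension would now reduce to a single table entry; SBI_EXT_FOO_START/END and vcpu_sbi_ext_foo are placeholder names, not real identifiers:

/* Hypothetical sketch: an always-forwarded extension after this change. */
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_foo = {
	.extid_start = SBI_EXT_FOO_START,	/* placeholder extension ID */
	.extid_end   = SBI_EXT_FOO_END,		/* placeholder extension ID */
	.handler     = kvm_riscv_vcpu_sbi_forward_handler,
};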
@@ -186,34 +186,9 @@ const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst = {
 	.handler = kvm_sbi_ext_srst_handler,
 };
 
-static int kvm_sbi_ext_dbcn_handler(struct kvm_vcpu *vcpu,
-				    struct kvm_run *run,
-				    struct kvm_vcpu_sbi_return *retdata)
-{
-	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
-	unsigned long funcid = cp->a6;
-
-	switch (funcid) {
-	case SBI_EXT_DBCN_CONSOLE_WRITE:
-	case SBI_EXT_DBCN_CONSOLE_READ:
-	case SBI_EXT_DBCN_CONSOLE_WRITE_BYTE:
-		/*
-		 * The SBI debug console functions are unconditionally
-		 * forwarded to the userspace.
-		 */
-		kvm_riscv_vcpu_sbi_forward(vcpu, run);
-		retdata->uexit = true;
-		break;
-	default:
-		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
-	}
-
-	return 0;
-}
-
 const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn = {
 	.extid_start = SBI_EXT_DBCN,
 	.extid_end = SBI_EXT_DBCN,
 	.default_disabled = true,
-	.handler = kvm_sbi_ext_dbcn_handler,
+	.handler = kvm_riscv_vcpu_sbi_forward_handler,
 };
@@ -47,9 +47,7 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		kvm_riscv_vcpu_sbi_request_reset(vcpu, cp->a1, cp->a2);
 
 		/* userspace provides the suspend implementation */
-		kvm_riscv_vcpu_sbi_forward(vcpu, run);
-		retdata->uexit = true;
-		break;
+		return kvm_riscv_vcpu_sbi_forward_handler(vcpu, run, retdata);
 	default:
 		retdata->err_val = SBI_ERR_NOT_SUPPORTED;
 		break;
@@ -32,8 +32,7 @@ static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		 * The CONSOLE_GETCHAR/CONSOLE_PUTCHAR SBI calls cannot be
 		 * handled in kernel so we forward these to user-space
 		 */
-		kvm_riscv_vcpu_sbi_forward(vcpu, run);
-		retdata->uexit = true;
+		ret = kvm_riscv_vcpu_sbi_forward_handler(vcpu, run, retdata);
 		break;
 	case SBI_EXT_0_1_SET_TIMER:
 #if __riscv_xlen == 32