Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Compare commits: 6 commits, e7c375b181...8b690556d8
| SHA1 |
|---|
| 8b690556d8 |
| 3fa05f96fc |
| b82ebaf298 |
| 5bebe8de19 |
| 85592114ff |
| 0f559cd91e |
arch/arm64/include/asm/page.h:

```diff
@@ -33,8 +33,8 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
 						unsigned long vaddr);
 #define vma_alloc_zeroed_movable_folio vma_alloc_zeroed_movable_folio
 
-void tag_clear_highpage(struct page *to);
-#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
+bool tag_clear_highpages(struct page *to, int numpages);
+#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGES
 
 #define clear_user_page(page, vaddr, pg)	clear_page(page)
 #define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)
```
arch/arm64/kvm/arm.c:

```diff
@@ -624,6 +624,7 @@ nommu:
 	kvm_timer_vcpu_load(vcpu);
 	kvm_vgic_load(vcpu);
 	kvm_vcpu_load_debug(vcpu);
+	kvm_vcpu_load_fgt(vcpu);
 	if (has_vhe())
 		kvm_vcpu_load_vhe(vcpu);
 	kvm_arch_vcpu_load_fp(vcpu);
```
arch/arm64/kvm/arm.c:

```diff
@@ -642,7 +643,6 @@ nommu:
 		vcpu->arch.hcr_el2 |= HCR_TWI;
 
 	vcpu_set_pauth_traps(vcpu);
-	kvm_vcpu_load_fgt(vcpu);
 
 	if (is_protected_kvm_enabled()) {
 		kvm_call_hyp_nvhe(__pkvm_vcpu_load,
```
arch/arm64/kvm/sys_regs.c:

```diff
@@ -5609,7 +5609,11 @@ int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
 
 	guard(mutex)(&kvm->arch.config_lock);
 
-	if (!irqchip_in_kernel(kvm)) {
+	/*
+	 * This hacks into the ID registers, so only perform it when the
+	 * first vcpu runs, or the kvm_set_vm_id_reg() helper will scream.
+	 */
+	if (!irqchip_in_kernel(kvm) && !kvm_vm_has_ran_once(kvm)) {
 		u64 val;
 
 		val = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
```
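For readers unfamiliar with it, the `guard(mutex)(&kvm->arch.config_lock)` in the hunk above is the scope-based lock guard from `include/linux/cleanup.h`: the mutex is acquired at the declaration and released automatically when the enclosing scope exits, on every return path. A minimal sketch of the pattern (`cfg_lock`, `cfg_value`, and `cfg_read()` are made-up names for illustration):

```c
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(cfg_lock);
static int cfg_value;

static int cfg_read(void)
{
	/* Acquires cfg_lock here; dropped automatically at scope exit. */
	guard(mutex)(&cfg_lock);

	if (cfg_value < 0)
		return -EINVAL;	/* early return: no explicit mutex_unlock() */

	return cfg_value;
}
```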
arch/arm64/mm/fault.c:

```diff
@@ -967,20 +967,21 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
 	return vma_alloc_folio(flags, 0, vma, vaddr);
 }
 
-void tag_clear_highpage(struct page *page)
+bool tag_clear_highpages(struct page *page, int numpages)
 {
 	/*
 	 * Check if MTE is supported and fall back to clear_highpage().
 	 * get_huge_zero_folio() unconditionally passes __GFP_ZEROTAGS and
-	 * post_alloc_hook() will invoke tag_clear_highpage().
+	 * post_alloc_hook() will invoke tag_clear_highpages().
 	 */
-	if (!system_supports_mte()) {
-		clear_highpage(page);
-		return;
-	}
+	if (!system_supports_mte())
+		return false;
 
-	/* Newly allocated page, shouldn't have been tagged yet */
-	WARN_ON_ONCE(!try_page_mte_tagging(page));
-	mte_zero_clear_page_tags(page_address(page));
-	set_page_mte_tagged(page);
+	/* Newly allocated pages, shouldn't have been tagged yet */
+	for (int i = 0; i < numpages; i++, page++) {
+		WARN_ON_ONCE(!try_page_mte_tagging(page));
+		mte_zero_clear_page_tags(page_address(page));
+		set_page_mte_tagged(page);
+	}
+	return true;
 }
```
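The new contract: `tag_clear_highpages()` zeroes both the data and the MTE tags of `numpages` pages and returns `true` only when MTE is supported; otherwise it touches nothing and returns `false`, leaving initialization to the caller. A hedged sketch of a caller (the helper name is hypothetical; `post_alloc_hook()` further down does the real equivalent):

```c
/* Hypothetical helper: zero a run of pages, tagged or not. */
static void zero_pages_maybe_tagged(struct page *page, int numpages)
{
	/* Zeroes data and tags together when MTE is present... */
	if (tag_clear_highpages(page, numpages))
		return;

	/* ...otherwise nothing was initialized, so zero the data here. */
	while (numpages--)
		clear_highpage(page++);
}
```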
arch/x86/kvm/svm/svm.c:

```diff
@@ -705,7 +705,11 @@ void *svm_alloc_permissions_map(unsigned long size, gfp_t gfp_mask)
 
 static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
 {
-	bool intercept = !(to_svm(vcpu)->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK);
+	struct vcpu_svm *svm = to_svm(vcpu);
+	bool intercept = !(svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK);
+
+	if (intercept == svm->lbr_msrs_intercepted)
+		return;
 
 	svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHFROMIP, MSR_TYPE_RW, intercept);
 	svm_set_intercept_for_msr(vcpu, MSR_IA32_LASTBRANCHTOIP, MSR_TYPE_RW, intercept);
```
arch/x86/kvm/svm/svm.c:

```diff
@@ -714,6 +718,8 @@ static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
 
 	if (sev_es_guest(vcpu->kvm))
 		svm_set_intercept_for_msr(vcpu, MSR_IA32_DEBUGCTLMSR, MSR_TYPE_RW, intercept);
+
+	svm->lbr_msrs_intercepted = intercept;
 }
 
 void svm_vcpu_free_msrpm(void *msrpm)
```
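This is the usual "cache the last value programmed" pattern: recompute the desired intercept state, and return early when it matches what was programmed before, so the MSR permission bitmap is only rewritten on actual transitions. A standalone sketch with illustrative names (not kernel code):

```c
#include <stdbool.h>

struct lbr_state {
	bool intercepted;	/* last value actually programmed */
};

static void recalc_intercepts(struct lbr_state *s, bool want_intercept)
{
	if (want_intercept == s->intercepted)
		return;	/* no transition: skip the expensive bitmap updates */

	/* ...rewrite the per-MSR intercept bits here... */

	s->intercepted = want_intercept;	/* remember what was programmed */
}
```

It also explains why `svm_vcpu_create()` seeds `lbr_msrs_intercepted` to `true` in the next hunk: a fresh MSR permission bitmap intercepts everything, and the cached value must match that initial state for the early return to be safe.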
arch/x86/kvm/svm/svm.c:

```diff
@@ -1221,6 +1227,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
 	}
 
 	svm->x2avic_msrs_intercepted = true;
+	svm->lbr_msrs_intercepted = true;
 
 	svm->vmcb01.ptr = page_address(vmcb01_page);
 	svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
```
arch/x86/kvm/svm/svm.h:

```diff
@@ -336,6 +336,7 @@ struct vcpu_svm {
 	bool guest_state_loaded;
 
 	bool x2avic_msrs_intercepted;
+	bool lbr_msrs_intercepted;
 
 	/* Guest GIF value, used when vGIF is not enabled */
 	bool guest_gif;
```
include/linux/highmem.h:

```diff
@@ -249,10 +249,12 @@ static inline void clear_highpage_kasan_tagged(struct page *page)
 	kunmap_local(kaddr);
 }
 
-#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGE
+#ifndef __HAVE_ARCH_TAG_CLEAR_HIGHPAGES
 
-static inline void tag_clear_highpage(struct page *page)
+/* Return false to let people know we did not initialize the pages */
+static inline bool tag_clear_highpages(struct page *page, int numpages)
 {
+	return false;
 }
 
 #endif
```
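The `#ifndef` stub gives architectures without MTE-style tagging a no-op that reports `false`, so generic code can call `tag_clear_highpages()` unconditionally. An architecture opts in from its own headers, as the arm64 hunk at the top of this compare does; schematically:

```c
/* Sketch of the opt-in, as in the arch/arm64/include/asm/page.h hunk above. */
bool tag_clear_highpages(struct page *to, int numpages);
#define __HAVE_ARCH_TAG_CLEAR_HIGHPAGES
```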
mm/page_alloc.c:

```diff
@@ -1822,14 +1822,9 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 	 * If memory tags should be zeroed
 	 * (which happens only when memory should be initialized as well).
 	 */
-	if (zero_tags) {
-		/* Initialize both memory and memory tags. */
-		for (i = 0; i != 1 << order; ++i)
-			tag_clear_highpage(page + i);
-
-		/* Take note that memory was initialized by the loop above. */
-		init = false;
-	}
+	if (zero_tags)
+		init = !tag_clear_highpages(page, 1 << order);
+
 	if (!should_skip_kasan_unpoison(gfp_flags) &&
 	    kasan_unpoison_pages(page, order, init)) {
 		/* Take note that memory was initialized by KASAN. */
```
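A note on the one-line replacement: `zero_tags` is only set when the memory should be initialized as well, so `init = !tag_clear_highpages(page, 1 << order)` folds both outcomes into a single assignment. An illustrative expansion (not kernel code):

```c
/* Expansion of: init = !tag_clear_highpages(page, 1 << order); */
if (zero_tags) {
	if (tag_clear_highpages(page, 1 << order))
		init = false;	/* helper zeroed both data and tags */
	else
		init = true;	/* no MTE: the generic init path must zero the pages */
}
```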