Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-01-11 17:10:13 +00:00)
Introduce softleaf_from_pmd() to perform for PMDs the equivalent operation that softleaf_from_pte() performs for PTEs, and cascade changes through the code base accordingly, introducing helpers as necessary.

We are then able to eliminate pmd_to_swp_entry(), is_pmd_migration_entry(), is_pmd_device_private_entry() and is_pmd_non_present_folio_entry().

This further establishes the use of leaf operations throughout the code base and lays the foundations for eliminating is_swap_pmd().

No functional change intended.

[lorenzo.stoakes@oracle.com: check writable, not readable/writable, per Vlastimil]
Link: https://lkml.kernel.org/r/cd97b6ec-00f9-45a4-9ae0-8f009c212a94@lucifer.local
Link: https://lkml.kernel.org/r/3fb431699639ded8fdc63d2210aa77a38c8891f1.1762812360.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Byungchul Park <byungchul@sk.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Chris Li <chrisl@kernel.org>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
Cc: Gregory Price <gourry@gourry.net>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jann Horn <jannh@google.com>
Cc: Janosch Frank <frankja@linux.ibm.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
Cc: Kairui Song <kasong@tencent.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Leon Romanovsky <leon@kernel.org>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mathew Brost <matthew.brost@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Nico Pache <npache@redhat.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Wei Xu <weixugc@google.com>
Cc: xu xin <xu.xin16@zte.com.cn>
Cc: Yuanchu Xie <yuanchu@google.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
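For context, the new PMD-level helper named by this commit lives outside this header after the change. A minimal sketch of what softleaf_from_pmd() plausibly looks like, assuming it mirrors the pte_to_swp_entry()-style decode found below; the softleaf_t type and the exact body are assumptions, not the series' actual code:

static inline softleaf_t softleaf_from_pmd(pmd_t pmd)
{
	swp_entry_t arch_entry;

	/* Strip software bits first, as pte_swp_clear_flags() does for PTEs. */
	if (pmd_swp_soft_dirty(pmd))
		pmd = pmd_swp_clear_soft_dirty(pmd);
	if (pmd_swp_uffd_wp(pmd))
		pmd = pmd_swp_clear_uffd_wp(pmd);

	arch_entry = __pmd_to_swp_entry(pmd);	/* arch-dependent decode */
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}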
496 lines · 13 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SWAPOPS_H
#define _LINUX_SWAPOPS_H

#include <linux/radix-tree.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

#ifdef CONFIG_SWAP
#include <linux/swapfile.h>
#endif	/* CONFIG_SWAP */

/*
 * swapcache pages are stored in the swapper_space radix tree.  We want to
 * get good packing density in that tree, so the index should be dense in
 * the low-order bits.
 *
 * We arrange the `type' and `offset' fields so that `type' is at the six
 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
 * remaining bits.  Although `type' itself needs only five bits, we allow for
 * shmem/tmpfs to shift it all up a further one bit: see swp_to_radix_entry().
 *
 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
 */
#define SWP_TYPE_SHIFT		(BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
#define SWP_OFFSET_MASK		((1UL << SWP_TYPE_SHIFT) - 1)

/*
 * Definitions only for PFN swap entries (see is_pfn_swap_entry()).  To
 * store PFN, we only need SWP_PFN_BITS bits.  Each of the pfn swap entries
 * can use the extra bits to store other information besides PFN.
 */
#ifdef MAX_PHYSMEM_BITS
#define SWP_PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
#else  /* MAX_PHYSMEM_BITS */
#define SWP_PFN_BITS		min_t(int, \
				      sizeof(phys_addr_t) * 8 - PAGE_SHIFT, \
				      SWP_TYPE_SHIFT)
#endif	/* MAX_PHYSMEM_BITS */
#define SWP_PFN_MASK		(BIT(SWP_PFN_BITS) - 1)
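/*
 * Illustrative numbers (an assumption, since these values are
 * arch-dependent): on x86-64 with MAX_PHYSMEM_BITS == 46 and
 * PAGE_SHIFT == 12, SWP_PFN_BITS is 34, so a pfn swap entry keeps the PFN
 * in the low 34 bits of the offset and leaves the bits above them free for
 * extra flags such as the migration A/D bits defined below.
 */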

/**
 * Migration swap entry specific bitfield definitions.  Layout:
 *
 *   |----------+--------------------|
 *   | swp_type |     swp_offset     |
 *   |----------+--------+-+-+-------|
 *   |          |  resv  |D|A|  PFN  |
 *   |----------+--------+-+-+-------|
 *
 * @SWP_MIG_YOUNG_BIT: Whether the page used to have young bit set (bit A)
 * @SWP_MIG_DIRTY_BIT: Whether the page used to have dirty bit set (bit D)
 *
 * Note: A/D bits will be stored in migration entries iff there are enough
 * free bits in the arch-specific swp offset.  By default we'll ignore A/D
 * bits when migrating a page.  Please refer to migration_entry_supports_ad()
 * for more information.  If there are more bits besides PFN and A/D bits,
 * they should be reserved and always be zeros.
 */
#define SWP_MIG_YOUNG_BIT	(SWP_PFN_BITS)
#define SWP_MIG_DIRTY_BIT	(SWP_PFN_BITS + 1)
#define SWP_MIG_TOTAL_BITS	(SWP_PFN_BITS + 2)

#define SWP_MIG_YOUNG		BIT(SWP_MIG_YOUNG_BIT)
#define SWP_MIG_DIRTY		BIT(SWP_MIG_DIRTY_BIT)

static inline bool is_pfn_swap_entry(swp_entry_t entry);

/* Clear all flags but only keep swp_entry_t related information */
static inline pte_t pte_swp_clear_flags(pte_t pte)
{
	if (pte_swp_exclusive(pte))
		pte = pte_swp_clear_exclusive(pte);
	if (pte_swp_soft_dirty(pte))
		pte = pte_swp_clear_soft_dirty(pte);
	if (pte_swp_uffd_wp(pte))
		pte = pte_swp_clear_uffd_wp(pte);
	return pte;
}

/*
 * Store a type+offset into a swp_entry_t in an arch-independent format
 */
static inline swp_entry_t swp_entry(unsigned long type, pgoff_t offset)
{
	swp_entry_t ret;

	ret.val = (type << SWP_TYPE_SHIFT) | (offset & SWP_OFFSET_MASK);
	return ret;
}

/*
 * Extract the `type' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline unsigned swp_type(swp_entry_t entry)
{
	return (entry.val >> SWP_TYPE_SHIFT);
}

/*
 * Extract the `offset' field from a swp_entry_t.  The swp_entry_t is in
 * arch-independent format
 */
static inline pgoff_t swp_offset(swp_entry_t entry)
{
	return entry.val & SWP_OFFSET_MASK;
}
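/*
 * Worked example (illustrative; assumes a 64-bit arch where
 * BITS_PER_XA_VALUE == 63 and MAX_SWAPFILES_SHIFT == 5, giving
 * SWP_TYPE_SHIFT == 58):
 *
 *	swp_entry_t e = swp_entry(2, 0x1234);
 *	e.val == (2UL << 58) | 0x1234
 *	swp_type(e) == 2
 *	swp_offset(e) == 0x1234
 */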

/*
 * This should only be called upon a pfn swap entry to get the PFN stored
 * in the swap entry.  Please refer to is_pfn_swap_entry() for the
 * definition of pfn swap entry.
 */
static inline unsigned long swp_offset_pfn(swp_entry_t entry)
{
	VM_BUG_ON(!is_pfn_swap_entry(entry));
	return swp_offset(entry) & SWP_PFN_MASK;
}

/*
 * Convert the arch-dependent pte representation of a swp_entry_t into an
 * arch-independent swp_entry_t.
 */
static inline swp_entry_t pte_to_swp_entry(pte_t pte)
{
	swp_entry_t arch_entry;

	pte = pte_swp_clear_flags(pte);
	arch_entry = __pte_to_swp_entry(pte);
	return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}

/*
 * Convert the arch-independent representation of a swp_entry_t into the
 * arch-dependent pte representation.
 */
static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pte(arch_entry);
}
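/*
 * Usage sketch (illustrative, not taken from this file): fault handlers
 * decode a non-present PTE and dispatch on the entry type, e.g.:
 *
 *	swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
 *
 *	if (is_migration_entry(entry))
 *		migration_entry_wait(vma->vm_mm, vmf->pmd, vmf->address);
 *	else if (!non_swap_entry(entry))
 *		... swap the page in from swp_type()/swp_offset() ...
 */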

static inline swp_entry_t radix_to_swp_entry(void *arg)
{
	swp_entry_t entry;

	entry.val = xa_to_value(arg);
	return entry;
}

static inline void *swp_to_radix_entry(swp_entry_t entry)
{
	return xa_mk_value(entry.val);
}
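/*
 * Round-trip sketch (illustrative): shmem/tmpfs stores swap entries as
 * XArray values, and xa_mk_value() shifts the payload up by one bit, which
 * is the "further one bit" reserved in the layout comment above:
 *
 *	void *val = swp_to_radix_entry(e);	(val == (e.val << 1) | 1)
 *	radix_to_swp_entry(val).val == e.val
 */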

#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_READ, offset);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_WRITE, offset);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	int type = swp_type(entry);
	return type == SWP_DEVICE_READ || type == SWP_DEVICE_WRITE;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_DEVICE_WRITE);
}

static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(SWP_DEVICE_EXCLUSIVE, offset);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_DEVICE_EXCLUSIVE;
}

#else /* CONFIG_DEVICE_PRIVATE */
static inline swp_entry_t make_readable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_device_private_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline bool is_writable_device_private_entry(swp_entry_t entry)
{
	return false;
}

static inline swp_entry_t make_device_exclusive_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline bool is_device_exclusive_entry(swp_entry_t entry)
{
	return false;
}

#endif /* CONFIG_DEVICE_PRIVATE */

#ifdef CONFIG_MIGRATION
static inline int is_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ ||
			swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE ||
			swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_WRITE);
}

static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ);
}

static inline int is_readable_exclusive_migration_entry(swp_entry_t entry)
{
	return unlikely(swp_type(entry) == SWP_MIGRATION_READ_EXCLUSIVE);
}

static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ, offset);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_READ_EXCLUSIVE, offset);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(SWP_MIGRATION_WRITE, offset);
}

/*
 * Returns whether the host has a large enough swap offset field to support
 * carrying over pgtable A/D bits for page migrations.  The result is
 * pretty much arch specific.
 */
static inline bool migration_entry_supports_ad(void)
{
#ifdef CONFIG_SWAP
	return swap_migration_ad_supported;
#else  /* CONFIG_SWAP */
	return false;
#endif	/* CONFIG_SWAP */
}

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_YOUNG);
	return entry;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	if (migration_entry_supports_ad())
		return swp_entry(swp_type(entry),
				 swp_offset(entry) | SWP_MIG_DIRTY);
	return entry;
}
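/*
 * Example (illustrative): on an arch where migration_entry_supports_ad()
 * is true, marking a migration entry young sets SWP_MIG_YOUNG inside the
 * offset field, above the PFN bits, and swp_offset_pfn() masks it back off:
 *
 *	swp_entry_t e = make_readable_migration_entry(pfn);
 *	e = make_migration_entry_young(e);
 *	swp_offset(e) == (pfn | SWP_MIG_YOUNG)
 *	swp_offset_pfn(e) == pfn
 */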

extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
				 unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
				      unsigned long addr, pte_t *pte);
#else /* CONFIG_MIGRATION */
static inline swp_entry_t make_readable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_readable_exclusive_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline swp_entry_t make_writable_migration_entry(pgoff_t offset)
{
	return swp_entry(0, 0);
}

static inline int is_migration_entry(swp_entry_t swp)
{
	return 0;
}

static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
					unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *pte) { }
static inline int is_writable_migration_entry(swp_entry_t entry)
{
	return 0;
}
static inline int is_readable_migration_entry(swp_entry_t entry)
{
	return 0;
}

static inline swp_entry_t make_migration_entry_young(swp_entry_t entry)
{
	return entry;
}

static inline swp_entry_t make_migration_entry_dirty(swp_entry_t entry)
{
	return entry;
}

#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_MEMORY_FAILURE

/*
 * Support for hardware poisoned pages
 */
static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	BUG_ON(!PageLocked(page));
	return swp_entry(SWP_HWPOISON, page_to_pfn(page));
}

static inline int is_hwpoison_entry(swp_entry_t entry)
{
	return swp_type(entry) == SWP_HWPOISON;
}

#else

static inline swp_entry_t make_hwpoison_entry(struct page *page)
{
	return swp_entry(0, 0);
}

static inline int is_hwpoison_entry(swp_entry_t swp)
{
	return 0;
}
#endif

typedef unsigned long pte_marker;

#define PTE_MARKER_UFFD_WP		BIT(0)
/*
 * "Poisoned" here is meant in the very general sense of "future accesses are
 * invalid", instead of referring very specifically to hardware memory errors.
 * This marker is meant to represent any of various different causes of this.
 *
 * Note that, when encountered by the faulting logic, PTEs with this marker
 * will result in VM_FAULT_HWPOISON and thus trigger the hardware memory
 * error logic regardless.
 */
#define PTE_MARKER_POISONED		BIT(1)
/*
 * Indicates that, on fault, this PTE will cause a SIGSEGV signal to be
 * sent.  This means guard markers behave in effect as if the region were
 * mapped PROT_NONE, rather than as if it were a memory hole or equivalent.
 */
#define PTE_MARKER_GUARD		BIT(2)
#define PTE_MARKER_MASK			(BIT(3) - 1)
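/*
 * Usage sketch (illustrative, not taken from this file): a guard region is
 * installed by writing a marker PTE, after which a fault on the address
 * delivers SIGSEGV instead of populating a page:
 *
 *	pte_t marker = make_pte_marker(PTE_MARKER_GUARD);
 *	set_pte_at(mm, addr, ptep, marker);
 */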

static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
	return swp_entry(SWP_PTE_MARKER, marker);
}

static inline pte_t make_pte_marker(pte_marker marker)
{
	return swp_entry_to_pte(make_pte_marker_entry(marker));
}

static inline swp_entry_t make_poisoned_swp_entry(void)
{
	return make_pte_marker_entry(PTE_MARKER_POISONED);
}

static inline swp_entry_t make_guard_swp_entry(void)
{
	return make_pte_marker_entry(PTE_MARKER_GUARD);
}

static inline struct page *pfn_swap_entry_to_page(swp_entry_t entry)
{
	struct page *p = pfn_to_page(swp_offset_pfn(entry));

	/*
	 * Any use of migration entries may only occur while the
	 * corresponding page is locked
	 */
	BUG_ON(is_migration_entry(entry) && !PageLocked(p));

	return p;
}

static inline struct folio *pfn_swap_entry_folio(swp_entry_t entry)
{
	struct folio *folio = pfn_folio(swp_offset_pfn(entry));

	/*
	 * Any use of migration entries may only occur while the
	 * corresponding folio is locked
	 */
	BUG_ON(is_migration_entry(entry) && !folio_test_locked(folio));

	return folio;
}

/*
 * A pfn swap entry is a special type of swap entry that always has a pfn
 * stored in the swap offset.  They can either be used to represent
 * unaddressable device memory, to restrict access to a page undergoing
 * migration or to represent a pfn which has been hwpoisoned and unmapped.
 */
static inline bool is_pfn_swap_entry(swp_entry_t entry)
{
	/* Make sure the swp offset can always store the needed fields */
	BUILD_BUG_ON(SWP_TYPE_SHIFT < SWP_PFN_BITS);

	return is_migration_entry(entry) || is_device_private_entry(entry) ||
	       is_device_exclusive_entry(entry) || is_hwpoison_entry(entry);
}

struct page_vma_mapped_walk;

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
				   struct page *page);

extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
				 struct page *new);

extern void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd);

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	swp_entry_t arch_entry;

	arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
	return __swp_entry_to_pmd(arch_entry);
}

#else /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
					struct page *new)
{
	BUILD_BUG();
}

static inline void pmd_migration_entry_wait(struct mm_struct *m, pmd_t *p) { }

static inline pmd_t swp_entry_to_pmd(swp_entry_t entry)
{
	return __pmd(0);
}

#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static inline int non_swap_entry(swp_entry_t entry)
{
	return swp_type(entry) >= MAX_SWAPFILES;
}
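/*
 * Example (illustrative): all special entry types are numbered at or above
 * MAX_SWAPFILES, so a single comparison separates genuine swap entries
 * from migration/device/hwpoison/marker entries:
 *
 *	non_swap_entry(make_readable_migration_entry(pfn)) == true
 *	non_swap_entry(swp_entry(0, off)) == false	(a real swap slot)
 */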

#endif /* CONFIG_MMU */
#endif /* _LINUX_SWAPOPS_H */