Originally, collapse of file pages was intended only for tmpfs/shmem, to merge its pages into THPs in the background. However, tmpfs/shmem is no longer alone in supporting large folios: other file systems (such as XFS, erofs, ...) support them as well. Therefore, it is time to decouple the support of file folio collapse from SHMEM.

Link: https://lkml.kernel.org/r/ce5c2314e0368cf34bda26f9bacf01c982d4da17.1747119309.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Zi Yan <ziy@nvidia.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
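As a rough illustration of what such a decoupling means in practice, consider the kind of guard it removes. The helper below is hypothetical (invented name, not a hunk from the actual patch): previously a shmem-only build-time check gated file collapse, whereas afterwards any file-backed VMA on a filesystem with large folio support can qualify.

/* Hypothetical sketch, for illustration only; not the actual patch. */
static bool file_collapse_candidate(struct vm_area_struct *vma)
{
	/* Before: file collapse only considered when shmem was built in. */
	/* return IS_ENABLED(CONFIG_SHMEM) && vma->vm_file; */

	/* After: other filesystems (XFS, erofs, ...) also support large folios. */
	return vma->vm_file != NULL;
}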
60 lines
1.6 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KHUGEPAGED_H
#define _LINUX_KHUGEPAGED_H

extern unsigned int khugepaged_max_ptes_none __read_mostly;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern struct attribute_group khugepaged_attr_group;

extern int khugepaged_init(void);
extern void khugepaged_destroy(void);
extern int start_stop_khugepaged(void);
extern void __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern void khugepaged_enter_vma(struct vm_area_struct *vma,
				 unsigned long vm_flags);
extern void khugepaged_min_free_kbytes_update(void);
extern bool current_is_khugepaged(void);
extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
				   bool install_pmd);

static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
		__khugepaged_enter(mm);
}

static inline void khugepaged_exit(struct mm_struct *mm)
{
	if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
		__khugepaged_exit(mm);
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
}
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
					unsigned long vm_flags)
{
}
static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
					  unsigned long addr, bool install_pmd)
{
	return 0;
}

static inline void khugepaged_min_free_kbytes_update(void)
{
}

static inline bool current_is_khugepaged(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_KHUGEPAGED_H */
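For context, the two inline helpers above are meant to be called from the mm lifecycle paths. A minimal sketch of the intended usage follows; the example_* wrappers are invented for illustration, and the real call sites are the fork and mmput paths.

/* Sketch only: the example_* names are hypothetical. */
static void example_fork_path(struct mm_struct *mm, struct mm_struct *oldmm)
{
	/* If the parent mm was registered with khugepaged
	 * (MMF_VM_HUGEPAGE set), register the child mm too.
	 */
	khugepaged_fork(mm, oldmm);
}

static void example_exit_path(struct mm_struct *mm)
{
	/* Unregister before teardown; __khugepaged_exit() also
	 * synchronizes with an in-progress khugepaged scan of this mm.
	 */
	khugepaged_exit(mm);
}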