mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-01-11 17:10:13 +00:00
27 hotfixes. 12 are cc:stable, 18 are MM.

There's a three patch series from Jiayuan Chen which fixes some issues
with KASAN and vmalloc. Apart from that it's the usual shower of
singletons - please see the respective changelogs for details.

-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCaVIWxwAKCRDdBJ7gKXxA
jt6HAP49s/mEIIbuZbqnX8hxDrvYdYffs+RsSLPEZoR+yKG/7gD/VqSZhMoPw53b
rMZ56djXNjWxsOAfiVbZit3SFuQfnQ4=
=5rGD
-----END PGP SIGNATURE-----

Merge tag 'mm-hotfixes-stable-2025-12-28-21-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
 "27 hotfixes. 12 are cc:stable, 18 are MM.

  There's a patch series from Jiayuan Chen which fixes some issues with
  KASAN and vmalloc. Apart from that it's the usual shower of singletons -
  please see the respective changelogs for details"

* tag 'mm-hotfixes-stable-2025-12-28-21-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (27 commits)
  mm/ksm: fix pte_unmap_unlock of wrong address in break_ksm_pmd_entry
  mm/page_owner: fix memory leak in page_owner_stack_fops->release()
  mm/memremap: fix spurious large folio warning for FS-DAX
  MAINTAINERS: notify the "Device Memory" community of memory hotplug changes
  sparse: update MAINTAINERS info
  mm/page_alloc: report 1 as zone_batchsize for !CONFIG_MMU
  mm: consider non-anon swap cache folios in folio_expected_ref_count()
  rust: maple_tree: rcu_read_lock() in destructor to silence lockdep
  mm: memcg: fix unit conversion for K() macro in OOM log
  mm: fixup pfnmap memory failure handling to use pgoff
  tools/mm/page_owner_sort: fix timestamp comparison for stable sorting
  selftests/mm: fix thread state check in uffd-unit-tests
  kernel/kexec: fix IMA when allocation happens in CMA area
  kernel/kexec: change the prototype of kimage_map_segment()
  MAINTAINERS: add ABI headers to KHO and LIVE UPDATE
  .mailmap: remove one of the entries for WangYuli
  mm/damon/vaddr: fix missing pte_unmap_unlock in damos_va_migrate_pmd_entry()
  MAINTAINERS: update one straggling entry for Bartosz Golaszewski
  mm/page_alloc: change all pageblocks migrate type on coalescing
  mm: leafops.h: correct kernel-doc function param. names
  ...
commit 0b34fd0fea

.mailmap | 4
@@ -127,7 +127,8 @@ Barry Song <baohua@kernel.org> <Baohua.Song@csr.com>
 Barry Song <baohua@kernel.org> <barry.song@analog.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
 Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
-Bartosz Golaszewski <brgl@bgdev.pl> <bgolaszewski@baylibre.com>
+Bartosz Golaszewski <brgl@kernel.org> <bartosz.golaszewski@linaro.org>
+Bartosz Golaszewski <brgl@kernel.org> <bgolaszewski@baylibre.com>
 Ben Dooks <ben-linux@fluff.org> <ben.dooks@simtec.co.uk>
 Ben Dooks <ben-linux@fluff.org> <ben.dooks@sifive.com>
 Ben Gardner <bgardner@wabtec.com>
@@ -857,7 +858,6 @@ Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@parallels.com>
 Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
 WangYuli <wangyuli@aosc.io> <wangyl5933@chinaunicom.cn>
 WangYuli <wangyuli@aosc.io> <wangyuli@deepin.org>
-WangYuli <wangyuli@aosc.io> <wangyuli@uniontech.com>
 Weiwen Hu <huweiwen@linux.alibaba.com> <sehuww@mail.scut.edu.cn>
 WeiXiong Liao <gmpy.liaowx@gmail.com> <liaoweixiong@allwinnertech.com>
 Wen Gong <quic_wgong@quicinc.com> <wgong@codeaurora.org>
@@ -13959,6 +13959,7 @@ S: Maintained
 F: Documentation/admin-guide/mm/kho.rst
 F: Documentation/core-api/kho/*
 F: include/linux/kexec_handover.h
+F: include/linux/kho/
 F: kernel/liveupdate/kexec_handover*
 F: lib/test_kho.c
 F: tools/testing/selftests/kho/
@@ -14637,6 +14638,7 @@ S: Maintained
 F: Documentation/core-api/liveupdate.rst
 F: Documentation/mm/memfd_preservation.rst
 F: Documentation/userspace-api/liveupdate.rst
+F: include/linux/kho/abi/
 F: include/linux/liveupdate.h
 F: include/linux/liveupdate/
 F: include/uapi/linux/liveupdate.h
@@ -16426,6 +16428,7 @@ MEMORY HOT(UN)PLUG
 M: David Hildenbrand <david@kernel.org>
 M: Oscar Salvador <osalvador@suse.de>
 L: linux-mm@kvack.org
+L: linux-cxl@vger.kernel.org
 S: Maintained
 F: Documentation/admin-guide/mm/memory-hotplug.rst
 F: Documentation/core-api/memory-hotplug.rst
@@ -16751,6 +16754,7 @@ F: tools/testing/selftests/mm/transhuge-stress.c
 
 MEMORY MANAGEMENT - USERFAULTFD
 M: Andrew Morton <akpm@linux-foundation.org>
+M: Mike Rapoport <rppt@kernel.org>
 R: Peter Xu <peterx@redhat.com>
 L: linux-mm@kvack.org
 S: Maintained
@@ -21345,7 +21349,7 @@ F: Documentation/devicetree/bindings/net/qcom,bam-dmux.yaml
 F: drivers/net/wwan/qcom_bam_dmux.c
 
 QUALCOMM BLUETOOTH DRIVER
-M: Bartosz Golaszewski <brgl@bgdev.pl>
+M: Bartosz Golaszewski <brgl@kernel.org>
 L: linux-arm-msm@vger.kernel.org
 S: Maintained
 F: drivers/bluetooth/btqca.[ch]
@@ -24571,7 +24575,7 @@ F: drivers/tty/vcc.c
 F: include/linux/sunserialcore.h
 
 SPARSE CHECKER
-M: "Luc Van Oostenryck" <luc.vanoostenryck@gmail.com>
+M: Chris Li <sparse@chrisli.org>
 L: linux-sparse@vger.kernel.org
 S: Maintained
 W: https://sparse.docs.kernel.org/
@@ -44,6 +44,7 @@ struct gen_pool;
  * @nr: The number of zeroed bits we're looking for
  * @data: optional additional data used by the callback
  * @pool: the pool being allocated from
+ * @start_addr: start address of memory chunk
  */
 typedef unsigned long (*genpool_algo_t)(unsigned long *map,
 			unsigned long size,
@@ -28,6 +28,7 @@ typedef unsigned int __bitwise kasan_vmalloc_flags_t;
 #define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
 #define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
 #define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)
+#define KASAN_VMALLOC_KEEP_TAG		((__force kasan_vmalloc_flags_t)0x08u)
 
 #define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply exsiting page range */
 #define KASAN_VMALLOC_TLB_FLUSH 0x2 /* TLB flush */
@@ -630,6 +631,16 @@ static __always_inline void kasan_poison_vmalloc(const void *start,
 		__kasan_poison_vmalloc(start, size);
 }
 
+void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+				 kasan_vmalloc_flags_t flags);
+static __always_inline void
+kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+			  kasan_vmalloc_flags_t flags)
+{
+	if (kasan_enabled())
+		__kasan_unpoison_vmap_areas(vms, nr_vms, flags);
+}
+
 #else /* CONFIG_KASAN_VMALLOC */
 
 static inline void kasan_populate_early_vm_area_shadow(void *start,
@@ -654,6 +665,11 @@ static inline void *kasan_unpoison_vmalloc(const void *start,
 static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
 { }
 
+static __always_inline void
+kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+			  kasan_vmalloc_flags_t flags)
+{ }
+
 #endif /* CONFIG_KASAN_VMALLOC */
 
 #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
@@ -530,7 +530,7 @@ extern bool kexec_file_dbg_print;
 #define kexec_dprintk(fmt, arg...) \
 	do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
 
-extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size);
+extern void *kimage_map_segment(struct kimage *image, int idx);
 extern void kimage_unmap_segment(void *buffer);
 #else /* !CONFIG_KEXEC_CORE */
 struct pt_regs;
@@ -540,7 +540,7 @@ static inline void __crash_kexec(struct pt_regs *regs) { }
 static inline void crash_kexec(struct pt_regs *regs) { }
 static inline int kexec_should_crash(struct task_struct *p) { return 0; }
 static inline int kexec_crash_loaded(void) { return 0; }
-static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size)
+static inline void *kimage_map_segment(struct kimage *image, int idx)
 { return NULL; }
 static inline void kimage_unmap_segment(void *buffer) { }
 #define kexec_in_progress false
@@ -133,7 +133,7 @@ static inline bool softleaf_is_none(softleaf_t entry)
 
 /**
  * softleaf_type() - Identify the type of leaf entry.
- * @enntry: Leaf entry.
+ * @entry: Leaf entry.
  *
  * Returns: the leaf entry type associated with @entry.
  */
@@ -534,7 +534,7 @@ static inline bool pte_is_uffd_wp_marker(pte_t pte)
 /**
  * pte_is_uffd_marker() - Does this PTE entry encode a userfault-specific marker
  * leaf entry?
- * @entry: Leaf entry.
+ * @pte: PTE entry.
  *
  * It's useful to be able to determine which leaf entries encode UFFD-specific
  * markers so we can handle these correctly.
@@ -9,6 +9,8 @@ struct pfn_address_space;
 struct pfn_address_space {
 	struct interval_tree_node node;
 	struct address_space *mapping;
+	int (*pfn_to_vma_pgoff)(struct vm_area_struct *vma,
+				unsigned long pfn, pgoff_t *pgoff);
 };
 
 int register_pfn_address_space(struct pfn_address_space *pfn_space);
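To illustrate how the new pfn_to_vma_pgoff hook is meant to be used - a sketch only, with hypothetical driver names (mydrv_*, including the base-PFN helper) that are not from this series:

static int mydrv_pfn_to_vma_pgoff(struct vm_area_struct *vma,
				  unsigned long pfn, pgoff_t *pgoff)
{
	/* translate the failing PFN into the page offset it backs in this
	 * VMA; mydrv_region_start_pfn() is an assumed driver helper */
	*pgoff = vma->vm_pgoff + (pfn - mydrv_region_start_pfn(vma));
	return 0;
}

static struct pfn_address_space mydrv_pfn_space = {
	/* .node range and .mapping filled in by the driver before registering */
	.pfn_to_vma_pgoff = mydrv_pfn_to_vma_pgoff,
};

The memory-failure hunks further down reject a registration whose callback is NULL and use the callback to build the kill list by pgoff rather than by raw PFN.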
@@ -2459,10 +2459,10 @@ static inline int folio_expected_ref_count(const struct folio *folio)
 	if (WARN_ON_ONCE(page_has_type(&folio->page) && !folio_test_hugetlb(folio)))
 		return 0;
 
-	if (folio_test_anon(folio)) {
-		/* One reference per page from the swapcache. */
-		ref_count += folio_test_swapcache(folio) << order;
-	} else {
+	/* One reference per page from the swapcache. */
+	ref_count += folio_test_swapcache(folio) << order;
+
+	if (!folio_test_anon(folio)) {
 		/* One reference per page from the pagecache. */
 		ref_count += !!folio->mapping << order;
 		/* One reference from PG_private. */
@@ -953,17 +953,24 @@ int kimage_load_segment(struct kimage *image, int idx)
 	return result;
 }
 
-void *kimage_map_segment(struct kimage *image,
-			 unsigned long addr, unsigned long size)
+void *kimage_map_segment(struct kimage *image, int idx)
 {
+	unsigned long addr, size, eaddr;
 	unsigned long src_page_addr, dest_page_addr = 0;
-	unsigned long eaddr = addr + size;
 	kimage_entry_t *ptr, entry;
 	struct page **src_pages;
 	unsigned int npages;
+	struct page *cma;
 	void *vaddr = NULL;
 	int i;
 
+	cma = image->segment_cma[idx];
+	if (cma)
+		return page_address(cma);
+
+	addr = image->segment[idx].mem;
+	size = image->segment[idx].memsz;
+	eaddr = addr + size;
 	/*
 	 * Collect the source pages and map them in a contiguous VA range.
 	 */
@@ -1004,7 +1011,8 @@ void *kimage_map_segment(struct kimage *image,
 
 void kimage_unmap_segment(void *segment_buffer)
 {
-	vunmap(segment_buffer);
+	if (is_vmalloc_addr(segment_buffer))
+		vunmap(segment_buffer);
 }
 
 struct kexec_load_limit {
@@ -40,6 +40,8 @@ int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
 
 	if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
 		idr->idr_rt.xa_flags |= IDR_RT_MARKER;
+	if (max < base)
+		return -ENOSPC;
 
 	id = (id < base) ? 0 : id - base;
 	radix_tree_iter_init(&iter, id);
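A note on the semantics of the new check: idr_alloc()'s end argument is exclusive, so max is end - 1, and when the whole requested range lies below the IDR's base there is nothing to allocate and the call now fails fast. A small usage sketch, mirroring the idr_alloc2_test selftest added later in this series (ptr stands for any non-NULL payload):

	struct idr idr = IDR_INIT_BASE(idr, 1);		/* smallest allocatable ID is 1 */
	int id;

	id = idr_alloc(&idr, ptr, 0, 1, GFP_KERNEL);	/* max = 0 < base = 1 -> -ENOSPC */
	id = idr_alloc(&idr, ptr, 1, 2, GFP_KERNEL);	/* range contains the base -> returns 1 */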
@@ -743,7 +743,7 @@ huge_out:
 		if (!folio)
 			continue;
 		if (damos_va_filter_out(s, folio, walk->vma, addr, pte, NULL))
-			return 0;
+			continue;
 		damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
 					   migration_lists);
 		nr = folio_nr_pages(folio);
@@ -28,6 +28,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/bug.h>
+#include <linux/vmalloc.h>
 
 #include "kasan.h"
 #include "../slab.h"
@@ -575,3 +576,34 @@ bool __kasan_check_byte(const void *address, unsigned long ip)
 	}
 	return true;
 }
+
+#ifdef CONFIG_KASAN_VMALLOC
+void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+				 kasan_vmalloc_flags_t flags)
+{
+	unsigned long size;
+	void *addr;
+	int area;
+	u8 tag;
+
+	/*
+	 * If KASAN_VMALLOC_KEEP_TAG was set at this point, all vms[] pointers
+	 * would be unpoisoned with the KASAN_TAG_KERNEL which would disable
+	 * KASAN checks down the line.
+	 */
+	if (WARN_ON_ONCE(flags & KASAN_VMALLOC_KEEP_TAG))
+		return;
+
+	size = vms[0]->size;
+	addr = vms[0]->addr;
+	vms[0]->addr = __kasan_unpoison_vmalloc(addr, size, flags);
+	tag = get_tag(vms[0]->addr);
+
+	for (area = 1 ; area < nr_vms ; area++) {
+		size = vms[area]->size;
+		addr = set_tag(vms[area]->addr, tag);
+		vms[area]->addr =
+			__kasan_unpoison_vmalloc(addr, size, flags | KASAN_VMALLOC_KEEP_TAG);
+	}
+}
+#endif
@@ -361,7 +361,7 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
 		return (void *)start;
 	}
 
-	tag = kasan_random_tag();
+	tag = (flags & KASAN_VMALLOC_KEEP_TAG) ? get_tag(start) : kasan_random_tag();
 	start = set_tag(start, tag);
 
 	/* Unpoison and initialize memory up to size. */
@@ -631,7 +631,9 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
 	    !(flags & KASAN_VMALLOC_PROT_NORMAL))
 		return (void *)start;
 
-	start = set_tag(start, kasan_random_tag());
+	if (unlikely(!(flags & KASAN_VMALLOC_KEEP_TAG)))
+		start = set_tag(start, kasan_random_tag());
+
 	kasan_unpoison(start, size, false);
 	return (void *)start;
 }
mm/ksm.c | 2
@@ -650,7 +650,7 @@ static int break_ksm_pmd_entry(pmd_t *pmdp, unsigned long addr, unsigned long en
 		}
 	}
 out_unlock:
-	pte_unmap_unlock(ptep, ptl);
+	pte_unmap_unlock(start_ptep, ptl);
 	return found;
 }
 
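The bug class behind this one-liner is unlocking with a PTE cursor that the walk loop has already advanced. A sketch of the canonical pattern around pte_offset_map_lock(), for orientation only:

	pte_t *start_ptep, *ptep;
	spinlock_t *ptl;

	start_ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
	if (!start_ptep)
		return 0;
	for (ptep = start_ptep; addr < end; ptep++, addr += PAGE_SIZE) {
		/* examine *ptep ... */
	}
	/* unlock with the pointer originally returned, not the advanced cursor */
	pte_unmap_unlock(start_ptep, ptl);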
@@ -5638,6 +5638,6 @@ void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg)
 		memcg = root_mem_cgroup;
 
 	pr_warn("Memory cgroup min protection %lukB -- low protection %lukB",
-		K(atomic_long_read(&memcg->memory.children_min_usage)*PAGE_SIZE),
-		K(atomic_long_read(&memcg->memory.children_low_usage)*PAGE_SIZE));
+		K(atomic_long_read(&memcg->memory.children_min_usage)),
+		K(atomic_long_read(&memcg->memory.children_low_usage)));
 }
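The unit bug being fixed: in mm code K() already converts a page count to kilobytes, so multiplying by PAGE_SIZE first double-converts. Roughly, assuming the usual definition of the macro (shown here only for illustration):

	#define K(x) ((x) << (PAGE_SHIFT - 10))			/* pages -> KiB */

	unsigned long pages = atomic_long_read(&memcg->memory.children_min_usage);

	pr_warn("min protection %lukB", K(pages));		/* correct */
	pr_warn("min protection %lukB", K(pages * PAGE_SIZE));	/* over-reports by a factor of PAGE_SIZE */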
@@ -2161,6 +2161,9 @@ int register_pfn_address_space(struct pfn_address_space *pfn_space)
 {
 	guard(mutex)(&pfn_space_lock);
 
+	if (!pfn_space->pfn_to_vma_pgoff)
+		return -EINVAL;
+
 	if (interval_tree_iter_first(&pfn_space_itree,
 				     pfn_space->node.start,
 				     pfn_space->node.last))
@@ -2183,10 +2186,10 @@ void unregister_pfn_address_space(struct pfn_address_space *pfn_space)
 }
 EXPORT_SYMBOL_GPL(unregister_pfn_address_space);
 
-static void add_to_kill_pfn(struct task_struct *tsk,
+static void add_to_kill_pgoff(struct task_struct *tsk,
 			    struct vm_area_struct *vma,
 			    struct list_head *to_kill,
-			    unsigned long pfn)
+			    pgoff_t pgoff)
 {
 	struct to_kill *tk;
 
@@ -2197,12 +2200,12 @@ static void add_to_kill_pfn(struct task_struct *tsk,
 	}
 
 	/* Check for pgoff not backed by struct page */
-	tk->addr = vma_address(vma, pfn, 1);
+	tk->addr = vma_address(vma, pgoff, 1);
 	tk->size_shift = PAGE_SHIFT;
 
 	if (tk->addr == -EFAULT)
 		pr_info("Unable to find address %lx in %s\n",
-			pfn, tsk->comm);
+			pgoff, tsk->comm);
 
 	get_task_struct(tsk);
 	tk->tsk = tsk;
@@ -2212,11 +2215,12 @@ static void add_to_kill_pfn(struct task_struct *tsk,
 /*
  * Collect processes when the error hit a PFN not backed by struct page.
  */
-static void collect_procs_pfn(struct address_space *mapping,
+static void collect_procs_pfn(struct pfn_address_space *pfn_space,
 			      unsigned long pfn, struct list_head *to_kill)
 {
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
+	struct address_space *mapping = pfn_space->mapping;
 
 	i_mmap_lock_read(mapping);
 	rcu_read_lock();
@@ -2226,9 +2230,12 @@ static void collect_procs_pfn(struct address_space *mapping,
 		t = task_early_kill(tsk, true);
 		if (!t)
 			continue;
-		vma_interval_tree_foreach(vma, &mapping->i_mmap, pfn, pfn) {
-			if (vma->vm_mm == t->mm)
-				add_to_kill_pfn(t, vma, to_kill, pfn);
+		vma_interval_tree_foreach(vma, &mapping->i_mmap, 0, ULONG_MAX) {
+			pgoff_t pgoff;
+
+			if (vma->vm_mm == t->mm &&
+			    !pfn_space->pfn_to_vma_pgoff(vma, pfn, &pgoff))
+				add_to_kill_pgoff(t, vma, to_kill, pgoff);
 		}
 	}
 	rcu_read_unlock();
@@ -2264,7 +2271,7 @@ static int memory_failure_pfn(unsigned long pfn, int flags)
 		struct pfn_address_space *pfn_space =
 			container_of(node, struct pfn_address_space, node);
 
-		collect_procs_pfn(pfn_space->mapping, pfn, &tokill);
+		collect_procs_pfn(pfn_space, pfn, &tokill);
 
 		mf_handled = true;
 	}
@@ -427,8 +427,6 @@ void free_zone_device_folio(struct folio *folio)
 	if (folio_test_anon(folio)) {
 		for (i = 0; i < nr; i++)
 			__ClearPageAnonExclusive(folio_page(folio, i));
-	} else {
-		VM_WARN_ON_ONCE(folio_test_large(folio));
 	}
 
 	/*
@@ -914,6 +914,17 @@ buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
 			NULL) != NULL;
 }
 
+static void change_pageblock_range(struct page *pageblock_page,
+				   int start_order, int migratetype)
+{
+	int nr_pageblocks = 1 << (start_order - pageblock_order);
+
+	while (nr_pageblocks--) {
+		set_pageblock_migratetype(pageblock_page, migratetype);
+		pageblock_page += pageblock_nr_pages;
+	}
+}
+
 /*
  * Freeing function for a buddy system allocator.
 *
@@ -1000,7 +1011,7 @@ static inline void __free_one_page(struct page *page,
 			 * expand() down the line puts the sub-blocks
 			 * on the right freelists.
 			 */
-			set_pageblock_migratetype(buddy, migratetype);
+			change_pageblock_range(buddy, order, migratetype);
 		}
 
 		combined_pfn = buddy_pfn & pfn;
@@ -2147,17 +2158,6 @@ bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *pag
 
 #endif /* CONFIG_MEMORY_ISOLATION */
 
-static void change_pageblock_range(struct page *pageblock_page,
-				   int start_order, int migratetype)
-{
-	int nr_pageblocks = 1 << (start_order - pageblock_order);
-
-	while (nr_pageblocks--) {
-		set_pageblock_migratetype(pageblock_page, migratetype);
-		pageblock_page += pageblock_nr_pages;
-	}
-}
-
 static inline bool boost_watermark(struct zone *zone)
 {
 	unsigned long max_boost;
@@ -5924,7 +5924,7 @@ static int zone_batchsize(struct zone *zone)
 	 * recycled, this leads to the once large chunks of space being
 	 * fragmented and becoming unavailable for high-order allocations.
 	 */
-	return 0;
+	return 1;
 #endif
 }
 
@@ -952,7 +952,7 @@ static const struct file_operations page_owner_stack_fops = {
 	.open		= page_owner_stack_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
-	.release	= seq_release,
+	.release	= seq_release_private,
 };
 
 static int page_owner_threshold_get(void *data, u64 *val)
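This follows the usual seq_file pairing rule: an open routine that allocates per-reader state with seq_open_private() must be matched by seq_release_private(), otherwise that allocation leaks on every close. A sketch of the pattern with hypothetical names (example_*), not the page_owner code itself:

	static int example_stack_open(struct inode *inode, struct file *file)
	{
		/* allocates the iterator and stores it in seq_file->private */
		return seq_open_private(file, &example_seq_ops,
					sizeof(struct example_iter));
	}

	static const struct file_operations example_fops = {
		.open		= example_stack_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= seq_release_private,	/* frees what open allocated */
	};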
@@ -4331,7 +4331,9 @@ void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align
 		 */
 		if (size <= alloced_size) {
 			kasan_unpoison_vmalloc(p + old_size, size - old_size,
-					       KASAN_VMALLOC_PROT_NORMAL);
+					       KASAN_VMALLOC_PROT_NORMAL |
+					       KASAN_VMALLOC_VM_ALLOC |
+					       KASAN_VMALLOC_KEEP_TAG);
 			/*
 			 * No need to zero memory here, as unused memory will have
 			 * already been zeroed at initial allocation time or during
@@ -5025,9 +5027,7 @@ retry:
 	 * With hardware tag-based KASAN, marking is skipped for
 	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
 	 */
-	for (area = 0; area < nr_vms; area++)
-		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
-				vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
+	kasan_unpoison_vmap_areas(vms, nr_vms, KASAN_VMALLOC_PROT_NORMAL);
 
 	kfree(vas);
 	return vms;
@@ -265,7 +265,16 @@ impl<T: ForeignOwnable> MapleTree<T> {
         loop {
             // This uses the raw accessor because we're destroying pointers without removing them
             // from the maple tree, which is only valid because this is the destructor.
-            let ptr = ma_state.mas_find_raw(usize::MAX);
+            //
+            // Take the rcu lock because mas_find_raw() requires that you hold either the spinlock
+            // or the rcu read lock. This is only really required if memory reclaim might
+            // reallocate entries in the tree, as we otherwise have exclusive access. That feature
+            // doesn't exist yet, so for now, taking the rcu lock only serves the purpose of
+            // silencing lockdep.
+            let ptr = {
+                let _rcu = kernel::sync::rcu::Guard::new();
+                ma_state.mas_find_raw(usize::MAX)
+            };
             if ptr.is_null() {
                 break;
             }
@@ -250,9 +250,7 @@ void ima_kexec_post_load(struct kimage *image)
 	if (!image->ima_buffer_addr)
 		return;
 
-	ima_kexec_buffer = kimage_map_segment(image,
-					      image->ima_buffer_addr,
-					      image->ima_buffer_size);
+	ima_kexec_buffer = kimage_map_segment(image, image->ima_segment_index);
 	if (!ima_kexec_buffer) {
 		pr_err("Could not map measurements buffer.\n");
 		return;
@@ -181,7 +181,11 @@ static int compare_ts(const void *p1, const void *p2)
 {
 	const struct block_list *l1 = p1, *l2 = p2;
 
-	return l1->ts_nsec < l2->ts_nsec ? -1 : 1;
+	if (l1->ts_nsec < l2->ts_nsec)
+		return -1;
+	if (l1->ts_nsec > l2->ts_nsec)
+		return 1;
+	return 0;
 }
 
 static int compare_cull_condition(const void *p1, const void *p2)
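A comparator that never returns 0 does not define a consistent ordering for equal timestamps, so repeated sorts of the same data can interleave equal entries differently; the three-way form above restores a proper total order. A minimal userspace illustration of the same comparator shape:

	#include <stdlib.h>

	static int compare_u64(const void *a, const void *b)
	{
		unsigned long long x = *(const unsigned long long *)a;
		unsigned long long y = *(const unsigned long long *)b;

		if (x < y)
			return -1;
		if (x > y)
			return 1;
		return 0;	/* equal keys must report equality */
	}

	/* usage: qsort(timestamps, n, sizeof(timestamps[0]), compare_u64); */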
@@ -57,6 +57,26 @@ void idr_alloc_test(void)
 	idr_destroy(&idr);
 }
 
+void idr_alloc2_test(void)
+{
+	int id;
+	struct idr idr = IDR_INIT_BASE(idr, 1);
+
+	id = idr_alloc(&idr, idr_alloc2_test, 0, 1, GFP_KERNEL);
+	assert(id == -ENOSPC);
+
+	id = idr_alloc(&idr, idr_alloc2_test, 1, 2, GFP_KERNEL);
+	assert(id == 1);
+
+	id = idr_alloc(&idr, idr_alloc2_test, 0, 1, GFP_KERNEL);
+	assert(id == -ENOSPC);
+
+	id = idr_alloc(&idr, idr_alloc2_test, 0, 2, GFP_KERNEL);
+	assert(id == -ENOSPC);
+
+	idr_destroy(&idr);
+}
+
 void idr_replace_test(void)
 {
 	DEFINE_IDR(idr);
@@ -409,6 +429,7 @@ void idr_checks(void)
 
 	idr_replace_test();
 	idr_alloc_test();
+	idr_alloc2_test();
 	idr_null_test();
 	idr_nowait_test();
 	idr_get_next_test(0);
@@ -1317,7 +1317,7 @@ static thread_state thread_state_get(pid_t tid)
 	p = strstr(tmp, header);
 	if (p) {
 		/* For example, "State:\tD (disk sleep)" */
-		c = *(p + sizeof(header) - 1);
+		c = *(p + strlen(header));
 		return c == 'D' ?
 			THR_STATE_UNINTERRUPTIBLE : THR_STATE_UNKNOWN;
 	}
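The selftest fix above swaps sizeof() for strlen() when skipping past the "State:" header read from /proc. The hunk does not show how header is declared, but the general pitfall is that sizeof() only tracks the string length for a genuine char-array initializer; applied to a char pointer it yields the pointer size and indexes the wrong byte. A small standalone sketch of the distinction:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *header = "State:\t";	/* pointer: sizeof(header) == sizeof(char *) */
		char line[] = "State:\tD (disk sleep)";
		char *p = strstr(line, header);

		/* strlen() skips exactly the header text, so this prints 'D' */
		printf("%c\n", *(p + strlen(header)));
		return 0;
	}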