
mm/huge_memory: optimize and simplify folio stat update after split

The loop executed after a successful folio split currently combines two
responsibilities:

  * updating the statistics for the new folios;
  * determining the folio for the next split iteration.

This commit refactors the logic to calculate and update the folio
statistics directly, eliminating the need to iterate over the new folios.

We can do this because all necessary information is already available:

  * All resulting new folios have the same order, which is @split_order.
  * The exact number of new folios can be calculated directly using
    @old_order and @split_order.
  * The folio for the subsequent split is simply the one containing
    @split_at.

With this information, the stat update can be performed more cleanly and
efficiently, without any looping logic.
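
To make the arithmetic concrete, below is a minimal userspace C sketch
(illustrative only, not kernel code; the order values are arbitrary
examples) showing that the single bulk update is equivalent to the
per-folio increments performed by the removed loop:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical example orders; any old_order >= split_order works. */
		int old_order = 9;	/* order of the folio being split */
		int split_order = 4;	/* order of the resulting folios */

		/* All new folios share @split_order, so their count is a power of two. */
		unsigned long nr_new_folios = 1UL << (old_order - split_order);

		/* The removed loop effectively added 1 to the stat per new folio... */
		unsigned long loop_total = 0;
		for (unsigned long i = 0; i < nr_new_folios; i++)
			loop_total += 1;

		/* ...which equals the single bulk update the patch performs. */
		assert(loop_total == nr_new_folios);
		printf("order %d -> order %d: %lu new folios\n",
		       old_order, split_order, nr_new_folios);
		return 0;
	}

This equivalence is what lets the patch replace the per-folio
mod_mthp_stat() calls with a single call per split step.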

Link: https://lkml.kernel.org/r/20251021212142.25766-4-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: wang lian <lianux.mm@gmail.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <baohua@kernel.org>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Mariano Pache <npache@redhat.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3404,7 +3404,6 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 	const bool is_anon = folio_test_anon(folio);
 	int order = folio_order(folio);
 	int start_order = uniform_split ? new_order : order - 1;
-	struct folio *next;
 	int split_order;
 
 	/*
@@ -3414,9 +3413,8 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 	for (split_order = start_order;
 	     split_order >= new_order;
 	     split_order--) {
-		struct folio *end_folio = folio_next(folio);
 		int old_order = folio_order(folio);
-		struct folio *new_folio;
+		int nr_new_folios = 1UL << (old_order - split_order);
 
 		/* order-1 anonymous folio is not supported */
 		if (is_anon && split_order == 1)
@@ -3445,19 +3443,11 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
 		pgalloc_tag_split(folio, old_order, split_order);
 		__split_folio_to_order(folio, old_order, split_order);
 
-		if (is_anon)
+		if (is_anon) {
 			mod_mthp_stat(old_order, MTHP_STAT_NR_ANON, -1);
-		/*
-		 * Iterate through after-split folios and update folio stats.
-		 */
-		for (new_folio = folio; new_folio != end_folio; new_folio = next) {
-			next = folio_next(new_folio);
-			if (new_folio == page_folio(split_at))
-				folio = new_folio;
-			if (is_anon)
-				mod_mthp_stat(folio_order(new_folio),
-					      MTHP_STAT_NR_ANON, 1);
-		}
+			mod_mthp_stat(split_order, MTHP_STAT_NR_ANON, nr_new_folios);
+		}
+		folio = page_folio(split_at);
 	}
 
 	return 0;