
mm, swap: remove fragment clusters counter

This counter was used to compute the number of iterations when the swap
allocator wanted to scan the whole fragment list.  The allocator now scans
only one fragment cluster at a time, so nothing reads the counter anymore.
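
To illustrate the change, here is a standalone userspace sketch (not the
kernel code; every name in it is invented for this example).  The old path
bounded a whole-list walk by the counter; the new path detaches and scans a
single cluster per attempt, leaving the counter without readers:

/*
 * Toy model of the two scan strategies.  All names are hypothetical;
 * the real logic lives in mm/swapfile.c.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct cluster {
	struct cluster *next;
	bool has_free_slot;
};

/* Old approach: bound a whole-list walk by the fragment counter. */
static bool scan_frag_list_old(struct cluster *head, atomic_long *nr_frag)
{
	long iters = atomic_load(nr_frag);

	for (struct cluster *ci = head; ci && iters-- > 0; ci = ci->next)
		if (ci->has_free_slot)
			return true;
	return false;
}

/* New approach: detach and scan exactly one cluster per attempt. */
static bool scan_frag_list_new(struct cluster **head)
{
	struct cluster *ci = *head;

	if (!ci)
		return false;
	*head = ci->next;	/* "isolate" the cluster from the list */
	return ci->has_free_slot;
}

int main(void)
{
	struct cluster c1 = { .next = NULL, .has_free_slot = true };
	struct cluster c0 = { .next = &c1, .has_free_slot = false };
	struct cluster *head = &c0;
	atomic_long nr_frag = 2;

	printf("old: %d\n", scan_frag_list_old(head, &nr_frag));
	printf("new: %d\n", scan_frag_list_new(&head));
	return 0;
}

Since only the one-cluster-at-a-time path remains, both the field and all
its updates can be dropped.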

Remove it as a cleanup; the performance change is marginal:

Kernel build benchmark: make -j96 with defconfig, on top of tmpfs, using
10G ZRAM as swap, with a 2G cgroup memory limit and 64kB mTHP enabled:

Before:  sys time: 6278.45s
After:   sys time: 6176.34s

Same setup with 8G ZRAM:

Before:  sys time: 5572.85s
After:   sys time: 5531.49s

Link: https://lkml.kernel.org/r/20250806161748.76651-3-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
Acked-by: Chris Li <chrisl@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Barry Song <baohua@kernel.org>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Kemeng Shi <shikemeng@huaweicloud.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Commit 913fff3145 (parent b25786b4a9)
Author: Kairui Song, 2025-08-07 00:17:47 +08:00; committed by Andrew Morton
2 changed files, 0 additions, 8 deletions

include/linux/swap.h

@@ -310,7 +310,6 @@ struct swap_info_struct {
 					/* list of cluster that contains at least one free slot */
 	struct list_head frag_clusters[SWAP_NR_ORDERS];
 					/* list of cluster that are fragmented or contented */
-	atomic_long_t frag_cluster_nr[SWAP_NR_ORDERS];
 	unsigned int pages;		/* total of usable pages of swap */
 	atomic_long_t inuse_pages;	/* number of those currently in use */
 	struct swap_sequential_cluster *global_cluster; /* Use one global cluster for rotating device */

mm/swapfile.c

@@ -470,11 +470,6 @@ static void move_cluster(struct swap_info_struct *si,
 	else
 		list_move_tail(&ci->list, list);
 	spin_unlock(&si->lock);
-
-	if (ci->flags == CLUSTER_FLAG_FRAG)
-		atomic_long_dec(&si->frag_cluster_nr[ci->order]);
-	else if (new_flags == CLUSTER_FLAG_FRAG)
-		atomic_long_inc(&si->frag_cluster_nr[ci->order]);
 	ci->flags = new_flags;
 }
@@ -965,7 +960,6 @@ new_cluster:
 	 * allocation, but reclaim may drop si->lock and race with another user.
 	 */
 	while ((ci = isolate_lock_cluster(si, &si->frag_clusters[o]))) {
-		atomic_long_dec(&si->frag_cluster_nr[o]);
 		found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
 					       0, usage);
 		if (found)
@@ -3217,7 +3211,6 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
 	for (i = 0; i < SWAP_NR_ORDERS; i++) {
 		INIT_LIST_HEAD(&si->nonfull_clusters[i]);
 		INIT_LIST_HEAD(&si->frag_clusters[i]);
-		atomic_long_set(&si->frag_cluster_nr[i], 0);
 	}
 
 	/*