mm: swap: use swap_entries_free() to free swap entry in swap_entry_put_locked()

In swap_entry_put_locked(), we set the slot to SWAP_HAS_CACHE before
using swap_entries_free() to do the actual swap entry freeing.  This
introduces an unnecessary intermediate state.  By calling
swap_entries_free() directly in swap_entry_put_locked(), we can
eliminate the need to set the slot to SWAP_HAS_CACHE.  This change
makes the behavior of swap_entry_put_locked() more consistent with
other put() operations, which do the actual free work after putting
the last reference.

Link: https://lkml.kernel.org/r/20250325162528.68385-4-shikemeng@huaweicloud.com
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Reviewed-by: Kairui Song <kasong@tencent.com>
Reviewed-by: Baoquan He <bhe@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Kemeng Shi 2025-03-26 00:25:23 +08:00 committed by Andrew Morton
parent 64944ef6a1
commit 835b868878


@@ -1356,9 +1356,11 @@ out:
 }
 
 static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
-					   unsigned long offset,
+					   struct swap_cluster_info *ci,
+					   swp_entry_t entry,
 					   unsigned char usage)
 {
+	unsigned long offset = swp_offset(entry);
 	unsigned char count;
 	unsigned char has_cache;
 
@@ -1390,7 +1392,7 @@ static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
 	if (usage)
 		WRITE_ONCE(si->swap_map[offset], usage);
 	else
-		WRITE_ONCE(si->swap_map[offset], SWAP_HAS_CACHE);
+		swap_entries_free(si, ci, entry, 1);
 
 	return usage;
 }
@@ -1469,9 +1471,7 @@ static unsigned char swap_entry_put(struct swap_info_struct *si,
 	unsigned char usage;
 
 	ci = lock_cluster(si, offset);
-	usage = swap_entry_put_locked(si, offset, 1);
-	if (!usage)
-		swap_entries_free(si, ci, swp_entry(si->type, offset), 1);
+	usage = swap_entry_put_locked(si, ci, entry, 1);
 	unlock_cluster(ci);
 
 	return usage;
@@ -1570,8 +1570,8 @@ static void cluster_swap_free_nr(struct swap_info_struct *si,
 
 	ci = lock_cluster(si, offset);
 	do {
-		if (!swap_entry_put_locked(si, offset, usage))
-			swap_entries_free(si, ci, swp_entry(si->type, offset), 1);
+		swap_entry_put_locked(si, ci, swp_entry(si->type, offset),
+				      usage);
 	} while (++offset < end);
 	unlock_cluster(ci);
 }
@@ -1616,10 +1616,8 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
 	if (swap_only_has_cache(si, offset, size))
 		swap_entries_free(si, ci, entry, size);
 	else {
-		for (int i = 0; i < size; i++, entry.val++) {
-			if (!swap_entry_put_locked(si, offset + i, SWAP_HAS_CACHE))
-				swap_entries_free(si, ci, entry, 1);
-		}
+		for (int i = 0; i < size; i++, entry.val++)
+			swap_entry_put_locked(si, ci, entry, SWAP_HAS_CACHE);
 	}
 	unlock_cluster(ci);
 }
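
For illustration, the pattern the patch adopts (dropping the last reference
does the actual free, with no intermediate state) can be modeled in a few
lines of userspace C.  Everything below is a simplified sketch: swap_map,
entry_put_locked() and entries_free() are hypothetical stand-ins, not the
kernel's actual types or functions.

#include <stdio.h>

/* Simplified stand-in for the swap map: one reference count per slot. */
static unsigned char swap_map[8] = { [3] = 3 };	/* slot 3 starts with 3 refs */

/* Stand-in for swap_entries_free(): releases the slot itself. */
static void entries_free(unsigned long offset)
{
	swap_map[offset] = 0;
	printf("slot %lu freed\n", offset);
}

/*
 * Stand-in for the patched swap_entry_put_locked(): when the last
 * reference is dropped, free directly instead of writing an
 * intermediate SWAP_HAS_CACHE value and leaving the free to callers.
 */
static unsigned char entry_put_locked(unsigned long offset)
{
	unsigned char usage = swap_map[offset] - 1;

	if (usage)
		swap_map[offset] = usage;
	else
		entries_free(offset);	/* old code wrote SWAP_HAS_CACHE here */

	return usage;
}

int main(void)
{
	/*
	 * Callers simply put references; the "if (!usage) free"
	 * boilerplate seen in the old hunks above is gone.
	 */
	while (entry_put_locked(3))
		;
	return 0;
}

With this shape, every path that puts the last reference ends in the actual
free, which is the consistency with other put() operations that the commit
message describes.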