mm: swap: rename __swap_[entry/entries]_free[_locked] to swap_[entry/entries]_put[_locked]
Patch series "Minor cleanups and improvements to swap freeing code", v4.
This series contains some cleanups and improvements made while learning
the swapfile code. Here is a summary of the changes:
1. Function naming improvements.
- Use "put" instead of "free" to name functions which only do the
actual free when the count drops to zero.
- Use "entry" to name functions which free only one swap slot. Use
"entries" to name functions which may free multiple swap slots within
one cluster. Use the "_nr" suffix to name functions which may free
multiple swap slots spanning multiple clusters.
2. Eliminate the need to set a swap slot to the intermediate
SWAP_HAS_CACHE value before doing the actual free, by using
swap_entry_range_free().
3. Add helpers swap_entries_put_map() and swap_entries_put_cache() as
general-purpose routines to free swap entries within a single cluster:
they try a batch-remove first and fall back to putting each entry
individually, with the cluster lock acquired/released only once (see
the sketch after this list). By using these helpers, we can remove
repeated code, leverage batch-remove in more cases, and avoid
acquiring/releasing the cluster lock for each individual swap entry.
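To make the pattern in item 3 concrete, here is a minimal,
self-contained sketch (toy types and hypothetical names, not the actual
kernel helpers): try to drop the whole batch at once, and only on
failure fall back to per-entry puts, with the lock taken once around
the whole operation.

#include <stdbool.h>
#include <string.h>

/* Toy stand-in for a swap cluster: an array of per-slot usage counts. */
struct toy_cluster {
	unsigned char map[512];
};

/* Batch removal succeeds only if every slot holds exactly one
 * reference; then the whole range can be freed in one step. */
static bool toy_batch_remove(struct toy_cluster *ci, int off, int nr)
{
	for (int i = 0; i < nr; i++)
		if (ci->map[off + i] != 1)
			return false;
	memset(ci->map + off, 0, nr);
	return true;
}

static void toy_entries_put(struct toy_cluster *ci, int off, int nr)
{
	/* lock_cluster(ci); -- acquired once for the whole range */
	if (!toy_batch_remove(ci, off, nr)) {
		/* Fallback: put each entry individually while still
		 * holding the lock, rather than relocking per entry. */
		for (int i = 0; i < nr; i++)
			if (ci->map[off + i])
				ci->map[off + i]--;
	}
	/* unlock_cluster(ci); */
}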
This patch (of 8):
In __swap_entry_free[_locked] and __swap_entries_free, we decrease the
count first and only free the swap entry if the count drops to zero.
This behavior is more akin to a put() operation than to a free()
operation. Therefore, rename these functions with "put" instead of
"free". Additionally, add a "_nr" suffix to swap_entries_put to
indicate that the input range may span swap clusters.
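As a rough model of the put semantics described above (simplified
userspace C with a toy slot type; the real swap_entry_put() in the diff
below operates on si->swap_map under the cluster lock):

#include <assert.h>

/* A put() decrements the usage count and performs the actual free only
 * when the count reaches zero; a free() would release unconditionally. */
struct toy_slot {
	unsigned char count;
};

static unsigned char toy_slot_put(struct toy_slot *s)
{
	assert(s->count > 0);
	if (--s->count == 0) {
		/* The actual free happens only here, e.g. returning the
		 * slot to the cluster's free list. */
	}
	return s->count; /* remaining usage, like swap_entry_put() */
}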
Link: https://lkml.kernel.org/r/20250325162528.68385-1-shikemeng@huaweicloud.com
Link: https://lkml.kernel.org/r/20250325162528.68385-2-shikemeng@huaweicloud.com
Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com>
Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>
Reviewed-by: Baoquan He <bhe@redhat.com>
Cc: Kairui Song <kasong@tencent.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 9c1c38bcdc (parent ac26920d58)
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1355,9 +1355,9 @@ out:
 	return NULL;
 }
 
-static unsigned char __swap_entry_free_locked(struct swap_info_struct *si,
-					      unsigned long offset,
-					      unsigned char usage)
+static unsigned char swap_entry_put_locked(struct swap_info_struct *si,
+					   unsigned long offset,
+					   unsigned char usage)
 {
 	unsigned char count;
 	unsigned char has_cache;
@@ -1461,15 +1461,15 @@ put_out:
 	return NULL;
 }
 
-static unsigned char __swap_entry_free(struct swap_info_struct *si,
-				       swp_entry_t entry)
+static unsigned char swap_entry_put(struct swap_info_struct *si,
+				    swp_entry_t entry)
 {
 	struct swap_cluster_info *ci;
 	unsigned long offset = swp_offset(entry);
 	unsigned char usage;
 
 	ci = lock_cluster(si, offset);
-	usage = __swap_entry_free_locked(si, offset, 1);
+	usage = swap_entry_put_locked(si, offset, 1);
 	if (!usage)
 		swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
 	unlock_cluster(ci);
@@ -1477,8 +1477,8 @@ static unsigned char __swap_entry_free(struct swap_info_struct *si,
 	return usage;
 }
 
-static bool __swap_entries_free(struct swap_info_struct *si,
-		swp_entry_t entry, int nr)
+static bool swap_entries_put_nr(struct swap_info_struct *si,
+		swp_entry_t entry, int nr)
 {
 	unsigned long offset = swp_offset(entry);
 	unsigned int type = swp_type(entry);
@@ -1509,7 +1509,7 @@ static bool __swap_entries_free(struct swap_info_struct *si,
 fallback:
 	for (i = 0; i < nr; i++) {
 		if (data_race(si->swap_map[offset + i])) {
-			count = __swap_entry_free(si, swp_entry(type, offset + i));
+			count = swap_entry_put(si, swp_entry(type, offset + i));
 			if (count == SWAP_HAS_CACHE)
 				has_cache = true;
 		} else {
@@ -1560,7 +1560,7 @@ static void cluster_swap_free_nr(struct swap_info_struct *si,
 
 	ci = lock_cluster(si, offset);
 	do {
-		if (!__swap_entry_free_locked(si, offset, usage))
+		if (!swap_entry_put_locked(si, offset, usage))
 			swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
 	} while (++offset < end);
 	unlock_cluster(ci);
@@ -1607,7 +1607,7 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry)
 		swap_entry_range_free(si, ci, entry, size);
 	else {
 		for (int i = 0; i < size; i++, entry.val++) {
-			if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE))
+			if (!swap_entry_put_locked(si, offset + i, SWAP_HAS_CACHE))
 				swap_entry_range_free(si, ci, entry, 1);
 		}
 	}
@@ -1806,7 +1806,7 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
 	/*
 	 * First free all entries in the range.
 	 */
-	any_only_cache = __swap_entries_free(si, entry, nr);
+	any_only_cache = swap_entries_put_nr(si, entry, nr);
 
 	/*
 	 * Short-circuit the below loop if none of the entries had their
@@ -1819,7 +1819,7 @@ void free_swap_and_cache_nr(swp_entry_t entry, int nr)
 	 * Now go back over the range trying to reclaim the swap cache. This is
 	 * more efficient for large folios because we will only try to reclaim
 	 * the swap once per folio in the common case. If we do
-	 * __swap_entry_free() and __try_to_reclaim_swap() in the same loop, the
+	 * swap_entry_put() and __try_to_reclaim_swap() in the same loop, the
 	 * latter will get a reference and lock the folio for every individual
 	 * page but will only succeed once the swap slot for every subpage is
 	 * zero.
@@ -3789,7 +3789,7 @@ outer:
  * into, carry if so, or else fail until a new continuation page is allocated;
  * when the original swap_map count is decremented from 0 with continuation,
  * borrow from the continuation and report whether it still holds more.
- * Called while __swap_duplicate() or caller of __swap_entry_free_locked()
+ * Called while __swap_duplicate() or caller of swap_entry_put_locked()
  * holds cluster lock.
  */
 static bool swap_count_continued(struct swap_info_struct *si,