Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-01-11 17:10:13 +00:00)
mm: split out a writeout helper from pageout
Patch series "stop passing a writeback_control to swap/shmem writeout", v3.

This series was intended to remove the last remaining users of AOP_WRITEPAGE_ACTIVATE after my other pending patches removed the rest, but it spectacularly failed at that. Instead, it nicely improves the code and removes two pointers from struct writeback_control.

This patch (of 6):

Move the code that writes back swap / shmem folios into a self-contained helper, to prepare for refactoring it.

Link: https://lkml.kernel.org/r/20250610054959.2057526-1-hch@lst.de
Link: https://lkml.kernel.org/r/20250610054959.2057526-2-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
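As a rough illustration of the shape of this split, the userspace-only sketch below mirrors the control flow of the diff that follows: pageout() keeps the eligibility checks and hands the actual I/O setup to a self-contained writeout() helper that owns the writeback control and the shmem-vs-swap dispatch. The struct fields and the shmem_write()/swap_write() stubs are invented stand-ins, not the kernel's real folio or writeback_control APIs.

/*
 * Simplified sketch of the refactor: the caller decides *whether* to
 * write a folio back, the helper decides *how*. Toy types only.
 */
#include <stdbool.h>
#include <stdio.h>

enum pageout_result { PAGE_KEEP, PAGE_ACTIVATE, PAGE_SUCCESS, PAGE_CLEAN };

struct folio { bool dirty, anon, shmem_backed, large; };

struct writeback_control {	/* reduced to the fields the helper fills in */
	long nr_to_write;
	bool for_reclaim;
};

static int shmem_write(struct folio *f, struct writeback_control *wbc)
{ (void)f; (void)wbc; return 0; }	/* pretend the shmem write succeeded */

static int swap_write(struct folio *f, struct writeback_control *wbc)
{ (void)f; (void)wbc; return 0; }	/* pretend the swap write succeeded */

/* the split-out helper: owns the writeback_control and the dispatch */
static enum pageout_result writeout(struct folio *folio)
{
	struct writeback_control wbc = {
		.nr_to_write = 32,
		.for_reclaim = true,
	};
	int res = folio->shmem_backed ? shmem_write(folio, &wbc)
				      : swap_write(folio, &wbc);

	return res < 0 ? PAGE_ACTIVATE : PAGE_SUCCESS;
}

/* the caller only keeps the eligibility checks */
static enum pageout_result pageout(struct folio *folio)
{
	if (!folio->shmem_backed && !folio->anon)
		return PAGE_ACTIVATE;
	if (!folio->dirty)
		return PAGE_CLEAN;
	folio->dirty = false;	/* stands in for folio_clear_dirty_for_io() */
	return writeout(folio);
}

int main(void)
{
	struct folio f = { .dirty = true, .anon = true };

	printf("pageout() -> %d (2 == PAGE_SUCCESS)\n", pageout(&f));
	return 0;
}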
This commit is contained in:
parent 453742ba5b
commit 86c4a94643

mm/vmscan.c | 94
@@ -652,14 +652,55 @@ typedef enum {
 	PAGE_CLEAN,
 } pageout_t;
 
+static pageout_t writeout(struct folio *folio, struct address_space *mapping,
+		struct swap_iocb **plug, struct list_head *folio_list)
+{
+	struct writeback_control wbc = {
+		.sync_mode = WB_SYNC_NONE,
+		.nr_to_write = SWAP_CLUSTER_MAX,
+		.range_start = 0,
+		.range_end = LLONG_MAX,
+		.for_reclaim = 1,
+		.swap_plug = plug,
+	};
+	int res;
+
+	folio_set_reclaim(folio);
+
+	/*
+	 * The large shmem folio can be split if CONFIG_THP_SWAP is not enabled
+	 * or we failed to allocate contiguous swap entries.
+	 */
+	if (shmem_mapping(mapping)) {
+		if (folio_test_large(folio))
+			wbc.list = folio_list;
+		res = shmem_writeout(folio, &wbc);
+	} else {
+		res = swap_writeout(folio, &wbc);
+	}
+
+	if (res < 0)
+		handle_write_error(mapping, folio, res);
+	if (res == AOP_WRITEPAGE_ACTIVATE) {
+		folio_clear_reclaim(folio);
+		return PAGE_ACTIVATE;
+	}
+
+	/* synchronous write? */
+	if (!folio_test_writeback(folio))
+		folio_clear_reclaim(folio);
+
+	trace_mm_vmscan_write_folio(folio);
+	node_stat_add_folio(folio, NR_VMSCAN_WRITE);
+	return PAGE_SUCCESS;
+}
+
 /*
  * pageout is called by shrink_folio_list() for each dirty folio.
  */
 static pageout_t pageout(struct folio *folio, struct address_space *mapping,
 		struct swap_iocb **plug, struct list_head *folio_list)
 {
-	int (*writeout)(struct folio *, struct writeback_control *);
-
 	/*
 	 * We no longer attempt to writeback filesystem folios here, other
 	 * than tmpfs/shmem. That's taken care of in page-writeback.
@@ -690,51 +731,12 @@ static pageout_t pageout(struct folio *folio, struct address_space *mapping,
 		}
 		return PAGE_KEEP;
 	}
-	if (shmem_mapping(mapping))
-		writeout = shmem_writeout;
-	else if (folio_test_anon(folio))
-		writeout = swap_writeout;
-	else
+	if (!shmem_mapping(mapping) && !folio_test_anon(folio))
 		return PAGE_ACTIVATE;
 
-	if (folio_clear_dirty_for_io(folio)) {
-		int res;
-		struct writeback_control wbc = {
-			.sync_mode = WB_SYNC_NONE,
-			.nr_to_write = SWAP_CLUSTER_MAX,
-			.range_start = 0,
-			.range_end = LLONG_MAX,
-			.for_reclaim = 1,
-			.swap_plug = plug,
-		};
-
-		/*
-		 * The large shmem folio can be split if CONFIG_THP_SWAP is
-		 * not enabled or contiguous swap entries are failed to
-		 * allocate.
-		 */
-		if (shmem_mapping(mapping) && folio_test_large(folio))
-			wbc.list = folio_list;
-
-		folio_set_reclaim(folio);
-		res = writeout(folio, &wbc);
-		if (res < 0)
-			handle_write_error(mapping, folio, res);
-		if (res == AOP_WRITEPAGE_ACTIVATE) {
-			folio_clear_reclaim(folio);
-			return PAGE_ACTIVATE;
-		}
-
-		if (!folio_test_writeback(folio)) {
-			/* synchronous write? */
-			folio_clear_reclaim(folio);
-		}
-		trace_mm_vmscan_write_folio(folio);
-		node_stat_add_folio(folio, NR_VMSCAN_WRITE);
-		return PAGE_SUCCESS;
-	}
-
-	return PAGE_CLEAN;
+	if (!folio_clear_dirty_for_io(folio))
+		return PAGE_CLEAN;
+	return writeout(folio, mapping, plug, folio_list);
 }
 
 /*