iomap: use find_next_bit() for dirty bitmap scanning

Use find_next_bit()/find_next_zero_bit() for iomap dirty bitmap
scanning. These helpers use __ffs() internally and are more efficient
at finding the next dirty or clean bit than iterating over the bitmap
range and testing every bit.

Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Link: https://patch.msgid.link/20251111193658.3495942-9-joannelkoong@gmail.com
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Suggested-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Christian Brauner <brauner@kernel.org>

Authored by Joanne Koong on 2025-11-11 11:36:57 -08:00; committed by Christian Brauner
parent a298febc47
commit fed9c62d28
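
As a rough illustration of the win described in the commit message, the following standalone C sketch contrasts a bit-at-a-time scan with a word-at-a-time scan in the spirit of find_next_bit(). The helper names are made up and __builtin_ctzl() merely stands in for __ffs(), so this is a model of the idea, not the kernel code.

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define MAP_BITS	256

/* Bit-at-a-time scan, roughly what looping over test_bit() amounts to. */
static unsigned long next_set_bit_slow(const unsigned long *map,
				       unsigned long size, unsigned long start)
{
	for (; start < size; start++)
		if (map[start / BITS_PER_LONG] & (1UL << (start % BITS_PER_LONG)))
			return start;
	return size;
}

/* Word-at-a-time scan in the spirit of find_next_bit(). */
static unsigned long next_set_bit_fast(const unsigned long *map,
				       unsigned long size, unsigned long start)
{
	while (start < size) {
		unsigned long word = map[start / BITS_PER_LONG] >>
				     (start % BITS_PER_LONG);

		if (word)	/* __builtin_ctzl() stands in for __ffs() */
			return start + __builtin_ctzl(word);
		/* whole word is zero: skip straight to the next word boundary */
		start = (start / BITS_PER_LONG + 1) * BITS_PER_LONG;
	}
	return size;
}

int main(void)
{
	unsigned long map[MAP_BITS / BITS_PER_LONG] = { 0 };

	map[200 / BITS_PER_LONG] |= 1UL << (200 % BITS_PER_LONG);	/* set bit 200 */
	printf("slow: %lu, fast: %lu\n",
	       next_set_bit_slow(map, MAP_BITS, 3),
	       next_set_bit_fast(map, MAP_BITS, 3));
	return 0;
}

Both scans find the same bit (200 here), but the word-based one only inspects four words instead of testing roughly two hundred individual bit positions.
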
fs/iomap/buffered-io.c

@@ -76,13 +76,34 @@ static void iomap_set_range_uptodate(struct folio *folio, size_t off,
 	folio_mark_uptodate(folio);
 }
 
-static inline bool ifs_block_is_dirty(struct folio *folio,
-		struct iomap_folio_state *ifs, int block)
+/*
+ * Find the next dirty block in the folio. end_blk is inclusive.
+ * If no dirty block is found, this will return end_blk + 1.
+ */
+static unsigned ifs_next_dirty_block(struct folio *folio,
+		unsigned start_blk, unsigned end_blk)
 {
+	struct iomap_folio_state *ifs = folio->private;
 	struct inode *inode = folio->mapping->host;
-	unsigned int blks_per_folio = i_blocks_per_folio(inode, folio);
+	unsigned int blks = i_blocks_per_folio(inode, folio);
 
-	return test_bit(block + blks_per_folio, ifs->state);
+	return find_next_bit(ifs->state, blks + end_blk + 1,
+			blks + start_blk) - blks;
+}
+
+/*
+ * Find the next clean block in the folio. end_blk is inclusive.
+ * If no clean block is found, this will return end_blk + 1.
+ */
+static unsigned ifs_next_clean_block(struct folio *folio,
+		unsigned start_blk, unsigned end_blk)
+{
+	struct iomap_folio_state *ifs = folio->private;
+	struct inode *inode = folio->mapping->host;
+	unsigned int blks = i_blocks_per_folio(inode, folio);
+
+	return find_next_zero_bit(ifs->state, blks + end_blk + 1,
+			blks + start_blk) - blks;
 }
 
 static unsigned ifs_find_dirty_range(struct folio *folio,
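
The blks offsets in the two new helpers come from the layout of ifs->state that is visible in the removed code: the first i_blocks_per_folio() bits of the bitmap track uptodate state and the following i_blocks_per_folio() bits track dirty state, which is why ifs_block_is_dirty() used to test bit block + blks_per_folio. Below is a small userspace model of that index arithmetic; the byte-based toy helpers stand in for the kernel bitmap API and are not the real implementation.

#include <assert.h>
#include <stdbool.h>

/* Toy bitmap helpers; byte-based for simplicity, unlike the kernel's. */
static bool toy_test_bit(const unsigned char *map, unsigned bit)
{
	return map[bit / 8] & (1u << (bit % 8));
}

static unsigned toy_next_bit(const unsigned char *map, unsigned size,
			     unsigned start)
{
	while (start < size && !toy_test_bit(map, start))
		start++;
	return start;			/* "size" when no bit is found */
}

/* Mirrors ifs_next_dirty_block(): end_blk inclusive, end_blk + 1 if none. */
static unsigned toy_next_dirty_block(const unsigned char *state, unsigned blks,
				     unsigned start_blk, unsigned end_blk)
{
	return toy_next_bit(state, blks + end_blk + 1, blks + start_blk) - blks;
}

int main(void)
{
	unsigned char state[8] = { 0 };	/* 2 * 16 state bits for 16 blocks */
	unsigned blks = 16;

	/* Dirty bits live at offset "blks": mark block 9 dirty. */
	state[(blks + 9) / 8] |= 1u << ((blks + 9) % 8);

	assert(toy_next_dirty_block(state, blks, 0, blks - 1) == 9);
	assert(toy_next_dirty_block(state, blks, 10, blks - 1) == blks);
	return 0;
}
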
@@ -93,18 +114,17 @@ static unsigned ifs_find_dirty_range(struct folio *folio,
 		offset_in_folio(folio, *range_start) >> inode->i_blkbits;
 	unsigned end_blk = min_not_zero(
 		offset_in_folio(folio, range_end) >> inode->i_blkbits,
-		i_blocks_per_folio(inode, folio));
-	unsigned nblks = 1;
+		i_blocks_per_folio(inode, folio)) - 1;
+	unsigned nblks;
 
-	while (!ifs_block_is_dirty(folio, ifs, start_blk))
-		if (++start_blk == end_blk)
-			return 0;
-
-	while (start_blk + nblks < end_blk) {
-		if (!ifs_block_is_dirty(folio, ifs, start_blk + nblks))
-			break;
-		nblks++;
-	}
+	start_blk = ifs_next_dirty_block(folio, start_blk, end_blk);
+	if (start_blk > end_blk)
+		return 0;
+	if (start_blk == end_blk)
+		nblks = 1;
+	else
+		nblks = ifs_next_clean_block(folio, start_blk + 1, end_blk) -
+			start_blk;
 
 	*range_start = folio_pos(folio) + (start_blk << inode->i_blkbits);
 	return nblks << inode->i_blkbits;
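
With the helpers in place, the rewritten ifs_find_dirty_range() above locates the start of a dirty run with ifs_next_dirty_block() and its end with ifs_next_clean_block(). The following toy model keeps the same control flow but replaces the bitmap helpers with loops over a boolean array and reports runs in blocks rather than bytes.

#include <stdbool.h>
#include <stdio.h>

#define NBLKS 8

/* Plain-loop stand-in for find_next_bit()/find_next_zero_bit(). */
static unsigned next_match(const bool *dirty, unsigned end_blk,
			   unsigned start_blk, bool want)
{
	while (start_blk <= end_blk && dirty[start_blk] != want)
		start_blk++;
	return start_blk;		/* end_blk + 1 if no match */
}

/* Returns the run length starting at *start_blk, or 0 if nothing is dirty. */
static unsigned find_dirty_range(const bool *dirty, unsigned *start_blk,
				 unsigned end_blk)
{
	unsigned nblks;

	*start_blk = next_match(dirty, end_blk, *start_blk, true);
	if (*start_blk > end_blk)
		return 0;
	if (*start_blk == end_blk)
		nblks = 1;
	else
		nblks = next_match(dirty, end_blk, *start_blk + 1, false) -
			*start_blk;
	return nblks;
}

int main(void)
{
	bool dirty[NBLKS] = { false, true, true, false, false, true, false, true };
	unsigned start = 0, nblks;

	while ((nblks = find_dirty_range(dirty, &start, NBLKS - 1)) != 0) {
		printf("dirty run: blocks [%u, %u)\n", start, start + nblks);
		start += nblks;
	}
	return 0;
}

For the sample bitmap this prints the three dirty runs [1, 3), [5, 6) and [7, 8).
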
@@ -1166,7 +1186,7 @@ static void iomap_write_delalloc_ifs_punch(struct inode *inode,
 		struct folio *folio, loff_t start_byte, loff_t end_byte,
 		struct iomap *iomap, iomap_punch_t punch)
 {
-	unsigned int first_blk, last_blk, i;
+	unsigned int first_blk, last_blk;
 	loff_t last_byte;
 	u8 blkbits = inode->i_blkbits;
 	struct iomap_folio_state *ifs;
@@ -1185,10 +1205,11 @@ static void iomap_write_delalloc_ifs_punch(struct inode *inode,
 			folio_pos(folio) + folio_size(folio) - 1);
 	first_blk = offset_in_folio(folio, start_byte) >> blkbits;
 	last_blk = offset_in_folio(folio, last_byte) >> blkbits;
-	for (i = first_blk; i <= last_blk; i++) {
-		if (!ifs_block_is_dirty(folio, ifs, i))
-			punch(inode, folio_pos(folio) + (i << blkbits),
-				1 << blkbits, iomap);
+	while ((first_blk = ifs_next_clean_block(folio, first_blk, last_blk))
+			<= last_blk) {
+		punch(inode, folio_pos(folio) + (first_blk << blkbits),
+			1 << blkbits, iomap);
+		first_blk++;
 	}
 }
 
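
Similarly, the last hunk makes iomap_write_delalloc_ifs_punch() jump straight from one clean block to the next instead of testing every block in the range. Below is a toy version of the new loop shape; punch() is a stub standing in for the iomap_punch_t callback and the block-to-byte conversion is left out.

#include <stdbool.h>
#include <stdio.h>

#define NBLKS 8

/* Plain-loop stand-in for ifs_next_clean_block(). */
static unsigned next_clean_block(const bool *dirty, unsigned start_blk,
				 unsigned last_blk)
{
	while (start_blk <= last_blk && dirty[start_blk])
		start_blk++;
	return start_blk;		/* last_blk + 1 if every block is dirty */
}

static void punch(unsigned blk)
{
	printf("punch block %u\n", blk);
}

int main(void)
{
	bool dirty[NBLKS] = { true, true, false, true, false, false, true, true };
	unsigned first_blk = 0, last_blk = NBLKS - 1;

	/* Same shape as the new loop in iomap_write_delalloc_ifs_punch(). */
	while ((first_blk = next_clean_block(dirty, first_blk, last_blk)) <= last_blk) {
		punch(first_blk);
		first_blk++;
	}
	return 0;
}

For the sample bitmap it punches blocks 2, 4 and 5, the clean ones, and never visits the dirty blocks in between.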