btrfs: use rb_entry_safe() where possible to simplify code
Simplify conditionally reading an rb_entry(): there's the rb_entry_safe() helper that checks the node pointer for NULL, so we don't have to write it explicitly.

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
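For reference, rb_entry_safe() is a thin wrapper around rb_entry() that tolerates a NULL node pointer. Its definition in include/linux/rbtree.h is roughly the following (quoted only as context for the hunks below, not part of this patch):

#define rb_entry(ptr, type, member) container_of(ptr, type, member)

#define rb_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? rb_entry(____ptr, type, member) : NULL; \
	})

With that helper, the open-coded "if (node) entry = rb_entry(node, ...); else entry = NULL;" pattern collapses into a single call, which is what each hunk below does.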
commit 6aa79c4f25
parent c4669e4a8b
@@ -191,10 +191,7 @@ static struct inode_defrag *btrfs_pick_defrag_inode(
 	if (parent && compare_inode_defrag(&tmp, entry) > 0) {
 		parent = rb_next(parent);
-		if (parent)
-			entry = rb_entry(parent, struct inode_defrag, rb_node);
-		else
-			entry = NULL;
+		entry = rb_entry_safe(parent, struct inode_defrag, rb_node);
 	}
 out:
 	if (entry)
@@ -454,40 +454,25 @@ static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
 static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
 					struct btrfs_delayed_node *delayed_node)
 {
-	struct rb_node *p;
-	struct btrfs_delayed_item *item = NULL;
+	struct rb_node *p = rb_first_cached(&delayed_node->ins_root);
 
-	p = rb_first_cached(&delayed_node->ins_root);
-	if (p)
-		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
-
-	return item;
+	return rb_entry_safe(p, struct btrfs_delayed_item, rb_node);
 }
 
 static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
 					struct btrfs_delayed_node *delayed_node)
 {
-	struct rb_node *p;
-	struct btrfs_delayed_item *item = NULL;
+	struct rb_node *p = rb_first_cached(&delayed_node->del_root);
 
-	p = rb_first_cached(&delayed_node->del_root);
-	if (p)
-		item = rb_entry(p, struct btrfs_delayed_item, rb_node);
-
-	return item;
+	return rb_entry_safe(p, struct btrfs_delayed_item, rb_node);
 }
 
 static struct btrfs_delayed_item *__btrfs_next_delayed_item(
 					struct btrfs_delayed_item *item)
 {
-	struct rb_node *p;
-	struct btrfs_delayed_item *next = NULL;
+	struct rb_node *p = rb_next(&item->rb_node);
 
-	p = rb_next(&item->rb_node);
-	if (p)
-		next = rb_entry(p, struct btrfs_delayed_item, rb_node);
-
-	return next;
+	return rb_entry_safe(p, struct btrfs_delayed_item, rb_node);
 }
 
 static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
@@ -331,12 +331,9 @@ static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
 					struct btrfs_delayed_ref_node *ins)
 {
 	struct rb_node *node = &ins->ref_node;
-	struct rb_node *exist;
+	struct rb_node *exist = rb_find_add_cached(node, root, cmp_refs_node);
 
-	exist = rb_find_add_cached(node, root, cmp_refs_node);
-	if (exist)
-		return rb_entry(exist, struct btrfs_delayed_ref_node, ref_node);
-	return NULL;
+	return rb_entry_safe(exist, struct btrfs_delayed_ref_node, ref_node);
 }
 
 static struct btrfs_delayed_ref_head *find_first_ref_head(
@@ -222,20 +222,14 @@ static inline struct extent_state *next_state(struct extent_state *state)
 {
 	struct rb_node *next = rb_next(&state->rb_node);
 
-	if (next)
-		return rb_entry(next, struct extent_state, rb_node);
-	else
-		return NULL;
+	return rb_entry_safe(next, struct extent_state, rb_node);
 }
 
 static inline struct extent_state *prev_state(struct extent_state *state)
 {
 	struct rb_node *next = rb_prev(&state->rb_node);
 
-	if (next)
-		return rb_entry(next, struct extent_state, rb_node);
-	else
-		return NULL;
+	return rb_entry_safe(next, struct extent_state, rb_node);
 }
 
 /*
@@ -361,8 +361,8 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
 	if (em->start != 0) {
 		rb = rb_prev(&em->rb_node);
-		if (rb)
-			merge = rb_entry(rb, struct extent_map, rb_node);
+		merge = rb_entry_safe(rb, struct extent_map, rb_node);
 
 		if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) {
 			em->start = merge->start;
 			em->len += merge->len;
@@ -379,8 +379,8 @@ static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em)
 	}
 
 	rb = rb_next(&em->rb_node);
-	if (rb)
-		merge = rb_entry(rb, struct extent_map, rb_node);
+	merge = rb_entry_safe(rb, struct extent_map, rb_node);
 
 	if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) {
 		em->len += merge->len;
 		if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)