From ed2b1d36a9d027f9b841be5bfc9d61011462d447 Mon Sep 17 00:00:00 2001
From: David Sterba
Date: Tue, 24 Sep 2019 19:17:17 +0200
Subject: [PATCH] btrfs: move btrfs_set_path_blocking to other locking functions

The function belongs to the family of locking functions, so move it
there. The 'noinline' keyword is dropped as it's now an exported
function that does not need it.

Reviewed-by: Josef Bacik
Signed-off-by: David Sterba
---
 fs/btrfs/ctree.c   | 25 -------------------------
 fs/btrfs/locking.c | 26 ++++++++++++++++++++++++++
 fs/btrfs/locking.h |  2 ++
 3 files changed, 28 insertions(+), 25 deletions(-)

diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0231141de289f..a55d55e5c913e 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -56,31 +56,6 @@ struct btrfs_path *btrfs_alloc_path(void)
 	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
 }
 
-/*
- * set all locked nodes in the path to blocking locks. This should
- * be done before scheduling
- */
-noinline void btrfs_set_path_blocking(struct btrfs_path *p)
-{
-	int i;
-	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
-		if (!p->nodes[i] || !p->locks[i])
-			continue;
-		/*
-		 * If we currently have a spinning reader or writer lock this
-		 * will bump the count of blocking holders and drop the
-		 * spinlock.
-		 */
-		if (p->locks[i] == BTRFS_READ_LOCK) {
-			btrfs_set_lock_blocking_read(p->nodes[i]);
-			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
-		} else if (p->locks[i] == BTRFS_WRITE_LOCK) {
-			btrfs_set_lock_blocking_write(p->nodes[i]);
-			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
-		}
-	}
-}
-
 /* this also releases the path */
 void btrfs_free_path(struct btrfs_path *p)
 {
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 028513153ac4e..f58606887859b 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -316,3 +316,29 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
 		write_unlock(&eb->lock);
 	}
 }
+
+/*
+ * Set all locked nodes in the path to blocking locks. This should be done
+ * before scheduling
+ */
+void btrfs_set_path_blocking(struct btrfs_path *p)
+{
+	int i;
+
+	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
+		if (!p->nodes[i] || !p->locks[i])
+			continue;
+		/*
+		 * If we currently have a spinning reader or writer lock this
+		 * will bump the count of blocking holders and drop the
+		 * spinlock.
+		 */
+		if (p->locks[i] == BTRFS_READ_LOCK) {
+			btrfs_set_lock_blocking_read(p->nodes[i]);
+			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
+		} else if (p->locks[i] == BTRFS_WRITE_LOCK) {
+			btrfs_set_lock_blocking_write(p->nodes[i]);
+			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
+		}
+	}
+}
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index ab4020de25e70..98c92222eaf06 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -33,6 +33,8 @@ static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) {
 static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
 #endif
 
+void btrfs_set_path_blocking(struct btrfs_path *p);
+
 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
 {
 	if (rw == BTRFS_WRITE_LOCK || rw == BTRFS_WRITE_LOCK_BLOCKING)
-- 
2.39.5
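
For context, a minimal sketch of the calling pattern the moved function
serves: btrfs_search_slot() leaves spinning locks on the path, and they
must be converted to blocking locks before anything that may schedule.
This is illustrative only and not part of the patch; the function name
example_lookup() and the sleeping step are hypothetical.

#include "ctree.h"
#include "locking.h"

static int example_lookup(struct btrfs_root *root,
			  const struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Read-only search; the path now holds spinning tree locks. */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		goto out;

	/*
	 * Convert every spinning lock held on the path to its blocking
	 * variant so this task can safely sleep while other lock
	 * holders spin-wait.
	 */
	btrfs_set_path_blocking(path);

	/* ... work that may schedule, e.g. a GFP_NOFS allocation ... */

out:
	btrfs_free_path(path);	/* also releases the path's locks */
	return ret;
}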