git.baikalelectronics.ru Git - kernel.git/commitdiff
dm: refactor dm_md_mempool allocation
author: Christoph Hellwig <hch@lst.de>
Wed, 8 Jun 2022 06:34:09 +0000 (08:34 +0200)
committer: Mike Snitzer <snitzer@kernel.org>
Wed, 29 Jun 2022 16:46:06 +0000 (12:46 -0400)
The current split between dm_table_alloc_md_mempools and
dm_alloc_md_mempools is rather arbitrary, so merge the two
into one easy to follow function.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
drivers/md/dm-core.h
drivers/md/dm-table.c
drivers/md/dm.c
drivers/md/dm.h

index c954ff91870e78c1f538917a4ee5018b627e9800..5d9afca0d10584bc4945dafd4f2d986694c7b6d1 100644 (file)
@@ -230,6 +230,9 @@ struct dm_target_io {
        sector_t old_sector;
        struct bio clone;
 };
+#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
+#define DM_IO_BIO_OFFSET \
+       (offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
 
 /*
  * dm_target_io flags
index bd539afbfe88f729baa10d6b0af5276fb412b10b..3f29b1113294e80a8ce5ad1f6e6276f62e9a2e5c 100644 (file)
@@ -6,6 +6,7 @@
  */
 
 #include "dm-core.h"
+#include "dm-rq.h"
 
 #include <linux/module.h>
 #include <linux/vmalloc.h>
@@ -1010,32 +1011,56 @@ static bool dm_table_supports_poll(struct dm_table *t);
 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
 {
        enum dm_queue_mode type = dm_table_get_type(t);
-       unsigned per_io_data_size = 0;
-       unsigned min_pool_size = 0;
-       struct dm_target *ti;
-       unsigned i;
-       bool poll_supported = false;
+       unsigned int per_io_data_size = 0, front_pad, io_front_pad;
+       unsigned int min_pool_size = 0, pool_size;
+       struct dm_md_mempools *pools;
 
        if (unlikely(type == DM_TYPE_NONE)) {
                DMWARN("no table type is set, can't allocate mempools");
                return -EINVAL;
        }
 
-       if (__table_type_bio_based(type)) {
-               for (i = 0; i < t->num_targets; i++) {
-                       ti = t->targets + i;
-                       per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
-                       min_pool_size = max(min_pool_size, ti->num_flush_bios);
-               }
-               poll_supported = dm_table_supports_poll(t);
+       pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
+       if (!pools)
+               return -ENOMEM;
+
+       if (type == DM_TYPE_REQUEST_BASED) {
+               pool_size = dm_get_reserved_rq_based_ios();
+               front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
+               goto init_bs;
        }
 
-       t->mempools = dm_alloc_md_mempools(md, type, per_io_data_size, min_pool_size,
-                                          t->integrity_supported, poll_supported);
-       if (!t->mempools)
-               return -ENOMEM;
+       for (unsigned int i = 0; i < t->num_targets; i++) {
+               struct dm_target *ti = t->targets + i;
 
+               per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
+               min_pool_size = max(min_pool_size, ti->num_flush_bios);
+       }
+       pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
+       front_pad = roundup(per_io_data_size,
+               __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
+
+       io_front_pad = roundup(per_io_data_size,
+               __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
+       if (bioset_init(&pools->io_bs, pool_size, io_front_pad,
+                       dm_table_supports_poll(t) ? BIOSET_PERCPU_CACHE : 0))
+               goto out_free_pools;
+       if (t->integrity_supported &&
+           bioset_integrity_create(&pools->io_bs, pool_size))
+               goto out_free_pools;
+init_bs:
+       if (bioset_init(&pools->bs, pool_size, front_pad, 0))
+               goto out_free_pools;
+       if (t->integrity_supported &&
+           bioset_integrity_create(&pools->bs, pool_size))
+               goto out_free_pools;
+
+       t->mempools = pools;
        return 0;
+
+out_free_pools:
+       dm_free_md_mempools(pools);
+       return -ENOMEM;
 }
 
 static int setup_indexes(struct dm_table *t)
index 8872f9c6368895116917948240a7ffd9c52749b3..84929bd137d0c3414ac4e1c71355585fd05878b2 100644 (file)
@@ -88,10 +88,6 @@ struct clone_info {
        bool submit_as_polled:1;
 };
 
-#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
-#define DM_IO_BIO_OFFSET \
-       (offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
-
 static inline struct dm_target_io *clone_to_tio(struct bio *clone)
 {
        return container_of(clone, struct dm_target_io, clone);
@@ -2978,54 +2974,6 @@ int dm_noflush_suspending(struct dm_target *ti)
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
-struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
-                                           unsigned per_io_data_size, unsigned min_pool_size,
-                                           bool integrity, bool poll)
-{
-       struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id);
-       unsigned int pool_size = 0;
-       unsigned int front_pad, io_front_pad;
-       int ret;
-
-       if (!pools)
-               return NULL;
-
-       switch (type) {
-       case DM_TYPE_BIO_BASED:
-       case DM_TYPE_DAX_BIO_BASED:
-               pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
-               front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET;
-               io_front_pad = roundup(per_io_data_size,  __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET;
-               ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, poll ? BIOSET_PERCPU_CACHE : 0);
-               if (ret)
-                       goto out;
-               if (integrity && bioset_integrity_create(&pools->io_bs, pool_size))
-                       goto out;
-               break;
-       case DM_TYPE_REQUEST_BASED:
-               pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
-               front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
-               /* per_io_data_size is used for blk-mq pdu at queue allocation */
-               break;
-       default:
-               BUG();
-       }
-
-       ret = bioset_init(&pools->bs, pool_size, front_pad, 0);
-       if (ret)
-               goto out;
-
-       if (integrity && bioset_integrity_create(&pools->bs, pool_size))
-               goto out;
-
-       return pools;
-
-out:
-       dm_free_md_mempools(pools);
-
-       return NULL;
-}
-
 void dm_free_md_mempools(struct dm_md_mempools *pools)
 {
        if (!pools)
index a8405ce305a968977d68d03e3820b5d3238ef5d5..62816b647f8279d2cfd6c2c34c2db3d3d832a35f 100644 (file)
@@ -218,9 +218,6 @@ void dm_kcopyd_exit(void);
 /*
  * Mempool operations
  */
-struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
-                                           unsigned per_io_data_size, unsigned min_pool_size,
-                                           bool integrity, bool poll);
 void dm_free_md_mempools(struct dm_md_mempools *pools);
 
 /*