md/bcache: Combine two uuid_io() arguments
Author:     Bart Van Assche <bvanassche@acm.org>
AuthorDate: Thu, 14 Jul 2022 18:06:58 +0000 (11:06 -0700)
Commit:     Jens Axboe <axboe@kernel.dk>
CommitDate: Thu, 14 Jul 2022 18:14:31 +0000 (12:14 -0600)

Improve uniformity in the kernel's handling of the request operation and
flags by passing these as a single argument.

Cc: Coly Li <colyli@suse.de>
Cc: Mingzhe Zou <mingzhe.zou@easystack.cn>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Link: https://lore.kernel.org/r/20220714180729.1065367-33-bvanassche@acm.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 9dd752d272f6083b5a74b62f455b26965f0c3a2c..a2f61a2225d27cf1c905aae41f2ce6411faea336 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -414,8 +414,8 @@ static void uuid_io_unlock(struct closure *cl)
        up(&c->uuid_write_mutex);
 }
 
-static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
-                   struct bkey *k, struct closure *parent)
+static void uuid_io(struct cache_set *c, blk_opf_t opf, struct bkey *k,
+                   struct closure *parent)
 {
        struct closure *cl = &c->uuid_write;
        struct uuid_entry *u;
@@ -429,22 +429,22 @@ static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
        for (i = 0; i < KEY_PTRS(k); i++) {
                struct bio *bio = bch_bbio_alloc(c);
 
-               bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
+               bio->bi_opf = opf | REQ_SYNC | REQ_META;
                bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
 
                bio->bi_end_io  = uuid_endio;
                bio->bi_private = cl;
-               bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
                bch_bio_map(bio, c->uuids);
 
                bch_submit_bbio(bio, c, k, i);
 
-               if (op != REQ_OP_WRITE)
+               if ((opf & REQ_OP_MASK) != REQ_OP_WRITE)
                        break;
        }
 
        bch_extent_to_text(buf, sizeof(buf), k);
-       pr_debug("%s UUIDs at %s\n", op == REQ_OP_WRITE ? "wrote" : "read", buf);
+       pr_debug("%s UUIDs at %s\n", (opf & REQ_OP_MASK) == REQ_OP_WRITE ?
+                "wrote" : "read", buf);
 
        for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
                if (!bch_is_zero(u->uuid, 16))
@@ -463,7 +463,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
                return "bad uuid pointer";
 
        bkey_copy(&c->uuid_bucket, k);
-       uuid_io(c, REQ_OP_READ, 0, k, cl);
+       uuid_io(c, REQ_OP_READ, k, cl);
 
        if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
                struct uuid_entry_v0    *u0 = (void *) c->uuids;
@@ -511,7 +511,7 @@ static int __uuid_write(struct cache_set *c)
 
        size =  meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
        SET_KEY_SIZE(&k.key, size);
-       uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
+       uuid_io(c, REQ_OP_WRITE, &k.key, &cl);
        closure_sync(&cl);
 
        /* Only one bucket used for uuid write */
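
For context, a minimal userspace sketch of the pattern this patch adopts: the
operation and any flags travel in one opf-style value, extra flags are OR'd in,
and the operation is recovered by masking with REQ_OP_MASK, as uuid_io() now
does with (opf & REQ_OP_MASK) != REQ_OP_WRITE. The typedef and the constant
values below are simplified stand-ins chosen for illustration, not the kernel's
actual definitions.

    /*
     * Standalone sketch (not kernel code): the low bits of the combined
     * value hold the operation, the remaining bits hold flags.  The
     * stand-in constants below only mimic that layout.
     */
    #include <stdio.h>

    typedef unsigned int blk_opf_t;                   /* stand-in typedef */

    #define REQ_OP_BITS   8
    #define REQ_OP_MASK   ((1u << REQ_OP_BITS) - 1)   /* low bits = op    */
    #define REQ_OP_READ   0u
    #define REQ_OP_WRITE  1u
    #define REQ_SYNC      (1u << REQ_OP_BITS)         /* example flag bit */
    #define REQ_META      (1u << (REQ_OP_BITS + 1))   /* example flag bit */

    static void uuid_io_sketch(blk_opf_t opf)
    {
            /* OR'ing in flags leaves the operation bits untouched. */
            blk_opf_t bi_opf = opf | REQ_SYNC | REQ_META;

            printf("%s UUIDs (bi_opf=0x%x)\n",
                   (bi_opf & REQ_OP_MASK) == REQ_OP_WRITE ? "wrote" : "read",
                   bi_opf);
    }

    int main(void)
    {
            uuid_io_sketch(REQ_OP_READ);   /* read path  */
            uuid_io_sketch(REQ_OP_WRITE);  /* write path */
            return 0;
    }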