dm thin: replace dm_cell_release_singleton with cell_defer_except
author    Joe Thornber <ejt@redhat.com>
          Fri, 21 Dec 2012 20:23:31 +0000 (20:23 +0000)
committer Alasdair G Kergon <agk@redhat.com>
          Fri, 21 Dec 2012 20:23:31 +0000 (20:23 +0000)
Change existing users of the function dm_cell_release_singleton to use
cell_defer_except instead, and then remove the now-unused function.

Everywhere that calls dm_cell_release_singleton, the bio in question
is the holder of the cell.

If there are no non-holder entries in the cell then cell_defer_except
behaves exactly like dm_cell_release_singleton.  Conversely, if there
*are* non-holder entries then dm_cell_release_singleton must not be used
because those entries would need to be deferred.

Consequently, it is safe to replace use of dm_cell_release_singleton
with cell_defer_except.
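
For reference, cell_defer_except at this point in the series is a short
function along these lines (a sketch reconstructed from the surrounding
dm-thin.c context; the pool->deferred_bios list and wake_worker() helper
are assumed from that file rather than shown in this patch):

	static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
	{
		struct pool *pool = tc->pool;
		unsigned long flags;

		/*
		 * Queue every non-holder bio in the cell for deferred
		 * processing; the holder stays with the caller.
		 */
		spin_lock_irqsave(&pool->lock, flags);
		dm_cell_release_no_holder(cell, &pool->deferred_bios);
		spin_unlock_irqrestore(&pool->lock, flags);

		wake_worker(pool);
	}

With no non-holder entries in the cell this simply empties the cell, just
as dm_cell_release_singleton does; with non-holder entries present it
defers them instead of tripping the BUG_ON checks removed below.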

This patch is a pre-requisite for "dm thin: fix race between
simultaneous io and discards to same block".

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
drivers/md/dm-bio-prison.c
drivers/md/dm-bio-prison.h
drivers/md/dm-thin.c

diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
index e4e84156745972b6b910901ed7d4f6e5bceeb749..aefb78e3cbf9beda9a1bfe3170123a22d02ccfbb 100644
--- a/drivers/md/dm-bio-prison.c
+++ b/drivers/md/dm-bio-prison.c
@@ -207,31 +207,6 @@ void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
 }
 EXPORT_SYMBOL_GPL(dm_cell_release);
 
-/*
- * There are a couple of places where we put a bio into a cell briefly
- * before taking it out again.  In these situations we know that no other
- * bio may be in the cell.  This function releases the cell, and also does
- * a sanity check.
- */
-static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
-{
-       BUG_ON(cell->holder != bio);
-       BUG_ON(!bio_list_empty(&cell->bios));
-
-       __cell_release(cell, NULL);
-}
-
-void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
-{
-       unsigned long flags;
-       struct dm_bio_prison *prison = cell->prison;
-
-       spin_lock_irqsave(&prison->lock, flags);
-       __cell_release_singleton(cell, bio);
-       spin_unlock_irqrestore(&prison->lock, flags);
-}
-EXPORT_SYMBOL_GPL(dm_cell_release_singleton);
-
 /*
  * Sometimes we don't want the holder, just the additional bios.
  */
diff --git a/drivers/md/dm-bio-prison.h b/drivers/md/dm-bio-prison.h
index 4e0ac376700ab7c7384f2f8aeceed3d4e6f107b5..53d1a7a84e2fec12da0bccfa23e0a86fb01fcbfb 100644
--- a/drivers/md/dm-bio-prison.h
+++ b/drivers/md/dm-bio-prison.h
@@ -44,7 +44,6 @@ int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
                  struct bio *inmate, struct dm_bio_prison_cell **ref);
 
 void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios);
-void dm_cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio); // FIXME: bio arg not needed
 void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates);
 void dm_cell_error(struct dm_bio_prison_cell *cell);
 
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 058acf3a5ba7aaa43ba58e0eb7dd1acfd1f6399b..25dfd2311a61259422c04454fc882f70bd37ecd2 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -513,8 +513,7 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
 }
 
 /*
- * Same as cell_defer above, except it omits one particular detainee,
- * a write bio that covers the block and has already been processed.
+ * Same as cell_defer except it omits the original holder of the cell.
  */
 static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 {
@@ -936,7 +935,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
                 */
                build_data_key(tc->td, lookup_result.block, &key2);
                if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
-                       dm_cell_release_singleton(cell, bio);
+                       cell_defer_except(tc, cell);
                        break;
                }
 
@@ -967,8 +966,8 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
                         * a block boundary.  So we submit the discard of a
                         * partial block appropriately.
                         */
-                       dm_cell_release_singleton(cell, bio);
-                       dm_cell_release_singleton(cell2, bio);
+                       cell_defer_except(tc, cell);
+                       cell_defer_except(tc, cell2);
                        if ((!lookup_result.shared) && pool->pf.discard_passdown)
                                remap_and_issue(tc, bio, lookup_result.block);
                        else
@@ -980,13 +979,13 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
                /*
                 * It isn't provisioned, just forget it.
                 */
-               dm_cell_release_singleton(cell, bio);
+               cell_defer_except(tc, cell);
                bio_endio(bio, 0);
                break;
 
        default:
                DMERR("discard: find block unexpectedly returned %d", r);
-               dm_cell_release_singleton(cell, bio);
+               cell_defer_except(tc, cell);
                bio_io_error(bio);
                break;
        }
@@ -1041,7 +1040,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 
                h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
 
-               dm_cell_release_singleton(cell, bio);
+               cell_defer_except(tc, cell);
                remap_and_issue(tc, bio, lookup_result->block);
        }
 }
@@ -1056,7 +1055,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
         * Remap empty bios (flushes) immediately, without provisioning.
         */
        if (!bio->bi_size) {
-               dm_cell_release_singleton(cell, bio);
+               cell_defer_except(tc, cell);
                remap_and_issue(tc, bio, 0);
                return;
        }
@@ -1066,7 +1065,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
         */
        if (bio_data_dir(bio) == READ) {
                zero_fill_bio(bio);
-               dm_cell_release_singleton(cell, bio);
+               cell_defer_except(tc, cell);
                bio_endio(bio, 0);
                return;
        }
@@ -1120,7 +1119,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
                 * TODO: this will probably have to change when discard goes
                 * back in.
                 */
-               dm_cell_release_singleton(cell, bio);
+               cell_defer_except(tc, cell);
 
                if (lookup_result.shared)
                        process_shared_bio(tc, bio, block, &lookup_result);
@@ -1130,7 +1129,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 
        case -ENODATA:
                if (bio_data_dir(bio) == READ && tc->origin_dev) {
-                       dm_cell_release_singleton(cell, bio);
+                       cell_defer_except(tc, cell);
                        remap_to_origin_and_issue(tc, bio);
                } else
                        provision_block(tc, bio, block, cell);
@@ -1138,7 +1137,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
 
        default:
                DMERR("dm_thin_find_block() failed, error = %d", r);
-               dm_cell_release_singleton(cell, bio);
+               cell_defer_except(tc, cell);
                bio_io_error(bio);
                break;
        }