#define DMZ_MIN_BIOS 8192
-#define DMZ_MAX_DEVS 2
-
/*
* Zone BIO context.
*/
/*
 * Target descriptor.
 */
struct dmz_target {
- struct dm_dev *ddev[DMZ_MAX_DEVS];
+ struct dm_dev **ddev;
unsigned int nr_ddevs;
- unsigned long flags;
+ unsigned int flags;
/* Zoned block device information */
struct dmz_dev *dev;
struct dmz_target *dmz = ti->private;
int i;
- for (i = 0; i < DMZ_MAX_DEVS; i++) {
+ for (i = 0; i < dmz->nr_ddevs; i++) {
if (dmz->ddev[i]) {
dm_put_device(ti, dmz->ddev[i]);
dmz->ddev[i] = NULL;
struct dmz_target *dmz = ti->private;
struct dmz_dev *reg_dev, *zoned_dev;
struct request_queue *q;
+ sector_t zone_nr_sectors = 0;
+ int i;
/*
- * When we have two devices, the first one must be a regular block
- * device and the second a zoned block device.
+ * When we have more than one device, the first one must be a
+ * regular block device and the others zoned block devices.
*/
- if (dmz->ddev[0] && dmz->ddev[1]) {
+ if (dmz->nr_ddevs > 1) {
reg_dev = &dmz->dev[0];
if (!(reg_dev->flags & DMZ_BDEV_REGULAR)) {
ti->error = "Primary disk is not a regular device";
return -EINVAL;
}
- zoned_dev = &dmz->dev[1];
- if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
- ti->error = "Secondary disk is not a zoned device";
- return -EINVAL;
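+		/*
+		 * Every secondary device must be zoned, and all zoned devices
+		 * must share a single zone size.
+		 */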
+ for (i = 1; i < dmz->nr_ddevs; i++) {
+ zoned_dev = &dmz->dev[i];
+ if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
+ ti->error = "Secondary disk is not a zoned device";
+ return -EINVAL;
+ }
+ q = bdev_get_queue(zoned_dev->bdev);
+ if (zone_nr_sectors &&
+ zone_nr_sectors != blk_queue_zone_sectors(q)) {
+ ti->error = "Zone nr sectors mismatch";
+ return -EINVAL;
+ }
+ zone_nr_sectors = blk_queue_zone_sectors(q);
+ zoned_dev->zone_nr_sectors = zone_nr_sectors;
+ zoned_dev->nr_zones =
+ blkdev_nr_zones(zoned_dev->bdev->bd_disk);
}
} else {
	reg_dev = NULL;
	zoned_dev = &dmz->dev[0];
	if (zoned_dev->flags & DMZ_BDEV_REGULAR) {
		ti->error = "Disk is not a zoned device";
		return -EINVAL;
	}
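+		/* Single zoned device: read its zone geometry directly */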
+ q = bdev_get_queue(zoned_dev->bdev);
+ zoned_dev->zone_nr_sectors = blk_queue_zone_sectors(q);
+ zoned_dev->nr_zones = blkdev_nr_zones(zoned_dev->bdev->bd_disk);
}
- q = bdev_get_queue(zoned_dev->bdev);
- zoned_dev->zone_nr_sectors = blk_queue_zone_sectors(q);
- zoned_dev->nr_zones = blkdev_nr_zones(zoned_dev->bdev->bd_disk);
if (reg_dev) {
- reg_dev->zone_nr_sectors = zoned_dev->zone_nr_sectors;
+ sector_t zone_offset;
+
+ reg_dev->zone_nr_sectors = zone_nr_sectors;
reg_dev->nr_zones =
DIV_ROUND_UP_SECTOR_T(reg_dev->capacity,
reg_dev->zone_nr_sectors);
- zoned_dev->zone_offset = reg_dev->nr_zones;
+ reg_dev->zone_offset = 0;
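+		/*
+		 * Zone numbers are contiguous across devices: the cache zones
+		 * of the regular device come first, then each zoned device's
+		 * zones at the accumulated zone_offset.
+		 */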
+ zone_offset = reg_dev->nr_zones;
+ for (i = 1; i < dmz->nr_ddevs; i++) {
+ dmz->dev[i].zone_offset = zone_offset;
+ zone_offset += dmz->dev[i].nr_zones;
+ }
}
return 0;
}
int ret, i;
/* Check arguments */
- if (argc < 1 || argc > 2) {
+ if (argc < 1) {
ti->error = "Invalid argument count";
return -EINVAL;
}
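+	/* Each argument is the path of one underlying block device */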
ti->error = "Unable to allocate the zoned target descriptor";
return -ENOMEM;
}
- dmz->dev = kcalloc(2, sizeof(struct dmz_dev), GFP_KERNEL);
+ dmz->dev = kcalloc(argc, sizeof(struct dmz_dev), GFP_KERNEL);
if (!dmz->dev) {
ti->error = "Unable to allocate the zoned device descriptors";
kfree(dmz);
return -ENOMEM;
}
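+	/* Allocate one dm_dev pointer per device argument */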
+ dmz->ddev = kcalloc(argc, sizeof(struct dm_dev *), GFP_KERNEL);
+ if (!dmz->ddev) {
+ ti->error = "Unable to allocate the dm device descriptors";
+ ret = -ENOMEM;
+ goto err;
+ }
dmz->nr_ddevs = argc;
+
ti->private = dmz;
/* Get the target zoned block device */
- ret = dmz_get_zoned_device(ti, argv[0], 0, argc);
- if (ret)
- goto err;
-
- if (argc == 2) {
- ret = dmz_get_zoned_device(ti, argv[1], 1, argc);
- if (ret) {
- dmz_put_zoned_device(ti);
- goto err;
- }
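+	/* Acquire every device listed on the table line */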
+ for (i = 0; i < argc; i++) {
+ ret = dmz_get_zoned_device(ti, argv[i], i, argc);
+ if (ret)
+ goto err_dev;
}
ret = dmz_fixup_devices(ti);
- if (ret) {
- dmz_put_zoned_device(ti);
- goto err;
- }
+ if (ret)
+ goto err_dev;
/* Initialize metadata */
ret = dmz_ctr_metadata(dmz->dev, argc, &dmz->metadata,
struct dmz_target *dmz = ti->private;
unsigned int zone_nr_sectors = dmz_zone_nr_sectors(dmz->metadata);
sector_t capacity;
- int r;
+ int i, r;
- capacity = dmz->dev[0].capacity & ~(zone_nr_sectors - 1);
- r = fn(ti, dmz->ddev[0], 0, capacity, data);
- if (!r && dmz->ddev[1]) {
- capacity = dmz->dev[1].capacity & ~(zone_nr_sectors - 1);
- r = fn(ti, dmz->ddev[1], 0, capacity, data);
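+	/*
+	 * Zone sizes are a power of 2, so the mask rounds each device's
+	 * capacity down to a zone boundary.
+	 */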
+ for (i = 0; i < dmz->nr_ddevs; i++) {
+ capacity = dmz->dev[i].capacity & ~(zone_nr_sectors - 1);
+ r = fn(ti, dmz->ddev[i], 0, capacity, data);
+ if (r)
+ break;
}
return r;
}
dmz_nr_zones(dmz->metadata),
dmz_nr_unmap_cache_zones(dmz->metadata),
dmz_nr_cache_zones(dmz->metadata));
- for (i = 0; i < DMZ_MAX_DEVS; i++) {
- if (!dmz->ddev[i])
- continue;
+ for (i = 0; i < dmz->nr_ddevs; i++) {
/*
* For a multi-device setup the first device
* contains only cache zones.
dev = &dmz->dev[0];
format_dev_t(buf, dev->bdev->bd_dev);
DMEMIT("%s", buf);
- if (dmz->dev[1].bdev) {
- dev = &dmz->dev[1];
+ for (i = 1; i < dmz->nr_ddevs; i++) {
+ dev = &dmz->dev[i];
format_dev_t(buf, dev->bdev->bd_dev);
DMEMIT(" %s", buf);
}