SD_ZERO_WS10_UNMAP, /* Use WRITE SAME(10) with UNMAP */
};
+/**
+ * struct zoned_disk_info - Specific properties of a ZBC SCSI device.
+ * @nr_zones: number of zones.
+ * @zone_blocks: number of logical blocks per zone.
+ *
+ * This data structure holds the ZBC SCSI device properties that are retrieved
+ * twice during revalidation: once before the gendisk capacity is known and
+ * once after it is known.
+ */
+struct zoned_disk_info {
+ u32 nr_zones;
+ u32 zone_blocks;
+};
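+
+/*
+ * Example flow (hypothetical values): the early pass may record, say,
+ * { .nr_zones = 55880, .zone_blocks = 524288 } in early_zone_info before
+ * the gendisk capacity is known; revalidation later copies those values
+ * into zone_info once the capacity is known.
+ */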
+
struct scsi_disk {
struct scsi_device *device;
struct gendisk *disk;
struct opal_dev *opal_dev;
#ifdef CONFIG_BLK_DEV_ZONED
- u32 nr_zones;
- u32 rev_nr_zones;
- u32 zone_blocks;
- u32 rev_zone_blocks;
+ /* Updated during revalidation before the gendisk capacity is known. */
+ struct zoned_disk_info early_zone_info;
+ /* Updated during revalidation after the gendisk capacity is known. */
+ struct zoned_disk_info zone_info;
u32 zones_optimal_open;
u32 zones_optimal_nonseq;
u32 zones_max_open;
* sure that the allocated buffer can always be mapped by limiting the
* number of pages allocated to the HBA max segments limit.
*/
- nr_zones = min(nr_zones, sdkp->nr_zones);
+ nr_zones = min(nr_zones, sdkp->zone_info.nr_zones);
bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE);
bufsize = min_t(size_t, bufsize,
queue_max_hw_sectors(q) << SECTOR_SHIFT);
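
/*
 * Worked example (hypothetical limits): reporting 65535 zones needs
 * (65535 + 1) * 64 bytes = 4 MiB, one 64-byte report header plus one
 * 64-byte descriptor per zone; an HBA advertising max_hw_sectors == 2048
 * would then clamp bufsize to 2048 << SECTOR_SHIFT = 1 MiB.
 */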
*/
static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
{
- return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
+ return logical_to_sectors(sdkp->device, sdkp->zone_info.zone_blocks);
}
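
/*
 * Example (hypothetical geometry): with 4096-byte logical blocks and
 * zone_info.zone_blocks == 65536, this returns 65536 << 3 = 524288
 * 512-byte sectors, i.e. 256 MiB per zone.
 */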
/**
zone_idx++;
}
- lba += sdkp->zone_blocks * i;
+ lba += sdkp->zone_info.zone_blocks * i;
}
ret = zone_idx;
sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
- for (zno = 0; zno < sdkp->nr_zones; zno++) {
+ for (zno = 0; zno < sdkp->zone_info.nr_zones; zno++) {
if (sdkp->zones_wp_offset[zno] != SD_ZBC_UPDATING_WP_OFST)
continue;
spin_unlock_irqrestore(&sdkp->zones_wp_offset_lock, flags);
ret = sd_zbc_do_report_zones(sdkp, sdkp->zone_wp_update_buf,
SD_BUF_SIZE,
- zno * sdkp->zone_blocks, true);
+ zno * sdkp->zone_info.zone_blocks, true);
spin_lock_irqsave(&sdkp->zones_wp_offset_lock, flags);
if (!ret)
sd_zbc_parse_report(sdkp, sdkp->zone_wp_update_buf + 64,
break;
default:
wp_offset = sectors_to_logical(sdkp->device, wp_offset);
- if (wp_offset + nr_blocks > sdkp->zone_blocks) {
+ if (wp_offset + nr_blocks > sdkp->zone_info.zone_blocks) {
ret = BLK_STS_IOERR;
break;
}
break;
case REQ_OP_ZONE_RESET_ALL:
memset(sdkp->zones_wp_offset, 0,
- sdkp->nr_zones * sizeof(unsigned int));
+ sdkp->zone_info.nr_zones * sizeof(unsigned int));
break;
default:
break;
if (!sd_is_zoned(sdkp) || !sdkp->capacity)
return;
- if (sdkp->capacity & (sdkp->zone_blocks - 1))
+ if (sdkp->capacity & (sdkp->zone_info.zone_blocks - 1))
sd_printk(KERN_NOTICE, sdkp,
"%u zones of %u logical blocks + 1 runt zone\n",
- sdkp->nr_zones - 1,
- sdkp->zone_blocks);
+ sdkp->zone_info.nr_zones - 1,
+ sdkp->zone_info.zone_blocks);
else
sd_printk(KERN_NOTICE, sdkp,
"%u zones of %u logical blocks\n",
- sdkp->nr_zones,
- sdkp->zone_blocks);
+ sdkp->zone_info.nr_zones,
+ sdkp->zone_info.zone_blocks);
}
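
/*
 * The "& (zone_info.zone_blocks - 1)" test above relies on the zone size
 * being a power of two, which the block layer requires of zoned devices.
 * E.g. (hypothetical) a capacity of 29297664 logical blocks with
 * 524288-block zones prints "55 zones of 524288 logical blocks + 1 runt
 * zone".
 */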
static int sd_zbc_init_disk(struct scsi_disk *sdkp)
kfree(sdkp->zone_wp_update_buf);
sdkp->zone_wp_update_buf = NULL;
- sdkp->nr_zones = 0;
- sdkp->rev_nr_zones = 0;
- sdkp->zone_blocks = 0;
- sdkp->rev_zone_blocks = 0;
+ sdkp->early_zone_info = (struct zoned_disk_info){ };
+ sdkp->zone_info = (struct zoned_disk_info){ };
mutex_unlock(&sdkp->rev_mutex);
}
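
/*
 * The two (struct zoned_disk_info){ } compound literals above zero both
 * nr_zones and zone_blocks in a single assignment each, replacing the
 * four separate u32 stores of the old code.
 */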
{
struct gendisk *disk = sdkp->disk;
struct request_queue *q = disk->queue;
- u32 zone_blocks = sdkp->rev_zone_blocks;
- unsigned int nr_zones = sdkp->rev_nr_zones;
+ u32 zone_blocks = sdkp->early_zone_info.zone_blocks;
+ unsigned int nr_zones = sdkp->early_zone_info.nr_zones;
u32 max_append;
int ret = 0;
unsigned int flags;
*/
mutex_lock(&sdkp->rev_mutex);
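
/*
 * Skip the expensive rescan below when neither the cached zone geometry
 * nor the queue's zone count has changed.
 */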
- if (sdkp->zone_blocks == zone_blocks &&
- sdkp->nr_zones == nr_zones &&
+ if (sdkp->zone_info.zone_blocks == zone_blocks &&
+ sdkp->zone_info.nr_zones == nr_zones &&
disk->queue->nr_zones == nr_zones)
goto unlock;
flags = memalloc_noio_save();
- sdkp->zone_blocks = zone_blocks;
- sdkp->nr_zones = nr_zones;
+ sdkp->zone_info.zone_blocks = zone_blocks;
+ sdkp->zone_info.nr_zones = nr_zones;
sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL);
if (!sdkp->rev_wp_offset) {
ret = -ENOMEM;
sdkp->rev_wp_offset = NULL;
if (ret) {
- sdkp->zone_blocks = 0;
- sdkp->nr_zones = 0;
+ sdkp->zone_info = (struct zoned_disk_info){ };
sdkp->capacity = 0;
goto unlock;
}
if (blk_queue_zoned_model(q) == BLK_ZONED_HM)
blk_queue_zone_write_granularity(q, sdkp->physical_block_size);
- sdkp->rev_nr_zones = nr_zones;
- sdkp->rev_zone_blocks = zone_blocks;
+ sdkp->early_zone_info.nr_zones = nr_zones;
+ sdkp->early_zone_info.zone_blocks = zone_blocks;
return 0;
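
/*
 * The early_zone_info values saved here are consumed by the revalidation
 * path above, which copies them into zone_info once the gendisk capacity
 * is known.
 */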