/*
 * sd_read_block_limits() - cache fields of the Block Limits VPD page (0xB0)
 * on the scsi_disk (io_min, max/opt transfer blocks, UNMAP limits).
 *
 * NOTE(review): this span is a unified diff hunk, not plain C.  Lines
 * prefixed "-" are removed, "+" are added, unprefixed lines are context.
 * The change drops the kmalloc()'d buffer filled via scsi_get_vpd_page()
 * and instead reads the VPD page already cached on the scsi_device
 * (vpd_pgb0) under rcu_read_lock()/rcu_dereference(); the kfree() at the
 * out label correspondingly becomes rcu_read_unlock().
 */
static void sd_read_block_limits(struct scsi_disk *sdkp)
{
unsigned int sector_sz = sdkp->device->sector_size;
- const int vpd_len = 64;
- unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
+ struct scsi_vpd *vpd;
- if (!buffer ||
- /* Block Limits VPD */
- scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
+ rcu_read_lock();
+
+ vpd = rcu_dereference(sdkp->device->vpd_pgb0);
/* Need at least 16 bytes: the largest offset read below is data[12..15]. */
+ if (!vpd || vpd->len < 16)
goto out;
blk_queue_io_min(sdkp->disk->queue,
- get_unaligned_be16(&buffer[6]) * sector_sz);
+ get_unaligned_be16(&vpd->data[6]) * sector_sz);
- sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
- sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);
+ sdkp->max_xfer_blocks = get_unaligned_be32(&vpd->data[8]);
+ sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]);
/*
 * Provisioning fields live past byte 16 and only exist in the full-size
 * page.  Old code tested the page-length byte (0x3c payload == 64-byte
 * page); new code tests the cached length directly.
 */
- if (buffer[3] == 0x3c) {
+ if (vpd->len >= 64) {
unsigned int lba_count, desc_count;
- sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);
+ sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]);
if (!sdkp->lbpme)
goto out;
- lba_count = get_unaligned_be32(&buffer[20]);
- desc_count = get_unaligned_be32(&buffer[24]);
+ lba_count = get_unaligned_be32(&vpd->data[20]);
+ desc_count = get_unaligned_be32(&vpd->data[24]);
if (lba_count && desc_count)
sdkp->max_unmap_blocks = lba_count;
- sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);
+ sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]);
/*
 * High bit of byte 32 flags the alignment field as valid; it is
 * masked back out of the 32-bit big-endian value below.
 */
- if (buffer[32] & 0x80)
+ if (vpd->data[32] & 0x80)
sdkp->unmap_alignment =
- get_unaligned_be32(&buffer[32]) & ~(1 << 31);
+ get_unaligned_be32(&vpd->data[32]) & ~(1 << 31);
/* NOTE(review): the body of this if appears truncated in this excerpt. */
if (!sdkp->lbpvpd) { /* LBP VPD page not provided */
}
out:
- kfree(buffer);
+ rcu_read_unlock();
}
/**
/*
 * sd_read_block_characteristics() - read the Block Device Characteristics
 * VPD page (0xB1): rotation rate (non-rotational detection) and the
 * zoned-device capability bits.
 *
 * NOTE(review): this span is a unified diff hunk ("-" removed, "+" added).
 * The change reads the cached vpd_pgb1 under RCU instead of allocating a
 * buffer and issuing scsi_get_vpd_page().  The fields needed later (rot,
 * zoned) are copied to locals so rcu_read_unlock() can happen before the
 * queue flags are updated.
 */
static void sd_read_block_characteristics(struct scsi_disk *sdkp)
{
struct request_queue *q = sdkp->disk->queue;
- unsigned char *buffer;
+ struct scsi_vpd *vpd;
u16 rot;
- const int vpd_len = 64;
+ u8 zoned;
- buffer = kmalloc(vpd_len, GFP_KERNEL);
+ rcu_read_lock();
+ vpd = rcu_dereference(sdkp->device->vpd_pgb1);
- if (!buffer ||
- /* Block Device Characteristics VPD */
- scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
- goto out;
/* Need bytes 4..8: rotation rate at [4..5], zoned bits in [8]. */
+ if (!vpd || vpd->len < 8) {
+ rcu_read_unlock();
+ return;
+ }
- rot = get_unaligned_be16(&buffer[4]);
+ rot = get_unaligned_be16(&vpd->data[4]);
+ zoned = (vpd->data[8] >> 4) & 3;
+ rcu_read_unlock();
/*
 * NOTE(review): lines appear to be missing from this excerpt below --
 * the rot == 1 branch runs straight into the host-managed zoned setup,
 * which in context belongs to a separate device-type check.
 */
if (rot == 1) {
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* Host-managed */
blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM);
} else {
- sdkp->zoned = (buffer[8] >> 4) & 3;
+ sdkp->zoned = zoned;
if (sdkp->zoned == 1) {
/* Host-aware */
blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA);
}
/* Log only once, on the initial scan. */
if (!sdkp->first_scan)
- goto out;
+ return;
/*
 * NOTE(review): the sd_printk arguments / else-branch between the two
 * calls below look truncated in this excerpt.
 */
if (blk_queue_is_zoned(q)) {
sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
sd_printk(KERN_NOTICE, sdkp,
"Drive-managed SMR disk\n");
}
-
- out:
- kfree(buffer);
}
/**
*/
/*
 * sd_read_block_provisioning() - read the Logical Block Provisioning VPD
 * page (0xB2) and record which unmap mechanisms the device advertises
 * (LBPU / LBPWS / LBPWS10 bits of byte 5).
 *
 * NOTE(review): this span is a unified diff hunk ("-" removed, "+" added).
 * Same conversion as the sibling functions: the cached vpd_pgb2 page is
 * read under rcu_read_lock() instead of kmalloc + scsi_get_vpd_page().
 * Skipped entirely when LBPME is not set (no provisioning support).
 */
static void sd_read_block_provisioning(struct scsi_disk *sdkp)
{
- unsigned char *buffer;
- const int vpd_len = 8;
+ struct scsi_vpd *vpd;
if (sdkp->lbpme == 0)
return;
- buffer = kmalloc(vpd_len, GFP_KERNEL);
+ rcu_read_lock();
+ vpd = rcu_dereference(sdkp->device->vpd_pgb2);
- if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
- goto out;
/* Need at least 8 bytes so the flag byte at data[5] is valid. */
+ if (!vpd || vpd->len < 8) {
+ rcu_read_unlock();
+ return;
+ }
sdkp->lbpvpd = 1;
- sdkp->lbpu = (buffer[5] >> 7) & 1; /* UNMAP */
- sdkp->lbpws = (buffer[5] >> 6) & 1; /* WRITE SAME(16) with UNMAP */
- sdkp->lbpws10 = (buffer[5] >> 5) & 1; /* WRITE SAME(10) with UNMAP */
-
- out:
- kfree(buffer);
+ sdkp->lbpu = (vpd->data[5] >> 7) & 1; /* UNMAP */
+ sdkp->lbpws = (vpd->data[5] >> 6) & 1; /* WRITE SAME(16) w/ UNMAP */
+ sdkp->lbpws10 = (vpd->data[5] >> 5) & 1; /* WRITE SAME(10) w/ UNMAP */
+ rcu_read_unlock();
}
static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)