drivers/target/target_core_device.c:

/*
 * Check if the underlying struct block_device request_queue supports
 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
 * in ATA and we need to set TPE=1
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
- struct request_queue *q)
+ struct block_device *bdev)
{
- int block_size = queue_logical_block_size(q);
+ struct request_queue *q = bdev_get_queue(bdev);
+ int block_size = bdev_logical_block_size(bdev);
if (!blk_queue_discard(q))
return false;
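
For readability, here is the helper as it reads once this hunk applies,
reassembled from the + and context lines above with indentation restored.
The body past the discard check is elided; in the kernel source it goes on
to fill the attrib->unmap_* fields from the queue's discard limits.

bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int block_size = bdev_logical_block_size(bdev);

	if (!blk_queue_discard(q))
		return false;
	/* ... attrib->unmap_* set from q->limits ... */
}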

drivers/target/target_core_file.c (fd_configure_device):
inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
- struct request_queue *q = bdev_get_queue(I_BDEV(inode));
+ struct block_device *bdev = I_BDEV(inode);
unsigned long long dev_size;
- fd_dev->fd_block_size = bdev_logical_block_size(I_BDEV(inode));
+ fd_dev->fd_block_size = bdev_logical_block_size(bdev);
/*
* Determine the number of bytes from i_size_read() minus
* one (1) logical sector from underlying struct block_device
*/
...
dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
- if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, bdev))
pr_debug("IFILE: BLOCK Discard support available,"
" disabled by default\n");

drivers/target/target_core_iblock.c (iblock_configure_device):
dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
- if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, bd))
pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n");

include/target/target_core_backend.h:

#define TRANSPORT_FLAG_PASSTHROUGH_ALUA 0x2
#define TRANSPORT_FLAG_PASSTHROUGH_PGR 0x4
-struct request_queue;
+struct block_device;
struct scatterlist;
struct target_backend_ops {
	/* ... method table unchanged by this patch ... */
};

bool target_sense_desc_format(struct se_device *dev);
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
- struct request_queue *q);
+ struct block_device *bdev);
static inline bool target_dev_configured(struct se_device *se_dev)
{
	return !!(se_dev->dev_flags & DF_CONFIGURED);
}
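
Net effect on the exported API: combining the two header hunks, the
forward declaration and prototype now read as below (no new code, just
the post-patch view). There is no queue-based variant left, so a caller
that only held a request_queue must now hold the block_device instead.

struct block_device;

bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
				       struct block_device *bdev);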