struct blk_mq_tag_set tag_set;
struct blkfront_ring_info *rinfo;
unsigned int nr_rings;
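+	/* Byte size of a single rinfo element, its flexible shadow[] included. */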
+ unsigned int rinfo_size;
/* Save incomplete reqs and bios for migration. */
struct list_head requests;
struct bio_list bio_list;
static void blkfront_gather_backend_features(struct blkfront_info *info);
static int negotiate_mq(struct blkfront_info *info);
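+/*
+ * Each blkfront_ring_info carries a flexible shadow[] array, so the
+ * elements behind info->rinfo are rinfo_size bytes apart, not
+ * sizeof(*rinfo). Iterate by explicit byte stride instead of indexing.
+ */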
+#define for_each_rinfo(info, ptr, idx) \
+ for ((ptr) = (info)->rinfo, (idx) = 0; \
+ (idx) < (info)->nr_rings; \
+ (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size)
+
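+/* Bounds-checked lookup of the i-th ring info, honoring the byte stride. */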
+static inline struct blkfront_ring_info *
+get_rinfo(const struct blkfront_info *info, unsigned int i)
+{
+ BUG_ON(i >= info->nr_rings);
+ return (void *)info->rinfo + i * info->rinfo_size;
+}
+
static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
{
unsigned long free = rinfo->shadow_free;
struct blkfront_info *info = hctx->queue->queuedata;
struct blkfront_ring_info *rinfo = NULL;
- BUG_ON(info->nr_rings <= qid);
- rinfo = &info->rinfo[qid];
+ rinfo = get_rinfo(info, qid);
blk_mq_start_request(qd->rq);
spin_lock_irqsave(&rinfo->ring_lock, flags);
if (RING_FULL(&rinfo->ring))
static void xlvbd_release_gendisk(struct blkfront_info *info)
{
unsigned int minor, nr_minors, i;
+ struct blkfront_ring_info *rinfo;
if (info->rq == NULL)
return;
/* No more blkif_request(). */
blk_mq_stop_hw_queues(info->rq);
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
-
+ for_each_rinfo(info, rinfo, i) {
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&rinfo->callback);
static void blkif_free(struct blkfront_info *info, int suspend)
{
unsigned int i;
+ struct blkfront_ring_info *rinfo;
/* Prevent new requests being issued until we fix things up. */
info->connected = suspend ?
	BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
/* No more blkif_request(). */
if (info->rq)
blk_mq_stop_hw_queues(info->rq);
- for (i = 0; i < info->nr_rings; i++)
- blkif_free_ring(&info->rinfo[i]);
+ for_each_rinfo(info, rinfo, i)
+ blkif_free_ring(rinfo);
kvfree(info->rinfo);
info->rinfo = NULL;
int err;
unsigned int i, max_page_order;
unsigned int ring_page_order;
+ struct blkfront_ring_info *rinfo;
if (!info)
return -ENODEV;
if (err)
goto destroy_blkring;
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
-
+ for_each_rinfo(info, rinfo, i) {
/* Create shared ring, alloc event channel. */
err = setup_blkring(dev, rinfo);
if (err)
/* We already got the number of queues/rings in _probe */
if (info->nr_rings == 1) {
- err = write_per_ring_nodes(xbt, &info->rinfo[0], dev->nodename);
+ err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename);
if (err)
goto destroy_blkring;
} else {
goto abort_transaction;
}
- for (i = 0; i < info->nr_rings; i++) {
+ for_each_rinfo(info, rinfo, i) {
memset(path, 0, pathsize);
snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
- err = write_per_ring_nodes(xbt, &info->rinfo[i], path);
+ err = write_per_ring_nodes(xbt, rinfo, path);
if (err) {
kfree(path);
goto destroy_blkring;
goto destroy_blkring;
}
- for (i = 0; i < info->nr_rings; i++) {
+ for_each_rinfo(info, rinfo, i) {
unsigned int j;
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
for (j = 0; j < BLK_RING_SIZE(info); j++)
rinfo->shadow[j].req.u.rw.id = j + 1;
{
unsigned int backend_max_queues;
unsigned int i;
+ struct blkfront_ring_info *rinfo;
BUG_ON(info->nr_rings);

/* Check if backend supports multiple queues. */
backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
					  "multi-queue-max-queues", 1);
info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
/* We need at least one ring. */
if (!info->nr_rings)
	info->nr_rings = 1;
- info->rinfo = kvcalloc(info->nr_rings,
- struct_size(info->rinfo, shadow,
- BLK_RING_SIZE(info)),
- GFP_KERNEL);
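+	/* Stash the element size for for_each_rinfo() and get_rinfo(). */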
+ info->rinfo_size = struct_size(info->rinfo, shadow,
+ BLK_RING_SIZE(info));
+ info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL);
if (!info->rinfo) {
xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
info->nr_rings = 0;
return -ENOMEM;
}
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo;
-
- rinfo = &info->rinfo[i];
+ for_each_rinfo(info, rinfo, i) {
INIT_LIST_HEAD(&rinfo->indirect_pages);
INIT_LIST_HEAD(&rinfo->grants);
rinfo->dev_info = info;
int rc;
struct bio *bio;
unsigned int segs;
+ struct blkfront_ring_info *rinfo;
blkfront_gather_backend_features(info);
/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
- for (r_index = 0; r_index < info->nr_rings; r_index++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
-
+ for_each_rinfo(info, rinfo, r_index) {
rc = blkfront_setup_indirect(rinfo);
if (rc)
return rc;
/* Now safe for us to use the shared ring */
info->connected = BLKIF_STATE_CONNECTED;
- for (r_index = 0; r_index < info->nr_rings; r_index++) {
- struct blkfront_ring_info *rinfo;
-
- rinfo = &info->rinfo[r_index];
+ for_each_rinfo(info, rinfo, r_index) {
/* Kick any other new requests queued since we resumed */
kick_pending_request_queues(rinfo);
}
struct blkfront_info *info = dev_get_drvdata(&dev->dev);
int err = 0;
unsigned int i, j;
+ struct blkfront_ring_info *rinfo;
dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
bio_list_init(&info->bio_list);
INIT_LIST_HEAD(&info->requests);
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
+ for_each_rinfo(info, rinfo, i) {
struct bio_list merge_bio;
struct blk_shadow *shadow = rinfo->shadow;
unsigned int binfo;
char *envp[] = { "RESIZE=1", NULL };
int err, i;
+ struct blkfront_ring_info *rinfo;
switch (info->connected) {
case BLKIF_STATE_CONNECTED:
"physical-sector-size",
sector_size);
blkfront_gather_backend_features(info);
- for (i = 0; i < info->nr_rings; i++) {
- err = blkfront_setup_indirect(&info->rinfo[i]);
+ for_each_rinfo(info, rinfo, i) {
+ err = blkfront_setup_indirect(rinfo);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
info->xbdev->otherend);
/* Kick pending requests. */
info->connected = BLKIF_STATE_CONNECTED;
- for (i = 0; i < info->nr_rings; i++)
- kick_pending_request_queues(&info->rinfo[i]);
+ for_each_rinfo(info, rinfo, i)
+ kick_pending_request_queues(rinfo);
device_add_disk(&info->xbdev->dev, info->gd, NULL);
{
unsigned int i;
unsigned long flags;
+ struct blkfront_ring_info *rinfo;
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
+ for_each_rinfo(info, rinfo, i) {
struct grant *gnt_list_entry, *tmp;
spin_lock_irqsave(&rinfo->ring_lock, flags);