bio_get_first_bvec(next, &nb);
if (biovec_phys_mergeable(q, &pb, &nb))
return false;
- return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
+ return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
}
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
* is defined as 'unsigned int', and it has to be aligned to the
* logical block size, which is the minimum unit accepted by the hardware.
*/
-static unsigned int bio_allowed_max_sectors(struct request_queue *q)
+static unsigned int bio_allowed_max_sectors(struct queue_limits *lim)
{
- return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
+ return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
}
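Since bvec_iter.bi_size is an unsigned int, UINT_MAX bytes is the hard cap, and rounding it down keeps the cap a whole number of logical blocks. A minimal userspace sketch of the same arithmetic (the 4096-byte logical block size is an assumed example value):

#include <limits.h>
#include <stdio.h>

#define SECTOR_SHIFT 9
/* round_down() as in the kernel, valid for a power-of-two y */
#define round_down(x, y) ((x) & ~((__typeof__(x))((y) - 1)))

int main(void)
{
	unsigned int lbs = 4096;	/* assumed logical block size */
	unsigned int max = round_down(UINT_MAX, lbs) >> SECTOR_SHIFT;

	printf("%u sectors (%u bytes)\n", max, max << SECTOR_SHIFT);
	return 0;
}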
-static struct bio *bio_split_discard(struct bio *bio, struct request_queue *q,
+static struct bio *bio_split_discard(struct bio *bio, struct queue_limits *lim,
unsigned *nsegs, struct bio_set *bs)
{
unsigned int max_discard_sectors, granularity;
- int alignment;
sector_t tmp;
unsigned split_sectors;
*nsegs = 1;
/* Zero-sector (unknown) and one-sector granularities are the same. */
- granularity = max(q->limits.discard_granularity >> 9, 1U);
+ granularity = max(lim->discard_granularity >> 9, 1U);
- max_discard_sectors = min(q->limits.max_discard_sectors,
- bio_allowed_max_sectors(q));
+ max_discard_sectors =
+ min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
max_discard_sectors -= max_discard_sectors % granularity;
if (unlikely(!max_discard_sectors)) {
* If the next starting sector would be misaligned, stop the discard at
* the previous aligned sector.
*/
- alignment = (q->limits.discard_alignment >> 9) % granularity;
-
- tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
+ tmp = bio->bi_iter.bi_sector + split_sectors -
+ ((lim->discard_alignment >> 9) % granularity);
tmp = sector_div(tmp, granularity);
if (split_sectors > tmp)
}
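A runnable sketch of the trailing-alignment trim above, with assumed sample values and sector_div() modeled as a plain remainder:

#include <stdio.h>

int main(void)
{
	unsigned long long bi_sector = 10;	/* assumed bio start sector */
	unsigned int granularity = 8;	/* discard granularity, in sectors */
	unsigned int alignment = 2;	/* (discard_alignment >> 9) % granularity */
	unsigned int split_sectors = 20;	/* candidate split size */

	/* If the next discard would start misaligned, trim the split back
	 * to the previous aligned sector. */
	unsigned long long tmp = bi_sector + split_sectors - alignment;
	unsigned int rem = tmp % granularity;	/* what sector_div() returns */

	if (split_sectors > rem)
		split_sectors -= rem;
	printf("split at %u sectors\n", split_sectors);	/* prints 16 */
	return 0;
}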
static struct bio *bio_split_write_zeroes(struct bio *bio,
- struct request_queue *q, unsigned *nsegs, struct bio_set *bs)
+ struct queue_limits *lim, unsigned *nsegs, struct bio_set *bs)
{
*nsegs = 0;
-
- if (!q->limits.max_write_zeroes_sectors)
+ if (!lim->max_write_zeroes_sectors)
return NULL;
-
- if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
+ if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
return NULL;
-
- return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
+ return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
}
/*
* aligned to a physical block boundary.
*/
static inline unsigned get_max_io_size(struct bio *bio,
- struct request_queue *q)
+ struct queue_limits *lim)
{
- unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
- unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
- unsigned max_sectors = queue_max_sectors(q), start, end;
+ unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
+ unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
+ unsigned max_sectors = lim->max_sectors, start, end;
- if (q->limits.chunk_sectors) {
+ if (lim->chunk_sectors) {
max_sectors = min(max_sectors,
blk_chunk_sectors_left(bio->bi_iter.bi_sector,
- q->limits.chunk_sectors));
+ lim->chunk_sectors));
}
start = bio->bi_iter.bi_sector & (pbs - 1);
return max_sectors & ~(lbs - 1);
}
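The masks rely on pbs and lbs being powers of two. A runnable sketch with assumed sample values; the 'end' trim mirrors the part of the function elided above:

#include <stdio.h>

int main(void)
{
	unsigned pbs = 8;	/* physical block size in sectors (4 KiB) */
	unsigned lbs = 1;	/* logical block size in sectors (512 B) */
	unsigned max_sectors = 1280;	/* assumed queue limit */
	unsigned long long bi_sector = 20;	/* assumed misaligned start */

	/* Offset of the start within a physical block... */
	unsigned start = bi_sector & (pbs - 1);
	/* ...and of the resulting end; trimming it makes the split end on
	 * a physical block boundary. */
	unsigned end = (start + max_sectors) & (pbs - 1);

	if (end >= start)
		max_sectors -= end;
	else
		max_sectors &= ~(lbs - 1);	/* keep logical block alignment */
	printf("%u sectors\n", max_sectors);	/* prints 1276 */
	return 0;
}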
-static inline unsigned get_max_segment_size(const struct request_queue *q,
- struct page *start_page,
- unsigned long offset)
+static inline unsigned get_max_segment_size(struct queue_limits *lim,
+ struct page *start_page, unsigned long offset)
{
- unsigned long mask = queue_segment_boundary(q);
+ unsigned long mask = lim->seg_boundary_mask;
offset = mask & (page_to_phys(start_page) + offset);
* on 32bit arch, use queue's max segment size when that happens.
*/
return min_not_zero(mask - offset + 1,
- (unsigned long)queue_max_segment_size(q));
+ (unsigned long)lim->max_segment_size);
}
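A runnable sketch of how the segment boundary mask caps a segment; the physical address and limits are assumed example values:

#include <stdio.h>

static unsigned long min_not_zero(unsigned long x, unsigned long y)
{
	if (!x)
		return y;
	return !y || x < y ? x : y;
}

int main(void)
{
	unsigned long mask = 0xffff;	/* 64 KiB segment boundary */
	unsigned long max_segment_size = 65536;
	unsigned long phys = 0x12345000;	/* assumed page physical address */
	unsigned long offset = 0x800;	/* offset into that page */

	/* The position within the current boundary window bounds how many
	 * bytes fit before the boundary would be crossed. */
	unsigned long pos = mask & (phys + offset);

	printf("max segment: %lu bytes\n",
	       min_not_zero(mask - pos + 1, max_segment_size));	/* 43008 */
	return 0;
}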
/**
* bvec_split_segs - verify whether or not a bvec should be split in the middle
- * @q: [in] request queue associated with the bio associated with @bv
+ * @lim: [in] queue limits to split based on
* @bv: [in] bvec to examine
* @nsegs: [in,out] Number of segments in the bio being built. Incremented
* by the number of segments from @bv that may be appended to that
* *@nsegs segments and *@bytes bytes would make that bio unacceptable for
* the block driver.
*/
-static bool bvec_split_segs(const struct request_queue *q,
- const struct bio_vec *bv, unsigned *nsegs,
- unsigned *bytes, unsigned max_segs,
- unsigned max_bytes)
+static bool bvec_split_segs(struct queue_limits *lim, const struct bio_vec *bv,
+ unsigned *nsegs, unsigned *bytes, unsigned max_segs,
+ unsigned max_bytes)
{
unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
unsigned len = min(bv->bv_len, max_len);
unsigned seg_size = 0;
while (len && *nsegs < max_segs) {
- seg_size = get_max_segment_size(q, bv->bv_page,
+ seg_size = get_max_segment_size(lim, bv->bv_page,
bv->bv_offset + total_len);
seg_size = min(seg_size, len);
total_len += seg_size;
len -= seg_size;
- if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
+ if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
break;
}
/**
* bio_split_rw - split a bio into two bios
* @bio: [in] bio to be split
- * @q: [in] request queue pointer
+ * @lim: [in] queue limits to split based on
* @segs: [out] number of segments in the bio with the first half of the sectors
* @bs: [in] bio set to allocate the clone from
* @max_bytes: [in] maximum number of bytes per bio
* responsible for ensuring that @bs is only destroyed after processing of the
* split bio has finished.
*/
-static struct bio *bio_split_rw(struct bio *bio, struct request_queue *q,
+static struct bio *bio_split_rw(struct bio *bio, struct queue_limits *lim,
unsigned *segs, struct bio_set *bs, unsigned max_bytes)
{
struct bio_vec bv, bvprv, *bvprvp = NULL;
struct bvec_iter iter;
unsigned nsegs = 0, bytes = 0;
- const unsigned max_segs = queue_max_segments(q);
bio_for_each_bvec(bv, bio, iter) {
/*
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
*/
- if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
+ if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
goto split;
- if (nsegs < max_segs &&
+ if (nsegs < lim->max_segments &&
bytes + bv.bv_len <= max_bytes &&
bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
nsegs++;
bytes += bv.bv_len;
- } else if (bvec_split_segs(q, &bv, &nsegs, &bytes, max_segs,
- max_bytes)) {
- goto split;
+ } else if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
+ lim->max_segments, max_bytes)) {
+ goto split;
}
bvprv = bv;
* split size so that each bio is properly block size aligned, even if
* we do not use the full hardware limits.
*/
- bytes = ALIGN_DOWN(bytes, queue_logical_block_size(q));
+ bytes = ALIGN_DOWN(bytes, lim->logical_block_size);
/*
* Bio splitting may cause subtle trouble such as hang when doing sync
/**
* __bio_split_to_limits - split a bio to fit the queue limits
* @bio: bio to be split
- * @q: request_queue new bio is being queued at
+ * @lim: queue limits to split based on
* @nr_segs: returns the number of segments in the returned bio
*
* Check if @bio needs splitting based on the queue limits, and if so split off
* The split bio is allocated from the disk's bio_split bio set, which is
* provided by the block layer.
*/
-struct bio *__bio_split_to_limits(struct bio *bio, struct request_queue *q,
+struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
unsigned int *nr_segs)
{
struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
case REQ_OP_SECURE_ERASE:
- split = bio_split_discard(bio, q, nr_segs, bs);
+ split = bio_split_discard(bio, lim, nr_segs, bs);
break;
case REQ_OP_WRITE_ZEROES:
- split = bio_split_write_zeroes(bio, q, nr_segs, bs);
+ split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
break;
default:
- split = bio_split_rw(bio, q, nr_segs, bs,
- get_max_io_size(bio, q) << SECTOR_SHIFT);
+ split = bio_split_rw(bio, lim, nr_segs, bs,
+ get_max_io_size(bio, lim) << SECTOR_SHIFT);
break;
}
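/*
 * Hedged sketch, not necessarily the exact elided tail: all three split
 * helpers share a return convention. NULL means @bio already fits the
 * limits; otherwise the return value is a front half that fits, with the
 * remainder left in @bio. The caller consumes that as follows, using the
 * real bio_chain() and submit_bio_noacct() APIs:
 *
 *	if (split) {
 *		bio_chain(split, bio);
 *		submit_bio_noacct(bio);	(requeue the remainder)
 *		return split;
 *	}
 *	return bio;
 */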
*/
struct bio *bio_split_to_limits(struct bio *bio)
{
- struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+ struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
unsigned int nr_segs;
- if (bio_may_exceed_limits(bio, q))
- return __bio_split_to_limits(bio, q, &nr_segs);
+ if (bio_may_exceed_limits(bio, lim))
+ return __bio_split_to_limits(bio, lim, &nr_segs);
return bio;
}
EXPORT_SYMBOL(bio_split_to_limits);
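A hedged sketch of a caller: a hypothetical bio-based driver (all example_* names are made up) splitting in its ->submit_bio handler, which is how stacking drivers use the exported helper:

static void example_submit_bio(struct bio *bio)
{
	struct example_dev *dev = bio->bi_bdev->bd_disk->private_data;

	/* Split off a front half that fits this queue's limits, if
	 * needed; the remainder is resubmitted automatically. */
	bio = bio_split_to_limits(bio);

	example_handle_bio(dev, bio);
}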
}
rq_for_each_bvec(bv, rq, iter)
- bvec_split_segs(rq->q, &bv, &nr_phys_segs, &bytes,
+ bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
UINT_MAX, UINT_MAX);
return nr_phys_segs;
}
while (nbytes > 0) {
unsigned offset = bvec->bv_offset + total;
- unsigned len = min(get_max_segment_size(q, bvec->bv_page,
- offset), nbytes);
+ unsigned len = min(get_max_segment_size(&q->limits,
+ bvec->bv_page, offset), nbytes);
struct page *page = bvec->bv_page;
/*
return true;
}
-static inline bool __bvec_gap_to_prev(struct request_queue *q,
+static inline bool __bvec_gap_to_prev(struct queue_limits *lim,
struct bio_vec *bprv, unsigned int offset)
{
- return (offset & queue_virt_boundary(q)) ||
- ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
+ return (offset & lim->virt_boundary_mask) ||
+ ((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}
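A runnable sketch of the two checks with concrete numbers; the 4 KiB boundary mask is an assumed example value:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned long mask = 0xfff;	/* assumed 4 KiB virtual boundary */
	unsigned int bprv_offset = 0, bprv_len = 4096;	/* previous bvec */
	unsigned int offset = 512;	/* offset of the bvec being added */

	/* A gap exists if the new bvec does not start on the boundary or
	 * the previous one does not end on it. */
	bool gap = (offset & mask) ||
		   ((bprv_offset + bprv_len) & mask);

	printf("gap: %s\n", gap ? "yes" : "no");	/* prints "yes" */
	return 0;
}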
/*
* Check if adding a bio_vec after bprv with offset would create a gap in
* the SG list. Most drivers don't care about this, but some do.
*/
-static inline bool bvec_gap_to_prev(struct request_queue *q,
+static inline bool bvec_gap_to_prev(struct queue_limits *lim,
struct bio_vec *bprv, unsigned int offset)
{
- if (!queue_virt_boundary(q))
+ if (!lim->virt_boundary_mask)
return false;
- return __bvec_gap_to_prev(q, bprv, offset);
+ return __bvec_gap_to_prev(lim, bprv, offset);
}
static inline bool rq_mergeable(struct request *rq)
struct bio_integrity_payload *bip = bio_integrity(req->bio);
struct bio_integrity_payload *bip_next = bio_integrity(next);
- return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+ return bvec_gap_to_prev(&req->q->limits,
+ &bip->bip_vec[bip->bip_vcnt - 1],
bip_next->bip_vec[0].bv_offset);
}
struct bio_integrity_payload *bip = bio_integrity(bio);
struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
- return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+ return bvec_gap_to_prev(&req->q->limits,
+ &bip->bip_vec[bip->bip_vcnt - 1],
bip_next->bip_vec[0].bv_offset);
}
ssize_t part_timeout_store(struct device *, struct device_attribute *,
const char *, size_t);
-static inline bool bio_may_exceed_limits(struct bio *bio, struct request_queue *q)
+static inline bool bio_may_exceed_limits(struct bio *bio,
+ struct queue_limits *lim)
{
switch (bio_op(bio)) {
case REQ_OP_DISCARD:
* to the performance impact of cloned bios themselves the loop below
* doesn't matter anyway.
*/
- return q->limits.chunk_sectors || bio->bi_vcnt != 1 ||
+ return lim->chunk_sectors || bio->bi_vcnt != 1 ||
bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}
-struct bio *__bio_split_to_limits(struct bio *bio, struct request_queue *q,
+struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
unsigned int nr_segs);