iv = bip->bip_vec + bip->bip_vcnt;
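+ /* refuse pages that would leave a virt-boundary gap after the last integrity vec */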
+ if (bip->bip_vcnt &&
+ bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev),
+ &bip->bip_vec[bip->bip_vcnt - 1], offset))
+ return 0;
+
iv->bv_page = page;
iv->bv_len = len;
iv->bv_offset = offset;
blkg_destroy(blkg);
spin_unlock(&blkcg->lock);
}
+
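+ /* all groups have been destroyed; clear the cached root blkg pointers as well */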
+ q->root_blkg = NULL;
+ q->root_rl.blkg = NULL;
}
/*
q->limits.max_integrity_segments)
return false;
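+ /* merging must not create a virt-boundary gap between the integrity vecs */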
+ if (integrity_req_gap_back_merge(req, next->bio))
+ return false;
+
return true;
}
EXPORT_SYMBOL(blk_integrity_merge_rq);
#include "blk.h"
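+/*
+ * Report whether placing @cur directly after @prv would violate the
+ * queue's virt boundary: true if @prv does not end on the boundary or
+ * @cur does not start on it.  An all-zero @prv means "no previous
+ * iovec" and never gaps; queues without a virt boundary never gap.
+ */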
+static bool iovec_gap_to_prv(struct request_queue *q,
+ struct iovec *prv, struct iovec *cur)
+{
+ unsigned long prev_end;
+
+ if (!queue_virt_boundary(q))
+ return false;
+
+ if (prv->iov_base == NULL && prv->iov_len == 0)
+ /* prv is not set - don't check */
+ return false;
+
+ prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
+
+ return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
+ prev_end & queue_virt_boundary(q));
+}
+
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
struct bio *bio)
{
struct bio *bio;
int unaligned = 0;
struct iov_iter i;
- struct iovec iov;
+ struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
if (!iter || !iter->count)
return -EINVAL;
/*
* Keep going so we check length of all segments
*/
- if (uaddr & queue_dma_alignment(q))
+ if ((uaddr & queue_dma_alignment(q)) ||
+ iovec_gap_to_prv(q, &prv, &iov))
unaligned = 1;
+
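+ /* remember this iovec so the next one can be gap-checked against it */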
+ prv.iov_base = iov.iov_base;
+ prv.iov_len = iov.iov_len;
}
if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
struct bio *bio,
struct bio_set *bs)
{
- struct bio *split;
- struct bio_vec bv, bvprv;
+ struct bio_vec bv, bvprv, *bvprvp = NULL;
struct bvec_iter iter;
unsigned seg_size = 0, nsegs = 0, sectors = 0;
- int prev = 0;
bio_for_each_segment(bv, bio, iter) {
- sectors += bv.bv_len >> 9;
-
- if (sectors > queue_max_sectors(q))
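+ /*
+  * Account a bvec in 'sectors' only once it is accepted below, so a
+  * split at 'sectors' covers exactly the bvecs kept in this bio.
+  */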
+ if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
goto split;
/*
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, disallow it.
*/
- if (prev && bvec_gap_to_prev(q, &bvprv, bv.bv_offset))
+ if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
goto split;
- if (prev && blk_queue_cluster(q)) {
+ if (bvprvp && blk_queue_cluster(q)) {
if (seg_size + bv.bv_len > queue_max_segment_size(q))
goto new_segment;
- if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
+ if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
goto new_segment;
- if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
+ if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
goto new_segment;
seg_size += bv.bv_len;
bvprv = bv;
- prev = 1;
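+ /* keep the pointer on the copy in bvprv; bv is rewritten each iteration */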
+ bvprvp = &bvprv;
+ sectors += bv.bv_len >> 9;
continue;
}
new_segment:
nsegs++;
bvprv = bv;
- prev = 1;
+ bvprvp = &bvprv;
seg_size = bv.bv_len;
+ sectors += bv.bv_len >> 9;
}
return NULL;
split:
- split = bio_clone_bioset(bio, GFP_NOIO, bs);
-
- split->bi_iter.bi_size -= iter.bi_size;
- bio->bi_iter = iter;
-
- if (bio_integrity(bio)) {
- bio_integrity_advance(bio, split->bi_iter.bi_size);
- bio_integrity_trim(split, 0, bio_sectors(split));
- }
-
- return split;
+ return bio_split(bio, sectors, GFP_NOIO, bs);
}
void blk_queue_split(struct request_queue *q, struct bio **bio,
int ll_back_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
{
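+ /* a back merge must not create a virt-boundary gap in the data or integrity vecs */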
+ if (req_gap_back_merge(req, bio))
+ return 0;
+ if (blk_integrity_rq(req) &&
+ integrity_req_gap_back_merge(req, bio))
+ return 0;
if (blk_rq_sectors(req) + bio_sectors(bio) >
blk_rq_get_max_sectors(req)) {
req->cmd_flags |= REQ_NOMERGE;
int ll_front_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
{
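+ /* a front merge must not create a virt-boundary gap in the data or integrity vecs */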
+ if (req_gap_front_merge(req, bio))
+ return 0;
+ if (blk_integrity_rq(req) &&
+ integrity_req_gap_front_merge(req, bio))
+ return 0;
if (blk_rq_sectors(req) + bio_sectors(bio) >
blk_rq_get_max_sectors(req)) {
req->cmd_flags |= REQ_NOMERGE;
return !q->mq_ops && req->special;
}
-static int req_gap_to_prev(struct request *req, struct bio *next)
-{
- struct bio *prev = req->biotail;
-
- return bvec_gap_to_prev(req->q, &prev->bi_io_vec[prev->bi_vcnt - 1],
- next->bi_io_vec[0].bv_offset);
-}
-
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
struct request *next)
{
if (req_no_special_merge(req) || req_no_special_merge(next))
return 0;
- if (req_gap_to_prev(req, next->bio))
+ if (req_gap_back_merge(req, next->bio))
return 0;
/*
!blk_write_same_mergeable(rq->bio, bio))
return false;
- /* Only check gaps if the bio carries data */
- if (bio_has_data(bio) && req_gap_to_prev(rq, bio))
- return false;
-
return true;
}
struct bio *bio_orig = bio->bi_private;
struct bio_vec *bvec, *org_vec;
int i;
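+ /* the bounced segments map to bio_orig's vecs starting at its current index, not 0 */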
+ int start = bio_orig->bi_iter.bi_idx;
/*
* free up bounce indirect pages used
*/
bio_for_each_segment_all(bvec, bio, i) {
- org_vec = bio_orig->bi_io_vec + i;
+ org_vec = bio_orig->bi_io_vec + i + start;
+
if (bvec->bv_page == org_vec->bv_page)
continue;
.complete = null_softirq_done_fn,
};
+static void cleanup_queue(struct nullb_queue *nq)
+{
+ kfree(nq->tag_map);
+ kfree(nq->cmds);
+}
+
+static void cleanup_queues(struct nullb *nullb)
+{
+ int i;
+
+ for (i = 0; i < nullb->nr_queues; i++)
+ cleanup_queue(&nullb->queues[i]);
+
+ kfree(nullb->queues);
+}
+
static void null_del_dev(struct nullb *nullb)
{
list_del_init(&nullb->list);
if (queue_mode == NULL_Q_MQ)
blk_mq_free_tag_set(&nullb->tag_set);
put_disk(nullb->disk);
+ cleanup_queues(nullb);
kfree(nullb);
}
return 0;
}
-static void cleanup_queue(struct nullb_queue *nq)
-{
- kfree(nq->tag_map);
- kfree(nq->cmds);
-}
-
-static void cleanup_queues(struct nullb *nullb)
-{
- int i;
-
- for (i = 0; i < nullb->nr_queues; i++)
- cleanup_queue(&nullb->queues[i]);
-
- kfree(nullb->queues);
-}
-
static int setup_queues(struct nullb *nullb)
{
nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
blk_queue_physical_block_size(nullb->q, bs);
size = gb * 1024 * 1024 * 1024ULL;
- sector_div(size, bs);
- set_capacity(disk, size);
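+ /* set_capacity() takes 512-byte sectors regardless of the logical block size */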
+ set_capacity(disk, size >> 9);
disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
disk->major = null_major;
((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}
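+/*
+ * True if appending @next after @prev would leave a virt-boundary gap
+ * between @prev's last bvec and @next's first; data-less bios never gap.
+ * The req_gap_*_merge() helpers apply the same test at the tail and head
+ * of a request.
+ */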
+static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
+ struct bio *next)
+{
+ if (!bio_has_data(prev))
+ return false;
+
+ return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
+ next->bi_io_vec[0].bv_offset);
+}
+
+static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
+{
+ return bio_will_gap(req->q, req->biotail, bio);
+}
+
+static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
+{
+ return bio_will_gap(req->q, bio, req->bio);
+}
+
struct work_struct;
int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
return q->limits.max_integrity_segments;
}
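+/*
+ * Gap checks between the integrity payloads of a request and a bio that
+ * are candidates for a back or front merge, mirroring the data-side
+ * req_gap_back_merge()/req_gap_front_merge() helpers.
+ */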
+static inline bool integrity_req_gap_back_merge(struct request *req,
+ struct bio *next)
+{
+ struct bio_integrity_payload *bip = bio_integrity(req->bio);
+ struct bio_integrity_payload *bip_next = bio_integrity(next);
+
+ return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+ bip_next->bip_vec[0].bv_offset);
+}
+
+static inline bool integrity_req_gap_front_merge(struct request *req,
+ struct bio *bio)
+{
+ struct bio_integrity_payload *bip = bio_integrity(bio);
+ struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
+
+ return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+ bip_next->bip_vec[0].bv_offset);
+}
+
#else /* CONFIG_BLK_DEV_INTEGRITY */
struct bio;
{
return 0;
}
+static inline bool integrity_req_gap_back_merge(struct request *req,
+ struct bio *next)
+{
+ return false;
+}
+static inline bool integrity_req_gap_front_merge(struct request *req,
+ struct bio *bio)
+{
+ return false;
+}
#endif /* CONFIG_BLK_DEV_INTEGRITY */