Diffstat (limited to 'block/blk-map.c'):
 block/blk-map.c | 150 ++++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 132 insertions(+), 18 deletions(-)
diff --git a/block/blk-map.c b/block/blk-map.c
index 7693f8e3c454..34735626b00f 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -231,7 +231,7 @@ out_bmd:
return ret;
}
-static void bio_map_put(struct bio *bio)
+static void blk_mq_map_bio_put(struct bio *bio)
{
if (bio->bi_opf & REQ_ALLOC_CACHE) {
bio_put(bio);
@@ -241,17 +241,10 @@ static void bio_map_put(struct bio *bio)
}
}
-static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
- gfp_t gfp_mask)
+static struct bio *blk_rq_map_bio_alloc(struct request *rq,
+ unsigned int nr_vecs, gfp_t gfp_mask)
{
- unsigned int max_sectors = queue_max_hw_sectors(rq->q);
- unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
struct bio *bio;
- int ret;
- int j;
-
- if (!iov_iter_count(iter))
- return -EINVAL;
if (rq->cmd_flags & REQ_POLLED) {
blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE;
@@ -259,13 +252,31 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask,
&fs_bio_set);
if (!bio)
- return -ENOMEM;
+ return NULL;
} else {
bio = bio_kmalloc(nr_vecs, gfp_mask);
if (!bio)
- return -ENOMEM;
+ return NULL;
bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
}
+ return bio;
+}
+
+static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
+ gfp_t gfp_mask)
+{
+ unsigned int max_sectors = queue_max_hw_sectors(rq->q);
+ unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
+ struct bio *bio;
+ int ret;
+ int j;
+
+ if (!iov_iter_count(iter))
+ return -EINVAL;
+
+ bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
+ if (bio == NULL)
+ return -ENOMEM;
while (iov_iter_count(iter)) {
struct page **pages, *stack_pages[UIO_FASTIOV];
@@ -331,7 +342,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
out_unmap:
bio_release_pages(bio, false);
- bio_map_put(bio);
+ blk_mq_map_bio_put(bio);
return ret;
}
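
The hunks above split bio allocation out of bio_map_user_iov() so that the new bvec path further down can reuse it. A minimal sketch of the intended pairing follows; example_map() is illustrative and not part of the patch. Any bio obtained from blk_rq_map_bio_alloc() must be released with blk_mq_map_bio_put(), since only the put helper knows whether the bio came from the REQ_ALLOC_CACHE-backed bioset (freed with bio_put()) or from bio_kmalloc() (freed with bio_uninit() plus kfree()).

/*
 * Illustrative sketch, not from the patch: the alloc/put helpers are
 * used strictly as a pair so the free path matches whichever of the
 * two allocation strategies (polled bioset cache vs. bio_kmalloc())
 * was picked at allocation time.
 */
static int example_map(struct request *rq, struct iov_iter *iter, gfp_t gfp)
{
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	struct bio *bio;

	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp);
	if (bio == NULL)
		return -ENOMEM;

	/* ... add pages to the bio and attach it to rq ... */

	blk_mq_map_bio_put(bio);	/* error path: mirrors the alloc */
	return 0;
}
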
@@ -537,6 +548,62 @@ int blk_rq_append_bio(struct request *rq, struct bio *bio)
}
EXPORT_SYMBOL(blk_rq_append_bio);
+/* Prepare bio for passthrough IO given ITER_BVEC iter */
+static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
+{
+ struct request_queue *q = rq->q;
+ size_t nr_iter = iov_iter_count(iter);
+ size_t nr_segs = iter->nr_segs;
+ struct bio_vec *bvecs, *bvprvp = NULL;
+ struct queue_limits *lim = &q->limits;
+ unsigned int nsegs = 0, bytes = 0;
+ struct bio *bio;
+ size_t i;
+
+ if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
+ return -EINVAL;
+ if (nr_segs > queue_max_segments(q))
+ return -EINVAL;
+
+ /* no iovecs to alloc, as we already have a BVEC iterator */
+ bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
+ if (bio == NULL)
+ return -ENOMEM;
+
+ bio_iov_bvec_set(bio, (struct iov_iter *)iter);
+ blk_rq_bio_prep(rq, bio, nr_segs);
+
+ /* loop to perform a bunch of sanity checks */
+ bvecs = (struct bio_vec *)iter->bvec;
+ for (i = 0; i < nr_segs; i++) {
+ struct bio_vec *bv = &bvecs[i];
+
+ /*
+ * If the queue doesn't support SG gaps and adding this
+ * offset would create a gap, fallback to copy.
+ */
+ if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
+ blk_mq_map_bio_put(bio);
+ return -EREMOTEIO;
+ }
+ /* check full condition */
+ if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
+ goto put_bio;
+ if (bytes + bv->bv_len > nr_iter)
+ goto put_bio;
+ if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
+ goto put_bio;
+
+ nsegs++;
+ bytes += bv->bv_len;
+ bvprvp = bv;
+ }
+ return 0;
+put_bio:
+ blk_mq_map_bio_put(bio);
+ return -EINVAL;
+}
+
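
Note that the per-segment loop only validates; the bio already borrows the iterator's bvec array via bio_iov_bvec_set(), so nothing is copied. The three size checks can be modeled as a standalone, compilable program (the struct seg type and test values below are hypothetical stand-ins, not the kernel structs):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct seg { uint32_t off, len; };	/* stand-in for struct bio_vec */

/*
 * Model of the loop's sanity checks: reject a byte-count wrap, a
 * total beyond the iterator's byte count, and a segment that crosses
 * a page boundary.
 */
static int validate(const struct seg *v, size_t nr_segs, size_t nr_iter)
{
	uint32_t bytes = 0;
	size_t i;

	for (i = 0; i < nr_segs; i++) {
		if (bytes > UINT32_MAX - v[i].len)		/* would overflow */
			return -1;
		if (bytes + v[i].len > nr_iter)			/* exceeds iter size */
			return -1;
		if ((uint64_t)v[i].off + v[i].len > PAGE_SIZE)	/* page-crossing */
			return -1;
		bytes += v[i].len;
	}
	return 0;
}

int main(void)
{
	struct seg ok[]  = { { 0, 4096 }, { 512, 1024 } };
	struct seg bad[] = { { 3584, 1024 } };	/* 3584 + 1024 > PAGE_SIZE */

	printf("ok=%d bad=%d\n", validate(ok, 2, 5120), validate(bad, 1, 1024));
	return 0;
}
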
/**
* blk_rq_map_user_iov - map user data to a request, for passthrough requests
* @q: request queue where request should be inserted
@@ -556,24 +623,35 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data,
const struct iov_iter *iter, gfp_t gfp_mask)
{
- bool copy = false;
+ bool copy = false, map_bvec = false;
unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
struct bio *bio = NULL;
struct iov_iter i;
int ret = -EINVAL;
- if (!iter_is_iovec(iter))
- goto fail;
-
if (map_data)
copy = true;
else if (blk_queue_may_bounce(q))
copy = true;
else if (iov_iter_alignment(iter) & align)
copy = true;
+ else if (iov_iter_is_bvec(iter))
+ map_bvec = true;
+ else if (!iter_is_iovec(iter))
+ copy = true;
else if (queue_virt_boundary(q))
copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
+ if (map_bvec) {
+ ret = blk_rq_map_user_bvec(rq, iter);
+ if (!ret)
+ return 0;
+ if (ret != -EREMOTEIO)
+ goto fail;
+ /* fall back to copying the data on limits mismatches */
+ copy = true;
+ }
+
i = *iter;
do {
if (copy)
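
The order of tests in the reworked ladder is significant: the copy reasons are checked first, the ITER_BVEC fast path comes next, and unsupported iterator types now fall back to copying instead of failing outright. Modeled as a standalone function (names here are hypothetical):

enum map_mode { MAP_DIRECT, MAP_BVEC, MAP_COPY };

/* Standalone model of the ladder above; test order matters. */
static enum map_mode choose_mode(int has_map_data, int may_bounce,
				 int misaligned, int is_bvec, int is_iovec,
				 int gap_at_virt_boundary)
{
	if (has_map_data || may_bounce || misaligned)
		return MAP_COPY;
	if (is_bvec)
		return MAP_BVEC;	/* may still fall back on -EREMOTEIO */
	if (!is_iovec)
		return MAP_COPY;
	if (gap_at_virt_boundary)
		return MAP_COPY;
	return MAP_DIRECT;
}
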
@@ -611,6 +689,42 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
}
EXPORT_SYMBOL(blk_rq_map_user);
+int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
+ void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
+ bool vec, int iov_count, bool check_iter_count, int rw)
+{
+ int ret = 0;
+
+ if (vec) {
+ struct iovec fast_iov[UIO_FASTIOV];
+ struct iovec *iov = fast_iov;
+ struct iov_iter iter;
+
+ ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
+ UIO_FASTIOV, &iov, &iter);
+ if (ret < 0)
+ return ret;
+
+ if (iov_count) {
+ /* SG_IO howto says that the shorter of the two wins */
+ iov_iter_truncate(&iter, buf_len);
+ if (check_iter_count && !iov_iter_count(&iter)) {
+ kfree(iov);
+ return -EINVAL;
+ }
+ }
+
+ ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
+ gfp_mask);
+ kfree(iov);
+ } else if (buf_len) {
+ ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
+ gfp_mask);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(blk_rq_map_user_io);
+
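
A hedged usage sketch for the new export, with a hypothetical caller; the argument semantics follow the code above. Note that when vec is true and iov_count is zero, buf_len is reinterpreted as the number of iovecs handed to import_iovec():

/*
 * Hypothetical caller, for illustration only: map either a flat user
 * buffer or a user iovec array onto a prepared passthrough request.
 */
static int example_map_io(struct request *req, void __user *ubuf,
			  unsigned long len, bool vectored)
{
	/*
	 * With vectored == true and iov_count == 0, 'len' is taken as
	 * the iovec count; check_iter_count only matters when an
	 * explicit iov_count is given (the SG_IO truncation rule).
	 */
	return blk_rq_map_user_io(req, NULL, ubuf, len, GFP_KERNEL,
				  vectored, 0, false, WRITE);
}
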
/**
* blk_rq_unmap_user - unmap a request with user data
* @bio: start of bio list
@@ -636,7 +750,7 @@ int blk_rq_unmap_user(struct bio *bio)
next_bio = bio;
bio = bio->bi_next;
- bio_map_put(next_bio);
+ blk_mq_map_bio_put(next_bio);
}
return ret;
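
For completeness, the usual lifecycle around these helpers, sketched with a hypothetical synchronous caller; blk_execute_rq() and blk_status_to_errno() are existing kernel APIs, but this round trip is an illustration, not code from the patch:

/* Illustrative round trip: map user memory, execute, then unmap. */
static int example_roundtrip(struct request_queue *q, struct request *rq,
			     void __user *ubuf, unsigned long len)
{
	blk_status_t status;
	int ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	status = blk_execute_rq(rq, false);	/* synchronous submit */
	ret = blk_rq_unmap_user(rq->bio);	/* walks the whole bio list */

	return status != BLK_STS_OK ? blk_status_to_errno(status) : ret;
}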