[RFC PATCH 18/28] block: Introduce bio_add_dma_addr()

From: Logan Gunthorpe
Date: Thu Jun 20 2019 - 12:13:11 EST


bio_add_dma_addr() is analogous to bio_add_page() except it
adds a dma address to a dma-direct bio instead of a struct page.

It also checks that the target queue supports dma-direct bios and
that dma addresses and struct pages are not mixed in the same bio.
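
For illustration only, here is a rough sketch of how a caller might use
the new helper. The wrapper below and its -ENOMEM choice for a full
dma_vec list are hypothetical; only bio_add_dma_addr() itself is
introduced by this patch, and the bio and dma_addr are assumed to have
been prepared for dma-direct use by the rest of this series:

/* Hypothetical caller, not part of this patch. */
static int queue_dma_direct_segment(struct request_queue *q,
				    struct bio *bio,
				    dma_addr_t dma_addr, unsigned int len)
{
	int ret;

	/*
	 * bio_add_dma_addr() returns len on success, 0 when the
	 * dma_vec list is full, and -EINVAL when the queue or bio
	 * cannot take raw dma addresses.
	 */
	ret = bio_add_dma_addr(q, bio, dma_addr, len);
	if (ret < 0)
		return ret;
	if (!ret)
		return -ENOMEM;

	submit_bio(bio);
	return 0;
}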

Signed-off-by: Logan Gunthorpe <logang@xxxxxxxxxxxx>
---
block/bio.c | 38 ++++++++++++++++++++++++++++++++++++++
include/linux/bio.h | 10 ++++++++++
2 files changed, 48 insertions(+)

diff --git a/block/bio.c b/block/bio.c
index 6998fceddd36..02ae72e3ccfa 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -874,6 +874,44 @@ static void bio_release_pages(struct bio *bio)
put_page(bvec->bv_page);
}

+/**
+ * bio_add_dma_addr - attempt to add a dma address to a bio
+ * @q: the target queue
+ * @bio: destination bio
+ * @dma_addr: dma address to add
+ * @len: vec entry length
+ *
+ * Attempt to add a dma address to the dma_vec maplist. This can
+ * fail for a number of reasons, such as the bio being full or
+ * target block device limitations. The target request queue must
+ * support dma-direct bios, and a bio cannot mix pages and dma addresses.
+ */
+int bio_add_dma_addr(struct request_queue *q, struct bio *bio,
+		     dma_addr_t dma_addr, unsigned int len)
+{
+	struct dma_vec *dv = &bio->bi_dma_vec[bio->bi_vcnt];
+
+	if (!blk_queue_dma_direct(q))
+		return -EINVAL;
+
+	if (!bio_is_dma_direct(bio))
+		return -EINVAL;
+
+	if (bio_dma_full(bio))
+		return 0;
+
+	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
+
+	dv->dv_addr = dma_addr;
+	dv->dv_len = len;
+
+	bio->bi_iter.bi_size += len;
+	bio->bi_vcnt++;
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(bio_add_dma_addr);
+
static int __bio_iov_bvec_add_pages(struct bio *bio, struct iov_iter *iter)
{
const struct bio_vec *bv = iter->bvec;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index df7973932525..d775f381ae00 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -112,6 +112,13 @@ static inline bool bio_full(struct bio *bio)
return bio->bi_vcnt >= bio->bi_max_vecs;
}

+static inline bool bio_dma_full(struct bio *bio)
+{
+	size_t vec_size = bio->bi_max_vecs * sizeof(struct bio_vec);
+
+	return bio->bi_vcnt >= (vec_size / sizeof(struct dma_vec));
+}
+
static inline bool bio_next_segment(const struct bio *bio,
struct bvec_iter_all *iter)
{
@@ -438,6 +445,9 @@ void bio_chain(struct bio *, struct bio *);
extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
+extern int bio_add_dma_addr(struct request_queue *q, struct bio *bio,
+			    dma_addr_t dma_addr, unsigned int len);
+
bool __bio_try_merge_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int off, bool same_page);
void __bio_add_page(struct bio *bio, struct page *page,
--
2.20.1