[PATCH 06/32] xen blkback: prepare for bi_rw split

From: mchristi
Date: Wed Nov 04 2015 - 17:29:04 EST


From: Mike Christie <mchristi@xxxxxxxxxx>

This patch prepares xen-blkback's submit_bio use for the upcoming
patches that split bi_rw into an operation field and a flags field.
Instead of passing in a single bitmap with the operation and flags
mixed together, callers will now pass them in separately.

This patch modifies the code around the submit_bio calls so that the
operation and flags are kept separate. Once this has been done for all
callers, a later patch in the series will modify the actual submit_bio
call itself, so the series remains bisectable.
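
To illustrate the caller-side pattern this prepares for (a minimal
sketch, not lifted verbatim from the tree; the REQ_OP_* values come
from the later patches in this series):

	/* Before: operation and flags mixed into one bitmap. */
	submit_bio(WRITE_FLUSH, bio);

	/* After this preparation: the caller tracks them separately and
	 * only combines them at the (still unchanged) submit_bio call. */
	int op = REQ_OP_WRITE;       /* what the request does */
	int op_flags = WRITE_FLUSH;  /* how it should be issued */

	submit_bio(op | op_flags, bio);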

Signed-off-by: Mike Christie <mchristi@xxxxxxxxxx>
---
drivers/block/xen-blkback/blkback.c | 23 +++++++++++++----------
1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 6a685ae..bfffab3 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -488,7 +488,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
struct xen_vbd *vbd = &blkif->vbd;
int rc = -EACCES;

- if ((operation != READ) && vbd->readonly)
+ if ((operation != REQ_OP_READ) && vbd->readonly)
goto out;

if (likely(req->nr_sects)) {
@@ -990,7 +990,7 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
preq.sector_number = req->u.discard.sector_number;
preq.nr_sects = req->u.discard.nr_sectors;

- err = xen_vbd_translate(&preq, blkif, WRITE);
+ err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
if (err) {
pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
preq.sector_number,
@@ -1203,6 +1203,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
struct bio **biolist = pending_req->biolist;
int i, nbio = 0;
int operation;
+ int operation_flags = 0;
struct blk_plug plug;
bool drain = false;
struct grant_page **pages = pending_req->segments;
@@ -1220,17 +1221,19 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
switch (req_operation) {
case BLKIF_OP_READ:
blkif->st_rd_req++;
- operation = READ;
+ operation = REQ_OP_READ;
break;
case BLKIF_OP_WRITE:
blkif->st_wr_req++;
- operation = WRITE_ODIRECT;
+ operation = REQ_OP_WRITE;
+ operation_flags = WRITE_ODIRECT;
break;
case BLKIF_OP_WRITE_BARRIER:
drain = true;
case BLKIF_OP_FLUSH_DISKCACHE:
blkif->st_f_req++;
- operation = WRITE_FLUSH;
+ operation = REQ_OP_WRITE;
+ operation_flags = WRITE_FLUSH;
break;
default:
operation = 0; /* make gcc happy */
@@ -1242,7 +1245,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
nseg = req->operation == BLKIF_OP_INDIRECT ?
req->u.indirect.nr_segments : req->u.rw.nr_segments;

- if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
+ if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) ||
unlikely((req->operation != BLKIF_OP_INDIRECT) &&
(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
unlikely((req->operation == BLKIF_OP_INDIRECT) &&
@@ -1349,7 +1352,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,

/* This will be hit if the operation was a flush or discard. */
if (!bio) {
- BUG_ON(operation != WRITE_FLUSH);
+ BUG_ON(operation_flags != WRITE_FLUSH);

bio = bio_alloc(GFP_KERNEL, 0);
if (unlikely(bio == NULL))
@@ -1365,14 +1368,14 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
blk_start_plug(&plug);

for (i = 0; i < nbio; i++)
- submit_bio(operation, biolist[i]);
+ submit_bio(operation | operation_flags, biolist[i]);

/* Let the I/Os go.. */
blk_finish_plug(&plug);

- if (operation == READ)
+ if (operation == REQ_OP_READ)
blkif->st_rd_sect += preq.nr_sects;
- else if (operation & WRITE)
+ else if (operation == REQ_OP_WRITE)
blkif->st_wr_sect += preq.nr_sects;

return 0;
--
1.8.3.1
