[PATCH 4.6 31/96] xen-blkfront: fix resume issues after a migration

From: Greg Kroah-Hartman
Date: Mon Aug 08 2016 - 15:37:15 EST


4.6-stable review patch. If anyone has any objections, please let me know.

------------------

From: Bob Liu <bob.liu@xxxxxxxxxx>

commit 2a6f71ad99cabe436e70c3f5fcf58072cb3bc07f upstream.

After migrating to another host (which may not have multiqueue
support), the number of rings (block hardware queues) may change
and the ring info structure is reallocated as well.

This patch fixes two related bugs:
* Call blk_mq_update_nr_hw_queues() so that blk-core knows that the
number of hardware queues has changed.
* Don't store the rinfo pointer in hctx->driver_data, because rinfo may
be reallocated; use hctx->queue_num to look up the rinfo structure
instead (see the sketch below).
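
As an illustration of the second point, here is a minimal user-space
sketch (plain C, not driver code; the struct and field names merely
echo the driver) of why caching a pointer into a reallocatable ring
array goes stale across a resume, while re-deriving it from the queue
number on every request stays correct:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for blkfront_ring_info / blkfront_info. */
struct ring_info { int id; };

struct dev_info {
	unsigned int nr_rings;
	struct ring_info *rinfo;
};

int main(void)
{
	struct dev_info info = { .nr_rings = 4 };
	unsigned int qid = 0;
	struct ring_info *cached, *fresh;

	info.rinfo = calloc(info.nr_rings, sizeof(*info.rinfo));

	/* Old pattern: cache a pointer into the ring array up front,
	 * which is what storing it in hctx->driver_data amounted to. */
	cached = &info.rinfo[qid];

	/* "Resume on a host with fewer queues": the ring array is
	 * reallocated and the old one is freed. */
	free(info.rinfo);
	info.nr_rings = 1;
	info.rinfo = calloc(info.nr_rings, sizeof(*info.rinfo));
	info.rinfo[qid].id = 42;

	/* 'cached' now points into freed memory; dereferencing it would
	 * be a use-after-free.  Re-deriving the ring from the queue
	 * number on each request, as the fix does, always sees the
	 * current array. */
	fresh = &info.rinfo[qid];
	printf("ring %u id via fresh lookup: %d\n", qid, fresh->id);
	(void)cached; /* intentionally never dereferenced */

	free(info.rinfo);
	return 0;
}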

Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
drivers/block/xen-blkfront.c | 20 ++++++++------------
1 file changed, 8 insertions(+), 12 deletions(-)

--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -877,8 +877,12 @@ static int blkif_queue_rq(struct blk_mq_
 			  const struct blk_mq_queue_data *qd)
 {
 	unsigned long flags;
-	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
+	int qid = hctx->queue_num;
+	struct blkfront_info *info = hctx->queue->queuedata;
+	struct blkfront_ring_info *rinfo = NULL;
 
+	BUG_ON(info->nr_rings <= qid);
+	rinfo = &info->rinfo[qid];
 	blk_mq_start_request(qd->rq);
 	spin_lock_irqsave(&rinfo->ring_lock, flags);
 	if (RING_FULL(&rinfo->ring))
@@ -904,20 +908,9 @@ out_busy:
 	return BLK_MQ_RQ_QUEUE_BUSY;
 }
 
-static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
-			    unsigned int index)
-{
-	struct blkfront_info *info = (struct blkfront_info *)data;
-
-	BUG_ON(info->nr_rings <= index);
-	hctx->driver_data = &info->rinfo[index];
-	return 0;
-}
-
 static struct blk_mq_ops blkfront_mq_ops = {
 	.queue_rq = blkif_queue_rq,
 	.map_queue = blk_mq_map_queue,
-	.init_hctx = blk_mq_init_hctx,
 };
 
 static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
@@ -953,6 +946,7 @@ static int xlvbd_init_blk_queue(struct g
 		return PTR_ERR(rq);
 	}
 
+	rq->queuedata = info;
 	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
 
 	if (info->feature_discard) {
@@ -2137,6 +2131,8 @@ static int blkfront_resume(struct xenbus
 		return err;
 
 	err = talk_to_blkback(dev, info);
+	if (!err)
+		blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
 
 	/*
 	 * We have to wait for the backend to switch to