Re: [PATCH 8/8] writeback: throttle buffered writeback

From: xiakaixu
Date: Sat Apr 23 2016 - 04:22:04 EST


> diff --git a/block/blk-core.c b/block/blk-core.c
> index 40b57bf4852c..d941f69dfb4b 100644
> --- a/block/blk-core.c
> +++ b/block/blk-core.c
> @@ -39,6 +39,7 @@
>
> #include "blk.h"
> #include "blk-mq.h"
> +#include "blk-wb.h"
>
> EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
> EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
> @@ -880,6 +881,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
>
> fail:
> blk_free_flush_queue(q->fq);
> + blk_wb_exit(q);
> return NULL;
> }
> EXPORT_SYMBOL(blk_init_allocated_queue);
> @@ -1395,6 +1397,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
> blk_delete_timer(rq);
> blk_clear_rq_complete(rq);
> trace_block_rq_requeue(q, rq);
> + blk_wb_requeue(q->rq_wb, rq);
>
> if (rq->cmd_flags & REQ_QUEUED)
> blk_queue_end_tag(q, rq);
> @@ -1485,6 +1488,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
> /* this is a bio leak */
> WARN_ON(req->bio != NULL);
>
> + blk_wb_done(q->rq_wb, req);
> +
> /*
> * Request may not have originated from ll_rw_blk. if not,
> * it didn't come out of our reserved rq pools
> @@ -1714,6 +1719,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
> int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
> struct request *req;
> unsigned int request_count = 0;
> + bool wb_acct;
>
> /*
> * low level driver can indicate that it wants pages above a
> @@ -1766,6 +1772,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
> }
>
> get_rq:
> + wb_acct = blk_wb_wait(q->rq_wb, bio, q->queue_lock);
> +
> /*
> * This sync check and mask will be re-done in init_request_from_bio(),
> * but we need to set it earlier to expose the sync flag to the
> @@ -1781,11 +1789,16 @@ get_rq:
> */
> req = get_request(q, rw_flags, bio, GFP_NOIO);
> if (IS_ERR(req)) {
> + if (wb_acct)
> + __blk_wb_done(q->rq_wb);
> bio->bi_error = PTR_ERR(req);
> bio_endio(bio);
> goto out_unlock;
> }
>
> + if (wb_acct)
> + req->cmd_flags |= REQ_BUF_INFLIGHT;
> +
> /*
> * After dropping the lock and possibly sleeping here, our request
> * may now be mergeable after it had proven unmergeable (above).
> @@ -2515,6 +2528,7 @@ void blk_start_request(struct request *req)
> blk_dequeue_request(req);
>
> req->issue_time = ktime_to_ns(ktime_get());
> + blk_wb_issue(req->q->rq_wb, req);
>
> /*
> * We are now handing the request to the hardware, initialize
> @@ -2751,6 +2765,7 @@ void blk_finish_request(struct request *req, int error)
> blk_unprep_request(req);
>
> blk_account_io_done(req);
> + blk_wb_done(req->q->rq_wb, req);

Hi Jens,

It looks like blk_wb_done() will be executed twice for the same request even when the end_io
callback is set: in blk-core.c it is called in blk_finish_request() and again when the request
is freed through __blk_put_request().
Maybe the same double call happens in blk-mq.c, where blk_wb_done() is invoked from both
__blk_mq_end_request() and __blk_mq_free_request().
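
If blk-wb.c does not already guard against this internally, one way to keep the accounting
correct might be to let blk_wb_done() account each request only once, e.g. by testing and
clearing the REQ_BUF_INFLIGHT flag this patch already sets. This is only a sketch against the
interfaces visible in the quoted hunks; the struct rq_wb type and the assumption that
__blk_wb_done() drops the count taken in blk_wb_wait() are mine:

	/*
	 * Sketch only: make blk_wb_done() a no-op on the second call for the
	 * same request, so blk_finish_request()/__blk_put_request() (and the
	 * blk-mq equivalents) can both call it safely.
	 */
	void blk_wb_done(struct rq_wb *rwb, struct request *rq)
	{
		if (!rwb)
			return;

		/* only requests throttled in blk_wb_wait() were accounted */
		if (!(rq->cmd_flags & REQ_BUF_INFLIGHT))
			return;

		rq->cmd_flags &= ~REQ_BUF_INFLIGHT;
		__blk_wb_done(rwb);
	}

With something like that, the second call would just see the flag already cleared and return.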

>
> if (req->end_io)
> req->end_io(req, error);
> diff --git a/block/blk-mq.c b/block/blk-mq.c
> index 71b4a13fbf94..c0c5207fe7fd 100644
> --- a/block/blk-mq.c
> +++ b/block/blk-mq.c
> @@ -30,6 +30,7 @@
> #include "blk-mq.h"
> #include "blk-mq-tag.h"
> #include "blk-stat.h"
> +#include "blk-wb.h"
>
> static DEFINE_MUTEX(all_q_mutex);
> static LIST_HEAD(all_q_list);
> @@ -275,6 +276,9 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
>
> if (rq->cmd_flags & REQ_MQ_INFLIGHT)
> atomic_dec(&hctx->nr_active);
> +
> + blk_wb_done(q->rq_wb, rq);
> +
> rq->cmd_flags = 0;
>
> clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
> @@ -305,6 +309,7 @@ EXPORT_SYMBOL_GPL(blk_mq_free_request);
> inline void __blk_mq_end_request(struct request *rq, int error)
> {
> blk_account_io_done(rq);
> + blk_wb_done(rq->q->rq_wb, rq);
>
> if (rq->end_io) {
> rq->end_io(rq, error);
> @@ -414,6 +419,7 @@ void blk_mq_start_request(struct request *rq)
> rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);
>
> rq->issue_time = ktime_to_ns(ktime_get());
> + blk_wb_issue(q->rq_wb, rq);
>
> blk_add_timer(rq);
>
> @@ -450,6 +456,7 @@ static void __blk_mq_requeue_request(struct request *rq)
> struct request_queue *q = rq->q;
>
> trace_block_rq_requeue(q, rq);
> + blk_wb_requeue(q->rq_wb, rq);
>
> if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
> if (q->dma_drain_size && blk_rq_bytes(rq))
> @@ -1265,6 +1272,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
> struct blk_plug *plug;
> struct request *same_queue_rq = NULL;
> blk_qc_t cookie;
> + bool wb_acct;
>
> blk_queue_bounce(q, &bio);
>
> @@ -1282,9 +1290,17 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
> } else
> request_count = blk_plug_queued_count(q);
>
> + wb_acct = blk_wb_wait(q->rq_wb, bio, NULL);
> +
> rq = blk_mq_map_request(q, bio, &data);
> - if (unlikely(!rq))
> + if (unlikely(!rq)) {
> + if (wb_acct)
> + __blk_wb_done(q->rq_wb);
> return BLK_QC_T_NONE;
> + }
> +
> + if (wb_acct)
> + rq->cmd_flags |= REQ_BUF_INFLIGHT;
>
> cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
>
> @@ -1361,6 +1377,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
> struct blk_map_ctx data;
> struct request *rq;
> blk_qc_t cookie;
> + bool wb_acct;
>
> blk_queue_bounce(q, &bio);
>
> @@ -1375,9 +1392,17 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
> blk_attempt_plug_merge(q, bio, &request_count, NULL))
> return BLK_QC_T_NONE;
>
> + wb_acct = blk_wb_wait(q->rq_wb, bio, NULL);
> +
> rq = blk_mq_map_request(q, bio, &data);
> - if (unlikely(!rq))
> + if (unlikely(!rq)) {
> + if (wb_acct)
> + __blk_wb_done(q->rq_wb);
> return BLK_QC_T_NONE;
> + }
> +
> + if (wb_acct)
> + rq->cmd_flags |= REQ_BUF_INFLIGHT;
>
> cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num);
>
> @@ -2111,6 +2136,8 @@ void blk_mq_free_queue(struct request_queue *q)
> list_del_init(&q->all_q_node);



--
Regards
Kaixu Xia