From 19435f618c1072a48a66531c17218a3fbc0a41cd Mon Sep 17 00:00:00 2001
From: Corrado Zoccolo
Date: Sat, 24 Apr 2010 15:39:31 +0200
Subject: [PATCH 2/2] cfq-iosched: optimistic queue merging.

---
 block/cfq-iosched.c |   41 +++++++++++++++++++++++++++++++++--------
 1 files changed, 33 insertions(+), 8 deletions(-)

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 6551726..4e9e015 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -146,6 +146,7 @@ struct cfq_queue {
 	/* Sectors dispatched in current dispatch round */
 	unsigned long nr_sectors;
 	unsigned transferred;
+	unsigned last_bw;
 };
 
 /*
@@ -1574,6 +1575,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	if (used_slice > HZ / 100 && used_slice > 2) {
 		bw = cfqq->transferred / used_slice;
 		cfqd->cur_bw = bw;
+		cfqq->last_bw = bw;
 		cfqd->max_bw[cfq_cfqq_coop(cfqq)] =
 			max(cfqd->max_bw[cfq_cfqq_coop(cfqq)], bw);
 	}
@@ -1696,8 +1698,9 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
 {
 	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
 	struct rb_node *parent, *node;
-	struct cfq_queue *__cfqq;
+	struct cfq_queue *__cfqq, *__cfqq1;
 	sector_t sector = cfqd->last_position;
+	unsigned rs;
 
 	if (RB_EMPTY_ROOT(root))
 		return NULL;
@@ -1715,7 +1718,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
 	 * will contain the closest sector.
 	 */
 	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
-	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
+	if (!CFQQ_SEEKY(__cfqq) && cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
 		return __cfqq;
 
 	if (blk_rq_pos(__cfqq->next_rq) < sector)
@@ -1723,12 +1726,34 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
 	else
 		node = rb_prev(&__cfqq->p_node);
 	if (!node)
-		return NULL;
-
-	__cfqq = rb_entry(node, struct cfq_queue, p_node);
-	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
-		return __cfqq;
-
+		__cfqq1 = __cfqq;
+	else
+		__cfqq1 = rb_entry(node, struct cfq_queue, p_node);
+
+	if (!CFQQ_SEEKY(__cfqq1) && cfq_rq_close(cfqd, cur_cfqq, __cfqq1->next_rq))
+		return __cfqq1;
+
+	// Opportunistic queue merging could be beneficial even on far queues.
+	// We enable it only on NCQ disks, if we observed that merged queues
+	// can reach higher bandwidth than single queues.
+	rs = cur_cfqq->allocated[READ] + cur_cfqq->allocated[WRITE];
+	if (cfqd->hw_tag && cfqd->max_bw[1] > cfqd->max_bw[0] &&
+	    // Do not overload the device queue
+	    rs < cfqd->hw_tag_est_depth / 2) {
+		unsigned r1 = __cfqq->allocated[READ] + __cfqq->allocated[WRITE];
+		unsigned r2 = __cfqq1->allocated[READ] + __cfqq1->allocated[WRITE];
+		// Prefer merging with a queue that has fewer pending requests
+		if (r1 > r2 && !CFQQ_SEEKY(__cfqq1)) {
+			__cfqq = __cfqq1;
+			r1 = r2;
+		}
+		// Do not merge if the merged queue would have too many requests
+		if (r1 + rs > cfqd->hw_tag_est_depth / 2)
+			return NULL;
+		// Merge only if the BW of the two queues is comparable
+		if (abs(__cfqq->last_bw - cur_cfqq->last_bw) * 4 < cfqd->max_bw[0])
+			return __cfqq;
+	}
 	return NULL;
 }
-- 
1.6.2.5
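
Note on the new hunk: the optimistic merge is gated by three independent checks. The device must support command queueing and merged (coop) queues must have historically reached higher bandwidth than single queues (max_bw[1] > max_bw[0]); the combined pending-request count must stay below half the estimated hardware queue depth; and the two queues' last observed bandwidths must differ by less than a quarter of the best single-queue bandwidth (the "* 4 <" comparison). The user-space C sketch below restates only that decision under those assumptions; struct layouts, should_merge() and the values in main() are hypothetical illustrations mirroring the fields the patch uses (allocated[], last_bw, max_bw[], hw_tag_est_depth), not the actual factoring of cfq-iosched.c.

/*
 * User-space sketch of the optimistic merge decision above.
 * The struct layouts and should_merge() are hypothetical;
 * only the three conditions mirror the patch.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct queue_stats {
	unsigned allocated;	/* pending requests on this queue */
	unsigned last_bw;	/* bandwidth seen in its last time slice */
};

struct disk_stats {
	bool hw_tag;		/* device supports command queueing (NCQ) */
	unsigned depth;		/* estimated hardware queue depth */
	unsigned max_bw_single;	/* best bandwidth seen from a single queue */
	unsigned max_bw_coop;	/* best bandwidth seen from a merged queue */
};

/* Mirror of the three gates the patch applies before a far merge. */
static bool should_merge(const struct disk_stats *d,
			 const struct queue_stats *cur,
			 const struct queue_stats *cand)
{
	/* Only on NCQ disks, and only if merged queues ever did better. */
	if (!d->hw_tag || d->max_bw_coop <= d->max_bw_single)
		return false;
	/* Do not overload the device queue. */
	if (cur->allocated >= d->depth / 2)
		return false;
	if (cand->allocated + cur->allocated > d->depth / 2)
		return false;
	/*
	 * Merge only if the two queues' bandwidths are comparable:
	 * they must differ by less than a quarter of max_bw_single.
	 */
	return abs((int)cand->last_bw - (int)cur->last_bw) * 4
			< (int)d->max_bw_single;
}

int main(void)
{
	struct disk_stats d = {
		.hw_tag = true, .depth = 31,
		.max_bw_single = 100, .max_bw_coop = 140,
	};
	struct queue_stats cur  = { .allocated = 3, .last_bw = 90 };
	struct queue_stats cand = { .allocated = 5, .last_bw = 80 };

	/* 3 < 15, 3 + 5 <= 15, |90 - 80| * 4 < 100  ->  prints "merge: yes" */
	printf("merge: %s\n", should_merge(&d, &cur, &cand) ? "yes" : "no");
	return 0;
}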