From ba5bd520f679c450fb6efa439618703bd0956daa Mon Sep 17 00:00:00 2001
From: Vivek Goyal
Date: Wed, 19 Jan 2011 08:25:02 -0700
Subject: cfq: rename a function to give it more appropriate name

o Rename a function to give it a more appropriate name. We are calculating
  the cfq queue slice, but the function name gives the impression that the
  cfq group slice length is being calculated.

Signed-off-by: Vivek Goyal
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'block')

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 501ffdf..ace1686 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -599,7 +599,7 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
 }
 
 static inline unsigned
-cfq_scaled_group_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
 	if (cfqd->cfq_latency) {
@@ -631,7 +631,7 @@ cfq_scaled_group_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static inline void
 cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-	unsigned slice = cfq_scaled_group_slice(cfqd, cfqq);
+	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);
 
 	cfqq->slice_start = jiffies;
 	cfqq->slice_end = jiffies + slice;
@@ -1671,7 +1671,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	 */
 	if (timed_out) {
 		if (cfq_cfqq_slice_new(cfqq))
-			cfqq->slice_resid = cfq_scaled_group_slice(cfqd, cfqq);
+			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
 		else
 			cfqq->slice_resid = cfqq->slice_end - jiffies;
 		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
--
cgit v1.1


From be2c6b1990904dbd43f3d9b90fa2c530504375cd Mon Sep 17 00:00:00 2001
From: Vivek Goyal
Date: Wed, 19 Jan 2011 08:25:02 -0700
Subject: blkio-throttle: Avoid calling blkiocg_lookup_group() for root group

o Jeff Moyer was doing some testing on a RAM backed disk and
  blkiocg_lookup_group() showed up with high overhead, right after
  memcpy(). Similarly, somebody else reported that blkiocg_lookup_group()
  is eating 6% extra CPU. Looking at the code, though, I can't see why
  the overhead of this function is so high. One factor is that it is
  called with very high frequency (once for every IO).

o For a lot of folks the blkio controller will be compiled in, but they
  might not have actually created any cgroups. Hence, optimize for the
  root cgroup case, where we can avoid calling blkiocg_lookup_group() if
  the IO is happening in the root group (the common case).

Reported-by: Jeff Moyer
Signed-off-by: Vivek Goyal
Acked-by: Jeff Moyer
Signed-off-by: Jens Axboe
---
 block/blk-throttle.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

(limited to 'block')

diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 381b09b..a89043a 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -168,7 +168,15 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
 	 * tree of blkg (instead of traversing through hash list all
 	 * the time.
 	 */
-	tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
+
+	/*
+	 * This is the common case when there are no blkio cgroups.
+	 * Avoid lookup in this case
+	 */
+	if (blkcg == &blkio_root_cgroup)
+		tg = &td->root_tg;
+	else
+		tg = tg_of_blkg(blkiocg_lookup_group(blkcg, key));
 
 	/* Fill in device details for root group */
 	if (tg && !tg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
--
cgit v1.1


From 02a8f01b5a9f396d0327977af4c232d0f94c45fd Mon Sep 17 00:00:00 2001
From: Justin TerAvest
Date: Wed, 9 Feb 2011 14:20:03 +0100
Subject: cfq-iosched: Don't wait if queue already has requests.

Commit 7667aa0630407bc07dc38dcc79d29cc0a65553c1 added logic to wait for
the last queue of the group to become busy (have at least one request),
so that the group does not lose out for not being continuously
backlogged. The commit did not check for the condition that the last
queue already has some requests. As a result, if the queue already has
requests, wait_busy is set. Later on, cfq_select_queue() checks the
flag and decides that, since the queue has a request now and wait_busy
is set, the queue should be expired. This results in early expiration
of the queue.

This patch fixes the problem by adding a check to see if the queue
already has requests. If it does, wait_busy is not set. As a result,
time slices do not expire early.

The queues with more than one request are usually buffered writers.
Testing shows improvement in isolation between buffered writers.

Cc: stable@kernel.org
Signed-off-by: Justin TerAvest
Reviewed-by: Gui Jianfeng
Acked-by: Vivek Goyal
Signed-off-by: Jens Axboe
---
 block/cfq-iosched.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'block')

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ace1686..7be4c79 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3432,6 +3432,10 @@ static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	struct cfq_io_context *cic = cfqd->active_cic;
 
+	/* If the queue already has requests, don't wait */
+	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
+		return false;
+
 	/* If there are other queues in the group, don't wait */
 	if (cfqq->cfqg->nr_cfqq > 1)
 		return false;
--
cgit v1.1
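The root-group check in the second patch is an instance of a general pattern: special-case the
default group so the per-IO fast path skips an expensive lookup entirely. Below is a minimal
standalone sketch of that pattern in plain C; the names (struct group, group_lookup_slow(),
root_group) are hypothetical stand-ins for struct blkio_cgroup, blkiocg_lookup_group() and
blkio_root_cgroup, not kernel APIs.

#include <stdio.h>

/* Hypothetical stand-in for struct blkio_cgroup. */
struct group {
	int id;
};

/* The built-in default group, analogous to blkio_root_cgroup. */
static struct group root_group = { .id = 0 };

/* Models the expensive per-IO lookup (blkiocg_lookup_group()). */
static struct group *group_lookup_slow(struct group *cg)
{
	/* A real implementation would walk a hash list or rbtree here. */
	return cg;
}

/*
 * Fast-path the common case: when the IO belongs to the root group,
 * return it directly and skip the slow lookup altogether.
 */
static struct group *group_lookup(struct group *cg)
{
	if (cg == &root_group)
		return &root_group;
	return group_lookup_slow(cg);
}

int main(void)
{
	struct group other = { .id = 1 };

	printf("root group id:  %d\n", group_lookup(&root_group)->id);
	printf("other group id: %d\n", group_lookup(&other)->id);
	return 0;
}

The check costs one pointer comparison per IO, while the common case (controller compiled in,
no cgroups ever created) never touches the lookup at all, which is why the patch helps even
though each individual lookup is cheap: it sits on the path of every single request.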