| author | Paul Mackerras <paulus@samba.org> | 2006-05-19 15:02:42 +1000 |
|---|---|---|
| committer | Paul Mackerras <paulus@samba.org> | 2006-05-19 15:02:42 +1000 |
| commit | 3c06da5ae5358e9d325d541a053e1059e9654bcc (patch) | |
| tree | 04c953cc82fe57cff248ac523095cd4f0d9611a7 /block | |
| parent | 4d1f3f25d9c303d1ce63b42cc94c54ac0ab2e950 (diff) | |
| parent | a54c9d30dbb06391ec4422aaf0e1dc2c8c53bd3e (diff) | |
Merge ../linux-2.6
Diffstat (limited to 'block')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | block/elevator.c | 8 |
| -rw-r--r-- | block/ll_rw_blk.c | 17 |

2 files changed, 22 insertions, 3 deletions
```diff
diff --git a/block/elevator.c b/block/elevator.c
index 2982579..8768a36 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -333,6 +333,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 {
 	struct list_head *pos;
 	unsigned ordseq;
+	int unplug_it = 1;
 
 	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
 
@@ -399,6 +400,11 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 		}
 
 		list_add_tail(&rq->queuelist, pos);
+		/*
+		 * most requeues happen because of a busy condition, don't
+		 * force unplug of the queue for that case.
+		 */
+		unplug_it = 0;
 		break;
 
 	default:
@@ -407,7 +413,7 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 		BUG();
 	}
 
-	if (blk_queue_plugged(q)) {
+	if (unplug_it && blk_queue_plugged(q)) {
 		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
 			- q->in_flight;
 
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index e5041a0..eac48be 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1732,8 +1732,21 @@ void blk_run_queue(struct request_queue *q)
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	blk_remove_plug(q);
-	if (!elv_queue_empty(q))
-		q->request_fn(q);
+
+	/*
+	 * Only recurse once to avoid overrunning the stack, let the unplug
+	 * handling reinvoke the handler shortly if we already got there.
+	 */
+	if (!elv_queue_empty(q)) {
+		if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+			q->request_fn(q);
+			clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+		} else {
+			blk_plug_device(q);
+			kblockd_schedule_work(&q->unplug_work);
+		}
+	}
+
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
```
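The elevator.c hunk is straightforward: a requeue usually means the device is busy, so the `ELEVATOR_INSERT_REQUEUE` case now sets `unplug_it = 0` to skip the forced unplug at the bottom of `elv_insert()`. The ll_rw_blk.c hunk is the more interesting one: `blk_run_queue()` can be re-entered from the driver's own request function, so the patch bounds the recursion with an atomic test-and-set flag (`QUEUE_FLAG_REENTER`) and, when the flag is already held, plugs the queue and lets kblockd re-run it shortly. Below is a minimal user-space sketch of that re-entry guard, using C11 `atomic_flag` in place of the kernel's `test_and_set_bit()`; `struct queue`, `dispatch()`, and `schedule_deferred_run()` are invented names for illustration, not kernel APIs.

```c
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's request queue and kblockd hook. */
struct queue {
	atomic_flag reenter;	/* plays the role of QUEUE_FLAG_REENTER */
	int pending;		/* pretend there are requests to dispatch */
};

static void dispatch(struct queue *q);

/* Defer the run: in the kernel this is blk_plug_device() +
 * kblockd_schedule_work(), which reinvokes the handler shortly. */
static void schedule_deferred_run(struct queue *q)
{
	printf("queue busy, deferring %d request(s) to the worker\n",
	       q->pending);
}

/* The guard itself: run the dispatch function at most once per call
 * chain; a nested call sees the flag already set and defers instead
 * of recursing deeper (and overrunning the stack). */
static void run_queue(struct queue *q)
{
	if (q->pending == 0)
		return;

	if (!atomic_flag_test_and_set(&q->reenter)) {
		dispatch(q);			/* kernel: q->request_fn(q) */
		atomic_flag_clear(&q->reenter);
	} else {
		schedule_deferred_run(q);
	}
}

/* A driver's request function may itself kick the queue again; the
 * flag turns that recursion into a single deferred retry. */
static void dispatch(struct queue *q)
{
	q->pending--;
	run_queue(q);
}

int main(void)
{
	struct queue q = { .reenter = ATOMIC_FLAG_INIT, .pending = 3 };

	run_queue(&q);	/* dispatches one request, defers the rest */
	return 0;
}
```

The design point, per the patch's own comment, is to recurse at most once: a nested invocation is not dropped but handed off to the unplug work, which calls the request function again from a fresh stack.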