author    Jens Axboe <jaxboe@fusionio.com>  2011-05-20 20:52:16 +0200
committer Jens Axboe <jaxboe@fusionio.com>  2011-05-20 20:52:16 +0200
commit    771949d03b4f5295f648f09141325fd478f6c7ce (patch)
tree      5e69b743566cc8f1eaafffcd03554bbd2b84e028 /block
parent    0eb8e885726a3a93206510092bbc7e39e272f6ef (diff)
block: get rid of on-stack plugging debug checks
We don't need them anymore, so kill:

- REQ_ON_PLUG checks in various places
- !rq_mergeable() check in plug merging

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-core.c  27
-rw-r--r--  block/elevator.c   4
2 files changed, 0 insertions, 31 deletions
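For context, the checks being removed guarded the on-stack plugging path: while a task holds a plug, __make_request() appends new requests to plug->list instead of dispatching them, and blk_flush_plug_list() later inserts them into the device queue. Below is a minimal caller-side sketch, assuming the blk_start_plug()/blk_finish_plug()/submit_bio() API of this kernel series; submit_batch() and its pre-built bios are hypothetical, shown only to illustrate where the plug brackets the submissions.

#include <linux/blkdev.h>
#include <linux/bio.h>

/*
 * Hypothetical helper: submit already-built bios under an on-stack plug.
 * While the plug is active, __make_request() queues the resulting
 * requests on plug->list; blk_finish_plug() hands them to
 * blk_flush_plug_list() for insertion into the device queue.
 */
static void submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* plug installed on current->plug */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);
	blk_finish_plug(&plug);		/* flushes the plugged requests */
}

With REQ_ON_PLUG gone, nothing in this path needs to tag requests as "currently plugged"; the plug list itself is the only bookkeeping.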
diff --git a/block/blk-core.c b/block/blk-core.c
index 9e8e297..7369eee 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -569,8 +569,6 @@ int blk_get_queue(struct request_queue *q)
static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
- BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
-
if (rq->cmd_flags & REQ_ELVPRIV)
elv_put_request(q, rq);
mempool_free(rq, q->rq.rq_pool);
@@ -1110,14 +1108,6 @@ static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
{
const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
- /*
- * Debug stuff, kill later
- */
- if (!rq_mergeable(req)) {
- blk_dump_rq_flags(req, "back");
- return false;
- }
-
if (!ll_back_merge_fn(q, req, bio))
return false;
@@ -1141,14 +1131,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
sector_t sector;
- /*
- * Debug stuff, kill later
- */
- if (!rq_mergeable(req)) {
- blk_dump_rq_flags(req, "front");
- return false;
- }
-
if (!ll_front_merge_fn(q, req, bio))
return false;
@@ -1258,14 +1240,12 @@ static int __make_request(struct request_queue *q, struct bio *bio)
el_ret = elv_merge(q, &req, bio);
if (el_ret == ELEVATOR_BACK_MERGE) {
- BUG_ON(req->cmd_flags & REQ_ON_PLUG);
if (bio_attempt_back_merge(q, req, bio)) {
if (!attempt_back_merge(q, req))
elv_merged_request(q, req, el_ret);
goto out_unlock;
}
} else if (el_ret == ELEVATOR_FRONT_MERGE) {
- BUG_ON(req->cmd_flags & REQ_ON_PLUG);
if (bio_attempt_front_merge(q, req, bio)) {
if (!attempt_front_merge(q, req))
elv_merged_request(q, req, el_ret);
@@ -1320,10 +1300,6 @@ get_rq:
if (__rq->q != q)
plug->should_sort = 1;
}
- /*
- * Debug flag, kill later
- */
- req->cmd_flags |= REQ_ON_PLUG;
list_add_tail(&req->queuelist, &plug->list);
drive_stat_acct(req, 1);
} else {
@@ -2749,7 +2725,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
- BUG_ON(!(rq->cmd_flags & REQ_ON_PLUG));
BUG_ON(!rq->q);
if (rq->q != q) {
/*
@@ -2761,8 +2736,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
depth = 0;
spin_lock(q->queue_lock);
}
- rq->cmd_flags &= ~REQ_ON_PLUG;
-
/*
* rq is already accounted, so use raw insert
*/
diff --git a/block/elevator.c b/block/elevator.c
index 2a0b653..b0b38ce 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -416,8 +416,6 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
struct list_head *entry;
int stop_flags;
- BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
-
if (q->last_merge == rq)
q->last_merge = NULL;
@@ -656,8 +654,6 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
rq->q = q;
- BUG_ON(rq->cmd_flags & REQ_ON_PLUG);
-
if (rq->cmd_flags & REQ_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
if (rq->cmd_type == REQ_TYPE_FS ||