author     majianpeng <majianpeng@gmail.com>  2012-06-12 08:31:10 +0800
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2012-07-16 08:47:51 -0700
commit     2a9ff20c3a8c4e907eb8ae0f7f4f14785cfe2e49 (patch)
tree       f4c01d0007c4f4d3c7c38c0cadd8541c7a6bb742 /drivers/md
parent     c8d210c8900b8e4f495c15b4e6238552921eb65c (diff)
md/raid5: Do not add data_offset before call to is_badblock
commit 6c0544e255dd6582a9899572e120fb55d9f672a4 upstream.

In chunk_aligned_read() we are adding data_offset before calling
is_badblock.  But is_badblock also adds data_offset, so that is bad.

So move the addition of data_offset to after the call to is_badblock.

This bug was introduced by commit 31c176ecdf3563140e639
    md/raid5: avoid reading from known bad blocks.
which first appeared in 3.0.  So that patch is suitable for any -stable
kernel from 3.0.y onwards.  However it will need minor revision for
most of those (as the comment didn't appear until recently).

Signed-off-by: majianpeng <majianpeng@gmail.com>
Signed-off-by: NeilBrown <neilb@suse.de>
[bwh: Backported to 3.2: ignored missing comment]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
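Editor's note: the core of the bug is that is_badblock() already translates
from an array-relative sector to a device-relative one by adding
rdev->data_offset internally, so shifting bi_sector before the call applies
the offset twice and the bad-block lookup lands on the wrong range.  The
stand-alone C sketch below (simplified stand-in types and a single
hypothetical bad range, not the kernel code) illustrates the difference
between the two call orders:

#include <stdio.h>

/* Simplified stand-ins for the kernel structures (hypothetical). */
typedef unsigned long long sector_t;

struct rdev {
	sector_t data_offset;   /* where the data area starts on the device */
	sector_t bad_start;     /* one recorded bad range, device-relative  */
	sector_t bad_len;
};

/* Mimics the kernel helper's behaviour: it adds data_offset itself. */
static int is_badblock(const struct rdev *rdev, sector_t s, int sectors)
{
	sector_t dev_sector = rdev->data_offset + s;   /* internal shift */
	return dev_sector < rdev->bad_start + rdev->bad_len &&
	       dev_sector + sectors > rdev->bad_start;
}

int main(void)
{
	struct rdev rdev = { .data_offset = 2048,
			     .bad_start = 2048 + 100, .bad_len = 8 };
	sector_t bi_sector = 100;      /* array-relative read position */

	/* Buggy order: shift first, then check -> offset applied twice. */
	sector_t shifted = bi_sector + rdev.data_offset;
	printf("check after shifting:  %d (misses the bad block)\n",
	       is_badblock(&rdev, shifted, 8));

	/* Fixed order: check the unshifted sector, shift afterwards. */
	printf("check before shifting: %d (finds the bad block)\n",
	       is_badblock(&rdev, bi_sector, 8));
	return 0;
}

With the sketch's numbers the pre-shifted call reports 0 (bad block missed)
while the unshifted call reports 1, which is exactly why the patch moves the
data_offset addition below the check.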
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/raid5.c | 4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d0c8a1e..cff955a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3848,7 +3848,6 @@ static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
 		raid_bio->bi_next = (void*)rdev;
 		align_bi->bi_bdev = rdev->bdev;
 		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
-		align_bi->bi_sector += rdev->data_offset;
 
 		if (!bio_fits_rdev(align_bi)) {
 			/* too big in some way */
@@ -3857,6 +3856,9 @@ static int chunk_aligned_read(mddev_t *mddev, struct bio * raid_bio)
 			return 0;
 		}
 
+		/* No reshape active, so we can trust rdev->data_offset */
+		align_bi->bi_sector += rdev->data_offset;
+
 		spin_lock_irq(&conf->device_lock);
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    conf->quiesce == 0,