author    | codeworkx <daniel.hillenbrand@codeworkx.de> | 2012-06-02 13:09:29 +0200
committer | codeworkx <daniel.hillenbrand@codeworkx.de> | 2012-06-02 13:09:29 +0200
commit    | c6da2cfeb05178a11c6d062a06f8078150ee492f (patch)
tree      | f3b4021d252c52d6463a9b3c1bb7245e399b009c /drivers/md
parent    | c6d7c4dbff353eac7919342ae6b3299a378160a6 (diff)
download  | kernel_samsung_smdk4412-c6da2cfeb05178a11c6d062a06f8078150ee492f.zip
          | kernel_samsung_smdk4412-c6da2cfeb05178a11c6d062a06f8078150ee492f.tar.gz
          | kernel_samsung_smdk4412-c6da2cfeb05178a11c6d062a06f8078150ee492f.tar.bz2
samsung update 1
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/dm-crypt.c | 47
1 file changed, 28 insertions, 19 deletions
```diff
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index c8827ff..da5f5f2 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -177,7 +177,6 @@ struct crypt_config {
 
 #define MIN_IOS        16
 #define MIN_POOL_PAGES 32
-#define MIN_BIO_PAGES  8
 
 static struct kmem_cache *_crypt_io_pool;
 
@@ -849,12 +848,11 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
 	}
 
 	/*
-	 * if additional pages cannot be allocated without waiting,
-	 * return a partially allocated bio, the caller will then try
-	 * to allocate additional bios while submitting this partial bio
+	 * If additional pages cannot be allocated without waiting,
+	 * return a partially-allocated bio. The caller will then try
+	 * to allocate more bios while submitting this partial bio.
 	 */
-	if (i == (MIN_BIO_PAGES - 1))
-		gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
+	gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
 
 	len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
 
@@ -1047,16 +1045,14 @@ static void kcryptd_queue_io(struct dm_crypt_io *io)
 	queue_work(cc->io_queue, &io->work);
 }
 
-static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
-					  int error, int async)
+static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 {
 	struct bio *clone = io->ctx.bio_out;
 	struct crypt_config *cc = io->target->private;
 
-	if (unlikely(error < 0)) {
+	if (unlikely(io->error < 0)) {
 		crypt_free_buffer_pages(cc, clone);
 		bio_put(clone);
-		io->error = -EIO;
 		crypt_dec_pending(io);
 		return;
 	}
@@ -1107,12 +1103,16 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 		sector += bio_sectors(clone);
 
 		crypt_inc_pending(io);
+
 		r = crypt_convert(cc, &io->ctx);
+		if (r < 0)
+			io->error = -EIO;
+
 		crypt_finished = atomic_dec_and_test(&io->ctx.pending);
 
 		/* Encryption was already finished, submit io now */
 		if (crypt_finished) {
-			kcryptd_crypt_write_io_submit(io, r, 0);
+			kcryptd_crypt_write_io_submit(io, 0);
 
 			/*
 			 * If there was an error, do not try next fragments.
@@ -1163,11 +1163,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	crypt_dec_pending(io);
 }
 
-static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
+static void kcryptd_crypt_read_done(struct dm_crypt_io *io)
 {
-	if (unlikely(error < 0))
-		io->error = -EIO;
-
 	crypt_dec_pending(io);
 }
 
@@ -1182,9 +1179,11 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
 		       io->sector);
 
 	r = crypt_convert(cc, &io->ctx);
+	if (r < 0)
+		io->error = -EIO;
 
 	if (atomic_dec_and_test(&io->ctx.pending))
-		kcryptd_crypt_read_done(io, r);
+		kcryptd_crypt_read_done(io);
 
 	crypt_dec_pending(io);
 }
@@ -1205,15 +1204,18 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
 	if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
 		error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
 
+	if (error < 0)
+		io->error = -EIO;
+
 	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);
 
 	if (!atomic_dec_and_test(&ctx->pending))
 		return;
 
 	if (bio_data_dir(io->base_bio) == READ)
-		kcryptd_crypt_read_done(io, error);
+		kcryptd_crypt_read_done(io);
 	else
-		kcryptd_crypt_write_io_submit(io, error, 1);
+		kcryptd_crypt_write_io_submit(io, 1);
 }
 
 static void kcryptd_crypt(struct work_struct *work)
@@ -1649,20 +1651,27 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 	cc->start = tmpll;
 
 	ret = -ENOMEM;
+#if 1
+	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
+#else
 	cc->io_queue = alloc_workqueue("kcryptd_io",
 				       WQ_NON_REENTRANT|
 				       WQ_MEM_RECLAIM,
 				       1);
+#endif
 	if (!cc->io_queue) {
 		ti->error = "Couldn't create kcryptd io queue";
 		goto bad;
 	}
-
+#if 1
+	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
+#else
 	cc->crypt_queue = alloc_workqueue("kcryptd",
 					  WQ_NON_REENTRANT|
 					  WQ_CPU_INTENSIVE|
 					  WQ_MEM_RECLAIM,
 					  1);
+#endif
 	if (!cc->crypt_queue) {
 		ti->error = "Couldn't create kcryptd queue";
 		goto bad;
```
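The crypt_alloc_buffer() hunk drops the MIN_BIO_PAGES threshold, so page allocations are attempted without __GFP_WAIT from the start: if a page cannot be had immediately, the partially-filled bio is returned and submitted while the caller retries for the remainder. Below is a minimal userspace sketch of that allocate-opportunistically, submit-partial pattern; every name in it is invented for illustration and none of it is the driver's code.

```c
#include <stdio.h>
#include <stdlib.h>

#define NPAGES 8	/* pages wanted for one buffer */

/* Stand-in for a non-blocking page allocation (illustrative only). */
static void *try_alloc_nowait(void)
{
	return (rand() % 4) ? malloc(4096) : NULL;	/* fail ~25% of the time */
}

/*
 * Fill up to n slots, but stop at the first failure instead of
 * blocking; the caller submits the partial buffer and retries.
 */
static int alloc_buffer(void *pages[], int n)
{
	int i;

	for (i = 0; i < n; i++) {
		pages[i] = try_alloc_nowait();
		if (!pages[i])
			break;
	}
	return i;	/* number of pages actually obtained */
}

int main(void)
{
	void *pages[NPAGES];
	int got = alloc_buffer(pages, NPAGES);

	printf("got %d of %d pages; submitting partial I/O, retrying the rest\n",
	       got, NPAGES);
	while (got--)
		free(pages[got]);
	return 0;
}
```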
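The bulk of the diff reworks error plumbing: instead of threading an `int error` argument through kcryptd_crypt_write_io_submit(), kcryptd_crypt_read_done(), and kcryptd_async_done(), a failed crypt_convert() now latches -EIO once in `io->error`, and every completion path reads that field. A small userspace sketch of the latched-error idiom follows, with the struct and function names made up for illustration.

```c
#include <errno.h>
#include <stdio.h>

/* Illustrative stand-in for the per-request state (struct dm_crypt_io). */
struct demo_io {
	int error;	/* sticky: set once at the point of failure */
	int pending;	/* outstanding sub-operations */
};

/* Completion no longer takes an error argument... */
static void demo_io_done(struct demo_io *io)
{
	if (io->error < 0)
		printf("request failed: %d\n", io->error);
	else
		printf("request completed\n");
}

/* Pretend conversion step that fails, as crypt_convert() might. */
static int demo_convert(void)
{
	return -EIO;
}

int main(void)
{
	struct demo_io io = { .error = 0, .pending = 1 };
	int r = demo_convert();

	if (r < 0)
		io.error = -EIO;	/* ...because the error is latched here */
	if (--io.pending == 0)
		demo_io_done(&io);
	return 0;
}
```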
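Finally, the `#if 1` blocks in crypt_ctr() swap the upstream alloc_workqueue() calls for create_singlethread_workqueue(), pinning each queue to a single dedicated worker thread. Here is a hedged, module-style sketch contrasting the two creation calls as they existed in this kernel generation; the module and queue names are invented, and note that WQ_NON_REENTRANT was removed from later mainline kernels.

```c
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int __init demo_init(void)
{
	/* The vendor choice in this commit: one dedicated worker thread. */
	demo_wq = create_singlethread_workqueue("demo_wq");

	/*
	 * The upstream form it replaces, kept here for comparison:
	 *
	 *	demo_wq = alloc_workqueue("demo_wq",
	 *				  WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 1);
	 */
	if (!demo_wq)
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```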