From 0c3cf4cc9abd854dcc6488719348a75b8f328c54 Mon Sep 17 00:00:00 2001 From: Dmitry Kasatkin Date: Fri, 19 Nov 2010 16:04:22 +0200 Subject: crypto: omap-sham - uses digest buffer in request context Currently driver storred digest results in req->results provided by the client. But some clients do not set it until final() call. It leads to crash. Changed to use internal buffer to store temporary digest results. Signed-off-by: Dmitry Kasatkin Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index a081c7c..2222370 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -97,6 +97,7 @@ struct omap_sham_reqctx { unsigned long flags; unsigned long op; + u8 digest[SHA1_DIGEST_SIZE]; size_t digcnt; u8 *buffer; size_t bufcnt; @@ -194,7 +195,7 @@ static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit) static void omap_sham_copy_hash(struct ahash_request *req, int out) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); - u32 *hash = (u32 *)req->result; + u32 *hash = (u32 *)ctx->digest; int i; if (likely(ctx->flags & FLAGS_SHA1)) { @@ -453,8 +454,11 @@ static void omap_sham_cleanup(struct ahash_request *req) ctx->flags |= FLAGS_CLEAN; spin_unlock_irqrestore(&dd->lock, flags); - if (ctx->digcnt) + if (ctx->digcnt) { clk_disable(dd->iclk); + memcpy(req->result, ctx->digest, (ctx->flags & FLAGS_SHA1) ? + SHA1_DIGEST_SIZE : MD5_DIGEST_SIZE); + } if (ctx->dma_addr) dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, @@ -576,6 +580,7 @@ static int omap_sham_final_req(struct omap_sham_dev *dd) static int omap_sham_finish_req_hmac(struct ahash_request *req) { + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); struct omap_sham_hmac_ctx *bctx = tctx->base; int bs = crypto_shash_blocksize(bctx->shash); @@ -590,7 +595,7 @@ static int omap_sham_finish_req_hmac(struct ahash_request *req) return crypto_shash_init(&desc.shash) ?: crypto_shash_update(&desc.shash, bctx->opad, bs) ?: - crypto_shash_finup(&desc.shash, req->result, ds, req->result); + crypto_shash_finup(&desc.shash, ctx->digest, ds, ctx->digest); } static void omap_sham_finish_req(struct ahash_request *req, int err) -- cgit v1.1 From c8eb54041acc4dedeaf0d9da34b91b8658386751 Mon Sep 17 00:00:00 2001 From: Dmitry Kasatkin Date: Fri, 19 Nov 2010 16:04:23 +0200 Subject: crypto: omap-sham - DMA initialization fixes for off mode DMA parameters for constant data were initialized during driver probe(). It seems that those settings sometimes are lost when devices goes to off mode. This patch makes DMA initialization just before use. It solves off mode problems. 
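The same off-mode consideration applies to any configuration the accelerator's power domain can drop. A minimal stand-alone C sketch of the pattern (the struct and helper names are hypothetical, not the OMAP DMA API): the constant destination parameters are reprogrammed on every transfer instead of once at probe time.

/* Sketch only: hypothetical stand-ins, not the OMAP DMA API. */
#include <stddef.h>

struct dma_dest_cfg {
        unsigned long fifo_addr;   /* constant destination: the module's data FIFO */
        int dest_burst;            /* burst size towards the FIFO */
        int src_burst;             /* burst size from memory */
};

/* Re-applied before every transfer; harmless when the context was kept. */
static void dma_program_constant_params(struct dma_dest_cfg *cfg,
                                        unsigned long fifo_addr)
{
        cfg->fifo_addr = fifo_addr;
        cfg->dest_burst = 16;
        cfg->src_burst = 4;
}

static int dma_submit(struct dma_dest_cfg *cfg, unsigned long fifo_addr,
                      const void *buf, size_t len)
{
        (void)buf; (void)len;
        dma_program_constant_params(cfg, fifo_addr);  /* was probe()-time setup */
        /* ...program the source address/length and start the channel... */
        return 0;
}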
Fixes: NB#202786 - Aegis & SHA1 block off mode changes Signed-off-by: Dmitry Kasatkin Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 2222370..9dfbc4a 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -318,6 +318,16 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC, dma_addr, 0, 0); + omap_set_dma_dest_params(dd->dma_lch, 0, + OMAP_DMA_AMODE_CONSTANT, + dd->phys_base + SHA_REG_DIN(0), 0, 16); + + omap_set_dma_dest_burst_mode(dd->dma_lch, + OMAP_DMA_DATA_BURST_16); + + omap_set_dma_src_burst_mode(dd->dma_lch, + OMAP_DMA_DATA_BURST_4); + err = omap_sham_write_ctrl(dd, length, final, 1); if (err) return err; @@ -1071,15 +1081,6 @@ static int omap_sham_dma_init(struct omap_sham_dev *dd) dev_err(dd->dev, "Unable to request DMA channel\n"); return err; } - omap_set_dma_dest_params(dd->dma_lch, 0, - OMAP_DMA_AMODE_CONSTANT, - dd->phys_base + SHA_REG_DIN(0), 0, 16); - - omap_set_dma_dest_burst_mode(dd->dma_lch, - OMAP_DMA_DATA_BURST_16); - - omap_set_dma_src_burst_mode(dd->dma_lch, - OMAP_DMA_DATA_BURST_4); return 0; } -- cgit v1.1 From 3e133c8bf6a6723377d94bc97aa52596e49a4090 Mon Sep 17 00:00:00 2001 From: Dmitry Kasatkin Date: Fri, 19 Nov 2010 16:04:24 +0200 Subject: crypto: omap-sham - error handling improved Introduces DMA error handling. DMA error is returned as a result code of the hash request. Clients needs to handle error codes and may repeat hash calculation attempt. Also in the case of DMA error, SHAM module is set to be re-initialized again. It significantly improves stability against possible HW failures. 
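A plain-C sketch of the recovery scheme described above (hypothetical names, not the driver's actual structures): the DMA callback records the failure and clears the INIT flag so that the next request soft-resets the module, and the error code is handed back to the client as the request result, so the client may retry.

/* Sketch only: hypothetical device-state struct and flag values. */
#include <errno.h>

#define FLAG_INIT       0x0100

struct sham_dev {
        unsigned long flags;
        int err;
};

static void dma_callback(struct sham_dev *dd, int block_irq)
{
        if (!block_irq) {                 /* anything but a clean completion */
                dd->err = -EIO;           /* reported to the client later    */
                dd->flags &= ~FLAG_INIT;  /* request re-initialization       */
        }
        /* ...schedule the done handler (a tasklet in the driver)... */
}

static int hw_init(struct sham_dev *dd)
{
        if (!(dd->flags & FLAG_INIT)) {
                /* ...issue SOFTRESET and wait for RESETDONE... */
                dd->flags |= FLAG_INIT;
                dd->err = 0;
        }
        return 0;
}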
Signed-off-by: Dmitry Kasatkin Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 67 ++++++++++++++++++++++++++++++---------------- 1 file changed, 44 insertions(+), 23 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 9dfbc4a..db20628 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -83,6 +83,7 @@ #define FLAGS_INIT 0x0100 #define FLAGS_CPU 0x0200 #define FLAGS_HMAC 0x0400 +#define FLAGS_ERROR 0x0800 /* 3rd byte */ #define FLAGS_BUSY 16 @@ -137,6 +138,7 @@ struct omap_sham_dev { int irq; struct clk *iclk; spinlock_t lock; + int err; int dma; int dma_lch; struct tasklet_struct done_task; @@ -234,10 +236,12 @@ static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); if (omap_sham_wait(dd, SHA_REG_SYSSTATUS, - SHA_REG_SYSSTATUS_RESETDONE)) + SHA_REG_SYSSTATUS_RESETDONE)) { + clk_disable(dd->iclk); return -ETIMEDOUT; - + } dd->flags |= FLAGS_INIT; + dd->err = 0; } } else { omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt); @@ -279,11 +283,12 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, if (err) return err; + /* should be non-zero before next lines to disable clocks later */ + ctx->digcnt += length; + if (omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY)) return -ETIMEDOUT; - ctx->digcnt += length; - if (final) ctx->flags |= FLAGS_FINAL; /* catch last interrupt */ @@ -303,7 +308,6 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", ctx->digcnt, length, final); - /* flush cache entries related to our page */ if (dma_addr == ctx->dma_addr) dma_sync_single_for_device(dd->dev, dma_addr, length, @@ -411,6 +415,7 @@ static int omap_sham_update_dma_fast(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); unsigned int length; + int err; ctx->flags |= FLAGS_FAST; @@ -424,7 +429,11 @@ static int omap_sham_update_dma_fast(struct omap_sham_dev *dd) ctx->total -= length; - return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1); + err = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1); + if (err != -EINPROGRESS) + dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); + + return err; } static int omap_sham_update_cpu(struct omap_sham_dev *dd) @@ -580,9 +589,6 @@ static int omap_sham_final_req(struct omap_sham_dev *dd) ctx->bufcnt = 0; - if (err != -EINPROGRESS) - omap_sham_cleanup(req); - dev_dbg(dd->dev, "final_req: err: %d\n", err); return err; @@ -616,9 +622,11 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) omap_sham_copy_hash(ctx->dd->req, 1); if (ctx->flags & FLAGS_HMAC) err = omap_sham_finish_req_hmac(req); + } else { + ctx->flags |= FLAGS_ERROR; } - if (ctx->flags & FLAGS_FINAL) + if ((ctx->flags & FLAGS_FINAL) || err) omap_sham_cleanup(req); clear_bit(FLAGS_BUSY, &ctx->dd->flags); @@ -776,12 +784,14 @@ static int omap_sham_final(struct ahash_request *req) ctx->flags |= FLAGS_FINUP; - /* OMAP HW accel works only with buffers >= 9 */ - /* HMAC is always >= 9 because of ipad */ - if ((ctx->digcnt + ctx->bufcnt) < 9) - err = omap_sham_final_shash(req); - else if (ctx->bufcnt) - return omap_sham_enqueue(req, OP_FINAL); + if (!(ctx->flags & FLAGS_ERROR)) { + /* OMAP HW accel works only with buffers >= 9 */ + /* HMAC is always >= 9 because of ipad */ + if ((ctx->digcnt + ctx->bufcnt) < 9) + err = omap_sham_final_shash(req); + else if 
(ctx->bufcnt) + return omap_sham_enqueue(req, OP_FINAL); + } omap_sham_cleanup(req); @@ -851,6 +861,8 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm); const char *alg_name = crypto_tfm_alg_name(tfm); + pr_info("enter\n"); + /* Allocate a fallback and abort if it failed. */ tctx->fallback = crypto_alloc_shash(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK); @@ -1008,7 +1020,7 @@ static void omap_sham_done_task(unsigned long data) struct omap_sham_dev *dd = (struct omap_sham_dev *)data; struct ahash_request *req = dd->req; struct omap_sham_reqctx *ctx = ahash_request_ctx(req); - int ready = 1; + int ready = 0, err = 0; if (ctx->flags & FLAGS_OUTPUT_READY) { ctx->flags &= ~FLAGS_OUTPUT_READY; @@ -1018,13 +1030,16 @@ static void omap_sham_done_task(unsigned long data) if (dd->flags & FLAGS_DMA_ACTIVE) { dd->flags &= ~FLAGS_DMA_ACTIVE; omap_sham_update_dma_stop(dd); - omap_sham_update_dma_slow(dd); + if (!dd->err) + err = omap_sham_update_dma_slow(dd); } - if (ready && !(dd->flags & FLAGS_DMA_ACTIVE)) { - dev_dbg(dd->dev, "update done\n"); + err = dd->err ? : err; + + if (err != -EINPROGRESS && (ready || err)) { + dev_dbg(dd->dev, "update done: err: %d\n", err); /* finish curent request */ - omap_sham_finish_req(req, 0); + omap_sham_finish_req(req, err); /* start new request */ omap_sham_handle_queue(dd); } @@ -1056,6 +1071,7 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id) omap_sham_read(dd, SHA_REG_CTRL); ctx->flags |= FLAGS_OUTPUT_READY; + dd->err = 0; tasklet_schedule(&dd->done_task); return IRQ_HANDLED; @@ -1065,8 +1081,13 @@ static void omap_sham_dma_callback(int lch, u16 ch_status, void *data) { struct omap_sham_dev *dd = data; - if (likely(lch == dd->dma_lch)) - tasklet_schedule(&dd->done_task); + if (ch_status != OMAP_DMA_BLOCK_IRQ) { + pr_err("omap-sham DMA error status: 0x%hx\n", ch_status); + dd->err = -EIO; + dd->flags &= ~FLAGS_INIT; /* request to re-initialize */ + } + + tasklet_schedule(&dd->done_task); } static int omap_sham_dma_init(struct omap_sham_dev *dd) -- cgit v1.1 From a5d87237bb15eed8449e5a30c0bbe626e0e7f43d Mon Sep 17 00:00:00 2001 From: Dmitry Kasatkin Date: Fri, 19 Nov 2010 16:04:25 +0200 Subject: crypto: omap-sham - removed redundunt locking Locking for queuing and dequeuing is combined. test_and_set_bit() is also replaced with checking under dd->lock. 
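The effect of folding everything under one lock can be shown with a small user-space sketch (pthreads, hypothetical request and queue types): enqueue, the BUSY test and dequeue form a single critical section, so a request can no longer slip in between an empty-queue check and clearing the busy bit, which is the race a separately handled test_and_set_bit() leaves open.

/* Sketch only: hypothetical types, pthread mutex in place of the spinlock. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct request { struct request *next; };

struct dev_queue {
        pthread_mutex_t lock;
        struct request *head, *tail;
        bool busy;
};

/* Enqueue (optionally) and pick the next request to run, atomically. */
static struct request *queue_handle(struct dev_queue *q, struct request *new_req)
{
        struct request *req = NULL;

        pthread_mutex_lock(&q->lock);
        if (new_req) {                    /* enqueue under the same lock */
                new_req->next = NULL;
                if (q->tail)
                        q->tail->next = new_req;
                else
                        q->head = new_req;
                q->tail = new_req;
        }
        if (!q->busy && q->head) {        /* only start if the device is idle */
                req = q->head;
                q->head = req->next;
                if (!q->head)
                        q->tail = NULL;
                q->busy = true;           /* BUSY is set under the lock too */
        }
        pthread_mutex_unlock(&q->lock);

        return req;                       /* caller programs the hardware */
}

When the hardware finishes, the completion path clears busy and calls queue_handle() again with a NULL request to start whatever is queued next, which mirrors omap_sham_handle_queue(dd, NULL) being called from the tasklet.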
Signed-off-by: Dmitry Kasatkin Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 47 +++++++++++++++++++++------------------------- 1 file changed, 21 insertions(+), 26 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index db20628..6340c5e 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -84,9 +84,7 @@ #define FLAGS_CPU 0x0200 #define FLAGS_HMAC 0x0400 #define FLAGS_ERROR 0x0800 - -/* 3rd byte */ -#define FLAGS_BUSY 16 +#define FLAGS_BUSY 0x1000 #define OP_UPDATE 1 #define OP_FINAL 2 @@ -629,32 +627,37 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) if ((ctx->flags & FLAGS_FINAL) || err) omap_sham_cleanup(req); - clear_bit(FLAGS_BUSY, &ctx->dd->flags); + ctx->dd->flags &= ~FLAGS_BUSY; if (req->base.complete) req->base.complete(&req->base, err); } -static int omap_sham_handle_queue(struct omap_sham_dev *dd) +static int omap_sham_handle_queue(struct omap_sham_dev *dd, + struct ahash_request *req) { struct crypto_async_request *async_req, *backlog; struct omap_sham_reqctx *ctx; - struct ahash_request *req, *prev_req; + struct ahash_request *prev_req; unsigned long flags; - int err = 0; - - if (test_and_set_bit(FLAGS_BUSY, &dd->flags)) - return 0; + int err = 0, ret = 0; spin_lock_irqsave(&dd->lock, flags); - backlog = crypto_get_backlog(&dd->queue); + if (req) + ret = ahash_enqueue_request(&dd->queue, req); + if (dd->flags & FLAGS_BUSY) { + spin_unlock_irqrestore(&dd->lock, flags); + return ret; + } async_req = crypto_dequeue_request(&dd->queue); - if (!async_req) - clear_bit(FLAGS_BUSY, &dd->flags); + if (async_req) { + dd->flags |= FLAGS_BUSY; + backlog = crypto_get_backlog(&dd->queue); + } spin_unlock_irqrestore(&dd->lock, flags); if (!async_req) - return 0; + return ret; if (backlog) backlog->complete(backlog, -EINPROGRESS); @@ -690,7 +693,7 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd) dev_dbg(dd->dev, "exit, err: %d\n", err); - return err; + return ret; } static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) @@ -698,18 +701,10 @@ static int omap_sham_enqueue(struct ahash_request *req, unsigned int op) struct omap_sham_reqctx *ctx = ahash_request_ctx(req); struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm); struct omap_sham_dev *dd = tctx->dd; - unsigned long flags; - int err; ctx->op = op; - spin_lock_irqsave(&dd->lock, flags); - err = ahash_enqueue_request(&dd->queue, req); - spin_unlock_irqrestore(&dd->lock, flags); - - omap_sham_handle_queue(dd); - - return err; + return omap_sham_handle_queue(dd, req); } static int omap_sham_update(struct ahash_request *req) @@ -1041,7 +1036,7 @@ static void omap_sham_done_task(unsigned long data) /* finish curent request */ omap_sham_finish_req(req, err); /* start new request */ - omap_sham_handle_queue(dd); + omap_sham_handle_queue(dd, NULL); } } @@ -1049,7 +1044,7 @@ static void omap_sham_queue_task(unsigned long data) { struct omap_sham_dev *dd = (struct omap_sham_dev *)data; - omap_sham_handle_queue(dd); + omap_sham_handle_queue(dd, NULL); } static irqreturn_t omap_sham_irq(int irq, void *dev_id) -- cgit v1.1 From 798eed5d9204b01862985ba0643ce5cf84114072 Mon Sep 17 00:00:00 2001 From: Dmitry Kasatkin Date: Fri, 19 Nov 2010 16:04:26 +0200 Subject: crypto: omap-sham - crypto_ahash_final() now not need to be called. According to the Herbert Xu, client may not always call crypto_ahash_final(). In the case of error in hash calculation resources will be automatically cleaned up. 
But if no hash calculation error happens and client will not call crypto_ahash_final() at all, then internal buffer will not be freed, and clocks will not be disabled. This patch provides support for atomic crypto_ahash_update() call. Clocks are now enabled and disabled per update request. Data buffer is now allocated as a part of request context. Client is obligated to free it with crypto_free_ahash(). Signed-off-by: Dmitry Kasatkin Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 168 ++++++++++++++++++++++----------------------- 1 file changed, 82 insertions(+), 86 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 6340c5e..85d6277 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -89,6 +89,11 @@ #define OP_UPDATE 1 #define OP_FINAL 2 +#define OMAP_ALIGN_MASK (sizeof(u32)-1) +#define OMAP_ALIGNED __attribute__((aligned(sizeof(u32)))) + +#define BUFLEN PAGE_SIZE + struct omap_sham_dev; struct omap_sham_reqctx { @@ -96,9 +101,8 @@ struct omap_sham_reqctx { unsigned long flags; unsigned long op; - u8 digest[SHA1_DIGEST_SIZE]; + u8 digest[SHA1_DIGEST_SIZE] OMAP_ALIGNED; size_t digcnt; - u8 *buffer; size_t bufcnt; size_t buflen; dma_addr_t dma_addr; @@ -107,6 +111,8 @@ struct omap_sham_reqctx { struct scatterlist *sg; unsigned int offset; /* offset in current sg */ unsigned int total; /* total request */ + + u8 buffer[0] OMAP_ALIGNED; }; struct omap_sham_hmac_ctx { @@ -219,31 +225,33 @@ static void omap_sham_copy_hash(struct ahash_request *req, int out) } } -static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, - int final, int dma) +static int omap_sham_hw_init(struct omap_sham_dev *dd) { - struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); - u32 val = length << 5, mask; + clk_enable(dd->iclk); - if (unlikely(!ctx->digcnt)) { + if (!(dd->flags & FLAGS_INIT)) { + omap_sham_write_mask(dd, SHA_REG_MASK, + SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); - clk_enable(dd->iclk); + if (omap_sham_wait(dd, SHA_REG_SYSSTATUS, + SHA_REG_SYSSTATUS_RESETDONE)) + return -ETIMEDOUT; - if (!(dd->flags & FLAGS_INIT)) { - omap_sham_write_mask(dd, SHA_REG_MASK, - SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET); + dd->flags |= FLAGS_INIT; + dd->err = 0; + } - if (omap_sham_wait(dd, SHA_REG_SYSSTATUS, - SHA_REG_SYSSTATUS_RESETDONE)) { - clk_disable(dd->iclk); - return -ETIMEDOUT; - } - dd->flags |= FLAGS_INIT; - dd->err = 0; - } - } else { + return 0; +} + +static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, + int final, int dma) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); + u32 val = length << 5, mask; + + if (likely(ctx->digcnt)) omap_sham_write(dd, SHA_REG_DIGCNT, ctx->digcnt); - } omap_sham_write_mask(dd, SHA_REG_MASK, SHA_REG_MASK_IT_EN | (dma ? 
SHA_REG_MASK_DMA_EN : 0), @@ -263,23 +271,19 @@ static int omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length, SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH; omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask); - - return 0; } static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf, size_t length, int final) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); - int err, count, len32; + int count, len32; const u32 *buffer = (const u32 *)buf; dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n", ctx->digcnt, length, final); - err = omap_sham_write_ctrl(dd, length, final, 0); - if (err) - return err; + omap_sham_write_ctrl(dd, length, final, 0); /* should be non-zero before next lines to disable clocks later */ ctx->digcnt += length; @@ -302,14 +306,10 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, size_t length, int final) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); - int err, len32; + int len32; dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n", ctx->digcnt, length, final); - /* flush cache entries related to our page */ - if (dma_addr == ctx->dma_addr) - dma_sync_single_for_device(dd->dev, dma_addr, length, - DMA_TO_DEVICE); len32 = DIV_ROUND_UP(length, sizeof(u32)); @@ -320,19 +320,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, omap_set_dma_src_params(dd->dma_lch, 0, OMAP_DMA_AMODE_POST_INC, dma_addr, 0, 0); - omap_set_dma_dest_params(dd->dma_lch, 0, - OMAP_DMA_AMODE_CONSTANT, - dd->phys_base + SHA_REG_DIN(0), 0, 16); - - omap_set_dma_dest_burst_mode(dd->dma_lch, - OMAP_DMA_DATA_BURST_16); - - omap_set_dma_src_burst_mode(dd->dma_lch, - OMAP_DMA_DATA_BURST_4); - - err = omap_sham_write_ctrl(dd, length, final, 1); - if (err) - return err; + omap_sham_write_ctrl(dd, length, final, 1); ctx->digcnt += length; @@ -384,6 +372,21 @@ static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx) return 0; } +static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd, + struct omap_sham_reqctx *ctx, + size_t length, int final) +{ + ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen, + DMA_TO_DEVICE); + if (dma_mapping_error(dd->dev, ctx->dma_addr)) { + dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen); + return -EINVAL; + } + + /* next call does not fail... so no unmap in the case of error */ + return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final); +} + static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); @@ -403,7 +406,7 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { count = ctx->bufcnt; ctx->bufcnt = 0; - return omap_sham_xmit_dma(dd, ctx->dma_addr, count, final); + return omap_sham_xmit_dma_map(dd, ctx, count, final); } return 0; @@ -413,7 +416,6 @@ static int omap_sham_update_dma_fast(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); unsigned int length; - int err; ctx->flags |= FLAGS_FAST; @@ -427,11 +429,8 @@ static int omap_sham_update_dma_fast(struct omap_sham_dev *dd) ctx->total -= length; - err = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1); - if (err != -EINPROGRESS) - dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); - - return err; + /* next call does not fail... 
so no unmap in the case of error */ + return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1); } static int omap_sham_update_cpu(struct omap_sham_dev *dd) @@ -453,6 +452,9 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) omap_stop_dma(dd->dma_lch); if (ctx->flags & FLAGS_FAST) dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); + else + dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, + DMA_TO_DEVICE); return 0; } @@ -471,18 +473,9 @@ static void omap_sham_cleanup(struct ahash_request *req) ctx->flags |= FLAGS_CLEAN; spin_unlock_irqrestore(&dd->lock, flags); - if (ctx->digcnt) { - clk_disable(dd->iclk); + if (ctx->digcnt) memcpy(req->result, ctx->digest, (ctx->flags & FLAGS_SHA1) ? SHA1_DIGEST_SIZE : MD5_DIGEST_SIZE); - } - - if (ctx->dma_addr) - dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, - DMA_TO_DEVICE); - - if (ctx->buffer) - free_page((unsigned long)ctx->buffer); dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt); } @@ -520,21 +513,7 @@ static int omap_sham_init(struct ahash_request *req) ctx->bufcnt = 0; ctx->digcnt = 0; - - ctx->buflen = PAGE_SIZE; - ctx->buffer = (void *)__get_free_page( - (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? - GFP_KERNEL : GFP_ATOMIC); - if (!ctx->buffer) - return -ENOMEM; - - ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen, - DMA_TO_DEVICE); - if (dma_mapping_error(dd->dev, ctx->dma_addr)) { - dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen); - free_page((unsigned long)ctx->buffer); - return -EINVAL; - } + ctx->buflen = BUFLEN; if (tctx->flags & FLAGS_HMAC) { struct omap_sham_hmac_ctx *bctx = tctx->base; @@ -581,7 +560,7 @@ static int omap_sham_final_req(struct omap_sham_dev *dd) use_dma = 0; if (use_dma) - err = omap_sham_xmit_dma(dd, ctx->dma_addr, ctx->bufcnt, 1); + err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1); else err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1); @@ -615,6 +594,7 @@ static int omap_sham_finish_req_hmac(struct ahash_request *req) static void omap_sham_finish_req(struct ahash_request *req, int err) { struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + struct omap_sham_dev *dd = ctx->dd; if (!err) { omap_sham_copy_hash(ctx->dd->req, 1); @@ -627,7 +607,8 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) if ((ctx->flags & FLAGS_FINAL) || err) omap_sham_cleanup(req); - ctx->dd->flags &= ~FLAGS_BUSY; + clk_disable(dd->iclk); + dd->flags &= ~FLAGS_BUSY; if (req->base.complete) req->base.complete(&req->base, err); @@ -636,7 +617,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) static int omap_sham_handle_queue(struct omap_sham_dev *dd, struct ahash_request *req) { - struct crypto_async_request *async_req, *backlog; + struct crypto_async_request *async_req, *backlog = 0; struct omap_sham_reqctx *ctx; struct ahash_request *prev_req; unsigned long flags; @@ -672,7 +653,22 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", ctx->op, req->nbytes); - if (req != prev_req && ctx->digcnt) + + err = omap_sham_hw_init(dd); + if (err) + goto err1; + + omap_set_dma_dest_params(dd->dma_lch, 0, + OMAP_DMA_AMODE_CONSTANT, + dd->phys_base + SHA_REG_DIN(0), 0, 16); + + omap_set_dma_dest_burst_mode(dd->dma_lch, + OMAP_DMA_DATA_BURST_16); + + omap_set_dma_src_burst_mode(dd->dma_lch, + OMAP_DMA_DATA_BURST_4); + + if (ctx->digcnt) /* request has changed - restore hash */ omap_sham_copy_hash(req, 0); @@ -684,7 +680,7 @@ static int 
omap_sham_handle_queue(struct omap_sham_dev *dd, } else if (ctx->op == OP_FINAL) { err = omap_sham_final_req(dd); } - +err1: if (err != -EINPROGRESS) { /* done_task will not finish it, so do it here */ omap_sham_finish_req(req, err); @@ -868,7 +864,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base) } crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct omap_sham_reqctx)); + sizeof(struct omap_sham_reqctx) + BUFLEN); if (alg_base) { struct omap_sham_hmac_ctx *bctx = tctx->base; @@ -954,7 +950,7 @@ static struct ahash_alg algs[] = { CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx), - .cra_alignmask = 0, + .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_init, .cra_exit = omap_sham_cra_exit, @@ -978,7 +974,7 @@ static struct ahash_alg algs[] = { .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx) + sizeof(struct omap_sham_hmac_ctx), - .cra_alignmask = 0, + .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_sha1_init, .cra_exit = omap_sham_cra_exit, @@ -1002,7 +998,7 @@ static struct ahash_alg algs[] = { .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_sham_ctx) + sizeof(struct omap_sham_hmac_ctx), - .cra_alignmask = 0, + .cra_alignmask = OMAP_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = omap_sham_cra_md5_init, .cra_exit = omap_sham_cra_exit, -- cgit v1.1 From 3c8d758ab528317ecd6d91f8651170ffd2331899 Mon Sep 17 00:00:00 2001 From: Dmitry Kasatkin Date: Fri, 19 Nov 2010 16:04:27 +0200 Subject: crypto: omap-sham - hash-in-progress is stored in hw format Hash-in-progress is now stored in hw format. Only on final call, hash is converted to correct format. Speedup copy procedure and will allow to use OMAP burst mode. Signed-off-by: Dmitry Kasatkin Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 38 ++++++++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 14 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index 85d6277..c8d30eb 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -204,24 +204,35 @@ static void omap_sham_copy_hash(struct ahash_request *req, int out) u32 *hash = (u32 *)ctx->digest; int i; + /* MD5 is almost unused. 
So copy sha1 size to reduce code */ + for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) { + if (out) + hash[i] = omap_sham_read(ctx->dd, + SHA_REG_DIGEST(i)); + else + omap_sham_write(ctx->dd, + SHA_REG_DIGEST(i), hash[i]); + } +} + +static void omap_sham_copy_ready_hash(struct ahash_request *req) +{ + struct omap_sham_reqctx *ctx = ahash_request_ctx(req); + u32 *in = (u32 *)ctx->digest; + u32 *hash = (u32 *)req->result; + int i; + + if (!hash) + return; + if (likely(ctx->flags & FLAGS_SHA1)) { /* SHA1 results are in big endian */ for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) - if (out) - hash[i] = be32_to_cpu(omap_sham_read(ctx->dd, - SHA_REG_DIGEST(i))); - else - omap_sham_write(ctx->dd, SHA_REG_DIGEST(i), - cpu_to_be32(hash[i])); + hash[i] = be32_to_cpu(in[i]); } else { /* MD5 results are in little endian */ for (i = 0; i < MD5_DIGEST_SIZE / sizeof(u32); i++) - if (out) - hash[i] = le32_to_cpu(omap_sham_read(ctx->dd, - SHA_REG_DIGEST(i))); - else - omap_sham_write(ctx->dd, SHA_REG_DIGEST(i), - cpu_to_le32(hash[i])); + hash[i] = le32_to_cpu(in[i]); } } @@ -474,8 +485,7 @@ static void omap_sham_cleanup(struct ahash_request *req) spin_unlock_irqrestore(&dd->lock, flags); if (ctx->digcnt) - memcpy(req->result, ctx->digest, (ctx->flags & FLAGS_SHA1) ? - SHA1_DIGEST_SIZE : MD5_DIGEST_SIZE); + omap_sham_copy_ready_hash(req); dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt); } -- cgit v1.1 From a55b290b0e41e02d1969589c5d77d966ac2b7ec8 Mon Sep 17 00:00:00 2001 From: Dmitry Kasatkin Date: Fri, 19 Nov 2010 16:04:28 +0200 Subject: crypto: omap-sham - FLAGS_FIRST is redundant and removed bufcnt is 0 if it was no update requests before, which is exact meaning of FLAGS_FIRST. Signed-off-by: Dmitry Kasatkin Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index c8d30eb..d88d7eb 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -72,7 +72,6 @@ #define DEFAULT_TIMEOUT_INTERVAL HZ -#define FLAGS_FIRST 0x0001 #define FLAGS_FINUP 0x0002 #define FLAGS_FINAL 0x0004 #define FLAGS_FAST 0x0008 @@ -513,8 +512,6 @@ static int omap_sham_init(struct ahash_request *req) ctx->flags = 0; - ctx->flags |= FLAGS_FIRST; - dev_dbg(dd->dev, "init: digest size: %d\n", crypto_ahash_digestsize(tfm)); @@ -739,12 +736,9 @@ static int omap_sham_update(struct ahash_request *req) /* may be can use faster functions */ int aligned = IS_ALIGNED((u32)ctx->sg->offset, sizeof(u32)); - - if (aligned && (ctx->flags & FLAGS_FIRST)) + if (aligned) /* digest: first and final */ ctx->flags |= FLAGS_FAST; - - ctx->flags &= ~FLAGS_FIRST; } } else if (ctx->bufcnt + ctx->total <= ctx->buflen) { /* if not finaup -> not fast */ -- cgit v1.1 From 887c883eea9867535059f3c8414c8cfc952ccff1 Mon Sep 17 00:00:00 2001 From: Dmitry Kasatkin Date: Fri, 19 Nov 2010 16:04:29 +0200 Subject: crypto: omap-sham - zero-copy scatterlist handling If scatterlist have more than one entry, current driver uses aligned buffer to copy data to to accelerator to tackle possible issues with DMA and SHA buffer alignment. This commit adds more intelligence to verify SG alignment and possibility to use DMA directly on the data without using copy buffer. 
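The alignment rules the patch introduces reduce to two checks, sketched here in stand-alone C (hypothetical chunk type, not the kernel scatterlist API): DMA directly from the caller's memory only when the start address is 32-bit aligned and, for anything but the final piece, the length is a whole number of SHA1/MD5 blocks; otherwise the data still goes through the driver's aligned bounce buffer.

/* Sketch only: a "chunk" stands in for one scatterlist entry. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SHA1_MD5_BLOCK_SIZE     64
#define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

struct chunk {
        const void *buf;
        size_t len;
        bool last;      /* last chunk of the request */
};

static bool can_dma_in_place(const struct chunk *c)
{
        if (!IS_ALIGNED((uintptr_t)c->buf, sizeof(uint32_t)))
                return false;   /* bad start alignment */
        if (!c->last && !IS_ALIGNED(c->len, SHA1_MD5_BLOCK_SIZE))
                return false;   /* mid-stream chunks need whole blocks */
        return true;            /* zero-copy path is safe */
}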
Signed-off-by: Dmitry Kasatkin Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 87 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 61 insertions(+), 26 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index d88d7eb..eb988e7 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -74,7 +74,7 @@ #define FLAGS_FINUP 0x0002 #define FLAGS_FINAL 0x0004 -#define FLAGS_FAST 0x0008 +#define FLAGS_SG 0x0008 #define FLAGS_SHA1 0x0010 #define FLAGS_DMA_ACTIVE 0x0020 #define FLAGS_OUTPUT_READY 0x0040 @@ -393,6 +393,8 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd, return -EINVAL; } + ctx->flags &= ~FLAGS_SG; + /* next call does not fail... so no unmap in the case of error */ return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final); } @@ -403,9 +405,6 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) unsigned int final; size_t count; - if (!ctx->total) - return 0; - omap_sham_append_sg(ctx); final = (ctx->flags & FLAGS_FINUP) && !ctx->total; @@ -422,25 +421,62 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd) return 0; } -static int omap_sham_update_dma_fast(struct omap_sham_dev *dd) +/* Start address alignment */ +#define SG_AA(sg) (IS_ALIGNED(sg->offset, sizeof(u32))) +/* SHA1 block size alignment */ +#define SG_SA(sg) (IS_ALIGNED(sg->length, SHA1_MD5_BLOCK_SIZE)) + +static int omap_sham_update_dma_start(struct omap_sham_dev *dd) { struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); - unsigned int length; + unsigned int length, final, tail; + struct scatterlist *sg; + + if (!ctx->total) + return 0; + + if (ctx->bufcnt || ctx->offset) + return omap_sham_update_dma_slow(dd); + + dev_dbg(dd->dev, "fast: digcnt: %d, bufcnt: %u, total: %u\n", + ctx->digcnt, ctx->bufcnt, ctx->total); + + sg = ctx->sg; - ctx->flags |= FLAGS_FAST; + if (!SG_AA(sg)) + return omap_sham_update_dma_slow(dd); - length = min(ctx->total, sg_dma_len(ctx->sg)); - ctx->total = length; + if (!sg_is_last(sg) && !SG_SA(sg)) + /* size is not SHA1_BLOCK_SIZE aligned */ + return omap_sham_update_dma_slow(dd); + + length = min(ctx->total, sg->length); + + if (sg_is_last(sg)) { + if (!(ctx->flags & FLAGS_FINUP)) { + /* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */ + tail = length & (SHA1_MD5_BLOCK_SIZE - 1); + /* without finup() we need one block to close hash */ + if (!tail) + tail = SHA1_MD5_BLOCK_SIZE; + length -= tail; + } + } if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { dev_err(dd->dev, "dma_map_sg error\n"); return -EINVAL; } + ctx->flags |= FLAGS_SG; + ctx->total -= length; + ctx->offset = length; /* offset where to start slow */ + + final = (ctx->flags & FLAGS_FINUP) && !ctx->total; /* next call does not fail... 
so no unmap in the case of error */ - return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, 1); + return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final); } static int omap_sham_update_cpu(struct omap_sham_dev *dd) @@ -460,11 +496,17 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd) struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); omap_stop_dma(dd->dma_lch); - if (ctx->flags & FLAGS_FAST) + if (ctx->flags & FLAGS_SG) { dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); - else + if (ctx->sg->length == ctx->offset) { + ctx->sg = sg_next(ctx->sg); + if (ctx->sg) + ctx->offset = 0; + } + } else { dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, DMA_TO_DEVICE); + } return 0; } @@ -545,10 +587,8 @@ static int omap_sham_update_req(struct omap_sham_dev *dd) if (ctx->flags & FLAGS_CPU) err = omap_sham_update_cpu(dd); - else if (ctx->flags & FLAGS_FAST) - err = omap_sham_update_dma_fast(dd); else - err = omap_sham_update_dma_slow(dd); + err = omap_sham_update_dma_start(dd); /* wait for dma completion before can take more data */ dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); @@ -730,18 +770,13 @@ static int omap_sham_update(struct ahash_request *req) */ omap_sham_append_sg(ctx); return 0; - } else if (ctx->bufcnt + ctx->total <= 64) { + } else if (ctx->bufcnt + ctx->total <= SHA1_MD5_BLOCK_SIZE) { + /* + * faster to use CPU for short transfers + */ ctx->flags |= FLAGS_CPU; - } else if (!ctx->bufcnt && sg_is_last(ctx->sg)) { - /* may be can use faster functions */ - int aligned = IS_ALIGNED((u32)ctx->sg->offset, - sizeof(u32)); - if (aligned) - /* digest: first and final */ - ctx->flags |= FLAGS_FAST; } - } else if (ctx->bufcnt + ctx->total <= ctx->buflen) { - /* if not finaup -> not fast */ + } else if (ctx->bufcnt + ctx->total < ctx->buflen) { omap_sham_append_sg(ctx); return 0; } @@ -1026,7 +1061,7 @@ static void omap_sham_done_task(unsigned long data) dd->flags &= ~FLAGS_DMA_ACTIVE; omap_sham_update_dma_stop(dd); if (!dd->err) - err = omap_sham_update_dma_slow(dd); + err = omap_sham_update_dma_start(dd); } err = dd->err ? : err; -- cgit v1.1 From 3bd2e2216bc82a83fc5048f8e61d2d22dd5d9cda Mon Sep 17 00:00:00 2001 From: Dmitry Kasatkin Date: Tue, 30 Nov 2010 10:13:27 +0200 Subject: crypto: omap-aes - DMA initialization fixes for OMAP off mode DMA parameters for constant data were initialized during driver probe(). It seems that those settings sometimes are lost when devices goes to off mode. This patch makes DMA initialization just before use. It solves off mode problems. 
Signed-off-by: Dmitry Kasatkin Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 799ca51..41c91f3 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -339,18 +339,6 @@ static int omap_aes_dma_init(struct omap_aes_dev *dd) goto err_dma_out; } - omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT, - dd->phys_base + AES_REG_DATA, 0, 4); - - omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); - omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); - - omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT, - dd->phys_base + AES_REG_DATA, 0, 4); - - omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); - omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); - return 0; err_dma_out: @@ -443,6 +431,12 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, len32 = DIV_ROUND_UP(length, sizeof(u32)); /* IN */ + omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT, + dd->phys_base + AES_REG_DATA, 0, 4); + + omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); + omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); + omap_set_dma_transfer_params(dd->dma_lch_in, OMAP_DMA_DATA_TYPE_S32, len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_in, OMAP_DMA_DST_SYNC); @@ -451,6 +445,12 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, dma_addr_in, 0, 0); /* OUT */ + omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT, + dd->phys_base + AES_REG_DATA, 0, 4); + + omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); + omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); + omap_set_dma_transfer_params(dd->dma_lch_out, OMAP_DMA_DATA_TYPE_S32, len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_out, OMAP_DMA_SRC_SYNC); -- cgit v1.1 From eeb2b202c5b886b76c3bfa76f47e450fa69389fb Mon Sep 17 00:00:00 2001 From: Dmitry Kasatkin Date: Tue, 30 Nov 2010 10:13:28 +0200 Subject: crypto: omap-aes - redundant locking is removed Submitting request involved double locking for enqueuing and dequeuing. Now it is done under the same lock. FLAGS_BUSY is now handled under the same lock. Signed-off-by: Dmitry Kasatkin Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 70 ++++++++++++++++++++++------------------------- 1 file changed, 32 insertions(+), 38 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 41c91f3..2d8f72e 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -78,7 +78,7 @@ #define FLAGS_NEW_IV BIT(5) #define FLAGS_INIT BIT(6) #define FLAGS_FAST BIT(7) -#define FLAGS_BUSY 8 +#define FLAGS_BUSY BIT(8) struct omap_aes_ctx { struct omap_aes_dev *dd; @@ -179,9 +179,8 @@ static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit) static int omap_aes_hw_init(struct omap_aes_dev *dd) { - int err = 0; - clk_enable(dd->iclk); + if (!(dd->flags & FLAGS_INIT)) { /* is it necessary to reset before every operation? 
*/ omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_SOFTRESET, @@ -193,18 +192,15 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd) __asm__ __volatile__("nop"); __asm__ __volatile__("nop"); - err = omap_aes_wait(dd, AES_REG_SYSSTATUS, - AES_REG_SYSSTATUS_RESETDONE); - if (!err) - dd->flags |= FLAGS_INIT; + if (omap_aes_wait(dd, AES_REG_SYSSTATUS, + AES_REG_SYSSTATUS_RESETDONE)) { + clk_disable(dd->iclk); + return -ETIMEDOUT; + } + dd->flags |= FLAGS_INIT; } - return err; -} - -static void omap_aes_hw_cleanup(struct omap_aes_dev *dd) -{ - clk_disable(dd->iclk); + return 0; } static void omap_aes_write_ctrl(struct omap_aes_dev *dd) @@ -538,6 +534,8 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) pr_debug("err: %d\n", err); + dd->flags &= ~FLAGS_BUSY; + ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req)); if (!dd->total) @@ -553,7 +551,7 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START); - omap_aes_hw_cleanup(dd); + clk_disable(dd->iclk); omap_stop_dma(dd->dma_lch_in); omap_stop_dma(dd->dma_lch_out); @@ -580,22 +578,26 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) return err; } -static int omap_aes_handle_req(struct omap_aes_dev *dd) +static int omap_aes_handle_req(struct omap_aes_dev *dd, + struct ablkcipher_request *req) { struct crypto_async_request *async_req, *backlog; struct omap_aes_ctx *ctx; struct omap_aes_reqctx *rctx; - struct ablkcipher_request *req; unsigned long flags; - - if (dd->total) - goto start; + int err = 0; spin_lock_irqsave(&dd->lock, flags); + if (req) + err = ablkcipher_enqueue_request(&dd->queue, req); + if (dd->flags & FLAGS_BUSY) { + spin_unlock_irqrestore(&dd->lock, flags); + return err; + } backlog = crypto_get_backlog(&dd->queue); async_req = crypto_dequeue_request(&dd->queue); - if (!async_req) - clear_bit(FLAGS_BUSY, &dd->flags); + if (async_req) + dd->flags |= FLAGS_BUSY; spin_unlock_irqrestore(&dd->lock, flags); if (!async_req) @@ -637,20 +639,23 @@ static int omap_aes_handle_req(struct omap_aes_dev *dd) if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) pr_err("request size is not exact amount of AES blocks\n"); -start: - return omap_aes_crypt_dma_start(dd); + omap_aes_crypt_dma_start(dd); + + return err; } static void omap_aes_task(unsigned long data) { struct omap_aes_dev *dd = (struct omap_aes_dev *)data; - int err; pr_debug("enter\n"); - err = omap_aes_crypt_dma_stop(dd); + omap_aes_crypt_dma_stop(dd); - err = omap_aes_handle_req(dd); + if (dd->total) + omap_aes_crypt_dma_start(dd); + else + omap_aes_handle_req(dd, NULL); pr_debug("exit\n"); } @@ -661,8 +666,6 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode) crypto_ablkcipher_reqtfm(req)); struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req); struct omap_aes_dev *dd; - unsigned long flags; - int err; pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes, !!(mode & FLAGS_ENCRYPT), @@ -674,16 +677,7 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode) rctx->mode = mode; - spin_lock_irqsave(&dd->lock, flags); - err = ablkcipher_enqueue_request(&dd->queue, req); - spin_unlock_irqrestore(&dd->lock, flags); - - if (!test_and_set_bit(FLAGS_BUSY, &dd->flags)) - omap_aes_handle_req(dd); - - pr_debug("exit\n"); - - return err; + return omap_aes_handle_req(dd, req); } /* ********************** ALG API ************************************ */ -- cgit v1.1 From 21fe9767f3bd56fd9a271dc43b93cd4608d47f4a Mon Sep 17 00:00:00 2001 
From: Dmitry Kasatkin Date: Tue, 30 Nov 2010 10:13:29 +0200 Subject: crypto: omap-aes - error handling implementation improved Previous version had not error handling. Request could remain uncompleted. Also in the case of DMA error, FLAGS_INIT is unset and accelerator will be initialized again. Buffer size allignment is checked. Signed-off-by: Dmitry Kasatkin Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 134 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 93 insertions(+), 41 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 2d8f72e..704cc70 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -103,14 +103,16 @@ struct omap_aes_dev { struct omap_aes_ctx *ctx; struct device *dev; unsigned long flags; + int err; u32 *iv; u32 ctrl; - spinlock_t lock; - struct crypto_queue queue; + spinlock_t lock; + struct crypto_queue queue; - struct tasklet_struct task; + struct tasklet_struct done_task; + struct tasklet_struct queue_task; struct ablkcipher_request *req; size_t total; @@ -198,24 +200,30 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd) return -ETIMEDOUT; } dd->flags |= FLAGS_INIT; + dd->err = 0; } return 0; } -static void omap_aes_write_ctrl(struct omap_aes_dev *dd) +static int omap_aes_write_ctrl(struct omap_aes_dev *dd) { unsigned int key32; - int i; + int i, err, init = dd->flags & FLAGS_INIT; u32 val, mask; + err = omap_aes_hw_init(dd); + if (err) + return err; + val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3); if (dd->flags & FLAGS_CBC) val |= AES_REG_CTRL_CBC; if (dd->flags & FLAGS_ENCRYPT) val |= AES_REG_CTRL_DIRECTION; - if (dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) && + /* check if hw state & mode have not changed */ + if (init && dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) && !(dd->ctx->flags & FLAGS_NEW_KEY)) goto out; @@ -257,6 +265,8 @@ out: /* start DMA or disable idle mode */ omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START, AES_REG_MASK_START); + + return 0; } static struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_ctx *ctx) @@ -284,8 +294,16 @@ static void omap_aes_dma_callback(int lch, u16 ch_status, void *data) { struct omap_aes_dev *dd = data; - if (lch == dd->dma_lch_out) - tasklet_schedule(&dd->task); + if (ch_status != OMAP_DMA_BLOCK_IRQ) { + pr_err("omap-aes DMA error status: 0x%hx\n", ch_status); + dd->err = -EIO; + dd->flags &= ~FLAGS_INIT; /* request to re-initialize */ + } else if (lch == dd->dma_lch_in) { + return; + } + + /* dma_lch_out - completed */ + tasklet_schedule(&dd->done_task); } static int omap_aes_dma_init(struct omap_aes_dev *dd) @@ -390,6 +408,11 @@ static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf, if (!count) return off; + /* + * buflen and total are AES_BLOCK_SIZE size aligned, + * so count should be also aligned + */ + sg_copy_buf(buf + off, *sg, *offset, count, out); off += count; @@ -415,6 +438,7 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); struct omap_aes_dev *dd = ctx->dd; int len32; + int err; pr_debug("len: %d\n", length); @@ -454,11 +478,13 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC, dma_addr_out, 0, 0); + err = omap_aes_write_ctrl(dd); + if (err) + return err; + omap_start_dma(dd->dma_lch_in); omap_start_dma(dd->dma_lch_out); - omap_aes_write_ctrl(dd); - return 0; } @@ -484,8 +510,10 @@ 
static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) count = min(dd->total, sg_dma_len(dd->in_sg)); count = min(count, sg_dma_len(dd->out_sg)); - if (count != dd->total) + if (count != dd->total) { + pr_err("request length != buffer length\n"); return -EINVAL; + } pr_debug("fast\n"); @@ -521,25 +549,28 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) dd->total -= count; - err = omap_aes_hw_init(dd); - err = omap_aes_crypt_dma(tfm, addr_in, addr_out, count); + if (err) { + dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); + dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_TO_DEVICE); + } return err; } static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) { + struct ablkcipher_request *req = dd->req; struct omap_aes_ctx *ctx; pr_debug("err: %d\n", err); dd->flags &= ~FLAGS_BUSY; - ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(dd->req)); + ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); - if (!dd->total) - dd->req->base.complete(&dd->req->base, err); + if (req->base.complete) + req->base.complete(&req->base, err); } static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) @@ -551,11 +582,11 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) omap_aes_write_mask(dd, AES_REG_MASK, 0, AES_REG_MASK_START); - clk_disable(dd->iclk); - omap_stop_dma(dd->dma_lch_in); omap_stop_dma(dd->dma_lch_out); + clk_disable(dd->iclk); + if (dd->flags & FLAGS_FAST) { dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); @@ -572,27 +603,24 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) } } - if (err || !dd->total) - omap_aes_finish_req(dd, err); - return err; } -static int omap_aes_handle_req(struct omap_aes_dev *dd, +static int omap_aes_handle_queue(struct omap_aes_dev *dd, struct ablkcipher_request *req) { struct crypto_async_request *async_req, *backlog; struct omap_aes_ctx *ctx; struct omap_aes_reqctx *rctx; unsigned long flags; - int err = 0; + int err, ret = 0; spin_lock_irqsave(&dd->lock, flags); if (req) - err = ablkcipher_enqueue_request(&dd->queue, req); + ret = ablkcipher_enqueue_request(&dd->queue, req); if (dd->flags & FLAGS_BUSY) { spin_unlock_irqrestore(&dd->lock, flags); - return err; + return ret; } backlog = crypto_get_backlog(&dd->queue); async_req = crypto_dequeue_request(&dd->queue); @@ -601,7 +629,7 @@ static int omap_aes_handle_req(struct omap_aes_dev *dd, spin_unlock_irqrestore(&dd->lock, flags); if (!async_req) - return 0; + return ret; if (backlog) backlog->complete(backlog, -EINPROGRESS); @@ -636,30 +664,46 @@ static int omap_aes_handle_req(struct omap_aes_dev *dd, ctx->flags |= FLAGS_NEW_KEY; } - if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) - pr_err("request size is not exact amount of AES blocks\n"); - - omap_aes_crypt_dma_start(dd); + err = omap_aes_crypt_dma_start(dd); + if (err) { + /* aes_task will not finish it, so do it here */ + omap_aes_finish_req(dd, err); + tasklet_schedule(&dd->queue_task); + } - return err; + return ret; /* return ret, which is enqueue return value */ } -static void omap_aes_task(unsigned long data) +static void omap_aes_done_task(unsigned long data) { struct omap_aes_dev *dd = (struct omap_aes_dev *)data; + int err; pr_debug("enter\n"); - omap_aes_crypt_dma_stop(dd); + err = omap_aes_crypt_dma_stop(dd); - if (dd->total) - omap_aes_crypt_dma_start(dd); - else - omap_aes_handle_req(dd, NULL); + err = dd->err ? : err; + + if (dd->total && !err) { + err = omap_aes_crypt_dma_start(dd); + if (!err) + return; /* DMA started. 
Not fininishing. */ + } + + omap_aes_finish_req(dd, err); + omap_aes_handle_queue(dd, NULL); pr_debug("exit\n"); } +static void omap_aes_queue_task(unsigned long data) +{ + struct omap_aes_dev *dd = (struct omap_aes_dev *)data; + + omap_aes_handle_queue(dd, NULL); +} + static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode) { struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx( @@ -671,13 +715,18 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode) !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC)); + if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { + pr_err("request size is not exact amount of AES blocks\n"); + return -EINVAL; + } + dd = omap_aes_find_dev(ctx); if (!dd) return -ENODEV; rctx->mode = mode; - return omap_aes_handle_req(dd, req); + return omap_aes_handle_queue(dd, req); } /* ********************** ALG API ************************************ */ @@ -843,7 +892,8 @@ static int omap_aes_probe(struct platform_device *pdev) (reg & AES_REG_REV_MAJOR) >> 4, reg & AES_REG_REV_MINOR); clk_disable(dd->iclk); - tasklet_init(&dd->task, omap_aes_task, (unsigned long)dd); + tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd); + tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd); err = omap_aes_dma_init(dd); if (err) @@ -870,7 +920,8 @@ err_algs: crypto_unregister_alg(&algs[j]); omap_aes_dma_cleanup(dd); err_dma: - tasklet_kill(&dd->task); + tasklet_kill(&dd->done_task); + tasklet_kill(&dd->queue_task); iounmap(dd->io_base); err_io: clk_put(dd->iclk); @@ -897,7 +948,8 @@ static int omap_aes_remove(struct platform_device *pdev) for (i = 0; i < ARRAY_SIZE(algs); i++) crypto_unregister_alg(&algs[i]); - tasklet_kill(&dd->task); + tasklet_kill(&dd->done_task); + tasklet_kill(&dd->queue_task); omap_aes_dma_cleanup(dd); iounmap(dd->io_base); clk_put(dd->iclk); -- cgit v1.1 From 67a730ce449561f6df838f0b38a2b72cbf4e3c4c Mon Sep 17 00:00:00 2001 From: Dmitry Kasatkin Date: Tue, 30 Nov 2010 10:13:30 +0200 Subject: crypto: omap-aes - unnecessary code removed Key and IV should always be set before AES operation. So no need to check if it has changed or not. 
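Since the key is now written unconditionally for every request, the per-request cost is only a handful of register writes; a stand-alone sketch of the word-by-word key load (the write hook is hypothetical, not the driver's MMIO accessor):

/* Sketch only: hypothetical register-write hook instead of a MMIO write. */
#include <stdint.h>
#include <string.h>

typedef void (*write_key_word_fn)(int index, uint32_t word);

static void load_aes_key(const uint8_t *key, size_t keylen,
                         write_key_word_fn write_key_word)
{
        uint32_t word;
        size_t i;

        for (i = 0; i < keylen / sizeof(uint32_t); i++) {
                /*
                 * The driver passes each word through __le32_to_cpu(); on the
                 * little-endian OMAP that is a no-op, so a plain memcpy serves
                 * for this sketch.
                 */
                memcpy(&word, key + 4 * i, sizeof(word));
                write_key_word(i, word);
        }
}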
Signed-off-by: Dmitry Kasatkin Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 70 ++++++++++++----------------------------------- 1 file changed, 17 insertions(+), 53 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 704cc70..0b21dce 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -74,11 +74,9 @@ #define FLAGS_CBC BIT(1) #define FLAGS_GIV BIT(2) -#define FLAGS_NEW_KEY BIT(4) -#define FLAGS_NEW_IV BIT(5) -#define FLAGS_INIT BIT(6) -#define FLAGS_FAST BIT(7) -#define FLAGS_BUSY BIT(8) +#define FLAGS_INIT BIT(4) +#define FLAGS_FAST BIT(5) +#define FLAGS_BUSY BIT(6) struct omap_aes_ctx { struct omap_aes_dev *dd; @@ -105,9 +103,6 @@ struct omap_aes_dev { unsigned long flags; int err; - u32 *iv; - u32 ctrl; - spinlock_t lock; struct crypto_queue queue; @@ -209,28 +204,13 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd) static int omap_aes_write_ctrl(struct omap_aes_dev *dd) { unsigned int key32; - int i, err, init = dd->flags & FLAGS_INIT; + int i, err; u32 val, mask; err = omap_aes_hw_init(dd); if (err) return err; - val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3); - if (dd->flags & FLAGS_CBC) - val |= AES_REG_CTRL_CBC; - if (dd->flags & FLAGS_ENCRYPT) - val |= AES_REG_CTRL_DIRECTION; - - /* check if hw state & mode have not changed */ - if (init && dd->ctrl == val && !(dd->flags & FLAGS_NEW_IV) && - !(dd->ctx->flags & FLAGS_NEW_KEY)) - goto out; - - /* only need to write control registers for new settings */ - - dd->ctrl = val; - val = 0; if (dd->dma_lch_out >= 0) val |= AES_REG_MASK_DMA_OUT_EN; @@ -241,27 +221,28 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd) omap_aes_write_mask(dd, AES_REG_MASK, val, mask); - pr_debug("Set key\n"); key32 = dd->ctx->keylen / sizeof(u32); - /* set a key */ + + /* it seems a key should always be set even if it has not changed */ for (i = 0; i < key32; i++) { omap_aes_write(dd, AES_REG_KEY(i), __le32_to_cpu(dd->ctx->key[i])); } - dd->ctx->flags &= ~FLAGS_NEW_KEY; - if (dd->flags & FLAGS_NEW_IV) { - pr_debug("Set IV\n"); - omap_aes_write_n(dd, AES_REG_IV(0), dd->iv, 4); - dd->flags &= ~FLAGS_NEW_IV; - } + if ((dd->flags & FLAGS_CBC) && dd->req->info) + omap_aes_write_n(dd, AES_REG_IV(0), dd->req->info, 4); + + val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3); + if (dd->flags & FLAGS_CBC) + val |= AES_REG_CTRL_CBC; + if (dd->flags & FLAGS_ENCRYPT) + val |= AES_REG_CTRL_DIRECTION; mask = AES_REG_CTRL_CBC | AES_REG_CTRL_DIRECTION | AES_REG_CTRL_KEY_SIZE; - omap_aes_write_mask(dd, AES_REG_CTRL, dd->ctrl, mask); + omap_aes_write_mask(dd, AES_REG_CTRL, val, mask); -out: /* start DMA or disable idle mode */ omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START, AES_REG_MASK_START); @@ -561,16 +542,12 @@ static int omap_aes_crypt_dma_start(struct omap_aes_dev *dd) static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) { struct ablkcipher_request *req = dd->req; - struct omap_aes_ctx *ctx; pr_debug("err: %d\n", err); dd->flags &= ~FLAGS_BUSY; - ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); - - if (req->base.complete) - req->base.complete(&req->base, err); + req->base.complete(&req->base, err); } static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) @@ -636,8 +613,6 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, req = ablkcipher_request_cast(async_req); - pr_debug("get new req\n"); - /* assign new request to device */ dd->req = req; dd->total = req->nbytes; @@ -651,18 +626,8 @@ static int 
omap_aes_handle_queue(struct omap_aes_dev *dd, rctx->mode &= FLAGS_MODE_MASK; dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode; - dd->iv = req->info; - if ((dd->flags & FLAGS_CBC) && dd->iv) - dd->flags |= FLAGS_NEW_IV; - else - dd->flags &= ~FLAGS_NEW_IV; - + dd->ctx = ctx; ctx->dd = dd; - if (dd->ctx != ctx) { - /* assign new context to device */ - dd->ctx = ctx; - ctx->flags |= FLAGS_NEW_KEY; - } err = omap_aes_crypt_dma_start(dd); if (err) { @@ -744,7 +709,6 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key, memcpy(ctx->key, key, keylen); ctx->keylen = keylen; - ctx->flags |= FLAGS_NEW_KEY; return 0; } -- cgit v1.1 From 83ea7e0fe1471508ab8e8d7b317e743fe7a05a5f Mon Sep 17 00:00:00 2001 From: Dmitry Kasatkin Date: Tue, 30 Nov 2010 10:13:31 +0200 Subject: crypto: omap-aes - initialize aes module once per request AES module was initialized for every DMA transaction. That is redundant. Now it is initialized once per request. Signed-off-by: Dmitry Kasatkin Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 54 ++++++++++++++++++++++++----------------------- 1 file changed, 28 insertions(+), 26 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index 0b21dce..b69da4f 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -176,6 +176,11 @@ static int omap_aes_wait(struct omap_aes_dev *dd, u32 offset, u32 bit) static int omap_aes_hw_init(struct omap_aes_dev *dd) { + /* + * clocks are enabled when request starts and disabled when finished. + * It may be long delays between requests. + * Device might go to off mode to save power. + */ clk_enable(dd->iclk); if (!(dd->flags & FLAGS_INIT)) { @@ -190,10 +195,9 @@ static int omap_aes_hw_init(struct omap_aes_dev *dd) __asm__ __volatile__("nop"); if (omap_aes_wait(dd, AES_REG_SYSSTATUS, - AES_REG_SYSSTATUS_RESETDONE)) { - clk_disable(dd->iclk); + AES_REG_SYSSTATUS_RESETDONE)) return -ETIMEDOUT; - } + dd->flags |= FLAGS_INIT; dd->err = 0; } @@ -243,9 +247,19 @@ static int omap_aes_write_ctrl(struct omap_aes_dev *dd) omap_aes_write_mask(dd, AES_REG_CTRL, val, mask); - /* start DMA or disable idle mode */ - omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START, - AES_REG_MASK_START); + /* IN */ + omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT, + dd->phys_base + AES_REG_DATA, 0, 4); + + omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); + omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); + + /* OUT */ + omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT, + dd->phys_base + AES_REG_DATA, 0, 4); + + omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); + omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); return 0; } @@ -419,7 +433,6 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm); struct omap_aes_dev *dd = ctx->dd; int len32; - int err; pr_debug("len: %d\n", length); @@ -432,12 +445,6 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, len32 = DIV_ROUND_UP(length, sizeof(u32)); /* IN */ - omap_set_dma_dest_params(dd->dma_lch_in, 0, OMAP_DMA_AMODE_CONSTANT, - dd->phys_base + AES_REG_DATA, 0, 4); - - omap_set_dma_dest_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); - omap_set_dma_src_burst_mode(dd->dma_lch_in, OMAP_DMA_DATA_BURST_4); - omap_set_dma_transfer_params(dd->dma_lch_in, OMAP_DMA_DATA_TYPE_S32, len32, 1, 
OMAP_DMA_SYNC_PACKET, dd->dma_in, OMAP_DMA_DST_SYNC); @@ -446,12 +453,6 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, dma_addr_in, 0, 0); /* OUT */ - omap_set_dma_src_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_CONSTANT, - dd->phys_base + AES_REG_DATA, 0, 4); - - omap_set_dma_src_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); - omap_set_dma_dest_burst_mode(dd->dma_lch_out, OMAP_DMA_DATA_BURST_4); - omap_set_dma_transfer_params(dd->dma_lch_out, OMAP_DMA_DATA_TYPE_S32, len32, 1, OMAP_DMA_SYNC_PACKET, dd->dma_out, OMAP_DMA_SRC_SYNC); @@ -459,13 +460,13 @@ static int omap_aes_crypt_dma(struct crypto_tfm *tfm, dma_addr_t dma_addr_in, omap_set_dma_dest_params(dd->dma_lch_out, 0, OMAP_DMA_AMODE_POST_INC, dma_addr_out, 0, 0); - err = omap_aes_write_ctrl(dd); - if (err) - return err; - omap_start_dma(dd->dma_lch_in); omap_start_dma(dd->dma_lch_out); + /* start DMA or disable idle mode */ + omap_aes_write_mask(dd, AES_REG_MASK, AES_REG_MASK_START, + AES_REG_MASK_START); + return 0; } @@ -545,6 +546,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err) pr_debug("err: %d\n", err); + clk_disable(dd->iclk); dd->flags &= ~FLAGS_BUSY; req->base.complete(&req->base, err); @@ -562,8 +564,6 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd) omap_stop_dma(dd->dma_lch_in); omap_stop_dma(dd->dma_lch_out); - clk_disable(dd->iclk); - if (dd->flags & FLAGS_FAST) { dma_unmap_sg(dd->dev, dd->out_sg, 1, DMA_FROM_DEVICE); dma_unmap_sg(dd->dev, dd->in_sg, 1, DMA_TO_DEVICE); @@ -629,7 +629,9 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd, dd->ctx = ctx; ctx->dd = dd; - err = omap_aes_crypt_dma_start(dd); + err = omap_aes_write_ctrl(dd); + if (!err) + err = omap_aes_crypt_dma_start(dd); if (err) { /* aes_task will not finish it, so do it here */ omap_aes_finish_req(dd, err); -- cgit v1.1 From efce41b65f66251d60484781df305e8a85c9507b Mon Sep 17 00:00:00 2001 From: Dmitry Kasatkin Date: Tue, 30 Nov 2010 10:13:32 +0200 Subject: crypto: omap-aes - checkpatch --file warning fixes Signed-off-by: Dmitry Kasatkin Signed-off-by: Herbert Xu --- drivers/crypto/omap-aes.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c index b69da4f..add2a1a 100644 --- a/drivers/crypto/omap-aes.c +++ b/drivers/crypto/omap-aes.c @@ -96,7 +96,7 @@ struct omap_aes_reqctx { struct omap_aes_dev { struct list_head list; unsigned long phys_base; - void __iomem *io_base; + void __iomem *io_base; struct clk *iclk; struct omap_aes_ctx *ctx; struct device *dev; @@ -759,7 +759,7 @@ static struct crypto_alg algs[] = { .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_aes_ctx), - .cra_alignmask = 0, + .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = omap_aes_cra_init, @@ -779,7 +779,7 @@ static struct crypto_alg algs[] = { .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct omap_aes_ctx), - .cra_alignmask = 0, + .cra_alignmask = 0, .cra_type = &crypto_ablkcipher_type, .cra_module = THIS_MODULE, .cra_init = omap_aes_cra_init, -- cgit v1.1 From 6c39d116ba308ccf9007773a090ca6d20eb68459 Mon Sep 17 00:00:00 2001 From: Dmitry Kasatkin Date: Wed, 29 Dec 2010 21:52:04 +1100 Subject: crypto: omap-sham - backlog handling fix Previous commit "removed redundant locking" introduced a bug in handling 
the backlog. In certain cases, when the completion callback of an async request is invoked with -EINPROGRESS, requests can be left uncompleted. This does not happen with an implementation similar to the crypto test manager, but it does happen with an implementation similar to dm-crypt. The backlog needs to be checked before the next request is dequeued (see the sketch at the end of this log). Signed-off-by: Dmitry Kasatkin Signed-off-by: Herbert Xu --- drivers/crypto/omap-sham.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c index eb988e7..2e71123 100644 --- a/drivers/crypto/omap-sham.c +++ b/drivers/crypto/omap-sham.c @@ -664,7 +664,7 @@ static void omap_sham_finish_req(struct ahash_request *req, int err) static int omap_sham_handle_queue(struct omap_sham_dev *dd, struct ahash_request *req) { - struct crypto_async_request *async_req, *backlog = 0; + struct crypto_async_request *async_req, *backlog; struct omap_sham_reqctx *ctx; struct ahash_request *prev_req; unsigned long flags; @@ -677,11 +677,10 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, spin_unlock_irqrestore(&dd->lock, flags); return ret; } + backlog = crypto_get_backlog(&dd->queue); async_req = crypto_dequeue_request(&dd->queue); - if (async_req) { + if (async_req) dd->flags |= FLAGS_BUSY; - backlog = crypto_get_backlog(&dd->queue); - } spin_unlock_irqrestore(&dd->lock, flags); if (!async_req) -- cgit v1.1 From 41f2977d40798ce45f4da7a1291039ffbe9e1dbc Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Tue, 4 Jan 2011 15:37:16 +1100 Subject: crypto: mv_cesa - dont return PTR_ERR() of wrong pointer Fix a PTR_ERR() return of the wrong pointer. Signed-off-by: Roel Kluin Signed-off-by: Herbert Xu --- drivers/crypto/mv_cesa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index 7d279e5..c99305a 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c @@ -857,7 +857,7 @@ static int mv_cra_hash_init(struct crypto_tfm *tfm, const char *base_hash_name, printk(KERN_WARNING MV_CESA "Base driver '%s' could not be loaded!\n", base_hash_name); - err = PTR_ERR(fallback_tfm); + err = PTR_ERR(base_hash); goto err_bad_base; } } -- cgit v1.1 From dffa18449a334cf436c1fabfb2fcf7d4240c994b Mon Sep 17 00:00:00 2001 From: Dennis Gilmore Date: Thu, 6 Jan 2011 17:15:31 +1100 Subject: crypto: n2 - use __devexit not __exit in n2_unregister_algs Fixes the Fedora sparc build failure; thanks to kylem for helping with debugging. Signed-off-by: Dennis Gilmore Acked-by: David S. Miller Signed-off-by: Herbert Xu --- drivers/crypto/n2_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/crypto') diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c index 88ee015..3372491 100644 --- a/drivers/crypto/n2_core.c +++ b/drivers/crypto/n2_core.c @@ -1542,7 +1542,7 @@ out: return err; } -static void __exit n2_unregister_algs(void) +static void __devexit n2_unregister_algs(void) { mutex_lock(&spu_lock); if (!--algs_registered) -- cgit v1.1 From 21493088733e6e09dac6f54595a1b6b8ab1e68fd Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Fri, 7 Jan 2011 14:52:00 +1100 Subject: crypto: padlock - Move padlock.h into include/crypto This patch moves padlock.h from drivers/crypto into include/crypto so that it may be used by the via-rng driver.
Signed-off-by: Herbert Xu --- drivers/crypto/padlock-aes.c | 2 +- drivers/crypto/padlock-sha.c | 8 +------- drivers/crypto/padlock.h | 23 ----------------------- 3 files changed, 2 insertions(+), 31 deletions(-) delete mode 100644 drivers/crypto/padlock.h (limited to 'drivers/crypto') diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 2e992bc..2e56508 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -21,7 +22,6 @@ #include #include #include -#include "padlock.h" /* * Number of data blocks actually fetched for each xcrypt insn. diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index d3a27e0..adf075b 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -13,6 +13,7 @@ */ #include +#include #include #include #include @@ -22,13 +23,6 @@ #include #include #include -#include "padlock.h" - -#ifdef CONFIG_64BIT -#define STACK_ALIGN 16 -#else -#define STACK_ALIGN 4 -#endif struct padlock_sha_desc { struct shash_desc fallback; diff --git a/drivers/crypto/padlock.h b/drivers/crypto/padlock.h deleted file mode 100644 index b728e45..0000000 --- a/drivers/crypto/padlock.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Driver for VIA PadLock - * - * Copyright (c) 2004 Michal Ludvig - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 2 of the License, or (at your option) - * any later version. - * - */ - -#ifndef _CRYPTO_PADLOCK_H -#define _CRYPTO_PADLOCK_H - -#define PADLOCK_ALIGNMENT 16 - -#define PFX "padlock: " - -#define PADLOCK_CRA_PRIORITY 300 -#define PADLOCK_COMPOSITE_PRIORITY 400 - -#endif /* _CRYPTO_PADLOCK_H */ -- cgit v1.1
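
Editor's note on the omap-sham backlog fix above: drivers built around a crypto_queue follow a common pattern in which the backlog entry is looked up with crypto_get_backlog() before crypto_dequeue_request(), and its owner is notified with -EINPROGRESS before the newly dequeued request is started. The sketch below only illustrates that ordering; it is not code from any driver in this log, and the struct my_dev, MY_FLAGS_BUSY, my_handle_queue() and my_start_request() names are hypothetical.

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>

#define MY_FLAGS_BUSY	BIT(0)

/* Hypothetical device structure; real drivers carry much more state. */
struct my_dev {
	spinlock_t		lock;
	struct crypto_queue	queue;
	unsigned long		flags;
};

static void my_start_request(struct my_dev *dd, struct ahash_request *req)
{
	/* Program the hardware for @req; omitted in this sketch. */
}

static int my_handle_queue(struct my_dev *dd, struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);
	if (dd->flags & MY_FLAGS_BUSY) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	/* Fetch the backlog entry before dequeuing the next request. */
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= MY_FLAGS_BUSY;
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	/* Tell the backlogged requester its request is now in progress. */
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	my_start_request(dd, ahash_request_cast(async_req));

	return ret;
}

The key point, per the commit message above, is the ordering: crypto_get_backlog() before crypto_dequeue_request(), and the -EINPROGRESS notification before the new request is started.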