aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/crypto
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/crypto')
-rw-r--r--drivers/crypto/Kconfig58
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/ace.c2651
-rw-r--r--drivers/crypto/ace.h103
-rw-r--r--drivers/crypto/ace_sfr.h497
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c5
-rw-r--r--drivers/crypto/hifn_795x.c6
-rw-r--r--drivers/crypto/ixp4xx_crypto.c1
-rw-r--r--drivers/crypto/mv_cesa.c1
-rw-r--r--drivers/crypto/n2_core.c37
-rw-r--r--drivers/crypto/omap-sham.c180
-rw-r--r--drivers/crypto/padlock-aes.c6
-rw-r--r--drivers/crypto/padlock-sha.c8
-rw-r--r--drivers/crypto/picoxcell_crypto.c121
-rw-r--r--drivers/crypto/talitos.c66
15 files changed, 215 insertions, 3526 deletions
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 40b6342..7e3002b 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -201,6 +201,7 @@ config CRYPTO_DEV_HIFN_795X
select CRYPTO_BLKCIPHER
select HW_RANDOM if CRYPTO_DEV_HIFN_795X_RNG
depends on PCI
+ depends on !ARCH_DMA_ADDR_T_64BIT
help
This option allows you to have support for HIFN 795x crypto adapters.
@@ -267,7 +268,7 @@ config CRYPTO_DEV_OMAP_AES
config CRYPTO_DEV_PICOXCELL
tristate "Support for picoXcell IPSEC and Layer2 crypto engines"
- depends on ARCH_PICOXCELL
+ depends on ARCH_PICOXCELL && HAVE_CLK
select CRYPTO_AES
select CRYPTO_AUTHENC
select CRYPTO_ALGAPI
@@ -293,59 +294,4 @@ config CRYPTO_DEV_S5P
Select this to offload Samsung S5PV210 or S5PC110 from AES
algorithms execution.
-config CRYPTO_S5P_DEV_ACE
- tristate "Support for Samsung ACE (Advanced Crypto Engine)"
- depends on ARCH_EXYNOS4 || ARCH_EXYNOS5 || ARCH_S5PV210
- select S5P_DEV_ACE
- select CRYPTO_ALGAPI
- help
- Use ACE for AES (ECB, CBC, CTR) and SHA1/SHA256.
- Available in EXYNOS4/S5PV210/S5PC110 and newer CPUs.
-
-config ACE_BC
- bool "Support for AES block cipher (ECB, CBC, CTR mode)"
- depends on CRYPTO_S5P_DEV_ACE
- select CRYPTO_AES
- select CRYPTO_BLKCIPHER
- select CRYPTO_ECB
- select CRYPTO_CTR
- select CRYPTO_CBC
- default y
- help
- Use ACE for ACE (ECB, CBC, CTR) for Samsung Hardware Crypto engine.
-
-config ACE_BC_ASYNC
- bool "Support for AES async mode"
- default y
- depends on ACE_BC
-
-config ACE_BC_IRQMODE
- bool "Support for AES IRQ mode"
- default n
- depends on ACE_BC_ASYNC
-
-config ACE_HASH_SHA1
- bool "Support for SHA1 hash algorithm"
- depends on CRYPTO_S5P_DEV_ACE
- select CRYPTO_HASH
- select CRYPTO_SHA1
- default y
- help
- Use SHA1 hash algorithm for Samsung Hardware Crypto engine
-
-config ACE_HASH_SHA256
- bool "Support for SHA256 hash algorithm"
- depends on CRYPTO_S5P_DEV_ACE && !ARCH_S5PV210
- select CRYPTO_HASH
- select CRYPTO_SHA256
- default y
- help
- Use SHA256 hash algorithm for Samsung Hardware Crypto engine
-
-config ACE_DEBUG
- bool "Debug message for crypto driver"
- depends on CRYPTO_S5P_DEV_ACE
- help
- This option allows you to check the debug print message for crypto driver.
-
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 4fe1e44..53ea501 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -13,4 +13,3 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o
obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
-obj-$(CONFIG_CRYPTO_S5P_DEV_ACE) += ace.o
diff --git a/drivers/crypto/ace.c b/drivers/crypto/ace.c
deleted file mode 100644
index 21ddf96..0000000
--- a/drivers/crypto/ace.c
+++ /dev/null
@@ -1,2651 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Support for ACE (Advanced Crypto Engine) for S5PV210/EXYNOS4210.
- *
- * Copyright (c) 2011 Samsung Electronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/scatterlist.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-#include <linux/memory.h>
-#include <linux/delay.h>
-#include <linux/version.h>
-#include <linux/hrtimer.h>
-
-#include <asm/cacheflush.h>
-
-#include <crypto/aes.h>
-#include <crypto/internal/hash.h>
-#include <crypto/sha.h>
-#include <crypto/scatterwalk.h>
-
-#include <mach/secmem.h>
-
-#include "ace.h"
-#include "ace_sfr.h"
-
-#define S5P_ACE_DRIVER_NAME "s5p-ace"
-#define ACE_AES_MIN_BLOCK_SIZE 16
-
-#undef ACE_USE_ACP
-#ifdef ACE_USE_ACP
-#define PA_SSS_USER_CON 0x10010344
-#define ACE_ARCACHE 0xA
-#define ACE_AWCACHE 0xA
-#endif
-
-#undef ACE_DEBUG_HEARTBEAT
-#undef ACE_DEBUG_WATCHDOG
-
-#ifdef CONFIG_ACE_DEBUG
-#define S5P_ACE_DEBUG(args...) printk(KERN_INFO args)
-#else
-#define S5P_ACE_DEBUG(args...)
-#endif
-
-#define s5p_ace_read_sfr(_sfr_) __raw_readl(s5p_ace_dev.ace_base + (_sfr_))
-#define s5p_ace_write_sfr(_sfr_, _val_) __raw_writel((_val_), s5p_ace_dev.ace_base + (_sfr_))
-
-enum s5p_cpu_type {
- TYPE_S5PV210,
- TYPE_EXYNOS,
-};
-
-enum {
- FLAGS_BC_BUSY,
- FLAGS_HASH_BUSY,
- FLAGS_SUSPENDED,
- FLAGS_USE_SW
-};
-
-static struct s5p_ace_device s5p_ace_dev;
-
-#ifdef CONFIG_ACE_BC_ASYNC
-static void s5p_ace_bc_task(unsigned long data);
-#endif
-
-#define ACE_CLOCK_ON 0
-#define ACE_CLOCK_OFF 1
-
-static int count_clk;
-static int count_clk_delta;
-
-static int count_use_sw;
-
-#if defined(ACE_DEBUG_HEARTBEAT) || defined(ACE_DEBUG_WATCHDOG)
-#define ACE_HEARTBEAT_MS 10000
-#define ACE_WATCHDOG_MS 500
-
-struct timeval timestamp_base;
-struct timeval timestamp[5];
-
-static inline void s5p_ace_dump(void)
-{
- int i;
- char *str[] = {"request: ", "dma start: ", "dma end: ", "suspend: ", "resume: "};
-
- for (i = 0; i < 5; i++)
- printk(KERN_INFO "%s%5lu.%06lu\n",
- str[i], timestamp[i].tv_sec - timestamp_base.tv_sec, timestamp[i].tv_usec);
- printk(KERN_INFO "clock: [%d - %d]\n", count_clk, count_clk_delta);
-}
-#endif
-
-struct s5p_ace_reqctx {
- u32 mode;
-};
-
-struct s5p_ace_device {
- void __iomem *ace_base;
- struct clk *clock;
-#if defined(CONFIG_ACE_BC_IRQMODE) || defined(CONFIG_ACE_HASH_IRQMODE)
- int irq;
-#endif
-#ifdef ACE_USE_ACP
- void __iomem *sss_usercon;
-#endif
- spinlock_t lock;
- unsigned long flags;
-
- struct hrtimer timer;
- struct work_struct work;
-#ifdef ACE_DEBUG_HEARTBEAT
- struct hrtimer heartbeat;
-#endif
-#ifdef ACE_DEBUG_WATCHDOG
- struct hrtimer watchdog_bc;
-#endif
-
-#ifdef CONFIG_ACE_BC_ASYNC
- struct crypto_queue queue_bc;
- struct tasklet_struct task_bc;
- int rc_depth_bc;
-#endif
-
- struct s5p_ace_aes_ctx *ctx_bc;
-
-#ifdef CONFIG_ACE_HASH_ASYNC
- struct crypto_queue queue_hash;
- struct tasklet_struct task_hash;
-#endif
- enum s5p_cpu_type cputype;
-};
-
-#if defined(CONFIG_ACE_HASH_SHA1) || defined(CONFIG_ACE_HASH_SHA256)
-struct crypto_shash *sw_tfm;
-struct crypto_hash **fallback_hash;
-#endif
-struct secmem_crypto_driver_ftn secmem_ftn;
-
-static void s5p_ace_init_clock_gating(void)
-{
- count_clk = 0;
- count_clk_delta = 0;
-}
-
-static void s5p_ace_deferred_clock_disable(struct work_struct *work)
-{
- unsigned long flags;
- int tmp;
-
- if (count_clk_delta == 0)
- return;
-
- spin_lock_irqsave(&s5p_ace_dev.lock, flags);
- count_clk -= count_clk_delta;
- count_clk_delta = 0;
- tmp = count_clk;
- spin_unlock_irqrestore(&s5p_ace_dev.lock, flags);
-
- if (tmp == 0) {
- clk_disable(s5p_ace_dev.clock);
- S5P_ACE_DEBUG("ACE clock OFF\n");
- }
-}
-
-static enum hrtimer_restart s5p_ace_timer_func(struct hrtimer *timer)
-{
- S5P_ACE_DEBUG("ACE HRTIMER\n");
-
- /* It seems that "schedule_work" is expensive. */
- schedule_work(&s5p_ace_dev.work);
-
- return HRTIMER_NORESTART;
-}
-
-static void s5p_ace_clock_gating(int status)
-{
- unsigned long flags;
- int tmp;
-
- if (status == ACE_CLOCK_ON) {
- spin_lock_irqsave(&s5p_ace_dev.lock, flags);
- tmp = count_clk++;
- spin_unlock_irqrestore(&s5p_ace_dev.lock, flags);
-
- if (tmp == 0) {
- clk_enable(s5p_ace_dev.clock);
- S5P_ACE_DEBUG("ACE clock ON\n");
- }
- } else if (status == ACE_CLOCK_OFF) {
- spin_lock_irqsave(&s5p_ace_dev.lock, flags);
- if (count_clk > 1)
- count_clk--;
- else
- count_clk_delta++;
- spin_unlock_irqrestore(&s5p_ace_dev.lock, flags);
-
- hrtimer_start(&s5p_ace_dev.timer,
- ns_to_ktime((u64)500 * NSEC_PER_MSEC),
- HRTIMER_MODE_REL);
- }
-}
-
-struct s5p_ace_aes_ctx {
- u32 keylen;
-
- u32 sfr_ctrl;
- u8 sfr_key[AES_MAX_KEY_SIZE];
- u8 sfr_semikey[AES_BLOCK_SIZE];
-
- struct crypto_blkcipher *fallback_bc;
-#ifdef CONFIG_ACE_BC_ASYNC
- struct ablkcipher_request *req;
- struct crypto_ablkcipher *fallback_abc;
- struct crypto_tfm *origin_tfm;
-#else
- struct crypto_blkcipher *origin_tfm;
-
-#endif
- size_t total;
- struct scatterlist *in_sg;
- size_t in_ofs;
- struct scatterlist *out_sg;
- size_t out_ofs;
-
- int directcall;
-
- u8 *src_addr;
- u8 *dst_addr;
- u32 dma_size;
- u8 tbuf[AES_BLOCK_SIZE];
-};
-
-#if defined(ACE_DEBUG_HEARTBEAT) || defined(ACE_DEBUG_WATCHDOG)
-static void s5p_ace_print_info(void)
-{
- struct s5p_ace_aes_ctx *sctx = s5p_ace_dev.ctx_bc;
-
- printk(KERN_INFO "flags: 0x%X\n", (u32)s5p_ace_dev.flags);
- s5p_ace_dump();
- if (sctx == NULL) {
- printk(KERN_INFO "sctx == NULL\n");
- } else {
-#ifdef CONFIG_ACE_BC_ASYNC
- printk(KERN_INFO "sctx->req: 0x%08X\n", (u32)sctx->req);
-#endif
- printk(KERN_INFO "sctx->total: 0x%08X\n", sctx->total);
- printk(KERN_INFO "sctx->dma_size: 0x%08X\n", sctx->dma_size);
- }
-}
-#endif
-
-#ifdef ACE_DEBUG_HEARTBEAT
-static enum hrtimer_restart s5p_ace_heartbeat_func(struct hrtimer *timer)
-{
- printk(KERN_INFO "[[ACE HEARTBEAT]] -- START ----------\n");
-
- s5p_ace_print_info();
-
- printk(KERN_INFO "[[ACE HEARTBEAT]] -- END ------------\n");
-
- hrtimer_start(&s5p_ace_dev.heartbeat,
- ns_to_ktime((u64)ACE_HEARTBEAT_MS * NSEC_PER_MSEC),
- HRTIMER_MODE_REL);
-
- return HRTIMER_NORESTART;
-}
-#endif
-
-#ifdef ACE_DEBUG_WATCHDOG
-static enum hrtimer_restart s5p_ace_watchdog_bc_func(struct hrtimer *timer)
-{
- printk(KERN_ERR "[[ACE WATCHDOG BC]] ============\n");
-
- s5p_ace_print_info();
-
- return HRTIMER_NORESTART;
-}
-#endif
-
-static void s5p_ace_resume_device(struct s5p_ace_device *dev)
-{
- if (test_and_clear_bit(FLAGS_SUSPENDED, &dev->flags)) {
- clear_bit(FLAGS_BC_BUSY, &dev->flags);
- clear_bit(FLAGS_HASH_BUSY, &dev->flags);
-
-#ifdef ACE_USE_ACP
- /* Set ARUSER[12:8] and AWUSER[4:0] */
- writel(0x101, dev->sss_usercon
- + (PA_SSS_USER_CON & (PAGE_SIZE - 1)));
-#endif
- }
-}
-
-#if defined(CONFIG_ACE_BC)
-static int s5p_ace_aes_set_cipher(struct s5p_ace_aes_ctx *sctx,
- u32 alg_id, u32 key_size)
-{
- u32 new_status = 0;
-
- /* Fixed setting */
- new_status |= ACE_AES_FIFO_ON;
-
- if (s5p_ace_dev.cputype == TYPE_S5PV210)
- new_status |= ACE_AES_KEYCNGMODE_ON;
-
- new_status |= ACE_AES_SWAPKEY_ON;
- new_status |= ACE_AES_SWAPCNT_ON;
- new_status |= ACE_AES_SWAPIV_ON;
-
- if (s5p_ace_dev.cputype == TYPE_EXYNOS) {
- new_status |= ACE_AES_SWAPDO_ON;
- new_status |= ACE_AES_SWAPDI_ON;
- new_status |= ACE_AES_COUNTERSIZE_128;
- }
-
- switch (MI_GET_MODE(alg_id)) {
- case _MODE_ECB_:
- new_status |= ACE_AES_OPERMODE_ECB;
- break;
- case _MODE_CBC_:
- new_status |= ACE_AES_OPERMODE_CBC;
- break;
- case _MODE_CTR_:
- new_status |= ACE_AES_OPERMODE_CTR;
- break;
- default:
- return -EINVAL;
- }
-
- switch (key_size) {
- case 128:
- new_status |= ACE_AES_KEYSIZE_128;
- break;
- case 192:
- new_status |= ACE_AES_KEYSIZE_192;
- break;
- case 256:
- new_status |= ACE_AES_KEYSIZE_256;
- break;
- default:
- return -EINVAL;
- }
-
- /* Set AES context */
- sctx->sfr_ctrl = new_status;
- sctx->keylen = key_size >> 3;
-
- return 0;
-}
-
-/*
- * enc: BC_MODE_ENC - encryption, BC_MODE_DEC - decryption
- */
-static int s5p_ace_aes_set_encmode(struct s5p_ace_aes_ctx *sctx, u32 enc)
-{
- u32 status = sctx->sfr_ctrl;
- u32 enc_mode = ACE_AES_MODE_ENC;
-
- if ((status & ACE_AES_OPERMODE_MASK) != ACE_AES_OPERMODE_CTR)
- enc_mode = (enc == BC_MODE_ENC ?
- ACE_AES_MODE_ENC : ACE_AES_MODE_DEC);
-
- sctx->sfr_ctrl = (status & ~ACE_AES_MODE_MASK) | enc_mode;
-
- return 0;
-}
-
-static int s5p_ace_aes_update_semikey(struct s5p_ace_aes_ctx *sctx,
- u8 *in, u8 *out, u32 len)
-{
- u32 *addr = (u32 *)sctx->sfr_semikey;
- u32 tmp1, tmp2;
-
- switch (sctx->sfr_ctrl & ACE_AES_OPERMODE_MASK) {
- case ACE_AES_OPERMODE_ECB:
- break;
- case ACE_AES_OPERMODE_CBC:
- if ((sctx->sfr_ctrl & ACE_AES_MODE_MASK) == ACE_AES_MODE_ENC)
- memcpy(sctx->sfr_semikey, out, AES_BLOCK_SIZE);
- else
- memcpy(sctx->sfr_semikey, in, AES_BLOCK_SIZE);
- break;
- case ACE_AES_OPERMODE_CTR:
- tmp1 = be32_to_cpu(addr[3]);
- tmp2 = tmp1 + (len >> 4);
- addr[3] = be32_to_cpu(tmp2);
- if (tmp2 < tmp1) {
- tmp1 = be32_to_cpu(addr[2]) + 1;
- addr[2] = be32_to_cpu(tmp1);
- if (addr[2] == 0) {
- tmp1 = be32_to_cpu(addr[1]) + 1;
- addr[1] = be32_to_cpu(tmp1);
- if (addr[1] == 0) {
- tmp1 = be32_to_cpu(addr[0]) + 1;
- addr[0] = be32_to_cpu(tmp1);
- }
- }
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int s5p_ace_aes_write_sfr(struct s5p_ace_aes_ctx *sctx)
-{
- u32 *addr;
-
- s5p_ace_write_sfr(ACE_AES_CONTROL, sctx->sfr_ctrl);
-
- addr = (u32 *)sctx->sfr_key;
- switch (sctx->keylen) {
- case 16:
- s5p_ace_write_sfr(ACE_AES_KEY5, addr[0]);
- s5p_ace_write_sfr(ACE_AES_KEY6, addr[1]);
- s5p_ace_write_sfr(ACE_AES_KEY7, addr[2]);
- s5p_ace_write_sfr(ACE_AES_KEY8, addr[3]);
- break;
- case 24:
- s5p_ace_write_sfr(ACE_AES_KEY3, addr[0]);
- s5p_ace_write_sfr(ACE_AES_KEY4, addr[1]);
- s5p_ace_write_sfr(ACE_AES_KEY5, addr[2]);
- s5p_ace_write_sfr(ACE_AES_KEY6, addr[3]);
- s5p_ace_write_sfr(ACE_AES_KEY7, addr[4]);
- s5p_ace_write_sfr(ACE_AES_KEY8, addr[5]);
- break;
- case 32:
- s5p_ace_write_sfr(ACE_AES_KEY1, addr[0]);
- s5p_ace_write_sfr(ACE_AES_KEY2, addr[1]);
- s5p_ace_write_sfr(ACE_AES_KEY3, addr[2]);
- s5p_ace_write_sfr(ACE_AES_KEY4, addr[3]);
- s5p_ace_write_sfr(ACE_AES_KEY5, addr[4]);
- s5p_ace_write_sfr(ACE_AES_KEY6, addr[5]);
- s5p_ace_write_sfr(ACE_AES_KEY7, addr[6]);
- s5p_ace_write_sfr(ACE_AES_KEY8, addr[7]);
- break;
- default:
- return -EINVAL;
- }
-
- addr = (u32 *)sctx->sfr_semikey;
- switch (sctx->sfr_ctrl & ACE_AES_OPERMODE_MASK) {
- case ACE_AES_OPERMODE_ECB:
- break;
- case ACE_AES_OPERMODE_CBC:
- s5p_ace_write_sfr(ACE_AES_IV1, addr[0]);
- s5p_ace_write_sfr(ACE_AES_IV2, addr[1]);
- s5p_ace_write_sfr(ACE_AES_IV3, addr[2]);
- s5p_ace_write_sfr(ACE_AES_IV4, addr[3]);
- break;
- case ACE_AES_OPERMODE_CTR:
- s5p_ace_write_sfr(ACE_AES_CNT1, addr[0]);
- s5p_ace_write_sfr(ACE_AES_CNT2, addr[1]);
- s5p_ace_write_sfr(ACE_AES_CNT3, addr[2]);
- s5p_ace_write_sfr(ACE_AES_CNT4, addr[3]);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int s5p_ace_aes_engine_start(struct s5p_ace_aes_ctx *sctx,
- u8 *out, const u8 *in, u32 len, int irqen)
-{
- u32 reg;
- u32 first_blklen;
-
- if ((sctx == NULL) || (out == NULL) || (in == NULL)) {
- printk(KERN_ERR "%s : NULL input.\n", __func__);
- return -EINVAL;
- }
-
- if (len & (AES_BLOCK_SIZE - 1)) {
- printk(KERN_ERR "Invalid len for AES engine (%d)\n", len);
- return -EINVAL;
- }
-
- if (s5p_ace_aes_write_sfr(sctx) != 0)
- return -EINVAL;
-
- S5P_ACE_DEBUG("AES: %s, in: 0x%08X, out: 0x%08X, len: 0x%08X\n",
- __func__, (u32)in, (u32)out, len);
- S5P_ACE_DEBUG("AES: %s, AES_control : 0x%08X\n",
- __func__, s5p_ace_read_sfr(ACE_AES_CONTROL));
-
- /* Assert code */
- reg = s5p_ace_read_sfr(ACE_AES_STATUS);
- if ((reg & ACE_AES_BUSY_MASK) == ACE_AES_BUSY_ON)
- return -EBUSY;
-
- /* Flush BRDMA and BTDMA */
- s5p_ace_write_sfr(ACE_FC_BRDMAC, ACE_FC_BRDMACFLUSH_ON);
- s5p_ace_write_sfr(ACE_FC_BTDMAC, ACE_FC_BTDMACFLUSH_ON);
-
- /* Select Input MUX as AES */
- reg = s5p_ace_read_sfr(ACE_FC_FIFOCTRL);
- reg = (reg & ~ACE_FC_SELBC_MASK) | ACE_FC_SELBC_AES;
- s5p_ace_write_sfr(ACE_FC_FIFOCTRL, reg);
-
- /* Stop flushing BRDMA and BTDMA */
- reg = ACE_FC_BRDMACFLUSH_OFF;
- if (s5p_ace_dev.cputype == TYPE_S5PV210)
- reg |= ACE_FC_BRDMACSWAP_ON;
-
-#ifdef ACE_USE_ACP
- reg |= ACE_ARCACHE << ACE_FC_BRDMACARCACHE_OFS;
-#endif
- s5p_ace_write_sfr(ACE_FC_BRDMAC, reg);
- reg = ACE_FC_BTDMACFLUSH_OFF;
- if (s5p_ace_dev.cputype == TYPE_S5PV210)
- reg |= ACE_FC_BTDMACSWAP_ON;
-
-#ifdef ACE_USE_ACP
- reg |= ACE_AWCACHE << ACE_FC_BTDMACAWCACHE_OFS;
-#endif
- s5p_ace_write_sfr(ACE_FC_BTDMAC, reg);
-
- /* Set DMA */
- s5p_ace_write_sfr(ACE_FC_BRDMAS, (u32)in);
- s5p_ace_write_sfr(ACE_FC_BTDMAS, (u32)out);
-
- if (s5p_ace_dev.cputype == TYPE_S5PV210) {
- /* Set the length of first block (Key Change Mode On) */
- if ((((u32)in) & (2 * AES_BLOCK_SIZE - 1)) == 0)
- first_blklen = 2 * AES_BLOCK_SIZE;
- else
- first_blklen = AES_BLOCK_SIZE;
-
- if (len <= first_blklen) {
-#ifdef CONFIG_ACE_BC_IRQMODE
- if (irqen)
- s5p_ace_write_sfr(ACE_FC_INTENSET, ACE_FC_BTDMA);
-#endif
-
- /* Set DMA */
- s5p_ace_write_sfr(ACE_FC_BRDMAL, len);
- s5p_ace_write_sfr(ACE_FC_BTDMAL, len);
- } else {
- unsigned long timeout;
-
- /* Set DMA */
- s5p_ace_write_sfr(ACE_FC_BRDMAL, first_blklen);
- s5p_ace_write_sfr(ACE_FC_BTDMAL, first_blklen);
-
- timeout = jiffies + msecs_to_jiffies(10);
- while (time_before(jiffies, timeout)) {
- if (s5p_ace_read_sfr(ACE_FC_INTPEND) & ACE_FC_BTDMA)
- break;
- }
- if (!(s5p_ace_read_sfr(ACE_FC_INTPEND) & ACE_FC_BTDMA)) {
- printk(KERN_ERR "AES : DMA time out\n");
- return -EBUSY;
- }
- s5p_ace_write_sfr(ACE_FC_INTPEND, ACE_FC_BTDMA | ACE_FC_BRDMA);
-
- reg = sctx->sfr_ctrl;
- reg = (reg & ~ACE_AES_KEYCNGMODE_MASK) | ACE_AES_KEYCNGMODE_OFF;
- s5p_ace_write_sfr(ACE_AES_CONTROL, reg);
-
-#ifdef CONFIG_ACE_BC_IRQMODE
- if (irqen)
- s5p_ace_write_sfr(ACE_FC_INTENSET, ACE_FC_BTDMA);
-#endif
-
- /* Set DMA */
- s5p_ace_write_sfr(ACE_FC_BRDMAL, len - first_blklen);
- s5p_ace_write_sfr(ACE_FC_BTDMAL, len - first_blklen);
- }
- } else {
-#ifdef CONFIG_ACE_BC_IRQMODE
- if (irqen)
- s5p_ace_write_sfr(ACE_FC_INTENSET, ACE_FC_BTDMA);
-#endif
-
- /* Set DMA */
- s5p_ace_write_sfr(ACE_FC_BRDMAL, len);
- s5p_ace_write_sfr(ACE_FC_BTDMAL, len);
- }
-
- return 0;
-}
-
-static void s5p_ace_aes_engine_wait(struct s5p_ace_aes_ctx *sctx,
- u8 *out, const u8 *in, u32 len)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(10);
- while (time_before(jiffies, timeout))
- if (s5p_ace_read_sfr(ACE_FC_INTPEND) & ACE_FC_BTDMA)
- break;
- if (!(s5p_ace_read_sfr(ACE_FC_INTPEND) & ACE_FC_BTDMA))
- printk(KERN_ERR "%s : DMA time out\n", __func__);
- s5p_ace_write_sfr(ACE_FC_INTPEND, ACE_FC_BTDMA | ACE_FC_BRDMA);
-}
-
-void s5p_ace_sg_update(struct scatterlist **sg, size_t *offset,
- size_t count)
-{
- *offset += count;
- if (*offset >= sg_dma_len(*sg)) {
- *offset -= sg_dma_len(*sg);
- *sg = scatterwalk_sg_next(*sg);
- }
-}
-
-int s5p_ace_sg_set_from_sg(struct scatterlist *dst, struct scatterlist *src,
- u32 num)
-{
- sg_init_table(dst, num);
- while (num--) {
- sg_set_page(dst, sg_page(src), sg_dma_len(src), src->offset);
-
- dst++;
- src = scatterwalk_sg_next(src);
- if (!src)
- return -ENOMEM;
- }
- return 0;
-}
-
-/* Unaligned data Handling
- * - size should be a multiple of ACE_AES_MIN_BLOCK_SIZE.
- */
-static int s5p_ace_aes_crypt_unaligned(struct s5p_ace_aes_ctx *sctx,
- size_t size)
-{
- struct blkcipher_desc desc;
- struct scatterlist in_sg[2], out_sg[2];
- int ret;
-
- S5P_ACE_DEBUG("%s - %s (size: %d / %d)\n", __func__,
- sctx->fallback_bc->base.__crt_alg->cra_driver_name,
- size, sctx->total);
-
- desc.tfm = sctx->fallback_bc;
- desc.info = sctx->sfr_semikey;
- desc.flags = 0;
-
- s5p_ace_sg_set_from_sg(in_sg, sctx->in_sg, 2);
- in_sg->length -= sctx->in_ofs;
- in_sg->offset += sctx->in_ofs;
-
- s5p_ace_sg_set_from_sg(out_sg, sctx->out_sg, 2);
- out_sg->length -= sctx->out_ofs;
- out_sg->offset += sctx->out_ofs;
-
- if ((sctx->sfr_ctrl & ACE_AES_MODE_MASK) == ACE_AES_MODE_ENC)
- ret = crypto_blkcipher_encrypt_iv(
- &desc, out_sg, in_sg, size);
- else
- ret = crypto_blkcipher_decrypt_iv(
- &desc, out_sg, in_sg, size);
-
- sctx->dma_size = 0;
- sctx->total -= size;
- if (!sctx->total)
- return 0;
-
- s5p_ace_sg_update(&sctx->in_sg, &sctx->in_ofs, size);
- s5p_ace_sg_update(&sctx->out_sg, &sctx->out_ofs, size);
-
- return 0;
-}
-
-static int s5p_ace_aes_crypt_dma_start(struct s5p_ace_device *dev)
-{
- struct s5p_ace_aes_ctx *sctx = dev->ctx_bc;
- u8 *src, *dst;
- size_t count;
- int i;
- int ret;
-
-#if defined(ACE_DEBUG_HEARTBEAT) || defined(ACE_DEBUG_WATCHDOG)
- do_gettimeofday(&timestamp[1]); /* 1: dma start */
-#endif
-
- sctx->directcall = 0;
-
- while (1) {
- count = sctx->total;
- count = min(count, sg_dma_len(sctx->in_sg) - sctx->in_ofs);
- count = min(count, sg_dma_len(sctx->out_sg) - sctx->out_ofs);
-
- S5P_ACE_DEBUG("total_start: %d (%d)\n", sctx->total, count);
- S5P_ACE_DEBUG(" in(ofs: %x, len: %x), %x\n",
- sctx->in_sg->offset, sg_dma_len(sctx->in_sg),
- sctx->in_ofs);
- S5P_ACE_DEBUG(" out(ofs: %x, len: %x), %x\n",
- sctx->out_sg->offset, sg_dma_len(sctx->out_sg),
- sctx->out_ofs);
-
- if (count > ACE_AES_MIN_BLOCK_SIZE)
- break;
-
- count = min(sctx->total, (size_t)ACE_AES_MIN_BLOCK_SIZE);
- if (count & (AES_BLOCK_SIZE - 1))
- printk(KERN_ERR "%s - Invalid count\n", __func__);
- ret = s5p_ace_aes_crypt_unaligned(sctx, count);
- if (!sctx->total) {
-#if defined(ACE_DEBUG_HEARTBEAT) || defined(ACE_DEBUG_WATCHDOG)
- do_gettimeofday(&timestamp[2]); /* 2: dma end */
-#endif
-#ifdef CONFIG_ACE_BC_IRQMODE
- tasklet_schedule(&dev->task_bc);
- return 0;
-#else
- goto run;
-#endif
- }
- }
-
- count &= ~(AES_BLOCK_SIZE - 1);
- sctx->dma_size = count;
-
- src = (u8 *)page_to_phys(sg_page(sctx->in_sg));
- src += sctx->in_sg->offset + sctx->in_ofs;
- if (!PageHighMem(sg_page(sctx->in_sg))) {
- sctx->src_addr = (u8 *)phys_to_virt((u32)src);
- } else {
- sctx->src_addr = crypto_kmap(sg_page(sctx->in_sg),
- crypto_kmap_type(0));
- sctx->src_addr += sctx->in_sg->offset + sctx->in_ofs;
- }
-
- dst = (u8 *)page_to_phys(sg_page(sctx->out_sg));
- dst += sctx->out_sg->offset + sctx->out_ofs;
- if (!PageHighMem(sg_page(sctx->out_sg))) {
- sctx->dst_addr = (u8 *)phys_to_virt((u32)dst);
- } else {
- sctx->dst_addr = crypto_kmap(sg_page(sctx->out_sg),
- crypto_kmap_type(1));
- sctx->dst_addr += sctx->out_sg->offset + sctx->out_ofs;
- }
-
- S5P_ACE_DEBUG(" phys(src: %x, dst: %x)\n", (u32)src, (u32)dst);
- S5P_ACE_DEBUG(" virt(src: %x, dst: %x)\n",
- (u32)sctx->src_addr, (u32)sctx->dst_addr);
-
- if (src == dst)
- memcpy(sctx->tbuf, sctx->src_addr + count - AES_BLOCK_SIZE,
- AES_BLOCK_SIZE);
-
-#ifndef ACE_USE_ACP
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
- dmac_clean_range((void *)sctx->src_addr,
- (void *)sctx->src_addr + count);
- dmac_clean_range((void *)sctx->dst_addr,
- (void *)sctx->dst_addr + count);
-#else
- dmac_map_area((void *)sctx->src_addr, count, DMA_TO_DEVICE);
- outer_clean_range((unsigned long)src, (unsigned long)src + count);
- dmac_map_area((void *)sctx->dst_addr, count, DMA_FROM_DEVICE);
- outer_clean_range((unsigned long)dst, (unsigned long)dst + count);
-#endif
-#endif
-
- for (i = 0; i < 100; i++) {
- ret = s5p_ace_aes_engine_start(sctx, dst, src, count, 1);
- if (ret != -EBUSY)
- break;
- }
- if (i == 100) {
- printk(KERN_ERR "%s : DMA Start Failed\n", __func__);
- return ret;
- }
-
-run:
-#ifdef CONFIG_ACE_BC_ASYNC
-#ifndef CONFIG_ACE_BC_IRQMODE
- if (!ret) {
- if ((count <= 2048) && ((s5p_ace_dev.rc_depth_bc++) < 1)) {
- sctx->directcall = 1;
- s5p_ace_bc_task((unsigned long)&s5p_ace_dev);
- return ret;
- }
- }
-#endif
-
- if (sctx->dma_size) {
- if (PageHighMem(sg_page(sctx->in_sg)))
- crypto_kunmap(sctx->src_addr, crypto_kmap_type(0));
- if (PageHighMem(sg_page(sctx->out_sg)))
- crypto_kunmap(sctx->dst_addr, crypto_kmap_type(1));
- }
-
-#ifndef CONFIG_ACE_BC_IRQMODE
- if (!ret)
- tasklet_schedule(&dev->task_bc);
-#endif
-#endif
- return ret;
-}
-
-static int s5p_ace_aes_crypt_dma_wait(struct s5p_ace_device *dev)
-{
- struct s5p_ace_aes_ctx *sctx = dev->ctx_bc;
- u8 *src, *dst;
- u8 *src_lb_addr;
- u32 lastblock;
- int ret = 0;
-
- S5P_ACE_DEBUG("%s\n", __func__);
-
- src = (u8 *)page_to_phys(sg_page(sctx->in_sg));
- src += sctx->in_sg->offset + sctx->in_ofs;
- dst = (u8 *)page_to_phys(sg_page(sctx->out_sg));
- dst += sctx->out_sg->offset + sctx->out_ofs;
-
-#ifdef CONFIG_ACE_BC_ASYNC
- if (!sctx->directcall) {
- if (PageHighMem(sg_page(sctx->in_sg))) {
- sctx->src_addr = crypto_kmap(sg_page(sctx->in_sg),
- crypto_kmap_type(0));
- sctx->src_addr += sctx->in_sg->offset + sctx->in_ofs;
- }
-
- if (PageHighMem(sg_page(sctx->out_sg))) {
- sctx->dst_addr = crypto_kmap(sg_page(sctx->out_sg),
- crypto_kmap_type(1));
- sctx->dst_addr += sctx->out_sg->offset + sctx->out_ofs;
- }
- }
-#endif
-
-#ifndef CONFIG_ACE_BC_IRQMODE
- s5p_ace_aes_engine_wait(sctx, dst, src, sctx->dma_size);
-#endif
-
-#ifndef ACE_USE_ACP
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
- dmac_inv_range((void *)sctx->dst_addr,
- (void *)sctx->dst_addr + sctx->dma_size);
-#else
- dmac_unmap_area((void *)sctx->dst_addr, sctx->dma_size,
- DMA_FROM_DEVICE);
- outer_inv_range((unsigned long)dst,
- (unsigned long)dst + sctx->dma_size);
-#endif
-#endif
-
- lastblock = sctx->dma_size - AES_BLOCK_SIZE;
- if (src == dst)
- src_lb_addr = sctx->tbuf;
- else
- src_lb_addr = sctx->src_addr + lastblock;
- if (s5p_ace_aes_update_semikey(sctx, src_lb_addr,
- sctx->dst_addr + lastblock,
- sctx->dma_size) != 0)
- return -EINVAL;
-
- if (PageHighMem(sg_page(sctx->in_sg)))
- crypto_kunmap(sctx->src_addr, crypto_kmap_type(0));
- if (PageHighMem(sg_page(sctx->out_sg)))
- crypto_kunmap(sctx->dst_addr, crypto_kmap_type(1));
-
- sctx->total -= sctx->dma_size;
-
- S5P_ACE_DEBUG("total_end: %d\n", sctx->total);
-
- if (ret || !sctx->total) {
- if (ret)
- printk(KERN_NOTICE "err: %d\n", ret);
- } else {
- s5p_ace_sg_update(&sctx->in_sg, &sctx->in_ofs,
- sctx->dma_size);
- s5p_ace_sg_update(&sctx->out_sg, &sctx->out_ofs,
- sctx->dma_size);
- }
-
-#if defined(ACE_DEBUG_HEARTBEAT) || defined(ACE_DEBUG_WATCHDOG)
- do_gettimeofday(&timestamp[2]); /* 2: dma end */
-#endif
-
- return ret;
-}
-
-#ifdef CONFIG_ACE_BC_ASYNC
-static int s5p_ace_handle_lock_req(struct s5p_ace_device *dev,
- struct s5p_ace_aes_ctx *sctx,
- struct ablkcipher_request *req, u32 encmode)
-{
- int ret;
-
- sctx->origin_tfm = req->base.tfm;
- crypto_ablkcipher_set_flags(sctx->fallback_abc, 0);
- ablkcipher_request_set_tfm(req, sctx->fallback_abc);
-
- if (encmode == BC_MODE_ENC)
- ret = crypto_ablkcipher_encrypt(req);
- else
- ret = crypto_ablkcipher_decrypt(req);
-
- sctx->req = req;
- dev->ctx_bc = sctx;
- tasklet_schedule(&dev->task_bc);
-
- return ret;
-}
-
-static int s5p_ace_aes_handle_req(struct s5p_ace_device *dev)
-{
- struct crypto_async_request *async_req;
- struct crypto_async_request *backlog;
- struct s5p_ace_aes_ctx *sctx;
- struct s5p_ace_reqctx *rctx;
- struct ablkcipher_request *req;
- unsigned long flags;
-
- if (dev->ctx_bc)
- goto start;
-
- S5P_ACE_DEBUG("%s\n", __func__);
-
- spin_lock_irqsave(&s5p_ace_dev.lock, flags);
- backlog = crypto_get_backlog(&dev->queue_bc);
- async_req = crypto_dequeue_request(&dev->queue_bc);
- S5P_ACE_DEBUG("[[ dequeue (%u) ]]\n", dev->queue_bc.qlen);
- spin_unlock_irqrestore(&s5p_ace_dev.lock, flags);
-
- if (!async_req) {
- clear_bit(FLAGS_BC_BUSY, &dev->flags);
- s5p_ace_clock_gating(ACE_CLOCK_OFF);
- return 0;
- }
-
- if (backlog) {
- S5P_ACE_DEBUG("backlog.\n");
- backlog->complete(backlog, -EINPROGRESS);
- }
-
- S5P_ACE_DEBUG("get new req\n");
-
- req = ablkcipher_request_cast(async_req);
- sctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
-
-#ifdef ACE_DEBUG_WATCHDOG
- hrtimer_start(&s5p_ace_dev.watchdog_bc,
- ns_to_ktime((u64)ACE_WATCHDOG_MS * NSEC_PER_MSEC),
- HRTIMER_MODE_REL);
-#endif
- rctx = ablkcipher_request_ctx(req);
-
- if (s5p_ace_dev.flags & BIT_MASK(FLAGS_USE_SW))
- return s5p_ace_handle_lock_req(dev, sctx, req, rctx->mode);
-
- /* assign new request to device */
- sctx->req = req;
- sctx->total = req->nbytes;
- sctx->in_sg = req->src;
- sctx->in_ofs = 0;
- sctx->out_sg = req->dst;
- sctx->out_ofs = 0;
-
- if ((sctx->sfr_ctrl & ACE_AES_OPERMODE_MASK) != ACE_AES_OPERMODE_ECB)
- memcpy(sctx->sfr_semikey, req->info, AES_BLOCK_SIZE);
-
- s5p_ace_aes_set_encmode(sctx, rctx->mode);
-
- dev->ctx_bc = sctx;
-
-start:
- return s5p_ace_aes_crypt_dma_start(dev);
-}
-
-static void s5p_ace_bc_task(unsigned long data)
-{
- struct s5p_ace_device *dev = (struct s5p_ace_device *)data;
- struct s5p_ace_aes_ctx *sctx = dev->ctx_bc;
- int ret = 0;
-
- S5P_ACE_DEBUG("%s (total: %d, dma_size: %d)\n", __func__,
- sctx->total, sctx->dma_size);
-
- /* check if it is handled by SW or HW */
- if (sctx->req->base.tfm ==
- crypto_ablkcipher_tfm
- (crypto_ablkcipher_crt(sctx->fallback_abc)->base)) {
- sctx->req->base.tfm = sctx->origin_tfm;
- sctx->req->base.complete(&sctx->req->base, ret);
- dev->ctx_bc = NULL;
- s5p_ace_aes_handle_req(dev);
-
- return;
- }
-
- if (sctx->dma_size)
- ret = s5p_ace_aes_crypt_dma_wait(dev);
-
- if (!sctx->total) {
- if ((sctx->sfr_ctrl & ACE_AES_OPERMODE_MASK)
- != ACE_AES_OPERMODE_ECB)
- memcpy(sctx->req->info, sctx->sfr_semikey,
- AES_BLOCK_SIZE);
- sctx->req->base.complete(&sctx->req->base, ret);
- dev->ctx_bc = NULL;
-
-#ifdef ACE_DEBUG_WATCHDOG
- hrtimer_cancel(&s5p_ace_dev.watchdog_bc);
-#endif
- }
-
- s5p_ace_aes_handle_req(dev);
-}
-
-static int s5p_ace_aes_crypt(struct ablkcipher_request *req, u32 encmode)
-{
- struct s5p_ace_reqctx *rctx = ablkcipher_request_ctx(req);
- unsigned long flags;
- int ret;
- unsigned long timeout;
-
-#ifdef ACE_DEBUG_WATCHDOG
- do_gettimeofday(&timestamp[0]); /* 0: request */
-#endif
-
- S5P_ACE_DEBUG("%s (nbytes: 0x%x, mode: 0x%x)\n",
- __func__, (u32)req->nbytes, encmode);
-
- rctx->mode = encmode;
-
- timeout = jiffies + msecs_to_jiffies(10);
- while (time_before(jiffies, timeout)) {
- if (s5p_ace_dev.queue_bc.list.prev != &req->base.list)
- break;
- udelay(1); /* wait */
- }
- if (s5p_ace_dev.queue_bc.list.prev == &req->base.list) {
- printk(KERN_ERR "%s : Time Out.\n", __func__);
- return -EAGAIN;
- }
-
- spin_lock_irqsave(&s5p_ace_dev.lock, flags);
- ret = ablkcipher_enqueue_request(&s5p_ace_dev.queue_bc, req);
- spin_unlock_irqrestore(&s5p_ace_dev.lock, flags);
-
- S5P_ACE_DEBUG("[[ enqueue (%u) ]]\n", s5p_ace_dev.queue_bc.qlen);
-
- s5p_ace_resume_device(&s5p_ace_dev);
- if (!test_and_set_bit(FLAGS_BC_BUSY, &s5p_ace_dev.flags)) {
- s5p_ace_clock_gating(ACE_CLOCK_ON);
- s5p_ace_dev.rc_depth_bc = 0;
- s5p_ace_aes_handle_req(&s5p_ace_dev);
- }
-
- return ret;
-}
-#else
-static int s5p_ace_handle_lock_req(struct s5p_ace_aes_ctx *sctx,
- struct blkcipher_desc *desc,
- struct scatterlist *sg_dst,
- struct scatterlist *sg_src,
- unsigned int size, int encmode)
-{
- int ret;
-
- sctx->origin_tfm = desc->tfm;
- desc->tfm = sctx->fallback_bc;
-
- if (encmode == BC_MODE_ENC)
- ret = crypto_blkcipher_encrypt_iv(desc, sg_dst, sg_src, size);
- else
- ret = crypto_blkcipher_decrypt_iv(desc, sg_dst, sg_src, size);
-
- desc->tfm = sctx->origin_tfm;
-
- return ret;
-}
-
-static int s5p_ace_aes_crypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes, int encmode)
-{
- struct s5p_ace_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
- int ret;
-
-#if defined(ACE_DEBUG_HEARTBEAT) || defined(ACE_DEBUG_WATCHDOG)
- do_gettimeofday(&timestamp[0]); /* 0: request */
-#endif
-
-#ifdef ACE_DEBUG_WATCHDOG
- hrtimer_start(&s5p_ace_dev.watchdog_bc,
- ns_to_ktime((u64)ACE_WATCHDOG_MS * NSEC_PER_MSEC),
- HRTIMER_MODE_REL);
-#endif
-
- sctx->total = nbytes;
- sctx->in_sg = src;
- sctx->in_ofs = 0;
- sctx->out_sg = dst;
- sctx->out_ofs = 0;
-
- if ((sctx->sfr_ctrl & ACE_AES_OPERMODE_MASK) != ACE_AES_OPERMODE_ECB)
- memcpy(sctx->sfr_semikey, desc->info, AES_BLOCK_SIZE);
-
- s5p_ace_aes_set_encmode(sctx, encmode);
-
- s5p_ace_resume_device(&s5p_ace_dev);
- s5p_ace_clock_gating(ACE_CLOCK_ON);
- local_bh_disable();
- while (test_and_set_bit(FLAGS_BC_BUSY, &s5p_ace_dev.flags))
- udelay(1);
-
- if (s5p_ace_dev.flags & BIT_MASK(FLAGS_USE_SW)) {
- clear_bit(FLAGS_BC_BUSY, &s5p_ace_dev.flags);
- local_bh_enable();
- return s5p_ace_handle_lock_req(sctx, desc, dst, src, nbytes,
- encmode);
- }
-
- s5p_ace_dev.ctx_bc = sctx;
-
- do {
- ret = s5p_ace_aes_crypt_dma_start(&s5p_ace_dev);
-
- if (sctx->dma_size)
- ret = s5p_ace_aes_crypt_dma_wait(&s5p_ace_dev);
- } while (sctx->total);
-
- s5p_ace_dev.ctx_bc = NULL;
-
- clear_bit(FLAGS_BC_BUSY, &s5p_ace_dev.flags);
- local_bh_enable();
- s5p_ace_clock_gating(ACE_CLOCK_OFF);
-
- if ((sctx->sfr_ctrl & ACE_AES_OPERMODE_MASK) != ACE_AES_OPERMODE_ECB)
- memcpy(desc->info, sctx->sfr_semikey, AES_BLOCK_SIZE);
-
-#ifdef ACE_DEBUG_WATCHDOG
- hrtimer_cancel(&s5p_ace_dev.watchdog_bc);
-#endif
-
- return ret;
-}
-#endif
-
-static int s5p_ace_aes_set_key(struct s5p_ace_aes_ctx *sctx, const u8 *key,
- unsigned int key_len)
-{
- memcpy(sctx->sfr_key, key, key_len);
- crypto_blkcipher_setkey(sctx->fallback_bc, key, key_len);
-
-#ifdef CONFIG_ACE_BC_ASYNC
- crypto_ablkcipher_setkey(sctx->fallback_abc, key, key_len);
-#endif
-
- return 0;
-}
-
-#ifdef CONFIG_ACE_BC_ASYNC
-static int s5p_ace_ecb_aes_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int key_len)
-{
- struct s5p_ace_aes_ctx *sctx = crypto_ablkcipher_ctx(tfm);
- s5p_ace_aes_set_cipher(sctx, MI_AES_ECB, key_len * 8);
- return s5p_ace_aes_set_key(sctx, key, key_len);
-}
-
-static int s5p_ace_cbc_aes_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int key_len)
-{
- struct s5p_ace_aes_ctx *sctx = crypto_ablkcipher_ctx(tfm);
- s5p_ace_aes_set_cipher(sctx, MI_AES_CBC, key_len * 8);
- return s5p_ace_aes_set_key(sctx, key, key_len);
-}
-
-static int s5p_ace_ctr_aes_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int key_len)
-{
- struct s5p_ace_aes_ctx *sctx = crypto_ablkcipher_ctx(tfm);
- s5p_ace_aes_set_cipher(sctx, MI_AES_CTR, key_len * 8);
- return s5p_ace_aes_set_key(sctx, key, key_len);
-}
-
-static int s5p_ace_ecb_aes_encrypt(struct ablkcipher_request *req)
-{
- return s5p_ace_aes_crypt(req, BC_MODE_ENC);
-}
-
-static int s5p_ace_ecb_aes_decrypt(struct ablkcipher_request *req)
-{
- return s5p_ace_aes_crypt(req, BC_MODE_DEC);
-}
-
-static int s5p_ace_cbc_aes_encrypt(struct ablkcipher_request *req)
-{
- return s5p_ace_aes_crypt(req, BC_MODE_ENC);
-}
-
-static int s5p_ace_cbc_aes_decrypt(struct ablkcipher_request *req)
-{
- return s5p_ace_aes_crypt(req, BC_MODE_DEC);
-}
-
-static int s5p_ace_ctr_aes_encrypt(struct ablkcipher_request *req)
-{
- return s5p_ace_aes_crypt(req, BC_MODE_ENC);
-}
-
-static int s5p_ace_ctr_aes_decrypt(struct ablkcipher_request *req)
-{
- return s5p_ace_aes_crypt(req, BC_MODE_DEC);
-}
-#else
-static int s5p_ace_ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *key,
- unsigned int key_len)
-{
- struct s5p_ace_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- s5p_ace_aes_set_cipher(sctx, MI_AES_ECB, key_len * 8);
- return s5p_ace_aes_set_key(sctx, key, key_len);
-}
-
-static int s5p_ace_cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *key,
- unsigned int key_len)
-{
- struct s5p_ace_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- s5p_ace_aes_set_cipher(sctx, MI_AES_CBC, key_len * 8);
- return s5p_ace_aes_set_key(sctx, key, key_len);
-}
-
-static int s5p_ace_ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *key,
- unsigned int key_len)
-{
- struct s5p_ace_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- s5p_ace_aes_set_cipher(sctx, MI_AES_CTR, key_len * 8);
- return s5p_ace_aes_set_key(sctx, key, key_len);
-}
-
-static int s5p_ace_ecb_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- return s5p_ace_aes_crypt(desc, dst, src, nbytes, BC_MODE_ENC);
-}
-
-static int s5p_ace_ecb_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- return s5p_ace_aes_crypt(desc, dst, src, nbytes, BC_MODE_DEC);
-}
-
-static int s5p_ace_cbc_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- return s5p_ace_aes_crypt(desc, dst, src, nbytes, BC_MODE_ENC);
-}
-
-static int s5p_ace_cbc_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- return s5p_ace_aes_crypt(desc, dst, src, nbytes, BC_MODE_DEC);
-}
-
-static int s5p_ace_ctr_aes_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- return s5p_ace_aes_crypt(desc, dst, src, nbytes, BC_MODE_ENC);
-}
-
-static int s5p_ace_ctr_aes_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- return s5p_ace_aes_crypt(desc, dst, src, nbytes, BC_MODE_DEC);
-}
-#endif
-
-static int s5p_ace_cra_init_tfm(struct crypto_tfm *tfm)
-{
- const char *name = tfm->__crt_alg->cra_name;
- struct s5p_ace_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-
- sctx->fallback_bc = crypto_alloc_blkcipher(name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
-
- if (IS_ERR(sctx->fallback_bc)) {
- printk(KERN_ERR "Error allocating fallback algo %s\n", name);
- return PTR_ERR(sctx->fallback_bc);
- }
-#ifdef CONFIG_ACE_BC_ASYNC
- tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_ace_reqctx);
- sctx->fallback_abc = crypto_alloc_ablkcipher(name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
-
- if (IS_ERR(sctx->fallback_abc)) {
- printk(KERN_ERR "Error allocating abc fallback algo %s\n",
- name);
- return PTR_ERR(sctx->fallback_abc);
- }
-
-#endif
- S5P_ACE_DEBUG("%s\n", __func__);
-
- return 0;
-}
-
-static void s5p_ace_cra_exit_tfm(struct crypto_tfm *tfm)
-{
- struct s5p_ace_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-
- crypto_free_blkcipher(sctx->fallback_bc);
- sctx->fallback_bc = NULL;
-
-#ifdef CONFIG_ACE_BC_ASYNC
- crypto_free_ablkcipher(sctx->fallback_abc);
- sctx->fallback_abc = NULL;
-#endif
-
- S5P_ACE_DEBUG("%s\n", __func__);
-}
-
-static struct crypto_alg algs_bc[] = {
- {
- .cra_name = "ecb(aes)",
- .cra_driver_name = "ecb-aes-s5p-ace",
- .cra_priority = 300,
-#ifdef CONFIG_ACE_BC_ASYNC
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER
- | CRYPTO_ALG_ASYNC,
-#else
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-#endif
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s5p_ace_aes_ctx),
- .cra_alignmask = 0,
-#ifdef CONFIG_ACE_BC_ASYNC
- .cra_type = &crypto_ablkcipher_type,
-#else
- .cra_type = &crypto_blkcipher_type,
-#endif
- .cra_module = THIS_MODULE,
- .cra_init = s5p_ace_cra_init_tfm,
- .cra_exit = s5p_ace_cra_exit_tfm,
-#ifdef CONFIG_ACE_BC_ASYNC
- .cra_ablkcipher = {
-#else
- .cra_blkcipher = {
-#endif
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .setkey = s5p_ace_ecb_aes_set_key,
- .encrypt = s5p_ace_ecb_aes_encrypt,
- .decrypt = s5p_ace_ecb_aes_decrypt,
- }
- },
- {
- .cra_name = "cbc(aes)",
- .cra_driver_name = "cbc-aes-s5p-ace",
- .cra_priority = 300,
-#ifdef CONFIG_ACE_BC_ASYNC
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER
- | CRYPTO_ALG_ASYNC,
-#else
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-#endif
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s5p_ace_aes_ctx),
- .cra_alignmask = 0,
-#ifdef CONFIG_ACE_BC_ASYNC
- .cra_type = &crypto_ablkcipher_type,
-#else
- .cra_type = &crypto_blkcipher_type,
-#endif
- .cra_module = THIS_MODULE,
- .cra_init = s5p_ace_cra_init_tfm,
- .cra_exit = s5p_ace_cra_exit_tfm,
-#ifdef CONFIG_ACE_BC_ASYNC
- .cra_ablkcipher = {
-#else
- .cra_blkcipher = {
-#endif
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = s5p_ace_cbc_aes_set_key,
- .encrypt = s5p_ace_cbc_aes_encrypt,
- .decrypt = s5p_ace_cbc_aes_decrypt,
- }
- },
- {
- .cra_name = "ctr(aes)",
- .cra_driver_name = "ctr-aes-s5p-ace",
- .cra_priority = 300,
-#ifdef CONFIG_ACE_BC_ASYNC
- .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER
- | CRYPTO_ALG_ASYNC,
-#else
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-#endif
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s5p_ace_aes_ctx),
- .cra_alignmask = 0,
-#ifdef CONFIG_ACE_BC_ASYNC
- .cra_type = &crypto_ablkcipher_type,
-#else
- .cra_type = &crypto_blkcipher_type,
-#endif
- .cra_module = THIS_MODULE,
- .cra_init = s5p_ace_cra_init_tfm,
- .cra_exit = s5p_ace_cra_exit_tfm,
-#ifdef CONFIG_ACE_BC_ASYNC
- .cra_ablkcipher = {
-#else
- .cra_blkcipher = {
-#endif
- .min_keysize = AES_MIN_KEY_SIZE,
- .max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
- .setkey = s5p_ace_ctr_aes_set_key,
- .encrypt = s5p_ace_ctr_aes_encrypt,
- .decrypt = s5p_ace_ctr_aes_decrypt,
- }
- }
-};
-#endif
-
-#define TYPE_HASH_SHA1 0
-#define TYPE_HASH_SHA256 1
-
-#if defined(CONFIG_ACE_HASH_SHA1) || defined(CONFIG_ACE_HASH_SHA256)
-struct s5p_ace_hash_ctx {
- u32 type;
- u32 prelen_high;
- u32 prelen_low;
-
- u32 buflen;
- u8 buffer[SHA256_BLOCK_SIZE];
-
- u32 state[SHA256_DIGEST_SIZE / 4];
-
- u32 sw_init;
-
- struct shash_desc sw_desc;
- struct sha256_state dummy;
-};
-
-/*
- * out == NULL - This is not a final message block.
- * Intermediate value is stored at pCtx->digest.
- * out != NULL - This is a final message block.
- * Digest value will be stored at out.
- */
-static int s5p_ace_sha_engine(struct s5p_ace_hash_ctx *sctx,
- u8 *out, const u8* in, u32 len)
-{
- u32 reg;
- u32 *buffer;
- u32 block_size, digest_size;
- u8 *in_phys;
- int transformmode = 0;
-
- S5P_ACE_DEBUG("Out: 0x%08X, In: 0x%08X, Len: %d\n",
- (u32)out, (u32)in, len);
- S5P_ACE_DEBUG("PreLen_Hi: %u, PreLen_Lo: %u\n",
- sctx->prelen_high, sctx->prelen_low);
-
- block_size = (sctx->type == TYPE_HASH_SHA1) ?
- SHA1_BLOCK_SIZE : SHA256_BLOCK_SIZE;
- digest_size = (sctx->type == TYPE_HASH_SHA1) ?
- SHA1_DIGEST_SIZE : SHA256_DIGEST_SIZE;
-
- if (out == NULL) {
- if (len == 0) {
- return 0;
- } else if (len < digest_size) {
- printk(KERN_ERR "%s: Invalid input\n", __func__);
- return -EINVAL;
- }
- transformmode = 1;
- }
-
- if (len == 0) {
- S5P_ACE_DEBUG("%s: Workaround for empty input\n", __func__);
-
- memset(sctx->buffer, 0, block_size - 8);
- sctx->buffer[0] = 0x80;
- reg = cpu_to_be32(sctx->prelen_high);
- memcpy(sctx->buffer + block_size - 8, &reg, 4);
- reg = cpu_to_be32(sctx->prelen_low);
- memcpy(sctx->buffer + block_size - 4, &reg, 4);
-
- in = sctx->buffer;
- len = block_size;
- transformmode = 1;
- }
-
- if ((void *)in < high_memory) {
- in_phys = (u8 *)virt_to_phys((void*)in);
- } else {
- struct page *page;
- S5P_ACE_DEBUG("%s: high memory - 0x%08x\n", __func__, (u32)in);
- page = vmalloc_to_page(in);
- if (!page)
- printk(KERN_ERR "ERROR: %s: Null page\n", __func__);
- in_phys = (u8 *)page_to_phys(page);
- in_phys += ((u32)in & ~PAGE_MASK);
- }
-
- /* Flush HRDMA */
- s5p_ace_write_sfr(ACE_FC_HRDMAC, ACE_FC_HRDMACFLUSH_ON);
- reg = ACE_FC_HRDMACFLUSH_OFF;
- if (s5p_ace_dev.cputype == TYPE_S5PV210)
- reg |= ACE_FC_HRDMACSWAP_ON;
-
-#ifdef ACE_USE_ACP
- reg |= ACE_ARCACHE << ACE_FC_HRDMACARCACHE_OFS;
-#endif
- s5p_ace_write_sfr(ACE_FC_HRDMAC, reg);
-
- /* Set byte swap of data in */
- if (s5p_ace_dev.cputype == TYPE_EXYNOS)
- s5p_ace_write_sfr(ACE_HASH_BYTESWAP, ACE_HASH_SWAPDI_ON |
- ACE_HASH_SWAPDO_ON | ACE_HASH_SWAPIV_ON);
- else
- s5p_ace_write_sfr(ACE_HASH_BYTESWAP,
- ACE_HASH_SWAPDO_ON | ACE_HASH_SWAPIV_ON);
-
- /* Select Hash input mux as external source */
- reg = s5p_ace_read_sfr(ACE_FC_FIFOCTRL);
- reg = (reg & ~ACE_FC_SELHASH_MASK) | ACE_FC_SELHASH_EXOUT;
- s5p_ace_write_sfr(ACE_FC_FIFOCTRL, reg);
-
- /* Set Hash as SHA1 or SHA256 and start Hash engine */
- reg = (sctx->type == TYPE_HASH_SHA1) ?
- ACE_HASH_ENGSEL_SHA1HASH : ACE_HASH_ENGSEL_SHA256HASH;
- reg |= ACE_HASH_STARTBIT_ON;
- if ((sctx->prelen_low | sctx->prelen_high) != 0) {
- reg |= ACE_HASH_USERIV_EN;
- buffer = (u32 *)sctx->state;
- s5p_ace_write_sfr(ACE_HASH_IV1, buffer[0]);
- s5p_ace_write_sfr(ACE_HASH_IV2, buffer[1]);
- s5p_ace_write_sfr(ACE_HASH_IV3, buffer[2]);
- s5p_ace_write_sfr(ACE_HASH_IV4, buffer[3]);
- s5p_ace_write_sfr(ACE_HASH_IV5, buffer[4]);
-
- if (sctx->type == TYPE_HASH_SHA256) {
- s5p_ace_write_sfr(ACE_HASH_IV6, buffer[5]);
- s5p_ace_write_sfr(ACE_HASH_IV7, buffer[6]);
- s5p_ace_write_sfr(ACE_HASH_IV8, buffer[7]);
- }
- }
- s5p_ace_write_sfr(ACE_HASH_CONTROL, reg);
-
- /* Enable FIFO mode */
- s5p_ace_write_sfr(ACE_HASH_FIFO_MODE, ACE_HASH_FIFO_ON);
-
- /* Clean data cache */
-#ifndef ACE_USE_ACP
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35)
- dmac_clean_range((void *)in, (void *)in + len);
-#else
- dmac_map_area((void *)in, len, DMA_TO_DEVICE);
- outer_clean_range((unsigned long)in_phys, (unsigned long)in_phys + len);
-#endif
-#endif
-
- if (transformmode) {
- /* Set message length */
- s5p_ace_write_sfr(ACE_HASH_MSGSIZE_LOW, 0);
- s5p_ace_write_sfr(ACE_HASH_MSGSIZE_HIGH, 0x80000000);
-
- /* Set pre-message length */
- s5p_ace_write_sfr(ACE_HASH_PRELEN_LOW, 0);
- s5p_ace_write_sfr(ACE_HASH_PRELEN_HIGH, 0);
- } else {
- /* Set message length */
- s5p_ace_write_sfr(ACE_HASH_MSGSIZE_LOW, len);
- s5p_ace_write_sfr(ACE_HASH_MSGSIZE_HIGH, 0);
-
- /* Set pre-message length */
- s5p_ace_write_sfr(ACE_HASH_PRELEN_LOW, sctx->prelen_low);
- s5p_ace_write_sfr(ACE_HASH_PRELEN_HIGH, sctx->prelen_high);
- }
-
- /* Set HRDMA */
- s5p_ace_write_sfr(ACE_FC_HRDMAS, (u32)in_phys);
- s5p_ace_write_sfr(ACE_FC_HRDMAL, len);
-
- while (!(s5p_ace_read_sfr(ACE_FC_INTPEND) & ACE_FC_HRDMA))
- ; /* wait */
- s5p_ace_write_sfr(ACE_FC_INTPEND, ACE_FC_HRDMA);
-
- /*while ((s5p_ace_read_sfr(ACE_HASH_STATUS) & ACE_HASH_BUFRDY_MASK)
- == ACE_HASH_BUFRDY_OFF); */
-
- if (transformmode) {
- /* Set Pause bit */
- s5p_ace_write_sfr(ACE_HASH_CONTROL2, ACE_HASH_PAUSE_ON);
-
- while ((s5p_ace_read_sfr(ACE_HASH_STATUS)
- & ACE_HASH_PARTIALDONE_MASK)
- == ACE_HASH_PARTIALDONE_OFF)
- ; /* wait */
- s5p_ace_write_sfr(ACE_HASH_STATUS, ACE_HASH_PARTIALDONE_ON);
-
- if (out == NULL) {
- /* Update chaining variables */
- buffer = (u32 *)sctx->state;
-
- /* Update pre-message length */
- /* Note that the unit of pre-message length is a BIT! */
- sctx->prelen_low += (len << 3);
- if (sctx->prelen_low < len)
- sctx->prelen_high++;
- sctx->prelen_high += (len >> 29);
- } else {
- /* Read hash result */
- buffer = (u32 *)out;
- }
- } else {
- while ((s5p_ace_read_sfr(ACE_HASH_STATUS)
- & ACE_HASH_MSGDONE_MASK)
- == ACE_HASH_MSGDONE_OFF)
- ; /* wait */
- s5p_ace_write_sfr(ACE_HASH_STATUS, ACE_HASH_MSGDONE_ON);
-
- /* Read hash result */
- buffer = (u32 *)out;
- }
- buffer[0] = s5p_ace_read_sfr(ACE_HASH_RESULT1);
- buffer[1] = s5p_ace_read_sfr(ACE_HASH_RESULT2);
- buffer[2] = s5p_ace_read_sfr(ACE_HASH_RESULT3);
- buffer[3] = s5p_ace_read_sfr(ACE_HASH_RESULT4);
- buffer[4] = s5p_ace_read_sfr(ACE_HASH_RESULT5);
-
- if (sctx->type == TYPE_HASH_SHA256) {
- buffer[5] = s5p_ace_read_sfr(ACE_HASH_RESULT6);
- buffer[6] = s5p_ace_read_sfr(ACE_HASH_RESULT7);
- buffer[7] = s5p_ace_read_sfr(ACE_HASH_RESULT8);
- }
-
- return 0;
-}
-
-#ifdef CONFIG_ACE_HASH_ASYNC
-static int s5p_ace_sha1_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct s5p_ace_hash_ctx *sctx = crypto_ahash_ctx(tfm);
-
- sctx->prelen_high = sctx->prelen_low = 0;
- sctx->buflen = 0;
-
- /* To Do */
-
- return 0;
-}
-
-static int s5p_ace_sha1_update(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct s5p_ace_hash_ctx *sctx = crypto_ahash_ctx(tfm);
-
- /* To Do */
-
- return 0;
-}
-
-static int s5p_ace_sha1_final(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct s5p_ace_hash_ctx *sctx = crypto_ahash_ctx(tfm);
-
- /* To Do */
-
- return 0;
-}
-
-static int s5p_ace_sha1_finup(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct s5p_ace_hash_ctx *sctx = crypto_ahash_ctx(tfm);
-
- /* To Do */
-
- return 0;
-}
-
-static int s5p_ace_sha1_digest(struct ahash_request *req)
-{
- s5p_ace_sha1_init(req);
- s5p_ace_sha1_update(req);
- s5p_ace_sha1_final(req);
-
- return 0;
-}
-#else
-static void sha1_export_ctx_to_sw(struct shash_desc *desc)
-{
- struct s5p_ace_hash_ctx *sctx = shash_desc_ctx(desc);
- struct sha1_state *sw_ctx = shash_desc_ctx(&sctx->sw_desc);
- int i;
-
- if (sctx->prelen_low == 0 && sctx->prelen_high == 0)
- crypto_shash_alg(&sw_tfm[sctx->type])
- ->init(&sctx->sw_desc);
- else {
- for (i = 0; i < SHA1_DIGEST_SIZE/4; i++)
- sw_ctx->state[i] = be32_to_cpu(sctx->state[i]);
- }
-
- sw_ctx->count = (((u64)sctx->prelen_high << 29) |
- (sctx->prelen_low >> 3)) + sctx->buflen;
-
- if (sctx->buflen)
- memcpy(sw_ctx->buffer, sctx->buffer, sctx->buflen);
-}
-
-static void sha256_export_ctx_to_sw(struct shash_desc *desc)
-{
- struct s5p_ace_hash_ctx *sctx = shash_desc_ctx(desc);
- struct sha256_state *sw_ctx = shash_desc_ctx(&sctx->sw_desc);
- int i;
-
- if (sctx->prelen_low == 0 && sctx->prelen_high == 0)
- crypto_shash_alg(&sw_tfm[sctx->type])
- ->init(&sctx->sw_desc);
- else {
- for (i = 0; i < SHA256_DIGEST_SIZE/4; i++)
- sw_ctx->state[i] = be32_to_cpu(sctx->state[i]);
- }
-
- sw_ctx->count = (((u64)sctx->prelen_high << 29) |
- (sctx->prelen_low >> 3)) + sctx->buflen;
-
- if (sctx->buflen)
- memcpy(sw_ctx->buf, sctx->buffer, sctx->buflen);
-}
-
-static void sha1_import_ctx_from_sw(struct shash_desc *desc)
-{
- struct s5p_ace_hash_ctx *sctx = shash_desc_ctx(desc);
- struct sha1_state *sw_ctx = shash_desc_ctx(&sctx->sw_desc);
- int i;
-
- for (i = 0; i < SHA1_DIGEST_SIZE/4; i++)
- sctx->state[i] = cpu_to_be32(sw_ctx->state[i]);
-
- memcpy(sctx->buffer, sw_ctx->buffer, sw_ctx->count &
- (SHA1_BLOCK_SIZE - 1));
- sctx->buflen = sw_ctx->count & (SHA1_BLOCK_SIZE - 1);
-
- sctx->prelen_low = (sw_ctx->count - sctx->buflen) << 3;
- sctx->prelen_high = (sw_ctx->count - sctx->buflen) >> 29;
-}
-
-static void sha256_import_ctx_from_sw(struct shash_desc *desc)
-{
- struct s5p_ace_hash_ctx *sctx = shash_desc_ctx(desc);
- struct sha256_state *sw_ctx = shash_desc_ctx(&sctx->sw_desc);
- int i;
-
- for (i = 0; i < SHA256_DIGEST_SIZE/4; i++)
- sctx->state[i] = cpu_to_be32(sw_ctx->state[i]);
-
- memcpy(sctx->buffer, sw_ctx->buf, sw_ctx->count &
- (SHA256_BLOCK_SIZE - 1));
- sctx->buflen = sw_ctx->count & (SHA256_BLOCK_SIZE - 1);
-
- sctx->prelen_low = (sw_ctx->count - sctx->buflen) << 3;
- sctx->prelen_high = (sw_ctx->count - sctx->buflen) >> 29;
-}
-
-static void hash_export_ctx_to_sw(struct shash_desc *desc)
-{
- struct s5p_ace_hash_ctx *sctx = shash_desc_ctx(desc);
-
- if (!sctx->sw_init) {
- sctx->sw_init = 1;
- if (sctx->prelen_low == 0 && sctx->prelen_high == 0 &&
- sctx->buflen == 0) {
- crypto_shash_alg(&sw_tfm[sctx->type])
- ->init(&sctx->sw_desc);
- return;
- }
- }
-
- if (sctx->type == TYPE_HASH_SHA1)
- sha1_export_ctx_to_sw(desc);
- else
- sha256_export_ctx_to_sw(desc);
-}
-
-static void hash_import_ctx_from_sw(struct shash_desc *desc)
-{
- struct s5p_ace_hash_ctx *sctx = shash_desc_ctx(desc);
-
- if (sctx->type == TYPE_HASH_SHA1)
- sha1_import_ctx_from_sw(desc);
- else
- sha256_import_ctx_from_sw(desc);
-
-}
-
-static int sha_sw_update(struct shash_desc *desc, const u8 *data, unsigned
- int len)
-{
- struct s5p_ace_hash_ctx *sctx = shash_desc_ctx(desc);
-
- hash_export_ctx_to_sw(desc);
- crypto_shash_alg(&sw_tfm[sctx->type])->update(&sctx->sw_desc, data,
- len);
- hash_import_ctx_from_sw(desc);
-
- return 0;
-}
-
-static int sha_sw_final(struct shash_desc *desc, u8 *out)
-{
- struct s5p_ace_hash_ctx *sctx = shash_desc_ctx(desc);
-
- hash_export_ctx_to_sw(desc);
- crypto_shash_alg(&sw_tfm[sctx->type])->final(&sctx->sw_desc, out);
- hash_import_ctx_from_sw(desc);
-
- return 0;
-}
-
-static int sha_sw_finup(struct shash_desc *desc, const u8 *data, unsigned int
- len, u8 *out)
-{
- struct s5p_ace_hash_ctx *sctx = shash_desc_ctx(desc);
-
- hash_export_ctx_to_sw(desc);
- crypto_shash_alg(&sw_tfm[sctx->type])->update(&sctx->sw_desc, data,
- len);
- crypto_shash_alg(&sw_tfm[sctx->type])->final(&sctx->sw_desc, out);
- hash_import_ctx_from_sw(desc);
-
- return 0;
-}
-
-#if defined(CONFIG_ACE_HASH_SHA1)
-static int s5p_ace_sha1_init(struct shash_desc *desc)
-{
- struct s5p_ace_hash_ctx *sctx = shash_desc_ctx(desc);
-
- sctx->prelen_high = sctx->prelen_low = 0;
- sctx->buflen = 0;
- sctx->type = TYPE_HASH_SHA1;
- sctx->sw_init = 0;
-
- return 0;
-}
-#endif
-
-#if defined(CONFIG_ACE_HASH_SHA256)
-static int s5p_ace_sha256_init(struct shash_desc *desc)
-{
- struct s5p_ace_hash_ctx *sctx = shash_desc_ctx(desc);
-
- sctx->prelen_high = sctx->prelen_low = 0;
- sctx->buflen = 0;
- sctx->type = TYPE_HASH_SHA256;
- sctx->sw_init = 0;
-
- return 0;
-}
-#endif
-
-static int s5p_ace_sha_update(struct shash_desc *desc,
- const u8 *data, unsigned int len)
-{
- struct s5p_ace_hash_ctx *sctx = shash_desc_ctx(desc);
- const u8 *src;
- int ret = 0;
- u32 partlen, tmplen, block_size;
-
- S5P_ACE_DEBUG("%s (buflen: 0x%x, len: 0x%x)\n",
- __func__, sctx->buflen, len);
-
- s5p_ace_resume_device(&s5p_ace_dev);
- local_bh_disable();
- while (test_and_set_bit(FLAGS_HASH_BUSY, &s5p_ace_dev.flags))
- udelay(1);
-
- if (s5p_ace_dev.flags & BIT_MASK(FLAGS_USE_SW)) {
- clear_bit(FLAGS_HASH_BUSY, &s5p_ace_dev.flags);
- local_bh_enable();
- return sha_sw_update(desc, data, len);
- }
-
- partlen = sctx->buflen;
- src = data;
-
- block_size = (sctx->type == TYPE_HASH_SHA1) ?
- SHA1_BLOCK_SIZE : SHA256_BLOCK_SIZE;
- s5p_ace_clock_gating(ACE_CLOCK_ON);
-
- if (partlen != 0) {
- if (partlen + len < block_size) {
- memcpy(sctx->buffer + partlen, src, len);
- sctx->buflen += len;
- goto out;
- } else {
- tmplen = block_size - partlen;
- memcpy(sctx->buffer + partlen, src, tmplen);
-
- ret = s5p_ace_sha_engine(sctx, NULL, sctx->buffer,
- block_size);
- if (ret)
- goto out;
-
- len -= tmplen;
- src += tmplen;
- }
- }
-
- partlen = len & (block_size - 1);
- len -= partlen;
- if (len > 0) {
- ret = s5p_ace_sha_engine(sctx, NULL, src, len);
- if (ret)
- goto out;
- }
-
- memcpy(sctx->buffer, src + len, partlen);
- sctx->buflen = partlen;
-
-out:
- s5p_ace_clock_gating(ACE_CLOCK_OFF);
- clear_bit(FLAGS_HASH_BUSY, &s5p_ace_dev.flags);
- local_bh_enable();
-
- return ret;
-}
-
-static int s5p_ace_sha_final(struct shash_desc *desc, u8 *out)
-{
- struct s5p_ace_hash_ctx *sctx = shash_desc_ctx(desc);
-
- S5P_ACE_DEBUG("%s (buflen: 0x%x)\n", __func__, sctx->buflen);
-
- s5p_ace_resume_device(&s5p_ace_dev);
- local_bh_disable();
- while (test_and_set_bit(FLAGS_HASH_BUSY, &s5p_ace_dev.flags))
- udelay(1);
-
- if (s5p_ace_dev.flags & BIT_MASK(FLAGS_USE_SW)) {
- clear_bit(FLAGS_HASH_BUSY, &s5p_ace_dev.flags);
- local_bh_enable();
- return sha_sw_final(desc, out);
- }
-
- s5p_ace_clock_gating(ACE_CLOCK_ON);
- s5p_ace_sha_engine(sctx, out, sctx->buffer, sctx->buflen);
- s5p_ace_clock_gating(ACE_CLOCK_OFF);
-
- /* Wipe context */
- memset(sctx, 0, sizeof(*sctx));
- clear_bit(FLAGS_HASH_BUSY, &s5p_ace_dev.flags);
- local_bh_enable();
-
- return 0;
-}
-
-static int s5p_ace_sha_finup(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- struct s5p_ace_hash_ctx *sctx = shash_desc_ctx(desc);
- const u8 *src;
- int ret = 0;
- u32 block_size;
-
- S5P_ACE_DEBUG("%s (buflen: 0x%x, len: 0x%x)\n",
- __func__, sctx->buflen, len);
-
- s5p_ace_resume_device(&s5p_ace_dev);
- local_bh_disable();
- while (test_and_set_bit(FLAGS_HASH_BUSY, &s5p_ace_dev.flags))
- udelay(1);
-
- if (s5p_ace_dev.flags & BIT_MASK(FLAGS_USE_SW)) {
- clear_bit(FLAGS_HASH_BUSY, &s5p_ace_dev.flags);
- local_bh_enable();
- return sha_sw_finup(desc, data, len, out);
- }
-
- src = data;
- block_size = (sctx->type == TYPE_HASH_SHA1) ?
- SHA1_BLOCK_SIZE : SHA256_BLOCK_SIZE;
-
- s5p_ace_clock_gating(ACE_CLOCK_ON);
-
- if (sctx->buflen != 0) {
- if (sctx->buflen + len <= block_size) {
- memcpy(sctx->buffer + sctx->buflen, src, len);
-
- len += sctx->buflen;
- src = sctx->buffer;
- } else {
- u32 copylen = block_size - sctx->buflen;
- memcpy(sctx->buffer + sctx->buflen, src, copylen);
-
- ret = s5p_ace_sha_engine(sctx, NULL, sctx->buffer,
- block_size);
- if (ret)
- goto out;
-
- len -= copylen;
- src += copylen;
- }
- }
-
- ret = s5p_ace_sha_engine(sctx, out, src, len);
-
-out:
- s5p_ace_clock_gating(ACE_CLOCK_OFF);
-
- /* Wipe context */
- memset(sctx, 0, sizeof(*sctx));
- clear_bit(FLAGS_HASH_BUSY, &s5p_ace_dev.flags);
- local_bh_enable();
-
- return ret;
-}
-
-#if defined(CONFIG_ACE_HASH_SHA1)
-static int s5p_ace_sha1_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- int ret;
-
- ret = s5p_ace_sha1_init(desc);
- if (ret)
- return ret;
-
- return s5p_ace_sha_finup(desc, data, len, out);
-}
-#endif
-
-#if defined(CONFIG_ACE_HASH_SHA256)
-static int s5p_ace_sha256_digest(struct shash_desc *desc, const u8 *data,
- unsigned int len, u8 *out)
-{
- int ret;
-
- ret = s5p_ace_sha256_init(desc);
- if (ret)
- return ret;
-
- return s5p_ace_sha_finup(desc, data, len, out);
-}
-#endif
-
-static int s5p_ace_hash_export(struct shash_desc *desc, void *out)
-{
- struct s5p_ace_hash_ctx *sctx = shash_desc_ctx(desc);
- memcpy(out, sctx, sizeof(*sctx));
- return 0;
-}
-
-static int s5p_ace_hash_import(struct shash_desc *desc, const void *in)
-{
- struct s5p_ace_hash_ctx *sctx = shash_desc_ctx(desc);
- memcpy(sctx, in, sizeof(*sctx));
- return 0;
-}
-#endif
-
-static int s5p_ace_hash_cra_init(struct crypto_tfm *tfm)
-{
-#ifdef CONFIG_ACE_HASH_ASYNC
-#endif
-
- S5P_ACE_DEBUG("%s\n", __func__);
-
- return 0;
-}
-
-static void s5p_ace_hash_cra_exit(struct crypto_tfm *tfm)
-{
-#ifdef CONFIG_ACE_HASH_ASYNC
-#endif
-
- S5P_ACE_DEBUG("%s\n", __func__);
-}
-
-#ifdef CONFIG_ACE_HASH_ASYNC
-static struct ahash_alg algs_hash[] = {
-#if defined(CONFIG_ACE_HASH_SHA1)
- {
- .init = s5p_ace_sha1_init,
- .update = s5p_ace_sha_update,
- .final = s5p_ace_sha_final,
- .finup = s5p_ace_sha_finup,
- .digest = s5p_ace_sha1_digest,
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.base = {
- .cra_name = "sha1",
- .cra_driver_name = "sha1-s5p-ace",
- .cra_priority = 200,
- .cra_flags = CRYPTO_ALG_TYPE_AHASH
- | CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s5p_ace_hash_ctx),
- .cra_alignmask = 0,
- .cra_module = THIS_MODULE,
- .cra_init = s5p_ace_hash_cra_init,
- .cra_exit = s5p_ace_hash_cra_exit,
- }
- }
-#endif
-};
-#else
-static struct shash_alg algs_hash[] = {
-#if defined(CONFIG_ACE_HASH_SHA1)
- {
- .digestsize = SHA1_DIGEST_SIZE,
- .init = s5p_ace_sha1_init,
- .update = s5p_ace_sha_update,
- .final = s5p_ace_sha_final,
- .finup = s5p_ace_sha_finup,
- .digest = s5p_ace_sha1_digest,
- .export = s5p_ace_hash_export,
- .import = s5p_ace_hash_import,
- .descsize = sizeof(struct s5p_ace_hash_ctx),
- .statesize = sizeof(struct s5p_ace_hash_ctx),
- .base = {
- .cra_name = "sha1",
- .cra_driver_name = "sha1-s5p-ace",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_init = s5p_ace_hash_cra_init,
- .cra_exit = s5p_ace_hash_cra_exit,
- }
- },
-#endif
-#if defined(CONFIG_ACE_HASH_SHA256)
- {
- .digestsize = SHA256_DIGEST_SIZE,
- .init = s5p_ace_sha256_init,
- .update = s5p_ace_sha_update,
- .final = s5p_ace_sha_final,
- .finup = s5p_ace_sha_finup,
- .digest = s5p_ace_sha256_digest,
- .export = s5p_ace_hash_export,
- .import = s5p_ace_hash_import,
- .descsize = sizeof(struct s5p_ace_hash_ctx),
- .statesize = sizeof(struct s5p_ace_hash_ctx),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-s5p-ace",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_SHASH,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_init = s5p_ace_hash_cra_init,
- .cra_exit = s5p_ace_hash_cra_exit,
- }
- }
-#endif
-};
-#endif /* CONFIG_ACE_HASH_ASYNC */
-#endif /* CONFIG_ACE_HASH_SHA1 or CONFIG_ACE_HASH_SHA256 */
-
-#if defined(CONFIG_ACE_BC_IRQMODE) || defined(CONFIG_ACE_HASH_IRQMODE)
-static irqreturn_t s5p_ace_interrupt(int irq, void *data)
-{
- struct s5p_ace_device *dev = data;
-
- s5p_ace_write_sfr(ACE_FC_INTPEND,
- ACE_FC_BRDMA | ACE_FC_BTDMA | ACE_FC_HRDMA);
-
-#ifdef CONFIG_ACE_BC_IRQMODE
- s5p_ace_write_sfr(ACE_FC_INTENCLR, ACE_FC_BRDMA | ACE_FC_BTDMA);
-
- tasklet_schedule(&dev->task_bc);
-#endif
-
-#ifdef CONFIG_ACE_HASH_IRQMODE
- s5p_ace_write_sfr(ACE_FC_INTENCLR, ACE_FC_HRDMA);
-#endif
-
- return IRQ_HANDLED;
-}
-#endif
-
-int ace_s5p_get_sync_lock(void)
-{
- unsigned long timeout;
- int get_lock_bc = 0, get_lock_hash = 0;
- unsigned long flags;
-
- timeout = jiffies + msecs_to_jiffies(10);
- while (time_before(jiffies, timeout)) {
- if (!test_and_set_bit(FLAGS_BC_BUSY, &s5p_ace_dev.flags)) {
- get_lock_bc = 1;
- break;
- }
- udelay(1);
- }
-
- timeout = jiffies + msecs_to_jiffies(10);
- while (time_before(jiffies, timeout)) {
- if (!test_and_set_bit(FLAGS_HASH_BUSY, &s5p_ace_dev.flags)) {
- get_lock_hash = 1;
- break;
- }
- udelay(1);
- }
-
- /* set lock flag */
- if (get_lock_bc && get_lock_hash) {
- spin_lock_irqsave(&s5p_ace_dev.lock, flags);
- count_use_sw++;
- spin_unlock_irqrestore(&s5p_ace_dev.lock, flags);
- set_bit(FLAGS_USE_SW, &s5p_ace_dev.flags);
- }
-
- if (get_lock_bc) {
-#ifdef CONFIG_ACE_BC_ASYNC
- if (s5p_ace_dev.queue_bc.qlen > 0) {
- s5p_ace_clock_gating(ACE_CLOCK_ON);
- s5p_ace_dev.rc_depth_bc = 0;
- s5p_ace_aes_handle_req(&s5p_ace_dev);
- } else {
- clear_bit(FLAGS_BC_BUSY, &s5p_ace_dev.flags);
- }
-#else
- clear_bit(FLAGS_BC_BUSY, &s5p_ace_dev.flags);
-#endif
- }
-
- if (get_lock_hash)
- clear_bit(FLAGS_HASH_BUSY, &s5p_ace_dev.flags);
-
- if (!(get_lock_bc && get_lock_hash))
- return -EBUSY;
-
- s5p_ace_clock_gating(ACE_CLOCK_ON);
-
- return 0;
-}
-
-int ace_s5p_release_sync_lock(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&s5p_ace_dev.lock, flags);
- count_use_sw--;
- spin_unlock_irqrestore(&s5p_ace_dev.lock, flags);
-
- /* clear lock flag */
- if (!count_use_sw)
- clear_bit(FLAGS_USE_SW, &s5p_ace_dev.flags);
-
- s5p_ace_clock_gating(ACE_CLOCK_OFF);
-
- return 0;
-}
-
-static int __devinit s5p_ace_probe(struct platform_device *pdev)
-{
- struct resource *res;
- struct s5p_ace_device *s5p_adt = &s5p_ace_dev;
- int i, j, k, m;
- int ret;
-
-#if defined(ACE_DEBUG_HEARTBEAT) || defined(ACE_DEBUG_WATCHDOG)
- do_gettimeofday(&timestamp_base);
- for (i = 0; i < 5; i++)
- do_gettimeofday(&timestamp[i]);
-#endif
-
- memset(s5p_adt, 0, sizeof(*s5p_adt));
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to get platform resource\n");
- return -ENOENT;
- }
-
- s5p_adt->ace_base = ioremap(res->start, resource_size(res));
- if (s5p_adt->ace_base == NULL) {
- dev_err(&pdev->dev, "failed to remap register block\n");
- ret = -ENOMEM;
- goto err_mem1;
- }
-
- s5p_adt->clock = clk_get(&pdev->dev, "secss");
- if (IS_ERR(s5p_adt->clock)) {
- dev_err(&pdev->dev, "failed to find clock source\n");
- ret = -EBUSY;
- goto err_clk;
- }
- s5p_ace_init_clock_gating();
- s5p_adt->cputype = platform_get_device_id(pdev)->driver_data;
-
-#if defined(CONFIG_ACE_BC_IRQMODE) || defined(CONFIG_ACE_HASH_IRQMODE)
- s5p_adt->irq = platform_get_irq(pdev, 0);
- if (s5p_adt->irq < 0) {
- dev_err(&pdev->dev, "Failed to get irq#\n");
- s5p_adt->irq = 0;
- ret = -ENODEV;
- goto err_irq;
- }
- ret = request_irq(s5p_adt->irq, s5p_ace_interrupt, 0,
- S5P_ACE_DRIVER_NAME, (void *)s5p_adt);
- if (ret) {
- dev_err(&pdev->dev, "Failed to request IRQ%d: err: %d.\n",
- s5p_adt->irq, ret);
- s5p_adt->irq = 0;
- ret = -ENODEV;
- goto err_irq;
- }
-#endif
-
-#ifdef ACE_USE_ACP
- s5p_adt->sss_usercon = ioremap(PA_SSS_USER_CON & PAGE_MASK, SZ_4K);
- if (s5p_adt->sss_usercon == NULL) {
- dev_err(&pdev->dev, "failed to remap register SSS_USER_CON\n");
- ret = -EBUSY;
- goto err_mem2;
- }
-
- /* Set ARUSER[12:8] and AWUSER[4:0] */
- writel(0x101, s5p_adt->sss_usercon
- + (PA_SSS_USER_CON & (PAGE_SIZE - 1)));
-#endif
-
- spin_lock_init(&s5p_adt->lock);
- s5p_adt->flags = 0;
- hrtimer_init(&s5p_adt->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- s5p_adt->timer.function = s5p_ace_timer_func;
- INIT_WORK(&s5p_adt->work, s5p_ace_deferred_clock_disable);
-#ifdef ACE_DEBUG_HEARTBEAT
- hrtimer_init(&s5p_adt->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- s5p_adt->heartbeat.function = s5p_ace_heartbeat_func;
- hrtimer_start(&s5p_ace_dev.heartbeat,
- ns_to_ktime((u64)ACE_HEARTBEAT_MS * NSEC_PER_MSEC),
- HRTIMER_MODE_REL);
-#endif
-#ifdef ACE_DEBUG_WATCHDOG
- hrtimer_init(&s5p_adt->watchdog_bc, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- s5p_adt->watchdog_bc.function = s5p_ace_watchdog_bc_func;
-#endif
-
-#ifdef CONFIG_ACE_BC_ASYNC
- crypto_init_queue(&s5p_adt->queue_bc, 1);
- tasklet_init(&s5p_adt->task_bc, s5p_ace_bc_task,
- (unsigned long)s5p_adt);
-#endif
-
-#ifdef CONFIG_ACE_HASH_ASYNC
- crypto_init_queue(&s5p_adt->queue_hash, 1);
- tasklet_init(&s5p_adt->task_hash, s5p_ace_hash_task,
- (unsigned long)s5p_adt);
-#endif
-
-#if defined(CONFIG_ACE_BC)
- for (i = 0; i < ARRAY_SIZE(algs_bc); i++) {
- INIT_LIST_HEAD(&algs_bc[i].cra_list);
- algs_bc[i].cra_flags |= CRYPTO_ALG_NEED_FALLBACK;
- ret = crypto_register_alg(&algs_bc[i]);
- if (ret)
- goto err_reg_bc;
- printk(KERN_INFO "ACE: %s\n", algs_bc[i].cra_driver_name);
- }
-#endif
-
-#if defined(CONFIG_ACE_HASH_SHA1) || defined(CONFIG_ACE_HASH_SHA256)
- fallback_hash = (struct crypto_hash **)
- kmalloc(sizeof(struct crypto_hash *) *
- ARRAY_SIZE(algs_hash), GFP_KERNEL);
- sw_tfm = (struct crypto_shash *) kmalloc(sizeof(struct crypto_shash)
- * ARRAY_SIZE(algs_hash),
- GFP_KERNEL);
-
- for (m = 0; m < ARRAY_SIZE(algs_hash); m++) {
- fallback_hash[m] =
- crypto_alloc_hash(algs_hash[m].base.cra_name, 0,
- CRYPTO_ALG_ASYNC);
-
- if (IS_ERR(fallback_hash[m])) {
- printk(KERN_ERR "failed to load transform for %s: %ld\n",
- algs_hash[m].base.cra_name,
- PTR_ERR(fallback_hash[m]));
- goto err_fallback_hash;
- }
-
- sw_tfm[m].base.__crt_alg = fallback_hash[m]->base.__crt_alg;
- }
-
- for (j = 0; j < ARRAY_SIZE(algs_hash); j++) {
-#ifdef CONFIG_ACE_HASH_ASYNC
- ret = crypto_register_ahash(&algs_hash[j]);
-#else
- ret = crypto_register_shash(&algs_hash[j]);
-#endif
- if (ret)
- goto err_reg_hash;
-#ifdef CONFIG_ACE_HASH_ASYNC
- printk(KERN_INFO "ACE: %s\n",
- algs_hash[j].halg.base.cra_driver_name);
-#else
- printk(KERN_INFO "ACE: %s\n",
- algs_hash[j].base.cra_driver_name);
-#endif
- }
-#endif
-
- secmem_ftn.lock = &ace_s5p_get_sync_lock;
- secmem_ftn.release = &ace_s5p_release_sync_lock;
- secmem_crypto_register(&secmem_ftn);
-
- count_use_sw = 0;
-
- printk(KERN_NOTICE "ACE driver is initialized\n");
-
- return 0;
-
-#if defined(CONFIG_ACE_HASH_SHA1) || defined(CONFIG_ACE_HASH_SHA256)
-err_reg_hash:
- for (k = 0; k < j; k++)
-#ifdef CONFIG_ACE_HASH_ASYNC
- crypto_unregister_ahash(&algs_hash[k]);
-#else
- crypto_unregister_shash(&algs_hash[k]);
-#endif
-err_fallback_hash:
- kfree(sw_tfm);
- for (k = 0; k < m; k++)
- crypto_free_hash(fallback_hash[k]);
- kfree(fallback_hash);
-#endif
-#if defined(CONFIG_ACE_BC)
-err_reg_bc:
- for (k = 0; k < i; k++)
- crypto_unregister_alg(&algs_bc[k]);
-#ifdef CONFIG_ACE_BC_ASYNC
- tasklet_kill(&s5p_adt->task_bc);
-#endif
-#endif
-#ifdef CONFIG_ACE_HASH_ASYNC
- tasklet_kill(&s5p_adt->task_hash);
-#endif
-#ifdef ACE_USE_ACP
- iounmap(s5p_adt->sss_usercon);
-err_mem2:
-#endif
-#if defined(CONFIG_ACE_BC_IRQMODE) || defined(CONFIG_ACE_HASH_IRQMODE)
-err_irq:
- free_irq(s5p_adt->irq, (void *)s5p_adt);
- s5p_adt->irq = 0;
-#endif
-err_clk:
- iounmap(s5p_adt->ace_base);
- s5p_adt->ace_base = NULL;
-err_mem1:
-
- printk(KERN_ERR "ACE driver initialization failed.\n");
-
- return ret;
-}
-
-static int s5p_ace_remove(struct platform_device *dev)
-{
- struct s5p_ace_device *s5p_adt = &s5p_ace_dev;
- int i;
-
-#ifdef ACE_DEBUG_HEARTBEAT
- hrtimer_cancel(&s5p_adt->heartbeat);
-#endif
-
-#if defined(CONFIG_ACE_BC_IRQMODE) || defined(CONFIG_ACE_HASH_IRQMODE)
- if (s5p_adt->irq) {
- free_irq(s5p_adt->irq, (void *)s5p_adt);
- s5p_adt->irq = 0;
- }
-#endif
-
- if (s5p_adt->clock) {
- clk_put(s5p_adt->clock);
- s5p_adt->clock = NULL;
- }
-
- if (s5p_adt->ace_base) {
- iounmap(s5p_adt->ace_base);
- s5p_adt->ace_base = NULL;
- }
-
-#ifdef ACE_USE_ACP
- if (s5p_adt->sss_usercon) {
- iounmap(s5p_adt->sss_usercon);
- s5p_adt->sss_usercon = NULL;
- }
-#endif
-
- secmem_crypto_deregister();
-
-#if defined(CONFIG_ACE_HASH_SHA1) || defined(CONFIG_ACE_HASH_SHA256)
- kfree(sw_tfm);
- for (i = 0; i < ARRAY_SIZE(algs_hash); i++)
- crypto_free_hash(fallback_hash[i]);
-
- kfree(fallback_hash);
-
- for (i = 0; i < ARRAY_SIZE(algs_hash); i++)
-#ifdef CONFIG_ACE_HASH_ASYNC
- crypto_unregister_ahash(&algs_hash[i]);
-#else
- crypto_unregister_shash(&algs_hash[i]);
-#endif
-#endif
-
-#if defined(CONFIG_ACE_BC)
- for (i = 0; i < ARRAY_SIZE(algs_bc); i++)
- crypto_unregister_alg(&algs_bc[i]);
-
-#ifdef CONFIG_ACE_BC_ASYNC
- tasklet_kill(&s5p_adt->task_bc);
-#endif
-#endif
-#ifdef CONFIG_ACE_HASH_ASYNC
- tasklet_kill(&s5p_adt->task_hash);
-#endif
-
- flush_work(&s5p_ace_dev.work);
-
- printk(KERN_INFO "ACE driver is removed\n");
-
- return 0;
-}
-
-static int s5p_ace_suspend(struct platform_device *dev, pm_message_t state)
-{
- unsigned long timeout;
- int get_lock_bc = 0, get_lock_hash = 0;
-
-#if defined(ACE_DEBUG_HEARTBEAT) || defined(ACE_DEBUG_WATCHDOG)
- do_gettimeofday(&timestamp[3]); /* 3: suspend */
-#endif
-
- timeout = jiffies + msecs_to_jiffies(10);
- while (time_before(jiffies, timeout)) {
- if (!test_and_set_bit(FLAGS_BC_BUSY, &s5p_ace_dev.flags)) {
- get_lock_bc = 1;
- break;
- }
- udelay(1);
- }
- timeout = jiffies + msecs_to_jiffies(10);
- while (time_before(jiffies, timeout)) {
- if (!test_and_set_bit(FLAGS_HASH_BUSY, &s5p_ace_dev.flags)) {
- get_lock_hash = 1;
- break;
- }
- udelay(1);
- }
-
- if (get_lock_bc && get_lock_hash) {
- set_bit(FLAGS_SUSPENDED, &s5p_ace_dev.flags);
- return 0;
- }
-
- printk(KERN_ERR "ACE: suspend: time out.\n");
-
- if (get_lock_bc)
- clear_bit(FLAGS_BC_BUSY, &s5p_ace_dev.flags);
- if (get_lock_hash)
- clear_bit(FLAGS_HASH_BUSY, &s5p_ace_dev.flags);
-
- return -EBUSY;
-}
-
-static int s5p_ace_resume(struct platform_device *dev)
-{
-#if defined(ACE_DEBUG_HEARTBEAT) || defined(ACE_DEBUG_WATCHDOG)
- do_gettimeofday(&timestamp[4]); /* 4: resume */
-#endif
-
- s5p_ace_resume_device(&s5p_ace_dev);
-
- return 0;
-}
-
-static struct platform_device_id s5p_ace_driver_ids[] = {
- {
- .name = "s5pv210-ace",
- .driver_data = TYPE_S5PV210,
- }, {
- .name = "exynos-ace",
- .driver_data = TYPE_EXYNOS,
- },
- {}
-};
-MODULE_DEVICE_TABLE(platform, s5p_ace_driver_ids);
-
-static struct platform_driver s5p_ace_driver = {
- .probe = s5p_ace_probe,
- .remove = s5p_ace_remove,
- .suspend = s5p_ace_suspend,
- .resume = s5p_ace_resume,
- .id_table = s5p_ace_driver_ids,
- .driver = {
- .name = S5P_ACE_DRIVER_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-static int __init s5p_ace_init(void)
-{
- printk(KERN_INFO "S5P ACE Driver, (c) 2010 Samsung Electronics\n");
-
- return platform_driver_register(&s5p_ace_driver);
-}
-
-static void __exit s5p_ace_exit(void)
-{
- platform_driver_unregister(&s5p_ace_driver);
-}
-
-module_init(s5p_ace_init);
-module_exit(s5p_ace_exit);
-
-MODULE_DESCRIPTION("S5P ACE(Advanced Crypto Engine) support");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Dong Jin PARK");
-
diff --git a/drivers/crypto/ace.h b/drivers/crypto/ace.h
deleted file mode 100644
index 8d75d14..0000000
--- a/drivers/crypto/ace.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Support for ACE (Advanced Crypto Engine) for S5PV210/EXYNOS4210.
- *
- * Copyright (c) 2011 Samsung Electronics
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef _CRYPTO_S5P_ACE_H
-#define _CRYPTO_S5P_ACE_H
-
-
-/*****************************************************************
- Definition - Mechanism
-*****************************************************************/
-#define BC_MODE_ENC 0
-#define BC_MODE_DEC 1
-
-/*
- * Mechanism ID definition
- * : Mech. Type (8-bit) : Algorithm (8-bit) : Info (8-bit)
- * : Reserved (8-bit)
- */
-#define _MECH_ID_(_TYPE_, _NAME_, _MODE_) \
- ((((_TYPE_) & 0xFF) << 24) \
- | (((_NAME_) & 0xFF) << 16) \
- | (((_MODE_) & 0xFF) << 8) \
- | (((0) & 0xFF) << 0))
-
-#define MI_MASK _MECH_ID_(0xFF, 0xFF, 0xFF)
-#define MI_GET_TYPE(_t_) (((_t_) >> 24) & 0xFF)
-#define MI_GET_NAME(_n_) (((_n_) >> 16) & 0xFF)
-#define MI_GET_INFO(_i_) (((_i_) >> 8) & 0xFF)
-
-/* type (8-bits) */
-#define _TYPE_BC_ 0x01
-#define _TYPE_HASH_ 0x02
-#define _TYPE_MAC_ 0x03
-
-/* block cipher: algorithm (8-bits) */
-#define _NAME_DES_ 0x01
-#define _NAME_TDES_ 0x02
-#define _NAME_AES_ 0x03
-
-/* block cipher: mode of operation */
-#define _MODE_ECB_ 0x10
-#define _MODE_CBC_ 0x20
-#define _MODE_CTR_ 0x30
-
-/* block cipher: padding method */
-#define _PAD_NO_ 0x00
-/*#define _PAD_ZERO_ 0x01 */ /* Not supported */
-#define _PAD_PKCS7_ 0x02 /* Default padding method */
-/*#define _PAD_ANSIX923_ 0x03 */ /* Not supported */
-/*#define _PAD_ISO10126_ 0x04 */ /* Not supported */
-
-#define MI_GET_MODE(_m_) (((_m_) >> 8) & 0xF0)
-#define MI_GET_PADDING(_i_) (((_i_) >> 8) & 0x0F)
-
-#define MI_AES_ECB _MECH_ID_(_TYPE_BC_, _NAME_AES_, \
- _MODE_ECB_ | _PAD_NO_)
-#define MI_AES_ECB_PAD _MECH_ID_(_TYPE_BC_, _NAME_AES_, \
- _MODE_ECB_ | _PAD_PKCS7_)
-#define MI_AES_CBC _MECH_ID_(_TYPE_BC_, _NAME_AES_, \
- _MODE_CBC_ | _PAD_NO_)
-#define MI_AES_CBC_PAD _MECH_ID_(_TYPE_BC_, _NAME_AES_, \
- _MODE_CBC_ | _PAD_PKCS7_)
-#define MI_AES_CTR _MECH_ID_(_TYPE_BC_, _NAME_AES_, \
- _MODE_CTR_ | _PAD_NO_)
-#define MI_AES_CTR_PAD _MECH_ID_(_TYPE_BC_, _NAME_AES_, \
- _MODE_CTR_ | _PAD_PKCS7_)
-
-/* hash: algorithm (8-bits) */
-#define _NAME_HASH_SHA1_ 0x01
-#define _NAME_HASH_MD5_ 0x02
-
-#define MI_SHA1 _MECH_ID_(_TYPE_HASH_, _NAME_HASH_SHA1_, 0)
-#define MI_MD5 _MECH_ID_(_TYPE_HASH_, _NAME_HASH_MD5_, 0)
-
-/* hash: algorithm (8-bits) */
-#define _NAME_HMAC_SHA1_ 0x01
-
-#define MI_HMAC_SHA1 _MECH_ID_(_TYPE_MAC_, _NAME_HMAC_SHA1_, 0)
-
-/* Flag bits */
-#define FLAG_ENC_BIT (1 << 0)
-
-#endif /* _CRYPTO_S5P_ACE_H */
diff --git a/drivers/crypto/ace_sfr.h b/drivers/crypto/ace_sfr.h
deleted file mode 100644
index 367bc14..0000000
--- a/drivers/crypto/ace_sfr.h
+++ /dev/null
@@ -1,497 +0,0 @@
-/*
- * Header file for Advanced Crypto Engine - SFR definitions
- *
- * Copyright (c) 2011 Samsung Electronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- */
-
-#ifndef __ACE_SFR_H__
-#define __ACE_SFR_H__
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/*****************************************************************
- SFR Addresses
-*****************************************************************/
-#if defined(CONFIG_ARCH_S5PV210)
-#define ACE_SFR_BASE (0xEA000000)
-#elif defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5)
-#define ACE_SFR_BASE (0x10830000)
-#else
-#error No ARCH is defined.
-#endif
-
-#if defined(CONFIG_ARCH_S5PV210)
-#define ACE_FC_OFFSET (0x0)
-#define ACE_AES_OFFSET (0x4000)
-#define ACE_TDES_OFFSET (0x5000)
-#define ACE_HASH_OFFSET (0x6000)
-#define ACE_PKA_OFFSET (0x7000)
-#elif defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5)
-#define ACE_FC_OFFSET (0x0)
-#define ACE_AES_OFFSET (0x200)
-#define ACE_TDES_OFFSET (0x300)
-#define ACE_HASH_OFFSET (0x400)
-#define ACE_PKA_OFFSET (0x700)
-#endif
-
-/* Feed control registers */
-#define ACE_FC_INTSTAT (ACE_FC_OFFSET + 0x00)
-#define ACE_FC_INTENSET (ACE_FC_OFFSET + 0x04)
-#define ACE_FC_INTENCLR (ACE_FC_OFFSET + 0x08)
-#define ACE_FC_INTPEND (ACE_FC_OFFSET + 0x0C)
-#define ACE_FC_FIFOSTAT (ACE_FC_OFFSET + 0x10)
-#define ACE_FC_FIFOCTRL (ACE_FC_OFFSET + 0x14)
-#define ACE_FC_GLOBAL (ACE_FC_OFFSET + 0x18)
-#define ACE_FC_BRDMAS (ACE_FC_OFFSET + 0x20)
-#define ACE_FC_BRDMAL (ACE_FC_OFFSET + 0x24)
-#define ACE_FC_BRDMAC (ACE_FC_OFFSET + 0x28)
-#define ACE_FC_BTDMAS (ACE_FC_OFFSET + 0x30)
-#define ACE_FC_BTDMAL (ACE_FC_OFFSET + 0x34)
-#define ACE_FC_BTDMAC (ACE_FC_OFFSET + 0x38)
-#define ACE_FC_HRDMAS (ACE_FC_OFFSET + 0x40)
-#define ACE_FC_HRDMAL (ACE_FC_OFFSET + 0x44)
-#define ACE_FC_HRDMAC (ACE_FC_OFFSET + 0x48)
-#define ACE_FC_PKDMAS (ACE_FC_OFFSET + 0x50)
-#define ACE_FC_PKDMAL (ACE_FC_OFFSET + 0x54)
-#define ACE_FC_PKDMAC (ACE_FC_OFFSET + 0x58)
-#define ACE_FC_PKDMAO (ACE_FC_OFFSET + 0x5C)
-
-/* AES control registers */
-#define ACE_AES_CONTROL (ACE_AES_OFFSET + 0x00)
-#define ACE_AES_STATUS (ACE_AES_OFFSET + 0x04)
-
-#define ACE_AES_IN1 (ACE_AES_OFFSET + 0x10)
-#define ACE_AES_IN2 (ACE_AES_OFFSET + 0x14)
-#define ACE_AES_IN3 (ACE_AES_OFFSET + 0x18)
-#define ACE_AES_IN4 (ACE_AES_OFFSET + 0x1C)
-
-#define ACE_AES_OUT1 (ACE_AES_OFFSET + 0x20)
-#define ACE_AES_OUT2 (ACE_AES_OFFSET + 0x24)
-#define ACE_AES_OUT3 (ACE_AES_OFFSET + 0x28)
-#define ACE_AES_OUT4 (ACE_AES_OFFSET + 0x2C)
-
-#define ACE_AES_IV1 (ACE_AES_OFFSET + 0x30)
-#define ACE_AES_IV2 (ACE_AES_OFFSET + 0x34)
-#define ACE_AES_IV3 (ACE_AES_OFFSET + 0x38)
-#define ACE_AES_IV4 (ACE_AES_OFFSET + 0x3C)
-
-#define ACE_AES_CNT1 (ACE_AES_OFFSET + 0x40)
-#define ACE_AES_CNT2 (ACE_AES_OFFSET + 0x44)
-#define ACE_AES_CNT3 (ACE_AES_OFFSET + 0x48)
-#define ACE_AES_CNT4 (ACE_AES_OFFSET + 0x4C)
-
-#define ACE_AES_KEY1 (ACE_AES_OFFSET + 0x80)
-#define ACE_AES_KEY2 (ACE_AES_OFFSET + 0x84)
-#define ACE_AES_KEY3 (ACE_AES_OFFSET + 0x88)
-#define ACE_AES_KEY4 (ACE_AES_OFFSET + 0x8C)
-#define ACE_AES_KEY5 (ACE_AES_OFFSET + 0x90)
-#define ACE_AES_KEY6 (ACE_AES_OFFSET + 0x94)
-#define ACE_AES_KEY7 (ACE_AES_OFFSET + 0x98)
-#define ACE_AES_KEY8 (ACE_AES_OFFSET + 0x9C)
-
-/* TDES control registers */
-#define ACE_TDES_CONTROL (ACE_TDES_OFFSET + 0x00)
-#define ACE_TDES_STATUS (ACE_TDES_OFFSET + 0x04)
-
-#define ACE_TDES_KEY11 (ACE_TDES_OFFSET + 0x10)
-#define ACE_TDES_KEY12 (ACE_TDES_OFFSET + 0x14)
-#define ACE_TDES_KEY21 (ACE_TDES_OFFSET + 0x18)
-#define ACE_TDES_KEY22 (ACE_TDES_OFFSET + 0x1C)
-#define ACE_TDES_KEY31 (ACE_TDES_OFFSET + 0x20)
-#define ACE_TDES_KEY32 (ACE_TDES_OFFSET + 0x24)
-
-#define ACE_TDES_IV1 (ACE_TDES_OFFSET + 0x28)
-#define ACE_TDES_IV2 (ACE_TDES_OFFSET + 0x2C)
-
-#define ACE_TDES_IN1 (ACE_TDES_OFFSET + 0x30)
-#define ACE_TDES_IN2 (ACE_TDES_OFFSET + 0x34)
-
-#define ACE_TDES_OUT1 (ACE_TDES_OFFSET + 0x38)
-#define ACE_TDES_OUT2 (ACE_TDES_OFFSET + 0x3C)
-
-/* HASH control registers */
-#if defined(CONFIG_ARCH_S5PV210)
-#define ACE_HASH_CONTROL (ACE_HASH_OFFSET + 0x00)
-#define ACE_HASH_CONTROL2 (ACE_HASH_OFFSET + 0x04)
-#define ACE_HASH_FIFO_MODE (ACE_HASH_OFFSET + 0x08)
-#define ACE_HASH_BYTESWAP (ACE_HASH_OFFSET + 0x0C)
-#define ACE_HASH_STATUS (ACE_HASH_OFFSET + 0x10)
-#define ACE_HASH_MSGSIZE_LOW (ACE_HASH_OFFSET + 0x14)
-#define ACE_HASH_MSGSIZE_HIGH (ACE_HASH_OFFSET + 0x18)
-
-#define ACE_HASH_IN1 (ACE_HASH_OFFSET + 0x20)
-#define ACE_HASH_IN2 (ACE_HASH_OFFSET + 0x24)
-#define ACE_HASH_IN3 (ACE_HASH_OFFSET + 0x28)
-#define ACE_HASH_IN4 (ACE_HASH_OFFSET + 0x2C)
-#define ACE_HASH_IN5 (ACE_HASH_OFFSET + 0x30)
-#define ACE_HASH_IN6 (ACE_HASH_OFFSET + 0x34)
-#define ACE_HASH_IN7 (ACE_HASH_OFFSET + 0x38)
-#define ACE_HASH_IN8 (ACE_HASH_OFFSET + 0x3C)
-
-#define ACE_HASH_SEED1 (ACE_HASH_OFFSET + 0x40)
-#define ACE_HASH_SEED2 (ACE_HASH_OFFSET + 0x44)
-#define ACE_HASH_SEED3 (ACE_HASH_OFFSET + 0x48)
-#define ACE_HASH_SEED4 (ACE_HASH_OFFSET + 0x4C)
-#define ACE_HASH_SEED5 (ACE_HASH_OFFSET + 0x50)
-
-#define ACE_HASH_RESULT1 (ACE_HASH_OFFSET + 0x60)
-#define ACE_HASH_RESULT2 (ACE_HASH_OFFSET + 0x64)
-#define ACE_HASH_RESULT3 (ACE_HASH_OFFSET + 0x68)
-#define ACE_HASH_RESULT4 (ACE_HASH_OFFSET + 0x6C)
-#define ACE_HASH_RESULT5 (ACE_HASH_OFFSET + 0x70)
-
-#define ACE_HASH_PRNG1 (ACE_HASH_OFFSET + 0x80)
-#define ACE_HASH_PRNG2 (ACE_HASH_OFFSET + 0x84)
-#define ACE_HASH_PRNG3 (ACE_HASH_OFFSET + 0x88)
-#define ACE_HASH_PRNG4 (ACE_HASH_OFFSET + 0x8C)
-#define ACE_HASH_PRNG5 (ACE_HASH_OFFSET + 0x90)
-
-#define ACE_HASH_IV1 (ACE_HASH_OFFSET + 0xA0)
-#define ACE_HASH_IV2 (ACE_HASH_OFFSET + 0xA4)
-#define ACE_HASH_IV3 (ACE_HASH_OFFSET + 0xA8)
-#define ACE_HASH_IV4 (ACE_HASH_OFFSET + 0xAC)
-#define ACE_HASH_IV5 (ACE_HASH_OFFSET + 0xB0)
-
-#define ACE_HASH_PRELEN_HIGH (ACE_HASH_OFFSET + 0xC0)
-#define ACE_HASH_PRELEN_LOW (ACE_HASH_OFFSET + 0xC4)
-#elif defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5)
-#define ACE_HASH_CONTROL (ACE_HASH_OFFSET + 0x00)
-#define ACE_HASH_CONTROL2 (ACE_HASH_OFFSET + 0x04)
-#define ACE_HASH_FIFO_MODE (ACE_HASH_OFFSET + 0x08)
-#define ACE_HASH_BYTESWAP (ACE_HASH_OFFSET + 0x0C)
-#define ACE_HASH_STATUS (ACE_HASH_OFFSET + 0x10)
-#define ACE_HASH_MSGSIZE_LOW (ACE_HASH_OFFSET + 0x20)
-#define ACE_HASH_MSGSIZE_HIGH (ACE_HASH_OFFSET + 0x24)
-#define ACE_HASH_PRELEN_LOW (ACE_HASH_OFFSET + 0x28)
-#define ACE_HASH_PRELEN_HIGH (ACE_HASH_OFFSET + 0x2C)
-
-#define ACE_HASH_IN1 (ACE_HASH_OFFSET + 0x30)
-#define ACE_HASH_IN2 (ACE_HASH_OFFSET + 0x34)
-#define ACE_HASH_IN3 (ACE_HASH_OFFSET + 0x38)
-#define ACE_HASH_IN4 (ACE_HASH_OFFSET + 0x3C)
-#define ACE_HASH_IN5 (ACE_HASH_OFFSET + 0x40)
-#define ACE_HASH_IN6 (ACE_HASH_OFFSET + 0x44)
-#define ACE_HASH_IN7 (ACE_HASH_OFFSET + 0x48)
-#define ACE_HASH_IN8 (ACE_HASH_OFFSET + 0x4C)
-#define ACE_HASH_IN9 (ACE_HASH_OFFSET + 0x50)
-#define ACE_HASH_IN10 (ACE_HASH_OFFSET + 0x54)
-#define ACE_HASH_IN11 (ACE_HASH_OFFSET + 0x58)
-#define ACE_HASH_IN12 (ACE_HASH_OFFSET + 0x5C)
-#define ACE_HASH_IN13 (ACE_HASH_OFFSET + 0x60)
-#define ACE_HASH_IN14 (ACE_HASH_OFFSET + 0x64)
-#define ACE_HASH_IN15 (ACE_HASH_OFFSET + 0x68)
-#define ACE_HASH_IN16 (ACE_HASH_OFFSET + 0x6C)
-
-#define ACE_HASH_HMAC_KEY_IN1 (ACE_HASH_OFFSET + 0x70)
-#define ACE_HASH_HMAC_KEY_IN2 (ACE_HASH_OFFSET + 0x74)
-#define ACE_HASH_HMAC_KEY_IN3 (ACE_HASH_OFFSET + 0x78)
-#define ACE_HASH_HMAC_KEY_IN4 (ACE_HASH_OFFSET + 0x7C)
-#define ACE_HASH_HMAC_KEY_IN5 (ACE_HASH_OFFSET + 0x80)
-#define ACE_HASH_HMAC_KEY_IN6 (ACE_HASH_OFFSET + 0x84)
-#define ACE_HASH_HMAC_KEY_IN7 (ACE_HASH_OFFSET + 0x88)
-#define ACE_HASH_HMAC_KEY_IN8 (ACE_HASH_OFFSET + 0x8C)
-#define ACE_HASH_HMAC_KEY_IN9 (ACE_HASH_OFFSET + 0x90)
-#define ACE_HASH_HMAC_KEY_IN10 (ACE_HASH_OFFSET + 0x94)
-#define ACE_HASH_HMAC_KEY_IN11 (ACE_HASH_OFFSET + 0x98)
-#define ACE_HASH_HMAC_KEY_IN12 (ACE_HASH_OFFSET + 0x9C)
-#define ACE_HASH_HMAC_KEY_IN13 (ACE_HASH_OFFSET + 0xA0)
-#define ACE_HASH_HMAC_KEY_IN14 (ACE_HASH_OFFSET + 0xA4)
-#define ACE_HASH_HMAC_KEY_IN15 (ACE_HASH_OFFSET + 0xA8)
-#define ACE_HASH_HMAC_KEY_IN16 (ACE_HASH_OFFSET + 0xAC)
-
-#define ACE_HASH_IV1 (ACE_HASH_OFFSET + 0xB0)
-#define ACE_HASH_IV2 (ACE_HASH_OFFSET + 0xB4)
-#define ACE_HASH_IV3 (ACE_HASH_OFFSET + 0xB8)
-#define ACE_HASH_IV4 (ACE_HASH_OFFSET + 0xBC)
-#define ACE_HASH_IV5 (ACE_HASH_OFFSET + 0xC0)
-#define ACE_HASH_IV6 (ACE_HASH_OFFSET + 0xC4)
-#define ACE_HASH_IV7 (ACE_HASH_OFFSET + 0xC8)
-#define ACE_HASH_IV8 (ACE_HASH_OFFSET + 0xCC)
-
-#define ACE_HASH_RESULT1 (ACE_HASH_OFFSET + 0x100)
-#define ACE_HASH_RESULT2 (ACE_HASH_OFFSET + 0x104)
-#define ACE_HASH_RESULT3 (ACE_HASH_OFFSET + 0x108)
-#define ACE_HASH_RESULT4 (ACE_HASH_OFFSET + 0x10C)
-#define ACE_HASH_RESULT5 (ACE_HASH_OFFSET + 0x110)
-#define ACE_HASH_RESULT6 (ACE_HASH_OFFSET + 0x114)
-#define ACE_HASH_RESULT7 (ACE_HASH_OFFSET + 0x118)
-#define ACE_HASH_RESULT8 (ACE_HASH_OFFSET + 0x11C)
-
-#define ACE_HASH_SEED1 (ACE_HASH_OFFSET + 0x140)
-#define ACE_HASH_SEED2 (ACE_HASH_OFFSET + 0x144)
-#define ACE_HASH_SEED3 (ACE_HASH_OFFSET + 0x148)
-#define ACE_HASH_SEED4 (ACE_HASH_OFFSET + 0x14C)
-#define ACE_HASH_SEED5 (ACE_HASH_OFFSET + 0x150)
-
-#define ACE_HASH_PRNG1 (ACE_HASH_OFFSET + 0x160)
-#define ACE_HASH_PRNG2 (ACE_HASH_OFFSET + 0x164)
-#define ACE_HASH_PRNG3 (ACE_HASH_OFFSET + 0x168)
-#define ACE_HASH_PRNG4 (ACE_HASH_OFFSET + 0x16C)
-#define ACE_HASH_PRNG5 (ACE_HASH_OFFSET + 0x170)
-#endif
-
-/* PKA control registers */
-#define ACE_PKA_SFR0 (ACE_PKA_OFFSET + 0x00)
-#define ACE_PKA_SFR1 (ACE_PKA_OFFSET + 0x04)
-#define ACE_PKA_SFR2 (ACE_PKA_OFFSET + 0x08)
-#define ACE_PKA_SFR3 (ACE_PKA_OFFSET + 0x0C)
-#define ACE_PKA_SFR4 (ACE_PKA_OFFSET + 0x10)
-
-
-/*****************************************************************
- OFFSET
-*****************************************************************/
-
-/* ACE_FC_INT */
-#define ACE_FC_PKDMA (1 << 0)
-#define ACE_FC_HRDMA (1 << 1)
-#define ACE_FC_BTDMA (1 << 2)
-#define ACE_FC_BRDMA (1 << 3)
-#define ACE_FC_PRNG_ERROR (1 << 4)
-#define ACE_FC_MSG_DONE (1 << 5)
-#define ACE_FC_PRNG_DONE (1 << 6)
-#define ACE_FC_PARTIAL_DONE (1 << 7)
-
-/* ACE_FC_FIFOSTAT */
-#define ACE_FC_PKFIFO_EMPTY (1 << 0)
-#define ACE_FC_PKFIFO_FULL (1 << 1)
-#define ACE_FC_HRFIFO_EMPTY (1 << 2)
-#define ACE_FC_HRFIFO_FULL (1 << 3)
-#define ACE_FC_BTFIFO_EMPTY (1 << 4)
-#define ACE_FC_BTFIFO_FULL (1 << 5)
-#define ACE_FC_BRFIFO_EMPTY (1 << 6)
-#define ACE_FC_BRFIFO_FULL (1 << 7)
-
-/* ACE_FC_FIFOCTRL */
-#define ACE_FC_SELHASH_MASK (3 << 0)
-#define ACE_FC_SELHASH_EXOUT (0 << 0) /*independent source*/
-#define ACE_FC_SELHASH_BCIN (1 << 0) /*block cipher input*/
-#define ACE_FC_SELHASH_BCOUT (2 << 0) /*block cipher output*/
-#define ACE_FC_SELBC_MASK (1 << 2)
-#define ACE_FC_SELBC_AES (0 << 2) /* AES */
-#define ACE_FC_SELBC_DES (1 << 2) /* DES */
-
-/* ACE_FC_GLOBAL */
-#define ACE_FC_SSS_RESET (1 << 0)
-#define ACE_FC_DMA_RESET (1 << 1)
-#define ACE_FC_AES_RESET (1 << 2)
-#define ACE_FC_DES_RESET (1 << 3)
-#define ACE_FC_HASH_RESET (1 << 4)
-#define ACE_FC_AXI_ENDIAN_MASK (3 << 6)
-#define ACE_FC_AXI_ENDIAN_LE (0 << 6)
-#define ACE_FC_AXI_ENDIAN_BIBE (1 << 6)
-#define ACE_FC_AXI_ENDIAN_WIBE (2 << 6)
-
-/* Feed control - BRDMA control */
-#define ACE_FC_BRDMACFLUSH_OFF (0 << 0)
-#define ACE_FC_BRDMACFLUSH_ON (1 << 0)
-#define ACE_FC_BRDMACSWAP_ON (1 << 1)
-#define ACE_FC_BRDMACARPROT_MASK (0x7 << 2)
-#define ACE_FC_BRDMACARPROT_OFS (2)
-#define ACE_FC_BRDMACARCACHE_MASK (0xF << 5)
-#define ACE_FC_BRDMACARCACHE_OFS (5)
-
-/* Feed control - BTDMA control */
-#define ACE_FC_BTDMACFLUSH_OFF (0 << 0)
-#define ACE_FC_BTDMACFLUSH_ON (1 << 0)
-#define ACE_FC_BTDMACSWAP_ON (1 << 1)
-#define ACE_FC_BTDMACAWPROT_MASK (0x7 << 2)
-#define ACE_FC_BTDMACAWPROT_OFS (2)
-#define ACE_FC_BTDMACAWCACHE_MASK (0xF << 5)
-#define ACE_FC_BTDMACAWCACHE_OFS (5)
-
-/* Feed control - HRDMA control */
-#define ACE_FC_HRDMACFLUSH_OFF (0 << 0)
-#define ACE_FC_HRDMACFLUSH_ON (1 << 0)
-#define ACE_FC_HRDMACSWAP_ON (1 << 1)
-#define ACE_FC_HRDMACARPROT_MASK (0x7 << 2)
-#define ACE_FC_HRDMACARPROT_OFS (2)
-#define ACE_FC_HRDMACARCACHE_MASK (0xF << 5)
-#define ACE_FC_HRDMACARCACHE_OFS (5)
-
-/* Feed control - PKDMA control */
-#define ACE_FC_PKDMACBYTESWAP_ON (1 << 3)
-#define ACE_FC_PKDMACDESEND_ON (1 << 2)
-#define ACE_FC_PKDMACTRANSMIT_ON (1 << 1)
-#define ACE_FC_PKDMACFLUSH_ON (1 << 0)
-
-/* Feed control - PKDMA offset */
-#define ACE_FC_SRAMOFFSET_MASK (0xFFF)
-
-/* AES control */
-#define ACE_AES_MODE_MASK (1 << 0)
-#define ACE_AES_MODE_ENC (0 << 0)
-#define ACE_AES_MODE_DEC (1 << 0)
-#define ACE_AES_OPERMODE_MASK (3 << 1)
-#define ACE_AES_OPERMODE_ECB (0 << 1)
-#define ACE_AES_OPERMODE_CBC (1 << 1)
-#define ACE_AES_OPERMODE_CTR (2 << 1)
-#define ACE_AES_FIFO_MASK (1 << 3)
-#define ACE_AES_FIFO_OFF (0 << 3) /* CPU mode */
-#define ACE_AES_FIFO_ON (1 << 3) /* FIFO mode */
-#define ACE_AES_KEYSIZE_MASK (3 << 4)
-#define ACE_AES_KEYSIZE_128 (0 << 4)
-#define ACE_AES_KEYSIZE_192 (1 << 4)
-#define ACE_AES_KEYSIZE_256 (2 << 4)
-#define ACE_AES_KEYCNGMODE_MASK (1 << 6)
-#define ACE_AES_KEYCNGMODE_OFF (0 << 6)
-#define ACE_AES_KEYCNGMODE_ON (1 << 6)
-#define ACE_AES_SWAP_MASK (0x1F << 7)
-#define ACE_AES_SWAPKEY_OFF (0 << 7)
-#define ACE_AES_SWAPKEY_ON (1 << 7)
-#define ACE_AES_SWAPCNT_OFF (0 << 8)
-#define ACE_AES_SWAPCNT_ON (1 << 8)
-#define ACE_AES_SWAPIV_OFF (0 << 9)
-#define ACE_AES_SWAPIV_ON (1 << 9)
-#define ACE_AES_SWAPDO_OFF (0 << 10)
-#define ACE_AES_SWAPDO_ON (1 << 10)
-#define ACE_AES_SWAPDI_OFF (0 << 11)
-#define ACE_AES_SWAPDI_ON (1 << 11)
-#define ACE_AES_COUNTERSIZE_MASK (3 << 12)
-#define ACE_AES_COUNTERSIZE_128 (0 << 12)
-#define ACE_AES_COUNTERSIZE_64 (1 << 12)
-#define ACE_AES_COUNTERSIZE_32 (2 << 12)
-#define ACE_AES_COUNTERSIZE_16 (3 << 12)
-
-/* AES status */
-#define ACE_AES_OUTRDY_MASK (1 << 0)
-#define ACE_AES_OUTRDY_OFF (0 << 0)
-#define ACE_AES_OUTRDY_ON (1 << 0)
-#define ACE_AES_INRDY_MASK (1 << 1)
-#define ACE_AES_INRDY_OFF (0 << 1)
-#define ACE_AES_INRDY_ON (1 << 1)
-#define ACE_AES_BUSY_MASK (1 << 2)
-#define ACE_AES_BUSY_OFF (0 << 2)
-#define ACE_AES_BUSY_ON (1 << 2)
-
-/* TDES control */
-#define ACE_TDES_MODE_MASK (1 << 0)
-#define ACE_TDES_MODE_ENC (0 << 0)
-#define ACE_TDES_MODE_DEC (1 << 0)
-#define ACE_TDES_OPERMODE_MASK (1 << 1)
-#define ACE_TDES_OPERMODE_ECB (0 << 1)
-#define ACE_TDES_OPERMODE_CBC (1 << 1)
-#define ACE_TDES_SEL_MASK (3 << 3)
-#define ACE_TDES_SEL_DES (0 << 3)
-#define ACE_TDES_SEL_TDESEDE (1 << 3) /* TDES EDE mode */
-#define ACE_TDES_SEL_TDESEEE (3 << 3) /* TDES EEE mode */
-#define ACE_TDES_FIFO_MASK (1 << 5)
-#define ACE_TDES_FIFO_OFF (0 << 5) /* CPU mode */
-#define ACE_TDES_FIFO_ON (1 << 5) /* FIFO mode */
-#define ACE_TDES_SWAP_MASK (0xF << 6)
-#define ACE_TDES_SWAPKEY_OFF (0 << 6)
-#define ACE_TDES_SWAPKEY_ON (1 << 6)
-#define ACE_TDES_SWAPIV_OFF (0 << 7)
-#define ACE_TDES_SWAPIV_ON (1 << 7)
-#define ACE_TDES_SWAPDO_OFF (0 << 8)
-#define ACE_TDES_SWAPDO_ON (1 << 8)
-#define ACE_TDES_SWAPDI_OFF (0 << 9)
-#define ACE_TDES_SWAPDI_ON (1 << 9)
-
-/* TDES status */
-#define ACE_TDES_OUTRDY_MASK (1 << 0)
-#define ACE_TDES_OUTRDY_OFF (0 << 0)
-#define ACE_TDES_OUTRDY_ON (1 << 0)
-#define ACE_TDES_INRDY_MASK (1 << 1)
-#define ACE_TDES_INRDY_OFF (0 << 1)
-#define ACE_TDES_INRDY_ON (1 << 1)
-#define ACE_TDES_BUSY_MASK (1 << 2)
-#define ACE_TDES_BUSY_OFF (0 << 2)
-#define ACE_TDES_BUSY_ON (1 << 2)
-
-/* Hash control */
-#define ACE_HASH_ENGSEL_MASK (0xF << 0)
-#define ACE_HASH_ENGSEL_SHA1HASH (0x0 << 0)
-#define ACE_HASH_ENGSEL_SHA1HMAC (0x1 << 0)
-#define ACE_HASH_ENGSEL_SHA1HMACIN (0x1 << 0)
-#define ACE_HASH_ENGSEL_SHA1HMACOUT (0x9 << 0)
-#define ACE_HASH_ENGSEL_MD5HASH (0x2 << 0)
-#define ACE_HASH_ENGSEL_MD5HMAC (0x3 << 0)
-#define ACE_HASH_ENGSEL_MD5HMACIN (0x3 << 0)
-#define ACE_HASH_ENGSEL_MD5HMACOUT (0xB << 0)
-#define ACE_HASH_ENGSEL_SHA256HASH (0x4 << 0)
-#define ACE_HASH_ENGSEL_SHA256HMAC (0x5 << 0)
-#if defined(CONFIG_ARCH_S5PV210)
-#define ACE_HASH_ENGSEL_PRNG (0x4 << 0)
-#elif defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5)
-#define ACE_HASH_ENGSEL_PRNG (0x8 << 0)
-#endif
-#define ACE_HASH_STARTBIT_ON (1 << 4)
-#define ACE_HASH_USERIV_EN (1 << 5)
-
-/* Hash control 2 */
-#if defined(CONFIG_ARCH_S5PV210)
-#define ACE_HASH_PAUSE_ON (1 << 3)
-#elif defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5)
-#define ACE_HASH_PAUSE_ON (1 << 0)
-#endif
-
-/* Hash control - FIFO mode */
-#define ACE_HASH_FIFO_MASK (1 << 0)
-#define ACE_HASH_FIFO_OFF (0 << 0)
-#define ACE_HASH_FIFO_ON (1 << 0)
-
-/* Hash control - byte swap */
-#if defined(CONFIG_ARCH_S5PV210)
-#define ACE_HASH_SWAP_MASK (0x7 << 1)
-#elif defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5)
-#define ACE_HASH_SWAP_MASK (0xF << 0)
-#endif
-#define ACE_HASH_SWAPKEY_OFF (0 << 0)
-#define ACE_HASH_SWAPKEY_ON (1 << 0)
-#define ACE_HASH_SWAPIV_OFF (0 << 1)
-#define ACE_HASH_SWAPIV_ON (1 << 1)
-#define ACE_HASH_SWAPDO_OFF (0 << 2)
-#define ACE_HASH_SWAPDO_ON (1 << 2)
-#define ACE_HASH_SWAPDI_OFF (0 << 3)
-#define ACE_HASH_SWAPDI_ON (1 << 3)
-
-/* Hash status */
-#define ACE_HASH_BUFRDY_MASK (1 << 0)
-#define ACE_HASH_BUFRDY_OFF (0 << 0)
-#define ACE_HASH_BUFRDY_ON (1 << 0)
-#define ACE_HASH_SEEDSETTING_MASK (1 << 1)
-#define ACE_HASH_SEEDSETTING_OFF (0 << 1)
-#define ACE_HASH_SEEDSETTING_ON (1 << 1)
-#define ACE_HASH_PRNGBUSY_MASK (1 << 2)
-#define ACE_HASH_PRNGBUSY_OFF (0 << 2)
-#define ACE_HASH_PRNGBUSY_ON (1 << 2)
-#define ACE_HASH_PARTIALDONE_MASK (1 << 4)
-#define ACE_HASH_PARTIALDONE_OFF (0 << 4)
-#define ACE_HASH_PARTIALDONE_ON (1 << 4)
-#define ACE_HASH_PRNGDONE_MASK (1 << 5)
-#define ACE_HASH_PRNGDONE_OFF (0 << 5)
-#define ACE_HASH_PRNGDONE_ON (1 << 5)
-#define ACE_HASH_MSGDONE_MASK (1 << 6)
-#define ACE_HASH_MSGDONE_OFF (0 << 6)
-#define ACE_HASH_MSGDONE_ON (1 << 6)
-#define ACE_HASH_PRNGERROR_MASK (1 << 7)
-#define ACE_HASH_PRNGERROR_OFF (0 << 7)
-#define ACE_HASH_PRNGERROR_ON (1 << 7)
-
-/* To Do: SFRs for PKA */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 1891252..1d103f9 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -51,6 +51,7 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
union ce_io_threshold io_threshold;
u32 rand_num;
union ce_pe_dma_cfg pe_dma_cfg;
+ u32 device_ctrl;
writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
/* setup pe dma, include reset sg, pdr and pe, then release reset */
@@ -84,7 +85,9 @@ static void crypto4xx_hw_init(struct crypto4xx_device *dev)
writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
ring_ctrl.w = 0;
writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
- writel(PPC4XX_DC_3DES_EN, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+ device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
+ device_ctrl |= PPC4XX_DC_3DES_EN;
+ writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
part_ring_size.w = 0;
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index a84250a..fe765f4 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -2744,10 +2744,8 @@ static int __init hifn_init(void)
unsigned int freq;
int err;
- if (sizeof(dma_addr_t) > 4) {
- printk(KERN_INFO "HIFN supports only 32-bit addresses.\n");
- return -EINVAL;
- }
+ /* HIFN supports only 32-bit addresses */
+ BUILD_BUG_ON(sizeof(dma_addr_t) != 4);
if (strncmp(hifn_pll_ref, "ext", 3) &&
strncmp(hifn_pll_ref, "pci", 3)) {
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 4c20c5b..8e9a8f0 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -914,7 +914,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
crypt->mode |= NPE_OP_NOT_IN_PLACE;
/* This was never tested by Intel
* for more than one dst buffer, I think. */
- BUG_ON(req->dst->length < nbytes);
req_ctx->dst = NULL;
if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
flags, DMA_FROM_DEVICE))
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index f53dd83..fe79635 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index 2e5b204..8944dab 100644
--- a/drivers/crypto/n2_core.c
+++ b/drivers/crypto/n2_core.c
@@ -1,6 +1,6 @@
/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
*
- * Copyright (C) 2010 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -31,8 +31,8 @@
#include "n2_core.h"
#define DRV_MODULE_NAME "n2_crypto"
-#define DRV_MODULE_VERSION "0.1"
-#define DRV_MODULE_RELDATE "April 29, 2010"
+#define DRV_MODULE_VERSION "0.2"
+#define DRV_MODULE_RELDATE "July 28, 2011"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -1006,9 +1006,9 @@ static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
spin_unlock_irqrestore(&qp->lock, flags);
+out:
put_cpu();
-out:
n2_chunk_complete(req, NULL);
return err;
}
@@ -1096,9 +1096,9 @@ static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
spin_unlock_irqrestore(&qp->lock, flags);
+out:
put_cpu();
-out:
n2_chunk_complete(req, err ? NULL : final_iv_addr);
return err;
}
@@ -1823,22 +1823,17 @@ static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *de
static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
struct spu_mdesc_info *ip)
{
- const u64 *intr, *ino;
- int intr_len, ino_len;
+ const u64 *ino;
+ int ino_len;
int i;
- intr = mdesc_get_property(mdesc, node, "intr", &intr_len);
- if (!intr)
- return -ENODEV;
-
ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
- if (!ino)
+ if (!ino) {
+ printk("NO 'ino'\n");
return -ENODEV;
+ }
- if (intr_len != ino_len)
- return -EINVAL;
-
- ip->num_intrs = intr_len / sizeof(u64);
+ ip->num_intrs = ino_len / sizeof(u64);
ip->ino_table = kzalloc((sizeof(struct ino_blob) *
ip->num_intrs),
GFP_KERNEL);
@@ -1847,7 +1842,7 @@ static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
for (i = 0; i < ip->num_intrs; i++) {
struct ino_blob *b = &ip->ino_table[i];
- b->intr = intr[i];
+ b->intr = i + 1;
b->ino = ino[i];
}
@@ -2204,6 +2199,10 @@ static struct of_device_id n2_crypto_match[] = {
.name = "n2cp",
.compatible = "SUNW,vf-cwq",
},
+ {
+ .name = "n2cp",
+ .compatible = "SUNW,kt-cwq",
+ },
{},
};
@@ -2228,6 +2227,10 @@ static struct of_device_id n2_mau_match[] = {
.name = "ncp",
.compatible = "SUNW,vf-mau",
},
+ {
+ .name = "ncp",
+ .compatible = "SUNW,kt-mau",
+ },
{},
};
diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
index ba8f1ea..6399a8f 100644
--- a/drivers/crypto/omap-sham.c
+++ b/drivers/crypto/omap-sham.c
@@ -72,17 +72,20 @@
#define DEFAULT_TIMEOUT_INTERVAL HZ
-#define FLAGS_FINUP 0x0002
-#define FLAGS_FINAL 0x0004
-#define FLAGS_SG 0x0008
-#define FLAGS_SHA1 0x0010
-#define FLAGS_DMA_ACTIVE 0x0020
-#define FLAGS_OUTPUT_READY 0x0040
-#define FLAGS_INIT 0x0100
-#define FLAGS_CPU 0x0200
-#define FLAGS_HMAC 0x0400
-#define FLAGS_ERROR 0x0800
-#define FLAGS_BUSY 0x1000
+/* mostly device flags */
+#define FLAGS_BUSY 0
+#define FLAGS_FINAL 1
+#define FLAGS_DMA_ACTIVE 2
+#define FLAGS_OUTPUT_READY 3
+#define FLAGS_INIT 4
+#define FLAGS_CPU 5
+#define FLAGS_DMA_READY 6
+/* context flags */
+#define FLAGS_FINUP 16
+#define FLAGS_SG 17
+#define FLAGS_SHA1 18
+#define FLAGS_HMAC 19
+#define FLAGS_ERROR 20
#define OP_UPDATE 1
#define OP_FINAL 2
@@ -144,7 +147,6 @@ struct omap_sham_dev {
int dma;
int dma_lch;
struct tasklet_struct done_task;
- struct tasklet_struct queue_task;
unsigned long flags;
struct crypto_queue queue;
@@ -223,7 +225,7 @@ static void omap_sham_copy_ready_hash(struct ahash_request *req)
if (!hash)
return;
- if (likely(ctx->flags & FLAGS_SHA1)) {
+ if (likely(ctx->flags & BIT(FLAGS_SHA1))) {
/* SHA1 results are in big endian */
for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
hash[i] = be32_to_cpu(in[i]);
@@ -238,7 +240,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
clk_enable(dd->iclk);
- if (!(dd->flags & FLAGS_INIT)) {
+ if (!test_bit(FLAGS_INIT, &dd->flags)) {
omap_sham_write_mask(dd, SHA_REG_MASK,
SHA_REG_MASK_SOFTRESET, SHA_REG_MASK_SOFTRESET);
@@ -246,7 +248,7 @@ static int omap_sham_hw_init(struct omap_sham_dev *dd)
SHA_REG_SYSSTATUS_RESETDONE))
return -ETIMEDOUT;
- dd->flags |= FLAGS_INIT;
+ set_bit(FLAGS_INIT, &dd->flags);
dd->err = 0;
}
@@ -269,7 +271,7 @@ static void omap_sham_write_ctrl(struct omap_sham_dev *dd, size_t length,
* Setting ALGO_CONST only for the first iteration
* and CLOSE_HASH only for the last one.
*/
- if (ctx->flags & FLAGS_SHA1)
+ if (ctx->flags & BIT(FLAGS_SHA1))
val |= SHA_REG_CTRL_ALGO;
if (!ctx->digcnt)
val |= SHA_REG_CTRL_ALGO_CONST;
@@ -301,7 +303,9 @@ static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, const u8 *buf,
return -ETIMEDOUT;
if (final)
- ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+ set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
+
+ set_bit(FLAGS_CPU, &dd->flags);
len32 = DIV_ROUND_UP(length, sizeof(u32));
@@ -334,9 +338,9 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
ctx->digcnt += length;
if (final)
- ctx->flags |= FLAGS_FINAL; /* catch last interrupt */
+ set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */
- dd->flags |= FLAGS_DMA_ACTIVE;
+ set_bit(FLAGS_DMA_ACTIVE, &dd->flags);
omap_start_dma(dd->dma_lch);
@@ -392,7 +396,7 @@ static int omap_sham_xmit_dma_map(struct omap_sham_dev *dd,
return -EINVAL;
}
- ctx->flags &= ~FLAGS_SG;
+ ctx->flags &= ~BIT(FLAGS_SG);
/* next call does not fail... so no unmap in the case of error */
return omap_sham_xmit_dma(dd, ctx->dma_addr, length, final);
@@ -406,7 +410,7 @@ static int omap_sham_update_dma_slow(struct omap_sham_dev *dd)
omap_sham_append_sg(ctx);
- final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+ final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
dev_dbg(dd->dev, "slow: bufcnt: %u, digcnt: %d, final: %d\n",
ctx->bufcnt, ctx->digcnt, final);
@@ -452,7 +456,7 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
length = min(ctx->total, sg->length);
if (sg_is_last(sg)) {
- if (!(ctx->flags & FLAGS_FINUP)) {
+ if (!(ctx->flags & BIT(FLAGS_FINUP))) {
/* not last sg must be SHA1_MD5_BLOCK_SIZE aligned */
tail = length & (SHA1_MD5_BLOCK_SIZE - 1);
/* without finup() we need one block to close hash */
@@ -467,12 +471,12 @@ static int omap_sham_update_dma_start(struct omap_sham_dev *dd)
return -EINVAL;
}
- ctx->flags |= FLAGS_SG;
+ ctx->flags |= BIT(FLAGS_SG);
ctx->total -= length;
ctx->offset = length; /* offset where to start slow */
- final = (ctx->flags & FLAGS_FINUP) && !ctx->total;
+ final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total;
/* next call does not fail... so no unmap in the case of error */
return omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final);
@@ -495,7 +499,7 @@ static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
omap_stop_dma(dd->dma_lch);
- if (ctx->flags & FLAGS_SG) {
+ if (ctx->flags & BIT(FLAGS_SG)) {
dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
if (ctx->sg->length == ctx->offset) {
ctx->sg = sg_next(ctx->sg);
@@ -537,18 +541,18 @@ static int omap_sham_init(struct ahash_request *req)
crypto_ahash_digestsize(tfm));
if (crypto_ahash_digestsize(tfm) == SHA1_DIGEST_SIZE)
- ctx->flags |= FLAGS_SHA1;
+ ctx->flags |= BIT(FLAGS_SHA1);
ctx->bufcnt = 0;
ctx->digcnt = 0;
ctx->buflen = BUFLEN;
- if (tctx->flags & FLAGS_HMAC) {
+ if (tctx->flags & BIT(FLAGS_HMAC)) {
struct omap_sham_hmac_ctx *bctx = tctx->base;
memcpy(ctx->buffer, bctx->ipad, SHA1_MD5_BLOCK_SIZE);
ctx->bufcnt = SHA1_MD5_BLOCK_SIZE;
- ctx->flags |= FLAGS_HMAC;
+ ctx->flags |= BIT(FLAGS_HMAC);
}
return 0;
@@ -562,9 +566,9 @@ static int omap_sham_update_req(struct omap_sham_dev *dd)
int err;
dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
- ctx->total, ctx->digcnt, (ctx->flags & FLAGS_FINUP) != 0);
+ ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);
- if (ctx->flags & FLAGS_CPU)
+ if (ctx->flags & BIT(FLAGS_CPU))
err = omap_sham_update_cpu(dd);
else
err = omap_sham_update_dma_start(dd);
@@ -624,7 +628,7 @@ static int omap_sham_finish(struct ahash_request *req)
if (ctx->digcnt) {
omap_sham_copy_ready_hash(req);
- if (ctx->flags & FLAGS_HMAC)
+ if (ctx->flags & BIT(FLAGS_HMAC))
err = omap_sham_finish_hmac(req);
}
@@ -639,18 +643,23 @@ static void omap_sham_finish_req(struct ahash_request *req, int err)
struct omap_sham_dev *dd = ctx->dd;
if (!err) {
- omap_sham_copy_hash(ctx->dd->req, 1);
- if (ctx->flags & FLAGS_FINAL)
+ omap_sham_copy_hash(req, 1);
+ if (test_bit(FLAGS_FINAL, &dd->flags))
err = omap_sham_finish(req);
} else {
- ctx->flags |= FLAGS_ERROR;
+ ctx->flags |= BIT(FLAGS_ERROR);
}
+ /* atomic operation is not needed here */
+ dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
+ BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));
clk_disable(dd->iclk);
- dd->flags &= ~FLAGS_BUSY;
if (req->base.complete)
req->base.complete(&req->base, err);
+
+ /* handle new request */
+ tasklet_schedule(&dd->done_task);
}
static int omap_sham_handle_queue(struct omap_sham_dev *dd,
@@ -658,21 +667,20 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
{
struct crypto_async_request *async_req, *backlog;
struct omap_sham_reqctx *ctx;
- struct ahash_request *prev_req;
unsigned long flags;
int err = 0, ret = 0;
spin_lock_irqsave(&dd->lock, flags);
if (req)
ret = ahash_enqueue_request(&dd->queue, req);
- if (dd->flags & FLAGS_BUSY) {
+ if (test_bit(FLAGS_BUSY, &dd->flags)) {
spin_unlock_irqrestore(&dd->lock, flags);
return ret;
}
backlog = crypto_get_backlog(&dd->queue);
async_req = crypto_dequeue_request(&dd->queue);
if (async_req)
- dd->flags |= FLAGS_BUSY;
+ set_bit(FLAGS_BUSY, &dd->flags);
spin_unlock_irqrestore(&dd->lock, flags);
if (!async_req)
@@ -682,16 +690,12 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
backlog->complete(backlog, -EINPROGRESS);
req = ahash_request_cast(async_req);
-
- prev_req = dd->req;
dd->req = req;
-
ctx = ahash_request_ctx(req);
dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
ctx->op, req->nbytes);
-
err = omap_sham_hw_init(dd);
if (err)
goto err1;
@@ -712,18 +716,16 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd,
if (ctx->op == OP_UPDATE) {
err = omap_sham_update_req(dd);
- if (err != -EINPROGRESS && (ctx->flags & FLAGS_FINUP))
+ if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
/* no final() after finup() */
err = omap_sham_final_req(dd);
} else if (ctx->op == OP_FINAL) {
err = omap_sham_final_req(dd);
}
err1:
- if (err != -EINPROGRESS) {
+ if (err != -EINPROGRESS)
/* done_task will not finish it, so do it here */
omap_sham_finish_req(req, err);
- tasklet_schedule(&dd->queue_task);
- }
dev_dbg(dd->dev, "exit, err: %d\n", err);
@@ -752,7 +754,7 @@ static int omap_sham_update(struct ahash_request *req)
ctx->sg = req->src;
ctx->offset = 0;
- if (ctx->flags & FLAGS_FINUP) {
+ if (ctx->flags & BIT(FLAGS_FINUP)) {
if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) {
/*
* OMAP HW accel works only with buffers >= 9
@@ -765,7 +767,7 @@ static int omap_sham_update(struct ahash_request *req)
/*
* faster to use CPU for short transfers
*/
- ctx->flags |= FLAGS_CPU;
+ ctx->flags |= BIT(FLAGS_CPU);
}
} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
omap_sham_append_sg(ctx);
@@ -802,9 +804,9 @@ static int omap_sham_final(struct ahash_request *req)
{
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
- ctx->flags |= FLAGS_FINUP;
+ ctx->flags |= BIT(FLAGS_FINUP);
- if (ctx->flags & FLAGS_ERROR)
+ if (ctx->flags & BIT(FLAGS_ERROR))
return 0; /* uncompleted hash is not needed */
/* OMAP HW accel works only with buffers >= 9 */
@@ -823,7 +825,7 @@ static int omap_sham_finup(struct ahash_request *req)
struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
int err1, err2;
- ctx->flags |= FLAGS_FINUP;
+ ctx->flags |= BIT(FLAGS_FINUP);
err1 = omap_sham_update(req);
if (err1 == -EINPROGRESS || err1 == -EBUSY)
@@ -895,7 +897,7 @@ static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
if (alg_base) {
struct omap_sham_hmac_ctx *bctx = tctx->base;
- tctx->flags |= FLAGS_HMAC;
+ tctx->flags |= BIT(FLAGS_HMAC);
bctx->shash = crypto_alloc_shash(alg_base, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(bctx->shash)) {
@@ -932,7 +934,7 @@ static void omap_sham_cra_exit(struct crypto_tfm *tfm)
crypto_free_shash(tctx->fallback);
tctx->fallback = NULL;
- if (tctx->flags & FLAGS_HMAC) {
+ if (tctx->flags & BIT(FLAGS_HMAC)) {
struct omap_sham_hmac_ctx *bctx = tctx->base;
crypto_free_shash(bctx->shash);
}
@@ -1036,51 +1038,46 @@ static struct ahash_alg algs[] = {
static void omap_sham_done_task(unsigned long data)
{
struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
- struct ahash_request *req = dd->req;
- struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
- int ready = 0, err = 0;
+ int err = 0;
- if (ctx->flags & FLAGS_OUTPUT_READY) {
- ctx->flags &= ~FLAGS_OUTPUT_READY;
- ready = 1;
+ if (!test_bit(FLAGS_BUSY, &dd->flags)) {
+ omap_sham_handle_queue(dd, NULL);
+ return;
}
- if (dd->flags & FLAGS_DMA_ACTIVE) {
- dd->flags &= ~FLAGS_DMA_ACTIVE;
- omap_sham_update_dma_stop(dd);
- if (!dd->err)
+ if (test_bit(FLAGS_CPU, &dd->flags)) {
+ if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
+ goto finish;
+ } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
+ if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
+ omap_sham_update_dma_stop(dd);
+ if (dd->err) {
+ err = dd->err;
+ goto finish;
+ }
+ }
+ if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
+ /* hash or semi-hash ready */
+ clear_bit(FLAGS_DMA_READY, &dd->flags);
err = omap_sham_update_dma_start(dd);
+ if (err != -EINPROGRESS)
+ goto finish;
+ }
}
- err = dd->err ? : err;
-
- if (err != -EINPROGRESS && (ready || err)) {
- dev_dbg(dd->dev, "update done: err: %d\n", err);
- /* finish curent request */
- omap_sham_finish_req(req, err);
- /* start new request */
- omap_sham_handle_queue(dd, NULL);
- }
-}
-
-static void omap_sham_queue_task(unsigned long data)
-{
- struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
+ return;
- omap_sham_handle_queue(dd, NULL);
+finish:
+ dev_dbg(dd->dev, "update done: err: %d\n", err);
+ /* finish current request */
+ omap_sham_finish_req(dd->req, err);
}
static irqreturn_t omap_sham_irq(int irq, void *dev_id)
{
struct omap_sham_dev *dd = dev_id;
- struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
-
- if (!ctx) {
- dev_err(dd->dev, "unknown interrupt.\n");
- return IRQ_HANDLED;
- }
- if (unlikely(ctx->flags & FLAGS_FINAL))
+ if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
/* final -> allow device to go to power-saving mode */
omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);
@@ -1088,8 +1085,12 @@ static irqreturn_t omap_sham_irq(int irq, void *dev_id)
SHA_REG_CTRL_OUTPUT_READY);
omap_sham_read(dd, SHA_REG_CTRL);
- ctx->flags |= FLAGS_OUTPUT_READY;
- dd->err = 0;
+ if (!test_bit(FLAGS_BUSY, &dd->flags)) {
+ dev_warn(dd->dev, "Interrupt when no active requests.\n");
+ return IRQ_HANDLED;
+ }
+
+ set_bit(FLAGS_OUTPUT_READY, &dd->flags);
tasklet_schedule(&dd->done_task);
return IRQ_HANDLED;
@@ -1102,9 +1103,10 @@ static void omap_sham_dma_callback(int lch, u16 ch_status, void *data)
if (ch_status != OMAP_DMA_BLOCK_IRQ) {
pr_err("omap-sham DMA error status: 0x%hx\n", ch_status);
dd->err = -EIO;
- dd->flags &= ~FLAGS_INIT; /* request to re-initialize */
+ clear_bit(FLAGS_INIT, &dd->flags);/* request to re-initialize */
}
+ set_bit(FLAGS_DMA_READY, &dd->flags);
tasklet_schedule(&dd->done_task);
}
@@ -1151,7 +1153,6 @@ static int __devinit omap_sham_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&dd->list);
spin_lock_init(&dd->lock);
tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
- tasklet_init(&dd->queue_task, omap_sham_queue_task, (unsigned long)dd);
crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);
dd->irq = -1;
@@ -1260,7 +1261,6 @@ static int __devexit omap_sham_remove(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(algs); i++)
crypto_unregister_ahash(&algs[i]);
tasklet_kill(&dd->done_task);
- tasklet_kill(&dd->queue_task);
iounmap(dd->io_base);
clk_put(dd->iclk);
omap_sham_dma_cleanup(dd);
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index db33d30..87500e6 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -508,10 +508,8 @@ static int __init padlock_init(void)
int ret;
struct cpuinfo_x86 *c = &cpu_data(0);
- if (!cpu_has_xcrypt) {
- printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
+ if (!cpu_has_xcrypt)
return -ENODEV;
- }
if (!cpu_has_xcrypt_enabled) {
printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
@@ -561,4 +559,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 06bdb4b..710f3cb 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -593,7 +593,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");
-MODULE_ALIAS("sha1-all");
-MODULE_ALIAS("sha256-all");
-MODULE_ALIAS("sha1-padlock");
-MODULE_ALIAS("sha256-padlock");
+MODULE_ALIAS_CRYPTO("sha1-all");
+MODULE_ALIAS_CRYPTO("sha256-all");
+MODULE_ALIAS_CRYPTO("sha1-padlock");
+MODULE_ALIAS_CRYPTO("sha256-padlock");
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 230b5b8..a2b553e 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -34,6 +34,7 @@
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/rtnetlink.h>
@@ -1241,8 +1242,8 @@ static void spacc_spacc_complete(unsigned long data)
spin_unlock_irqrestore(&engine->hw_lock, flags);
list_for_each_entry_safe(req, tmp, &completed, list) {
- req->complete(req);
list_del(&req->list);
+ req->complete(req);
}
}
@@ -1657,10 +1658,33 @@ static struct spacc_alg l2_engine_algs[] = {
},
};
-static int __devinit spacc_probe(struct platform_device *pdev,
- unsigned max_ctxs, size_t cipher_pg_sz,
- size_t hash_pg_sz, size_t fifo_sz,
- struct spacc_alg *algs, size_t num_algs)
+#ifdef CONFIG_OF
+static const struct of_device_id spacc_of_id_table[] = {
+ { .compatible = "picochip,spacc-ipsec" },
+ { .compatible = "picochip,spacc-l2" },
+ {}
+};
+#else /* CONFIG_OF */
+#define spacc_of_id_table NULL
+#endif /* CONFIG_OF */
+
+static bool spacc_is_compatible(struct platform_device *pdev,
+ const char *spacc_type)
+{
+ const struct platform_device_id *platid = platform_get_device_id(pdev);
+
+ if (platid && !strcmp(platid->name, spacc_type))
+ return true;
+
+#ifdef CONFIG_OF
+ if (of_device_is_compatible(pdev->dev.of_node, spacc_type))
+ return true;
+#endif /* CONFIG_OF */
+
+ return false;
+}
+
+static int __devinit spacc_probe(struct platform_device *pdev)
{
int i, err, ret = -EINVAL;
struct resource *mem, *irq;
@@ -1669,13 +1693,25 @@ static int __devinit spacc_probe(struct platform_device *pdev,
if (!engine)
return -ENOMEM;
- engine->max_ctxs = max_ctxs;
- engine->cipher_pg_sz = cipher_pg_sz;
- engine->hash_pg_sz = hash_pg_sz;
- engine->fifo_sz = fifo_sz;
- engine->algs = algs;
- engine->num_algs = num_algs;
- engine->name = dev_name(&pdev->dev);
+ if (spacc_is_compatible(pdev, "picochip,spacc-ipsec")) {
+ engine->max_ctxs = SPACC_CRYPTO_IPSEC_MAX_CTXS;
+ engine->cipher_pg_sz = SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ;
+ engine->hash_pg_sz = SPACC_CRYPTO_IPSEC_HASH_PG_SZ;
+ engine->fifo_sz = SPACC_CRYPTO_IPSEC_FIFO_SZ;
+ engine->algs = ipsec_engine_algs;
+ engine->num_algs = ARRAY_SIZE(ipsec_engine_algs);
+ } else if (spacc_is_compatible(pdev, "picochip,spacc-l2")) {
+ engine->max_ctxs = SPACC_CRYPTO_L2_MAX_CTXS;
+ engine->cipher_pg_sz = SPACC_CRYPTO_L2_CIPHER_PG_SZ;
+ engine->hash_pg_sz = SPACC_CRYPTO_L2_HASH_PG_SZ;
+ engine->fifo_sz = SPACC_CRYPTO_L2_FIFO_SZ;
+ engine->algs = l2_engine_algs;
+ engine->num_algs = ARRAY_SIZE(l2_engine_algs);
+ } else {
+ return -EINVAL;
+ }
+
+ engine->name = dev_name(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -1711,7 +1747,7 @@ static int __devinit spacc_probe(struct platform_device *pdev,
spin_lock_init(&engine->hw_lock);
- engine->clk = clk_get(&pdev->dev, NULL);
+ engine->clk = clk_get(&pdev->dev, "ref");
if (IS_ERR(engine->clk)) {
dev_info(&pdev->dev, "clk unavailable\n");
device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
@@ -1800,72 +1836,33 @@ static int __devexit spacc_remove(struct platform_device *pdev)
return 0;
}
-static int __devinit ipsec_probe(struct platform_device *pdev)
-{
- return spacc_probe(pdev, SPACC_CRYPTO_IPSEC_MAX_CTXS,
- SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ,
- SPACC_CRYPTO_IPSEC_HASH_PG_SZ,
- SPACC_CRYPTO_IPSEC_FIFO_SZ, ipsec_engine_algs,
- ARRAY_SIZE(ipsec_engine_algs));
-}
-
-static struct platform_driver ipsec_driver = {
- .probe = ipsec_probe,
- .remove = __devexit_p(spacc_remove),
- .driver = {
- .name = "picoxcell-ipsec",
-#ifdef CONFIG_PM
- .pm = &spacc_pm_ops,
-#endif /* CONFIG_PM */
- },
+static const struct platform_device_id spacc_id_table[] = {
+ { "picochip,spacc-ipsec", },
+ { "picochip,spacc-l2", },
};
-static int __devinit l2_probe(struct platform_device *pdev)
-{
- return spacc_probe(pdev, SPACC_CRYPTO_L2_MAX_CTXS,
- SPACC_CRYPTO_L2_CIPHER_PG_SZ,
- SPACC_CRYPTO_L2_HASH_PG_SZ, SPACC_CRYPTO_L2_FIFO_SZ,
- l2_engine_algs, ARRAY_SIZE(l2_engine_algs));
-}
-
-static struct platform_driver l2_driver = {
- .probe = l2_probe,
+static struct platform_driver spacc_driver = {
+ .probe = spacc_probe,
.remove = __devexit_p(spacc_remove),
.driver = {
- .name = "picoxcell-l2",
+ .name = "picochip,spacc",
#ifdef CONFIG_PM
.pm = &spacc_pm_ops,
#endif /* CONFIG_PM */
+ .of_match_table = spacc_of_id_table,
},
+ .id_table = spacc_id_table,
};
static int __init spacc_init(void)
{
- int ret = platform_driver_register(&ipsec_driver);
- if (ret) {
- pr_err("failed to register ipsec spacc driver");
- goto out;
- }
-
- ret = platform_driver_register(&l2_driver);
- if (ret) {
- pr_err("failed to register l2 spacc driver");
- goto l2_failed;
- }
-
- return 0;
-
-l2_failed:
- platform_driver_unregister(&ipsec_driver);
-out:
- return ret;
+ return platform_driver_register(&spacc_driver);
}
module_init(spacc_init);
static void __exit spacc_exit(void)
{
- platform_driver_unregister(&ipsec_driver);
- platform_driver_unregister(&l2_driver);
+ platform_driver_unregister(&spacc_driver);
}
module_exit(spacc_exit);
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 854e263..90c76fc 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1,7 +1,7 @@
/*
* talitos - Freescale Integrated Security Engine (SEC) device driver
*
- * Copyright (c) 2008-2010 Freescale Semiconductor, Inc.
+ * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
*
* Scatterlist Crypto API glue code copied from files with the following:
* Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
@@ -282,6 +282,7 @@ static int init_device(struct device *dev)
/**
* talitos_submit - submits a descriptor to the device for processing
* @dev: the SEC device to be used
+ * @ch: the SEC device channel to be used
* @desc: the descriptor to be processed by the device
* @callback: whom to call when processing is complete
* @context: a handle for use by caller (optional)
@@ -290,7 +291,7 @@ static int init_device(struct device *dev)
* callback must check err and feedback in descriptor header
* for device processing status.
*/
-static int talitos_submit(struct device *dev, struct talitos_desc *desc,
+static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
void (*callback)(struct device *dev,
struct talitos_desc *desc,
void *context, int error),
@@ -298,15 +299,9 @@ static int talitos_submit(struct device *dev, struct talitos_desc *desc,
{
struct talitos_private *priv = dev_get_drvdata(dev);
struct talitos_request *request;
- unsigned long flags, ch;
+ unsigned long flags;
int head;
- /* select done notification */
- desc->hdr |= DESC_HDR_DONE_NOTIFY;
-
- /* emulate SEC's round-robin channel fifo polling scheme */
- ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);
-
spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
@@ -421,7 +416,7 @@ static void talitos_done(unsigned long data)
/*
* locate current (offending) descriptor
*/
-static struct talitos_desc *current_desc(struct device *dev, int ch)
+static u32 current_desc_hdr(struct device *dev, int ch)
{
struct talitos_private *priv = dev_get_drvdata(dev);
int tail = priv->chan[ch].tail;
@@ -433,23 +428,25 @@ static struct talitos_desc *current_desc(struct device *dev, int ch)
tail = (tail + 1) & (priv->fifo_len - 1);
if (tail == priv->chan[ch].tail) {
dev_err(dev, "couldn't locate current descriptor\n");
- return NULL;
+ return 0;
}
}
- return priv->chan[ch].fifo[tail].desc;
+ return priv->chan[ch].fifo[tail].desc->hdr;
}
/*
* user diagnostics; report root cause of error based on execution unit status
*/
-static void report_eu_error(struct device *dev, int ch,
- struct talitos_desc *desc)
+static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
struct talitos_private *priv = dev_get_drvdata(dev);
int i;
- switch (desc->hdr & DESC_HDR_SEL0_MASK) {
+ if (!desc_hdr)
+ desc_hdr = in_be32(priv->reg + TALITOS_DESCBUF(ch));
+
+ switch (desc_hdr & DESC_HDR_SEL0_MASK) {
case DESC_HDR_SEL0_AFEU:
dev_err(dev, "AFEUISR 0x%08x_%08x\n",
in_be32(priv->reg + TALITOS_AFEUISR),
@@ -493,7 +490,7 @@ static void report_eu_error(struct device *dev, int ch,
break;
}
- switch (desc->hdr & DESC_HDR_SEL1_MASK) {
+ switch (desc_hdr & DESC_HDR_SEL1_MASK) {
case DESC_HDR_SEL1_MDEUA:
case DESC_HDR_SEL1_MDEUB:
dev_err(dev, "MDEUISR 0x%08x_%08x\n",
@@ -555,7 +552,7 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
if (v_lo & TALITOS_CCPSR_LO_IEU)
dev_err(dev, "invalid execution unit error\n");
if (v_lo & TALITOS_CCPSR_LO_EU)
- report_eu_error(dev, ch, current_desc(dev, ch));
+ report_eu_error(dev, ch, current_desc_hdr(dev, ch));
if (v_lo & TALITOS_CCPSR_LO_GB)
dev_err(dev, "gather boundary error\n");
if (v_lo & TALITOS_CCPSR_LO_GRL)
@@ -706,6 +703,7 @@ static void talitos_unregister_rng(struct device *dev)
struct talitos_ctx {
struct device *dev;
+ int ch;
__be32 desc_hdr_template;
u8 key[TALITOS_MAX_KEY_SIZE];
u8 iv[TALITOS_MAX_IV_LENGTH];
@@ -1117,7 +1115,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
DMA_FROM_DEVICE);
- ret = talitos_submit(dev, desc, callback, areq);
+ ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
if (ret != -EINPROGRESS) {
ipsec_esp_unmap(dev, edesc, areq);
kfree(edesc);
@@ -1382,22 +1380,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
const u8 *key, unsigned int keylen)
{
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
- struct ablkcipher_alg *alg = crypto_ablkcipher_alg(cipher);
-
- if (keylen > TALITOS_MAX_KEY_SIZE)
- goto badkey;
-
- if (keylen < alg->min_keysize || keylen > alg->max_keysize)
- goto badkey;
memcpy(&ctx->key, key, keylen);
ctx->keylen = keylen;
return 0;
-
-badkey:
- crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
}
static void common_nonsnoop_unmap(struct device *dev,
@@ -1433,7 +1420,6 @@ static void ablkcipher_done(struct device *dev,
static int common_nonsnoop(struct talitos_edesc *edesc,
struct ablkcipher_request *areq,
- u8 *giv,
void (*callback) (struct device *dev,
struct talitos_desc *desc,
void *context, int error))
@@ -1453,7 +1439,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
/* cipher iv */
ivsize = crypto_ablkcipher_ivsize(cipher);
- map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, giv ?: areq->info, 0,
+ map_single_talitos_ptr(dev, &desc->ptr[1], ivsize, areq->info, 0,
DMA_TO_DEVICE);
/* cipher key */
@@ -1524,7 +1510,7 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
to_talitos_ptr(&desc->ptr[6], 0);
desc->ptr[6].j_extent = 0;
- ret = talitos_submit(dev, desc, callback, areq);
+ ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
if (ret != -EINPROGRESS) {
common_nonsnoop_unmap(dev, edesc, areq);
kfree(edesc);
@@ -1556,7 +1542,7 @@ static int ablkcipher_encrypt(struct ablkcipher_request *areq)
/* set encrypt */
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
- return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
+ return common_nonsnoop(edesc, areq, ablkcipher_done);
}
static int ablkcipher_decrypt(struct ablkcipher_request *areq)
@@ -1572,7 +1558,7 @@ static int ablkcipher_decrypt(struct ablkcipher_request *areq)
edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
- return common_nonsnoop(edesc, areq, NULL, ablkcipher_done);
+ return common_nonsnoop(edesc, areq, ablkcipher_done);
}
static void common_nonsnoop_hash_unmap(struct device *dev,
@@ -1703,7 +1689,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
/* last DWORD empty */
desc->ptr[6] = zero_entry;
- ret = talitos_submit(dev, desc, callback, areq);
+ ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
if (ret != -EINPROGRESS) {
common_nonsnoop_hash_unmap(dev, edesc, areq);
kfree(edesc);
@@ -2244,6 +2230,7 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
struct crypto_alg *alg = tfm->__crt_alg;
struct talitos_crypto_alg *talitos_alg;
struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct talitos_private *priv;
if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
talitos_alg = container_of(__crypto_ahash_alg(alg),
@@ -2256,9 +2243,17 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
/* update context with ptr to dev */
ctx->dev = talitos_alg->dev;
+ /* assign SEC channel to tfm in round-robin fashion */
+ priv = dev_get_drvdata(ctx->dev);
+ ctx->ch = atomic_inc_return(&priv->last_chan) &
+ (priv->num_channels - 1);
+
/* copy descriptor header template value */
ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
+ /* select done notification */
+ ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
+
return 0;
}
@@ -2389,6 +2384,7 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
break;
default:
dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
+ kfree(t_alg);
return ERR_PTR(-EINVAL);
}