/*
 * Support for OMAP SHA1/MD5 HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Author: Dmitry Kasatkin <dmitry.kasatkin@nokia.com>
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from old omap-sha1-md5.c driver.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/err.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#define MD5_DIGEST_SIZE			16

#define SHA_REG_IDIGEST(dd, x)		((dd)->pdata->idigest_ofs + ((x) * 0x04))
#define SHA_REG_DIN(dd, x)		((dd)->pdata->din_ofs + ((x) * 0x04))
#define SHA_REG_DIGCNT(dd)		((dd)->pdata->digcnt_ofs)

#define SHA_REG_ODIGEST(dd, x)		((dd)->pdata->odigest_ofs + ((x) * 0x04))

#define SHA_REG_CTRL			0x18
#define SHA_REG_CTRL_LENGTH		(0xFFFFFFFF << 5)
#define SHA_REG_CTRL_CLOSE_HASH		(1 << 4)
#define SHA_REG_CTRL_ALGO_CONST		(1 << 3)
#define SHA_REG_CTRL_ALGO		(1 << 2)
#define SHA_REG_CTRL_INPUT_READY	(1 << 1)
#define SHA_REG_CTRL_OUTPUT_READY	(1 << 0)

#define SHA_REG_REV(dd)			((dd)->pdata->rev_ofs)

#define SHA_REG_MASK(dd)		((dd)->pdata->mask_ofs)
#define SHA_REG_MASK_DMA_EN		(1 << 3)
#define SHA_REG_MASK_IT_EN		(1 << 2)
#define SHA_REG_MASK_SOFTRESET		(1 << 1)
#define SHA_REG_AUTOIDLE		(1 << 0)

#define SHA_REG_SYSSTATUS(dd)		((dd)->pdata->sysstatus_ofs)
#define SHA_REG_SYSSTATUS_RESETDONE	(1 << 0)

#define SHA_REG_MODE(dd)		((dd)->pdata->mode_ofs)
#define SHA_REG_MODE_HMAC_OUTER_HASH	(1 << 7)
#define SHA_REG_MODE_HMAC_KEY_PROC	(1 << 5)
#define SHA_REG_MODE_CLOSE_HASH		(1 << 4)
#define SHA_REG_MODE_ALGO_CONSTANT	(1 << 3)

#define SHA_REG_MODE_ALGO_MASK		(7 << 0)
#define SHA_REG_MODE_ALGO_MD5_128	(0 << 1)
#define SHA_REG_MODE_ALGO_SHA1_160	(1 << 1)
#define SHA_REG_MODE_ALGO_SHA2_224	(2 << 1)
#define SHA_REG_MODE_ALGO_SHA2_256	(3 << 1)
#define SHA_REG_MODE_ALGO_SHA2_384	(1 << 0)
#define SHA_REG_MODE_ALGO_SHA2_512	(3 << 0)

#define SHA_REG_LENGTH(dd)		((dd)->pdata->length_ofs)

#define SHA_REG_IRQSTATUS		0x118
#define SHA_REG_IRQSTATUS_CTX_RDY	(1 << 3)
#define SHA_REG_IRQSTATUS_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQSTATUS_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQSTATUS_OUTPUT_RDY	(1 << 0)

#define SHA_REG_IRQENA			0x11C
#define SHA_REG_IRQENA_CTX_RDY		(1 << 3)
#define SHA_REG_IRQENA_PARTHASH_RDY	(1 << 2)
#define SHA_REG_IRQENA_INPUT_RDY	(1 << 1)
#define SHA_REG_IRQENA_OUTPUT_RDY	(1 << 0)

#define DEFAULT_TIMEOUT_INTERVAL	HZ

#define DEFAULT_AUTOSUSPEND_DELAY	1000
/* mostly device flags */
#define FLAGS_BUSY		0
#define FLAGS_FINAL		1
#define FLAGS_DMA_ACTIVE	2
#define FLAGS_OUTPUT_READY	3
#define FLAGS_INIT		4
#define FLAGS_CPU		5
#define FLAGS_DMA_READY		6
#define FLAGS_AUTO_XOR		7
#define FLAGS_BE32_SHA1		8
#define FLAGS_SGS_COPIED	9
#define FLAGS_SGS_ALLOCED	10
/* context flags */
#define FLAGS_FINUP		16

#define FLAGS_MODE_SHIFT	18
#define FLAGS_MODE_MASK		(SHA_REG_MODE_ALGO_MASK << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_MD5		(SHA_REG_MODE_ALGO_MD5_128 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA1		(SHA_REG_MODE_ALGO_SHA1_160 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA224	(SHA_REG_MODE_ALGO_SHA2_224 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA256	(SHA_REG_MODE_ALGO_SHA2_256 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA384	(SHA_REG_MODE_ALGO_SHA2_384 << FLAGS_MODE_SHIFT)
#define FLAGS_MODE_SHA512	(SHA_REG_MODE_ALGO_SHA2_512 << FLAGS_MODE_SHIFT)

#define FLAGS_HMAC		21
#define FLAGS_ERROR		22

#define OP_UPDATE		1
#define OP_FINAL		2
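
/*
 * The low flag bits are mostly device state kept in dd->flags (a few,
 * such as FLAGS_CPU, are mirrored into the per-request ctx->flags);
 * FLAGS_FINUP and above are per-request context flags. The FLAGS_MODE_*
 * values are stored pre-shifted by FLAGS_MODE_SHIFT so the algorithm
 * field can be masked out of ctx->flags and written straight into the
 * OMAP4-style MODE register (see omap_sham_write_ctrl_omap4()).
 */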
#define OMAP_ALIGN_MASK		(sizeof(u32) - 1)
#define OMAP_ALIGNED		__attribute__((aligned(sizeof(u32))))

#define BUFLEN			SHA512_BLOCK_SIZE
#define OMAP_SHA_DMA_THRESHOLD	256

struct omap_sham_dev;
struct omap_sham_reqctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8			digest[SHA512_DIGEST_SIZE] OMAP_ALIGNED;
	size_t			digcnt;
	size_t			bufcnt;
	size_t			buflen;

	/* walk state */
	struct scatterlist	*sg;
	struct scatterlist	sgl[2];
	int			offset;	/* offset in current sg */
	int			sg_len;
	unsigned int		total;	/* total request */

	u8			buffer[0] OMAP_ALIGNED;
};
struct omap_sham_hmac_ctx {
	struct crypto_shash	*shash;
	u8			ipad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
	u8			opad[SHA512_BLOCK_SIZE] OMAP_ALIGNED;
};
struct omap_sham_ctx {
	struct omap_sham_dev	*dd;
	unsigned long		flags;

	/* fallback stuff */
	struct crypto_shash	*fallback;

	struct omap_sham_hmac_ctx base[0];
};
#define OMAP_SHAM_QUEUE_LENGTH	10

struct omap_sham_algs_info {
	struct ahash_alg	*algs_list;
	unsigned int		size;
	unsigned int		registered;
};
struct omap_sham_pdata {
	struct omap_sham_algs_info	*algs_info;
	unsigned int	algs_info_size;
	unsigned long	flags;
	int		digest_size;

	void		(*copy_hash)(struct ahash_request *req, int out);
	void		(*write_ctrl)(struct omap_sham_dev *dd, size_t length,
				      int final, int dma);
	void		(*trigger)(struct omap_sham_dev *dd, size_t length);
	int		(*poll_irq)(struct omap_sham_dev *dd);
	irqreturn_t	(*intr_hdlr)(int irq, void *dev_id);

	u32		odigest_ofs;
	u32		idigest_ofs;
	u32		din_ofs;
	u32		digcnt_ofs;
	u32		rev_ofs;
	u32		mask_ofs;
	u32		sysstatus_ofs;
	u32		mode_ofs;
	u32		length_ofs;

	u32		major_mask;
	u32		major_shift;
	u32		minor_mask;
	u32		minor_shift;
};
struct omap_sham_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	void __iomem		*io_base;
	int			irq;
	spinlock_t		lock;
	struct dma_chan		*dma_lch;
	struct tasklet_struct	done_task;
	u8			polling_mode;
	u8			xmit_buf[BUFLEN] OMAP_ALIGNED;

	unsigned long		flags;
	int			fallback_sz;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	const struct omap_sham_pdata	*pdata;
};
struct omap_sham_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct omap_sham_drv sham = {
	.dev_list = LIST_HEAD_INIT(sham.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(sham.lock),
};
static inline u32 omap_sham_read(struct omap_sham_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_base + offset);
}

static inline void omap_sham_write(struct omap_sham_dev *dd,
					u32 offset, u32 value)
{
	__raw_writel(value, dd->io_base + offset);
}

static inline void omap_sham_write_mask(struct omap_sham_dev *dd, u32 address,
					u32 value, u32 mask)
{
	u32 val;

	val = omap_sham_read(dd, address);
	val &= ~mask;
	val |= value & mask;
	omap_sham_write(dd, address, val);
}
static inline int omap_sham_wait(struct omap_sham_dev *dd, u32 offset, u32 bit)
{
	unsigned long timeout = jiffies + DEFAULT_TIMEOUT_INTERVAL;

	while (!(omap_sham_read(dd, offset) & bit)) {
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;
	}

	return 0;
}
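
/*
 * The copy_hash helpers move the intermediate digest between hardware
 * and the request context: out == 1 saves the IDIGEST registers into
 * ctx->digest, out == 0 restores them so a previously interrupted
 * request can resume on the engine.
 */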
static void omap_sham_copy_hash_omap2(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
		if (out)
			hash[i] = omap_sham_read(dd, SHA_REG_IDIGEST(dd, i));
		else
			omap_sham_write(dd, SHA_REG_IDIGEST(dd, i), hash[i]);
	}
}
static void omap_sham_copy_hash_omap4(struct ahash_request *req, int out)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int i;

	if (ctx->flags & BIT(FLAGS_HMAC)) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		u32 *opad = (u32 *)bctx->opad;

		for (i = 0; i < dd->pdata->digest_size / sizeof(u32); i++) {
			if (out)
				opad[i] = omap_sham_read(dd,
						SHA_REG_ODIGEST(dd, i));
			else
				omap_sham_write(dd, SHA_REG_ODIGEST(dd, i),
						opad[i]);
		}
	}

	omap_sham_copy_hash_omap2(req, out);
}
static void omap_sham_copy_ready_hash(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	u32 *in = (u32 *)ctx->digest;
	u32 *hash = (u32 *)req->result;
	int i, d, big_endian = 0;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
		d = MD5_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA1:
		/* OMAP2 SHA1 is big endian */
		if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags))
			big_endian = 1;
		d = SHA1_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA224:
		d = SHA224_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA256:
		d = SHA256_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA384:
		d = SHA384_DIGEST_SIZE / sizeof(u32);
		break;
	case FLAGS_MODE_SHA512:
		d = SHA512_DIGEST_SIZE / sizeof(u32);
		break;
	default:
		d = 0;
	}

	if (big_endian)
		for (i = 0; i < d; i++)
			hash[i] = be32_to_cpu(in[i]);
	else
		for (i = 0; i < d; i++)
			hash[i] = le32_to_cpu(in[i]);
}
static int omap_sham_hw_init(struct omap_sham_dev *dd)
{
	int err;

	err = pm_runtime_get_sync(dd->dev);
	if (err < 0) {
		dev_err(dd->dev, "failed to get sync: %d\n", err);
		return err;
	}

	if (!test_bit(FLAGS_INIT, &dd->flags)) {
		set_bit(FLAGS_INIT, &dd->flags);
	}

	return 0;
}
static void omap_sham_write_ctrl_omap2(struct omap_sham_dev *dd, size_t length,
				       int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val = length << 5, mask;

	if (likely(ctx->digcnt))
		omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt);

	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
		SHA_REG_MASK_IT_EN | (dma ? SHA_REG_MASK_DMA_EN : 0),
		SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
	/*
	 * Setting ALGO_CONST only for the first iteration
	 * and CLOSE_HASH only for the last one.
	 */
	if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1)
		val |= SHA_REG_CTRL_ALGO;
	if (!ctx->digcnt)
		val |= SHA_REG_CTRL_ALGO_CONST;
	if (final)
		val |= SHA_REG_CTRL_CLOSE_HASH;

	mask = SHA_REG_CTRL_ALGO_CONST | SHA_REG_CTRL_CLOSE_HASH |
	       SHA_REG_CTRL_ALGO | SHA_REG_CTRL_LENGTH;

	omap_sham_write_mask(dd, SHA_REG_CTRL, val, mask);
}
static void omap_sham_trigger_omap2(struct omap_sham_dev *dd, size_t length)
{
}

static int omap_sham_poll_irq_omap2(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_CTRL, SHA_REG_CTRL_INPUT_READY);
}
static int get_block_size(struct omap_sham_reqctx *ctx)
{
	int d;

	switch (ctx->flags & FLAGS_MODE_MASK) {
	case FLAGS_MODE_MD5:
	case FLAGS_MODE_SHA1:
		d = SHA1_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA224:
	case FLAGS_MODE_SHA256:
		d = SHA256_BLOCK_SIZE;
		break;
	case FLAGS_MODE_SHA384:
	case FLAGS_MODE_SHA512:
		d = SHA512_BLOCK_SIZE;
		break;
	default:
		d = 0;
	}

	return d;
}
static void omap_sham_write_n(struct omap_sham_dev *dd, u32 offset,
			      u32 *value, int count)
{
	for (; count--; value++, offset += 4)
		omap_sham_write(dd, offset, *value);
}
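
/*
 * OMAP4-style control: algorithm selection, first/last-block handling
 * and the HMAC options are all encoded in the MODE register. On the
 * first block of an HMAC transform the padded key kept in bctx->ipad
 * is written split across the ODIGEST and IDIGEST register banks, and
 * SHA_REG_MODE_HMAC_KEY_PROC tells the engine to derive the inner and
 * outer pads itself (FLAGS_AUTO_XOR hardware).
 */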
static void omap_sham_write_ctrl_omap4(struct omap_sham_dev *dd, size_t length,
				       int final, int dma)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 val, mask;

	/*
	 * Setting ALGO_CONST only for the first iteration and
	 * CLOSE_HASH only for the last one. Note that flags mode bits
	 * correspond to algorithm encoding in mode register.
	 */
	val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT);
	if (!ctx->digcnt) {
		struct crypto_ahash *tfm = crypto_ahash_reqtfm(dd->req);
		struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		int bs, nr_dr;

		val |= SHA_REG_MODE_ALGO_CONSTANT;

		if (ctx->flags & BIT(FLAGS_HMAC)) {
			bs = get_block_size(ctx);
			nr_dr = bs / (2 * sizeof(u32));
			val |= SHA_REG_MODE_HMAC_KEY_PROC;
			omap_sham_write_n(dd, SHA_REG_ODIGEST(dd, 0),
					  (u32 *)bctx->ipad, nr_dr);
			omap_sham_write_n(dd, SHA_REG_IDIGEST(dd, 0),
					  (u32 *)bctx->ipad + nr_dr, nr_dr);
			ctx->digcnt += bs;
		}
	}

	if (final) {
		val |= SHA_REG_MODE_CLOSE_HASH;

		if (ctx->flags & BIT(FLAGS_HMAC))
			val |= SHA_REG_MODE_HMAC_OUTER_HASH;
	}

	mask = SHA_REG_MODE_ALGO_CONSTANT | SHA_REG_MODE_CLOSE_HASH |
	       SHA_REG_MODE_ALGO_MASK | SHA_REG_MODE_HMAC_OUTER_HASH |
	       SHA_REG_MODE_HMAC_KEY_PROC;

	dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags);
	omap_sham_write_mask(dd, SHA_REG_MODE(dd), val, mask);
	omap_sham_write(dd, SHA_REG_IRQENA, SHA_REG_IRQENA_OUTPUT_RDY);
	omap_sham_write_mask(dd, SHA_REG_MASK(dd),
			     SHA_REG_MASK_IT_EN |
				(dma ? SHA_REG_MASK_DMA_EN : 0),
			     SHA_REG_MASK_IT_EN | SHA_REG_MASK_DMA_EN);
}
static void omap_sham_trigger_omap4(struct omap_sham_dev *dd, size_t length)
{
	omap_sham_write(dd, SHA_REG_LENGTH(dd), length);
}

static int omap_sham_poll_irq_omap4(struct omap_sham_dev *dd)
{
	return omap_sham_wait(dd, SHA_REG_IRQSTATUS,
			      SHA_REG_IRQSTATUS_INPUT_RDY);
}
static int omap_sham_xmit_cpu(struct omap_sham_dev *dd, size_t length,
			      int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32, bs32, offset = 0;
	const u32 *buffer;
	int mlen;
	struct sg_mapping_iter mi;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: %d, length: %d, final: %d\n",
		ctx->digcnt, length, final);

	dd->pdata->write_ctrl(dd, length, final, 0);
	dd->pdata->trigger(dd, length);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt += length;
	ctx->total -= length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_CPU, &dd->flags);

	len32 = DIV_ROUND_UP(length, sizeof(u32));
	bs32 = get_block_size(ctx) / sizeof(u32);

	sg_miter_start(&mi, ctx->sg, ctx->sg_len,
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);

	mlen = 0;

	while (len32) {
		if (dd->pdata->poll_irq(dd))
			return -ETIMEDOUT;

		for (count = 0; count < min(len32, bs32); count++, offset++) {
			if (!mlen) {
				sg_miter_next(&mi);
				mlen = mi.length;
				if (!mlen) {
					pr_err("sg miter failure.\n");
					return -EINVAL;
				}
				offset = 0;
				buffer = mi.addr;
			}
			omap_sham_write(dd, SHA_REG_DIN(dd, count),
					buffer[offset]);
			mlen -= 4;
		}
		len32 -= min(len32, bs32);
	}

	sg_miter_stop(&mi);

	return -EINPROGRESS;
}
static void omap_sham_dma_callback(void *param)
{
	struct omap_sham_dev *dd = param;

	set_bit(FLAGS_DMA_READY, &dd->flags);
	tasklet_schedule(&dd->done_task);
}
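
/*
 * DMA path: the scatterlist prepared by omap_sham_prepare_request() is
 * fed into the DIN FIFO by the system DMA engine, with dst_maxburst
 * sized to one hash block. Completion arrives through
 * omap_sham_dma_callback() above and is finished in the done_task
 * tasklet.
 */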
static int omap_sham_xmit_dma(struct omap_sham_dev *dd, size_t length,
			      int final)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config cfg;
	int ret;

	dev_dbg(dd->dev, "xmit_dma: digcnt: %d, length: %d, final: %d\n",
		ctx->digcnt, length, final);

	if (!dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return -EINVAL;
	}

	memset(&cfg, 0, sizeof(cfg));

	cfg.dst_addr = dd->phys_base + SHA_REG_DIN(dd, 0);
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_maxburst = get_block_size(ctx) / DMA_SLAVE_BUSWIDTH_4_BYTES;

	ret = dmaengine_slave_config(dd->dma_lch, &cfg);
	if (ret) {
		pr_err("omap-sham: can't configure dmaengine slave: %d\n", ret);
		return ret;
	}

	tx = dmaengine_prep_slave_sg(dd->dma_lch, ctx->sg, ctx->sg_len,
				     DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dev_err(dd->dev, "prep_slave_sg failed\n");
		return -EINVAL;
	}

	tx->callback = omap_sham_dma_callback;
	tx->callback_param = dd;

	dd->pdata->write_ctrl(dd, length, final, 1);

	ctx->digcnt += length;
	ctx->total -= length;

	if (final)
		set_bit(FLAGS_FINAL, &dd->flags); /* catch last interrupt */

	set_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	dmaengine_submit(tx);
	dma_async_issue_pending(dd->dma_lch);

	dd->pdata->trigger(dd, length);

	return -EINPROGRESS;
}
static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx,
				   struct scatterlist *sg, int bs, int new_len)
{
	int n = sg_nents(sg);
	struct scatterlist *tmp;
	int offset = ctx->offset;

	if (ctx->bufcnt)
		n++;

	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
	if (!ctx->sg)
		return -ENOMEM;

	sg_init_table(ctx->sg, n);

	tmp = ctx->sg;

	ctx->sg_len = 0;

	if (ctx->bufcnt) {
		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
		tmp = sg_next(tmp);
		ctx->sg_len++;
	}

	while (sg && new_len) {
		int len = sg->length - offset;

		if (offset) {
			offset -= sg->length;
			if (offset < 0)
				offset = 0;
		}

		if (new_len < len)
			len = new_len;

		if (len > 0) {
			new_len -= len;
			sg_set_page(tmp, sg_page(sg), len, sg->offset);
			tmp = sg_next(tmp);
			ctx->sg_len++;
		}

		sg = sg_next(sg);
	}

	set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags);

	ctx->bufcnt = 0;

	return 0;
}
static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx,
			      struct scatterlist *sg, int bs, int new_len)
{
	int pages;
	void *buf;
	int len;

	len = new_len + ctx->bufcnt;

	pages = get_order(ctx->total);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		pr_err("Couldn't allocate pages for unaligned cases.\n");
		return -ENOMEM;
	}

	if (ctx->bufcnt)
		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);

	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset,
				 ctx->total - ctx->bufcnt, 0);
	sg_init_table(ctx->sgl, 1);
	sg_set_buf(ctx->sgl, buf, len);
	ctx->sg = ctx->sgl;
	set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags);

	ctx->sg_len = 1;

	ctx->bufcnt = 0;

	return 0;
}
static int omap_sham_align_sgs(struct scatterlist *sg,
			       int nbytes, int bs, bool final,
			       struct omap_sham_reqctx *rctx)
{
	int n = 0;
	bool aligned = true;
	bool list_ok = true;
	struct scatterlist *sg_tmp = sg;
	int new_len;
	int offset = rctx->offset;

	if (!sg || !sg->length || !nbytes)
		return 0;

	new_len = nbytes;

	if (offset)
		list_ok = false;

	if (final)
		new_len = DIV_ROUND_UP(new_len, bs) * bs;
	else
		new_len = (new_len - 1) / bs * bs;

	if (nbytes != new_len)
		list_ok = false;

	while (nbytes > 0 && sg_tmp) {
		n++;

#ifdef CONFIG_ZONE_DMA
		if (page_zonenum(sg_page(sg_tmp)) != ZONE_DMA) {
			aligned = false;
			break;
		}
#endif

		if (offset < sg_tmp->length) {
			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
				aligned = false;
				break;
			}

			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
				aligned = false;
				break;
			}
		}

		if (offset) {
			offset -= sg_tmp->length;
			if (offset < 0) {
				nbytes += offset;
				offset = 0;
			}
		} else {
			nbytes -= sg_tmp->length;
		}

		sg_tmp = sg_next(sg_tmp);

		if (nbytes < 0) {
			list_ok = false;
			break;
		}
	}

	if (!aligned)
		return omap_sham_copy_sgs(rctx, sg, bs, new_len);
	else if (!list_ok)
		return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);

	rctx->sg_len = n;
	rctx->sg = sg;

	return 0;
}
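
/*
 * Three strategies for feeding the engine, chosen by
 * omap_sham_align_sgs() above: use the caller's scatterlist unchanged
 * when every segment is 4-byte aligned and block-multiple sized;
 * rebuild only the list (omap_sham_copy_sg_lists()) when the data is
 * usable but the list needs trimming; or flatten everything into
 * freshly allocated pages (omap_sham_copy_sgs()) when the data itself
 * is misaligned.
 */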
static int omap_sham_prepare_request(struct ahash_request *req, bool update)
{
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
	int bs;
	int ret;
	int nbytes;
	bool final = rctx->flags & BIT(FLAGS_FINUP);
	int xmit_len, hash_later;

	bs = get_block_size(rctx);

	if (update)
		nbytes = req->nbytes;
	else
		nbytes = 0;

	rctx->total = nbytes + rctx->bufcnt;

	if (!rctx->total)
		return 0;

	if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
		int len = bs - rctx->bufcnt % bs;

		if (len > nbytes)
			len = nbytes;
		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
					 0, len, 0);
		rctx->bufcnt += len;
		nbytes -= len;
		rctx->offset = len;
	}

	if (rctx->bufcnt)
		memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);

	ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
	if (ret)
		return ret;

	xmit_len = rctx->total;

	if (!IS_ALIGNED(xmit_len, bs)) {
		if (final)
			xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs;
		else
			xmit_len = xmit_len / bs * bs;
	} else if (!final) {
		xmit_len -= bs;
	}

	hash_later = rctx->total - xmit_len;
	if (hash_later < 0)
		hash_later = 0;

	if (rctx->bufcnt && nbytes) {
		/* have data from previous operation and current */
		sg_init_table(rctx->sgl, 2);
		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);

		sg_chain(rctx->sgl, 2, req->src);

		rctx->sg = rctx->sgl;

		rctx->sg_len++;
	} else if (rctx->bufcnt) {
		/* have buffered data only */
		sg_init_table(rctx->sgl, 1);
		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len);

		rctx->sg = rctx->sgl;

		rctx->sg_len = 1;
	}

	if (hash_later) {
		int offset = 0;

		if (hash_later > req->nbytes) {
			memcpy(rctx->buffer, rctx->buffer + xmit_len,
			       hash_later - req->nbytes);
			offset = hash_later - req->nbytes;
		}

		if (req->nbytes) {
			scatterwalk_map_and_copy(rctx->buffer + offset,
						 req->src,
						 offset + req->nbytes -
						 hash_later, hash_later, 0);
		}

		rctx->bufcnt = hash_later;
	} else {
		rctx->bufcnt = 0;
	}

	if (!final)
		rctx->total = xmit_len;

	return 0;
}
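
/*
 * Worked example (illustrative numbers only): with SHA-256 (bs = 64),
 * 5 bytes left over in rctx->buffer and a 131-byte non-final update,
 * rctx->total is 136. xmit_len is rounded down to the block multiple
 * 128, so 8 trailing bytes are kept back as hash_later and stored in
 * rctx->buffer for the next update or final.
 */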
static int omap_sham_update_dma_stop(struct omap_sham_dev *dd)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req);

	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);

	clear_bit(FLAGS_DMA_ACTIVE, &dd->flags);

	return 0;
}
static int omap_sham_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = NULL, *tmp;
	int bs = 0;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_MD5;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA1;
		bs = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA224;
		bs = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA256;
		bs = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA384;
		bs = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= FLAGS_MODE_SHA512;
		bs = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->buflen = BUFLEN;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
			struct omap_sham_hmac_ctx *bctx = tctx->base;

			memcpy(ctx->buffer, bctx->ipad, bs);
			ctx->bufcnt = bs;
		}

		ctx->flags |= BIT(FLAGS_HMAC);
	}

	return 0;
}
static int omap_sham_update_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err;
	bool final = ctx->flags & BIT(FLAGS_FINUP);

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: %d, finup: %d\n",
		ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0);

	if (ctx->total < get_block_size(ctx) ||
	    ctx->total < dd->fallback_sz)
		ctx->flags |= BIT(FLAGS_CPU);

	if (ctx->flags & BIT(FLAGS_CPU))
		err = omap_sham_xmit_cpu(dd, ctx->total, final);
	else
		err = omap_sham_xmit_dma(dd, ctx->total, final);

	/* wait for dma completion before can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt);

	return err;
}
static int omap_sham_final_req(struct omap_sham_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err = 0, use_dma = 1;

	if ((ctx->total <= get_block_size(ctx)) || dd->polling_mode)
		/*
		 * faster to handle last block with cpu or
		 * use cpu when dma is not present.
		 */
		use_dma = 0;

	if (use_dma)
		err = omap_sham_xmit_dma(dd, ctx->total, 1);
	else
		err = omap_sham_xmit_cpu(dd, ctx->total, 1);

	if (err != -EINPROGRESS)
		dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}
static int omap_sham_finish_hmac(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	SHASH_DESC_ON_STACK(shash, bctx->shash);

	shash->tfm = bctx->shash;

	return crypto_shash_init(shash) ?:
	       crypto_shash_update(shash, bctx->opad, bs) ?:
	       crypto_shash_finup(shash, req->result, ds, req->result);
}
static int omap_sham_finish(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;
	int err = 0;

	if (ctx->digcnt) {
		omap_sham_copy_ready_hash(req);
		if ((ctx->flags & BIT(FLAGS_HMAC)) &&
		    !test_bit(FLAGS_AUTO_XOR, &dd->flags))
			err = omap_sham_finish_hmac(req);
	}

	dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt);

	return err;
}
static void omap_sham_finish_req(struct ahash_request *req, int err)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (test_bit(FLAGS_SGS_COPIED, &dd->flags))
		free_pages((unsigned long)sg_virt(ctx->sg),
			   get_order(ctx->sg->length + ctx->bufcnt));

	if (test_bit(FLAGS_SGS_ALLOCED, &dd->flags))
		kfree(ctx->sg);

	ctx->sg = NULL;

	dd->flags &= ~(BIT(FLAGS_SGS_ALLOCED) | BIT(FLAGS_SGS_COPIED));

	if (!err) {
		dd->pdata->copy_hash(req, 1);
		if (test_bit(FLAGS_FINAL, &dd->flags))
			err = omap_sham_finish(req);
	} else {
		ctx->flags |= BIT(FLAGS_ERROR);
	}

	/* atomic operation is not needed here */
	dd->flags &= ~(BIT(FLAGS_BUSY) | BIT(FLAGS_FINAL) | BIT(FLAGS_CPU) |
			BIT(FLAGS_DMA_READY) | BIT(FLAGS_OUTPUT_READY));

	pm_runtime_mark_last_busy(dd->dev);
	pm_runtime_put_autosuspend(dd->dev);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}
static int omap_sham_handle_queue(struct omap_sham_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct omap_sham_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

retry:
	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);
	if (test_bit(FLAGS_BUSY, &dd->flags)) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}
	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		set_bit(FLAGS_BUSY, &dd->flags);
	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->req = req;
	ctx = ahash_request_ctx(req);

	err = omap_sham_prepare_request(req, ctx->op == OP_UPDATE);
	if (err || !ctx->total)
		goto err1;

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
		ctx->op, req->nbytes);

	err = omap_sham_hw_init(dd);
	if (err)
		goto err1;

	if (ctx->digcnt)
		/* request has changed - restore hash */
		dd->pdata->copy_hash(req, 0);

	if (ctx->op == OP_UPDATE) {
		err = omap_sham_update_req(dd);
		if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP)))
			/* no final() after finup() */
			err = omap_sham_final_req(dd);
	} else if (ctx->op == OP_FINAL) {
		err = omap_sham_final_req(dd);
	}
err1:
	dev_dbg(dd->dev, "exit, err: %d\n", err);

	if (err != -EINPROGRESS) {
		/* done_task will not finish it, so do it here */
		omap_sham_finish_req(req, err);
		req = NULL;

		/*
		 * Execute next request immediately if there is anything
		 * in queue.
		 */
		goto retry;
	}

	return ret;
}
static int omap_sham_enqueue(struct ahash_request *req, unsigned int op)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_dev *dd = tctx->dd;

	ctx->op = op;

	return omap_sham_handle_queue(dd, req);
}
static int omap_sham_update(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	struct omap_sham_dev *dd = ctx->dd;

	if (!req->nbytes)
		return 0;

	if (ctx->bufcnt + req->nbytes <= ctx->buflen) {
		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
					 0, req->nbytes, 0);
		ctx->bufcnt += req->nbytes;
		return 0;
	}

	if (dd->polling_mode)
		ctx->flags |= BIT(FLAGS_CPU);

	return omap_sham_enqueue(req, OP_UPDATE);
}
static int omap_sham_shash_digest(struct crypto_shash *tfm, u32 flags,
				  const u8 *data, unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_shash_digest(shash, data, len, out);
}
static int omap_sham_final_shash(struct ahash_request *req)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int offset = 0;

	/*
	 * If we are running HMAC on limited hardware support, skip
	 * the ipad in the beginning of the buffer if we are going for
	 * software fallback algorithm.
	 */
	if (test_bit(FLAGS_HMAC, &ctx->flags) &&
	    !test_bit(FLAGS_AUTO_XOR, &ctx->dd->flags))
		offset = get_block_size(ctx);

	return omap_sham_shash_digest(tctx->fallback, req->base.flags,
				      ctx->buffer + offset,
				      ctx->bufcnt - offset, req->result);
}
static int omap_sham_final(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= BIT(FLAGS_FINUP);

	if (ctx->flags & BIT(FLAGS_ERROR))
		return 0; /* uncompleted hash is not needed */

	/*
	 * OMAP HW accel works only with buffers >= 9.
	 * HMAC is always >= 9 because ipad == block size.
	 * If buffersize is less than fallback_sz, we use fallback
	 * SW encoding, as using DMA + HW in this case doesn't provide
	 * any benefit.
	 */
	if (!ctx->digcnt && ctx->bufcnt < ctx->dd->fallback_sz)
		return omap_sham_final_shash(req);
	else if (ctx->bufcnt)
		return omap_sham_enqueue(req, OP_FINAL);

	/* copy ready hash (+ finalize hmac) */
	return omap_sham_finish(req);
}
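
/*
 * final() above takes one of three exits: a request with nothing
 * hashed yet and less than fallback_sz buffered goes to the software
 * fallback, leftover buffered data is queued as an OP_FINAL pass
 * through the hardware, and an already-computed digest is simply
 * copied out (plus the HMAC outer hash where needed).
 */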
static int omap_sham_finup(struct ahash_request *req)
{
	struct omap_sham_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= BIT(FLAGS_FINUP);

	err1 = omap_sham_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;
	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = omap_sham_final(req);

	return err1 ?: err2;
}
static int omap_sham_digest(struct ahash_request *req)
{
	return omap_sham_init(req) ?: omap_sham_finup(req);
}
static int omap_sham_setkey(struct crypto_ahash *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct omap_sham_ctx *tctx = crypto_ahash_ctx(tfm);
	struct omap_sham_hmac_ctx *bctx = tctx->base;
	int bs = crypto_shash_blocksize(bctx->shash);
	int ds = crypto_shash_digestsize(bctx->shash);
	struct omap_sham_dev *dd = NULL, *tmp;
	int err, i;

	spin_lock_bh(&sham.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &sham.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}
	spin_unlock_bh(&sham.lock);

	err = crypto_shash_setkey(tctx->fallback, key, keylen);
	if (err)
		return err;

	if (keylen > bs) {
		err = omap_sham_shash_digest(bctx->shash,
				crypto_shash_get_flags(bctx->shash),
				key, keylen, bctx->ipad);
		if (err)
			return err;
		keylen = ds;
	} else {
		memcpy(bctx->ipad, key, keylen);
	}

	memset(bctx->ipad + keylen, 0, bs - keylen);

	if (!test_bit(FLAGS_AUTO_XOR, &dd->flags)) {
		memcpy(bctx->opad, bctx->ipad, bs);

		for (i = 0; i < bs; i++) {
			bctx->ipad[i] ^= HMAC_IPAD_VALUE;
			bctx->opad[i] ^= HMAC_OPAD_VALUE;
		}
	}

	return err;
}
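
/*
 * HMAC setkey above follows RFC 2104: an over-long key is first
 * digested down to ds bytes, then zero-padded to a full block. On
 * hardware without AUTO_XOR the ipad/opad XOR masks are applied here
 * in software; AUTO_XOR parts get the raw padded key and fold it in
 * themselves (see omap_sham_write_ctrl_omap4()).
 */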
static int omap_sham_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("omap-sham: fallback driver '%s' could not be loaded.\n",
		       alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct omap_sham_reqctx) + BUFLEN);

	if (alg_base) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		tctx->flags |= BIT(FLAGS_HMAC);
		bctx->shash = crypto_alloc_shash(alg_base, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(bctx->shash)) {
			pr_err("omap-sham: base driver '%s' could not be loaded.\n",
			       alg_base);
			crypto_free_shash(tctx->fallback);
			return PTR_ERR(bctx->shash);
		}
	}

	return 0;
}
static int omap_sham_cra_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, NULL);
}

static int omap_sham_cra_sha1_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha1");
}

static int omap_sham_cra_sha224_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha224");
}

static int omap_sham_cra_sha256_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha256");
}

static int omap_sham_cra_md5_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "md5");
}

static int omap_sham_cra_sha384_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha384");
}

static int omap_sham_cra_sha512_init(struct crypto_tfm *tfm)
{
	return omap_sham_cra_init_alg(tfm, "sha512");
}
static void omap_sham_cra_exit(struct crypto_tfm *tfm)
{
	struct omap_sham_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;

	if (tctx->flags & BIT(FLAGS_HMAC)) {
		struct omap_sham_hmac_ctx *bctx = tctx->base;
		crypto_free_shash(bctx->shash);
	}
}
static int omap_sham_export(struct ahash_request *req, void *out)
{
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(*rctx) + rctx->bufcnt);

	return 0;
}

static int omap_sham_import(struct ahash_request *req, const void *in)
{
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
	const struct omap_sham_reqctx *ctx_in = in;

	memcpy(rctx, in, sizeof(*rctx) + ctx_in->bufcnt);

	return 0;
}
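
/*
 * Usage sketch (illustrative only, not part of this driver): once the
 * ahash algorithms below are registered, a kernel user reaches them
 * through the generic crypto API, roughly:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, digest, len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *
 * The crypto core prefers this driver over the generic software
 * implementations based on cra_priority (400 here).
 */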
static struct ahash_alg algs_sha1_md5[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "omap-sha1",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "md5",
		.cra_driver_name	= "omap-md5",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha1)",
		.cra_driver_name	= "omap-hmac-sha1",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha1_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= MD5_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(md5)",
		.cra_driver_name	= "omap-hmac-md5",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_md5_init,
		.cra_exit		= omap_sham_cra_exit,
	}
}
};
/* OMAP4 has some algs in addition to what OMAP2 has */
static struct ahash_alg algs_sha224_sha256[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA224_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha224",
		.cra_driver_name	= "omap-sha224",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA224_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "omap-sha256",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA224_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha224)",
		.cra_driver_name	= "omap-hmac-sha224",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA224_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha224_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha256)",
		.cra_driver_name	= "omap-hmac-sha256",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha256_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
};
static struct ahash_alg algs_sha384_sha512[] = {
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA384_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha384",
		.cra_driver_name	= "omap-sha384",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.halg.digestsize	= SHA512_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha512",
		.cra_driver_name	= "omap-sha512",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA384_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha384)",
		.cra_driver_name	= "omap-hmac-sha384",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA384_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha384_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
{
	.init		= omap_sham_init,
	.update		= omap_sham_update,
	.final		= omap_sham_final,
	.finup		= omap_sham_finup,
	.digest		= omap_sham_digest,
	.setkey		= omap_sham_setkey,
	.halg.digestsize	= SHA512_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "hmac(sha512)",
		.cra_driver_name	= "omap-hmac-sha512",
		.cra_priority		= 400,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA512_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct omap_sham_ctx) +
					sizeof(struct omap_sham_hmac_ctx),
		.cra_alignmask		= OMAP_ALIGN_MASK,
		.cra_module		= THIS_MODULE,
		.cra_init		= omap_sham_cra_sha512_init,
		.cra_exit		= omap_sham_cra_exit,
	}
},
};
static void omap_sham_done_task(unsigned long data)
{
	struct omap_sham_dev *dd = (struct omap_sham_dev *)data;
	int err = 0;

	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		omap_sham_handle_queue(dd, NULL);
		return;
	}

	if (test_bit(FLAGS_CPU, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
			goto finish;
	} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
		if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
			omap_sham_update_dma_stop(dd);
		}
		if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags)) {
			/* hash or semi-hash ready */
			clear_bit(FLAGS_DMA_READY, &dd->flags);
			goto finish;
		}
	}

	return;

finish:
	dev_dbg(dd->dev, "update done: err: %d\n", err);
	/* finish current request */
	omap_sham_finish_req(dd->req, err);

	/* If we are not busy, process next req */
	if (!test_bit(FLAGS_BUSY, &dd->flags))
		omap_sham_handle_queue(dd, NULL);
}
static irqreturn_t omap_sham_irq_common(struct omap_sham_dev *dd)
{
	if (!test_bit(FLAGS_BUSY, &dd->flags)) {
		dev_warn(dd->dev, "Interrupt when no active requests.\n");
	} else {
		set_bit(FLAGS_OUTPUT_READY, &dd->flags);
		tasklet_schedule(&dd->done_task);
	}

	return IRQ_HANDLED;
}
static irqreturn_t omap_sham_irq_omap2(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	if (unlikely(test_bit(FLAGS_FINAL, &dd->flags)))
		/* final -> allow device to go to power-saving mode */
		omap_sham_write_mask(dd, SHA_REG_CTRL, 0, SHA_REG_CTRL_LENGTH);

	omap_sham_write_mask(dd, SHA_REG_CTRL, SHA_REG_CTRL_OUTPUT_READY,
			     SHA_REG_CTRL_OUTPUT_READY);
	omap_sham_read(dd, SHA_REG_CTRL);

	return omap_sham_irq_common(dd);
}
static irqreturn_t omap_sham_irq_omap4(int irq, void *dev_id)
{
	struct omap_sham_dev *dd = dev_id;

	omap_sham_write_mask(dd, SHA_REG_MASK(dd), 0, SHA_REG_MASK_IT_EN);

	return omap_sham_irq_common(dd);
}
static struct omap_sham_algs_info omap_sham_algs_info_omap2[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
};
static const struct omap_sham_pdata omap_sham_pdata_omap2 = {
	.algs_info	= omap_sham_algs_info_omap2,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap2),
	.flags		= BIT(FLAGS_BE32_SHA1),
	.digest_size	= SHA1_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap2,
	.write_ctrl	= omap_sham_write_ctrl_omap2,
	.trigger	= omap_sham_trigger_omap2,
	.poll_irq	= omap_sham_poll_irq_omap2,
	.intr_hdlr	= omap_sham_irq_omap2,
	.idigest_ofs	= 0x00,
	.din_ofs	= 0x1c,
	.digcnt_ofs	= 0x14,
	.rev_ofs	= 0x5c,
	.mask_ofs	= 0x60,
	.sysstatus_ofs	= 0x64,
	.major_mask	= 0xf0,
	.major_shift	= 4,
	.minor_mask	= 0x0f,
	.minor_shift	= 0,
};
static struct omap_sham_algs_info omap_sham_algs_info_omap4[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
};
static const struct omap_sham_pdata omap_sham_pdata_omap4 = {
	.algs_info	= omap_sham_algs_info_omap4,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap4),
	.flags		= BIT(FLAGS_AUTO_XOR),
	.digest_size	= SHA256_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap4,
	.write_ctrl	= omap_sham_write_ctrl_omap4,
	.trigger	= omap_sham_trigger_omap4,
	.poll_irq	= omap_sham_poll_irq_omap4,
	.intr_hdlr	= omap_sham_irq_omap4,
	.idigest_ofs	= 0x020,
	.odigest_ofs	= 0x0,
	.din_ofs	= 0x080,
	.digcnt_ofs	= 0x040,
	.rev_ofs	= 0x100,
	.mask_ofs	= 0x110,
	.sysstatus_ofs	= 0x114,
	.mode_ofs	= 0x44,
	.length_ofs	= 0x48,
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};
static struct omap_sham_algs_info omap_sham_algs_info_omap5[] = {
	{
		.algs_list	= algs_sha1_md5,
		.size		= ARRAY_SIZE(algs_sha1_md5),
	},
	{
		.algs_list	= algs_sha224_sha256,
		.size		= ARRAY_SIZE(algs_sha224_sha256),
	},
	{
		.algs_list	= algs_sha384_sha512,
		.size		= ARRAY_SIZE(algs_sha384_sha512),
	},
};
static const struct omap_sham_pdata omap_sham_pdata_omap5 = {
	.algs_info	= omap_sham_algs_info_omap5,
	.algs_info_size	= ARRAY_SIZE(omap_sham_algs_info_omap5),
	.flags		= BIT(FLAGS_AUTO_XOR),
	.digest_size	= SHA512_DIGEST_SIZE,
	.copy_hash	= omap_sham_copy_hash_omap4,
	.write_ctrl	= omap_sham_write_ctrl_omap4,
	.trigger	= omap_sham_trigger_omap4,
	.poll_irq	= omap_sham_poll_irq_omap4,
	.intr_hdlr	= omap_sham_irq_omap4,
	.idigest_ofs	= 0x240,
	.odigest_ofs	= 0x200,
	.din_ofs	= 0x080,
	.digcnt_ofs	= 0x280,
	.rev_ofs	= 0x100,
	.mask_ofs	= 0x110,
	.sysstatus_ofs	= 0x114,
	.mode_ofs	= 0x284,
	.length_ofs	= 0x288,
	.major_mask	= 0x0700,
	.major_shift	= 8,
	.minor_mask	= 0x003f,
	.minor_shift	= 0,
};
#ifdef CONFIG_OF
static const struct of_device_id omap_sham_of_match[] = {
	{
		.compatible	= "ti,omap2-sham",
		.data		= &omap_sham_pdata_omap2,
	},
	{
		.compatible	= "ti,omap3-sham",
		.data		= &omap_sham_pdata_omap2,
	},
	{
		.compatible	= "ti,omap4-sham",
		.data		= &omap_sham_pdata_omap4,
	},
	{
		.compatible	= "ti,omap5-sham",
		.data		= &omap_sham_pdata_omap5,
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap_sham_of_match);
static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	struct device_node *node = dev->of_node;
	int err = 0;

	dd->pdata = of_device_get_match_data(dev);
	if (!dd->pdata) {
		dev_err(dev, "no compatible OF match\n");
		err = -EINVAL;
		goto err;
	}

	err = of_address_to_resource(node, 0, res);
	if (err < 0) {
		dev_err(dev, "can't translate OF node address\n");
		err = -EINVAL;
		goto err;
	}

	dd->irq = irq_of_parse_and_map(node, 0);
	if (!dd->irq) {
		dev_err(dev, "can't translate OF irq value\n");
		err = -EINVAL;
		goto err;
	}

err:
	return err;
}
#else
static const struct of_device_id omap_sham_of_match[] = {
	{},
};

static int omap_sham_get_res_of(struct omap_sham_dev *dd,
		struct device *dev, struct resource *res)
{
	return -EINVAL;
}
#endif
static int omap_sham_get_res_pdev(struct omap_sham_dev *dd,
		struct platform_device *pdev, struct resource *res)
{
	struct device *dev = &pdev->dev;
	struct resource *r;
	int err = 0;

	/* Get the base address */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(dev, "no MEM resource info\n");
		err = -ENODEV;
		goto err;
	}
	memcpy(res, r, sizeof(*res));

	/* Get the IRQ */
	dd->irq = platform_get_irq(pdev, 0);
	if (dd->irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = dd->irq;
		goto err;
	}

	/* Only OMAP2/3 can be non-DT */
	dd->pdata = &omap_sham_pdata_omap2;

err:
	return err;
}
static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dd->fallback_sz);
}

static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t size)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);
	ssize_t status;
	long value;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	/* HW accelerator only works with buffers > 9 */
	if (value < 9) {
		dev_err(dev, "minimum fallback size 9\n");
		return -EINVAL;
	}

	dd->fallback_sz = value;

	return size;
}
static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", dd->queue.max_qlen);
}

static ssize_t queue_len_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t size)
{
	struct omap_sham_dev *dd = dev_get_drvdata(dev);
	ssize_t status;
	long value;
	unsigned long flags;

	status = kstrtol(buf, 0, &value);
	if (status)
		return status;

	if (value < 1)
		return -EINVAL;

	/*
	 * Changing the queue size on the fly is safe: if the size becomes
	 * smaller than the current size, the queue simply stops accepting
	 * new entries until it has shrunk enough.
	 */
	spin_lock_irqsave(&dd->lock, flags);
	dd->queue.max_qlen = value;
	spin_unlock_irqrestore(&dd->lock, flags);

	return size;
}
static DEVICE_ATTR_RW(queue_len);
static DEVICE_ATTR_RW(fallback);

static struct attribute *omap_sham_attrs[] = {
	&dev_attr_queue_len.attr,
	&dev_attr_fallback.attr,
	NULL,
};

static struct attribute_group omap_sham_attr_group = {
	.attrs = omap_sham_attrs,
};
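
/*
 * These attributes appear under the platform device's sysfs directory
 * (the exact path depends on the bus/DT layout), e.g. roughly:
 *
 *	echo 512 > /sys/devices/.../fallback
 *	echo 50 > /sys/devices/.../queue_len
 *
 * to raise the software-fallback threshold or resize the request
 * queue at runtime.
 */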
static int omap_sham_probe(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	struct device *dev = &pdev->dev;
	struct resource res;
	dma_cap_mask_t mask;
	int err, i, j;
	u32 rev;

	dd = devm_kzalloc(dev, sizeof(struct omap_sham_dev), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(dev, "unable to alloc data struct.\n");
		err = -ENOMEM;
		goto data_err;
	}
	dd->dev = dev;
	platform_set_drvdata(pdev, dd);

	INIT_LIST_HEAD(&dd->list);
	spin_lock_init(&dd->lock);
	tasklet_init(&dd->done_task, omap_sham_done_task, (unsigned long)dd);
	crypto_init_queue(&dd->queue, OMAP_SHAM_QUEUE_LENGTH);

	err = (dev->of_node) ? omap_sham_get_res_of(dd, dev, &res) :
			       omap_sham_get_res_pdev(dd, pdev, &res);
	if (err)
		goto data_err;

	dd->io_base = devm_ioremap_resource(dev, &res);
	if (IS_ERR(dd->io_base)) {
		err = PTR_ERR(dd->io_base);
		goto data_err;
	}
	dd->phys_base = res.start;

	err = devm_request_irq(dev, dd->irq, dd->pdata->intr_hdlr,
			       IRQF_TRIGGER_NONE, dev_name(dev), dd);
	if (err) {
		dev_err(dev, "unable to request irq %d, err = %d\n",
			dd->irq, err);
		goto data_err;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	dd->dma_lch = dma_request_chan(dev, "rx");
	if (IS_ERR(dd->dma_lch)) {
		err = PTR_ERR(dd->dma_lch);
		if (err == -EPROBE_DEFER)
			goto data_err;

		dd->polling_mode = 1;
		dev_dbg(dev, "using polling mode instead of dma\n");
	}

	dd->flags |= dd->pdata->flags;

	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);

	dd->fallback_sz = OMAP_SHA_DMA_THRESHOLD;

	pm_runtime_enable(dev);
	pm_runtime_irq_safe(dev);

	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		dev_err(dev, "failed to get sync: %d\n", err);
		goto err_pm;
	}

	rev = omap_sham_read(dd, SHA_REG_REV(dd));
	pm_runtime_put_sync(&pdev->dev);

	dev_info(dev, "hw accel on OMAP rev %u.%u\n",
		 (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
		 (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

	spin_lock(&sham.lock);
	list_add_tail(&dd->list, &sham.dev_list);
	spin_unlock(&sham.lock);

	for (i = 0; i < dd->pdata->algs_info_size; i++) {
		for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
			struct ahash_alg *alg;

			alg = &dd->pdata->algs_info[i].algs_list[j];
			alg->export = omap_sham_export;
			alg->import = omap_sham_import;
			alg->halg.statesize = sizeof(struct omap_sham_reqctx) +
					      BUFLEN;
			err = crypto_register_ahash(alg);
			if (err)
				goto err_algs;

			dd->pdata->algs_info[i].registered++;
		}
	}

	err = sysfs_create_group(&dev->kobj, &omap_sham_attr_group);
	if (err) {
		dev_err(dev, "could not create sysfs device attrs\n");
		goto err_algs;
	}

	return 0;

err_algs:
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_ahash(
					&dd->pdata->algs_info[i].algs_list[j]);
err_pm:
	pm_runtime_disable(dev);
	if (!dd->polling_mode)
		dma_release_channel(dd->dma_lch);
data_err:
	dev_err(dev, "initialization failed.\n");

	return err;
}
static int omap_sham_remove(struct platform_device *pdev)
{
	struct omap_sham_dev *dd;
	int i, j;

	dd = platform_get_drvdata(pdev);
	if (!dd)
		return -ENODEV;
	spin_lock(&sham.lock);
	list_del(&dd->list);
	spin_unlock(&sham.lock);
	for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
		for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
			crypto_unregister_ahash(
					&dd->pdata->algs_info[i].algs_list[j]);
	tasklet_kill(&dd->done_task);
	pm_runtime_disable(&pdev->dev);

	if (!dd->polling_mode)
		dma_release_channel(dd->dma_lch);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int omap_sham_suspend(struct device *dev)
{
	pm_runtime_put_sync(dev);
	return 0;
}

static int omap_sham_resume(struct device *dev)
{
	int err = pm_runtime_get_sync(dev);

	if (err < 0) {
		dev_err(dev, "failed to get sync: %d\n", err);
		return err;
	}
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_sham_pm_ops, omap_sham_suspend, omap_sham_resume);
static struct platform_driver omap_sham_driver = {
	.probe	= omap_sham_probe,
	.remove	= omap_sham_remove,
	.driver	= {
		.name	= "omap-sham",
		.pm	= &omap_sham_pm_ops,
		.of_match_table	= omap_sham_of_match,
	},
};

module_platform_driver(omap_sham_driver);
MODULE_DESCRIPTION("OMAP SHA1/MD5 hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");
MODULE_ALIAS("platform:omap-sham");