/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define SHA_BUFFER_LEN		PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)

#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES		0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
#define SAHARA_HDR_PARITY_BIT		(1 << 31)

#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
#define SAHARA_HDR_MDHA_HASH		0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1	0
#define SAHARA_HDR_MDHA_ALG_MD5		1
#define SAHARA_HDR_MDHA_ALG_SHA256	2
#define SAHARA_HDR_MDHA_ALG_SHA224	3
#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
#define SAHARA_HDR_MDHA_INIT		(1 << 5)
#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
#define SAHARA_HDR_MDHA_SSL		(1 << 10)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1

#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define		SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define		SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define		SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define		SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define		SAHARA_CMD_RESET		(1 << 0)
#define		SAHARA_CMD_CLEAR_INT		(1 << 8)
#define		SAHARA_CMD_CLEAR_ERR		(1 << 9)
#define		SAHARA_CMD_SINGLE_STEP		(1 << 10)
#define		SAHARA_CMD_MODE_BATCH		(1 << 16)
#define		SAHARA_CMD_MODE_DEBUG		(1 << 18)
#define SAHARA_REG_STATUS	0x10
#define		SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define			SAHARA_STATE_IDLE	0
#define			SAHARA_STATE_BUSY	1
#define			SAHARA_STATE_ERR	2
#define			SAHARA_STATE_FAULT	3
#define			SAHARA_STATE_COMPLETE	4
#define			SAHARA_STATE_COMP_FLAG	(1 << 2)
#define		SAHARA_STATUS_DAR_FULL		(1 << 3)
#define		SAHARA_STATUS_ERROR		(1 << 4)
#define		SAHARA_STATUS_SECURE		(1 << 5)
#define		SAHARA_STATUS_FAIL		(1 << 6)
#define		SAHARA_STATUS_INIT		(1 << 7)
#define		SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define		SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define		SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define		SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define		SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define		SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define		SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define		SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define		SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define			SAHARA_ERRSOURCE_CHA	14
#define			SAHARA_ERRSOURCE_DMA	15
#define		SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define		SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define		SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define		SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define		SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR		0x1C
#define SAHARA_REG_IDAR		0x20

struct sahara_hw_desc {
	u32		hdr;
	u32		len1;
	dma_addr_t	p1;
	u32		len2;
	dma_addr_t	p2;
	dma_addr_t	next;
};

struct sahara_hw_link {
	u32		len;
	dma_addr_t	p;
	dma_addr_t	next;
};
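
/*
 * Note: the code below chains at most two hw descriptors per operation:
 * hw_desc[0] loads key material (and the IV for CBC) into the SKHA unit
 * and points to hw_desc[1], which describes the actual data pass through
 * two lists of sahara_hw_link entries (p1 = input scatterlist, p2 =
 * output scatterlist).
 */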

struct sahara_ctx {
	unsigned long flags;

	/* AES-specific context */
	int keylen;
	u8 key[AES_KEYSIZE_128];
	struct crypto_ablkcipher *fallback;

	/* SHA-specific context */
	struct crypto_shash *shash_fallback;
};

struct sahara_aes_reqctx {
	unsigned long mode;
};

/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @in_sg_chained: specifies if chained scatterlists are used or not
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			context[SHA256_DIGEST_SIZE + 4];
	struct mutex		mutex;
	unsigned int		mode;
	unsigned int		digest_size;
	unsigned int		context_size;
	unsigned int		buf_cnt;
	unsigned int		sg_in_idx;
	struct scatterlist	*in_sg;
	struct scatterlist	in_sg_chain[2];
	bool			in_sg_chained;
	size_t			total;
	unsigned int		last;
	unsigned int		first;
	unsigned int		active;
};

struct sahara_dev {
	struct device		*device;
	unsigned int		version;
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;
	struct mutex		queue_mutex;
	struct task_struct	*kthread;
	struct completion	dma_completion;

	struct sahara_ctx	*ctx;
	spinlock_t		lock;
	struct crypto_queue	queue;
	unsigned long		flags;

	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	u8			*key_base;
	dma_addr_t		key_phys_base;

	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	u8			*context_base;
	dma_addr_t		context_phys_base;

	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	size_t			total;
	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;

	u32			error;
};

static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}

static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

	if (dev->flags & FLAGS_CBC) {
		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	if (dev->flags & FLAGS_ENCRYPT) {
		hdr |= SAHARA_HDR_SKHA_OP_ENC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	return hdr;
}
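
/*
 * Every descriptor header must have odd parity: SAHARA_HDR_PARITY_BIT is
 * already set in the base header above, so each single mode bit that gets
 * ORed in toggles the parity bit to keep the total number of set bits odd
 * (the SHA path below computes the same property with hweight_long()).
 */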

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
	       SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}

static int sahara_sg_length(struct scatterlist *sg,
			    unsigned int total)
{
	int sg_nb;
	unsigned int len;
	struct scatterlist *sg_list;

	sg_nb = 0;
	sg_list = sg;

	while (total) {
		len = min(sg_list->length, total);

		sg_nb++;
		total -= len;

		sg_list = sg_next(sg_list);
		if (!sg_list)
			total = 0;
	}

	return sg_nb;
}
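
/*
 * sahara_sg_length() returns the number of scatterlist entries needed to
 * cover 'total' bytes of data; newer kernels provide sg_nents_for_len()
 * for the same calculation.
 */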

static char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

static char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

static char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

static char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

static char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };

static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, " - %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, " * DMA read.\n");
		else
			dev_err(dev->device, " * DMA write.\n");

		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, " * %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, " * %s.\n",
			sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}

static char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!IS_ENABLED(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, " - State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, " * %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, " - DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, " - Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, " - Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, " - Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, " - RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, " - RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, " - MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, " - SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, " - Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, " - Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, " - Debug Mode.\n");

	dev_dbg(dev->device, " - Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}

static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
			i, dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
			i, dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;

	/* Copy new key if necessary */
	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dev->key_base, ctx->key, ctx->keylen);
		ctx->flags &= ~FLAGS_NEW_KEY;

		if (dev->flags & FLAGS_CBC) {
			dev->hw_desc[0]->len1 = AES_BLOCK_SIZE;
			dev->hw_desc[0]->p1 = dev->iv_phys_base;
		} else {
			dev->hw_desc[0]->len1 = 0;
			dev->hw_desc[0]->p1 = 0;
		}
		dev->hw_desc[0]->len2 = ctx->keylen;
		dev->hw_desc[0]->p2 = dev->key_phys_base;
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
	}
	dev->hw_desc[0]->hdr = sahara_aes_key_hdr(dev);

	dev->nb_in_sg = sahara_sg_length(dev->in_sg, dev->total);
	dev->nb_out_sg = sahara_sg_length(dev->out_sg, dev->total);
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	/* If mapping the input sg fails, nothing has been mapped yet */
	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		return -EINVAL;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_in;
	}

	/* Create input links */
	dev->hw_desc[1]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[1]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = sg->length;
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[1]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[1]->len1 = dev->total;
	dev->hw_desc[1]->len2 = dev->total;
	dev->hw_desc[1]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return -EINVAL;
}
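
/*
 * Writing the physical address of the first descriptor to the DAR register
 * above hands the whole chain to the accelerator; completion or failure is
 * then signalled asynchronously through sahara_irq_handler(), which fires
 * dev->dma_completion.
 */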

static int sahara_aes_process(struct ablkcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long timeout;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->nbytes, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->nbytes;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->info)
		memcpy(dev->iv_base, req->info, AES_BLOCK_SIZE);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return -EINVAL;

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return 0;
}

static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	ctx->keylen = keylen;

	/* SAHARA only supports 128bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
		ctx->flags |= FLAGS_NEW_KEY;
		return 0;
	}

	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW, do a fallback.
	 */
	ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	ctx->fallback->base.crt_flags |=
		(tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
	if (ret) {
		struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);

		tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm_aux->crt_flags |=
			(ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}
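
/*
 * Only 128-bit keys are handled by the hardware; 192- and 256-bit keys are
 * remembered by the fallback tfm so the encrypt/decrypt entry points below
 * can route such requests to the software implementation.
 */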

static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return err;
}

static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_ablkcipher(name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->fallback);
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

	return 0;
}

static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->fallback)
		crypto_free_ablkcipher(ctx->fallback);
	ctx->fallback = NULL;
}

static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
			       struct sahara_sha_reqctx *rctx)
{
	u32 hdr = 0;

	hdr = rctx->mode;

	if (rctx->first) {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
		hdr |= SAHARA_HDR_MDHA_INIT;
	} else {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
	}

	if (rctx->last)
		hdr |= SAHARA_HDR_MDHA_PDATA;

	if (hweight_long(hdr) % 2 == 0)
		hdr |= SAHARA_HDR_PARITY_BIT;

	return hdr;
}

static int sahara_sha_hw_links_create(struct sahara_dev *dev,
				      struct sahara_sha_reqctx *rctx,
				      int start)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	dev->in_sg = rctx->in_sg;

	dev->nb_in_sg = sahara_sg_length(dev->in_sg, rctx->total);
	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg);
		return -EINVAL;
	}

	if (rctx->in_sg_chained) {
		i = start;
		sg = dev->in_sg;
		while (sg) {
			ret = dma_map_sg(dev->device, sg, 1,
					 DMA_TO_DEVICE);
			if (!ret)
				return -EFAULT;

			dev->hw_link[i]->len = sg->length;
			dev->hw_link[i]->p = sg->dma_address;
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
			i += 1;
		}
		dev->hw_link[i-1]->next = 0;
	} else {
		sg = dev->in_sg;
		ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
				 DMA_TO_DEVICE);
		if (!ret)
			return -EFAULT;

		for (i = start; i < dev->nb_in_sg + start; i++) {
			dev->hw_link[i]->len = sg->length;
			dev->hw_link[i]->p = sg->dma_address;
			if (i == (dev->nb_in_sg + start - 1)) {
				dev->hw_link[i]->next = 0;
			} else {
				dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
				sg = sg_next(sg);
			}
		}
	}

	return i;
}

static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8*/
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}

/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
						   struct sahara_sha_reqctx *rctx,
						   struct ahash_request *req,
						   int index)
{
	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

	dev->hw_desc[index]->len1 = rctx->context_size;
	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
	dev->hw_desc[index]->len2 = 0;
	dev->hw_desc[index]->p2 = 0;

	dev->hw_link[index]->len = rctx->context_size;
	dev->hw_link[index]->p = dev->context_phys_base;
	dev->hw_link[index]->next = 0;

	return 0;
}

static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
	if (!sg || !sg->length)
		return nbytes;

	while (nbytes && sg) {
		if (nbytes <= sg->length) {
			sg->length = nbytes;
			sg_mark_end(sg);
			break;
		}
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return nbytes;
}
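
/*
 * sahara_walk_and_recalc() trims the scatterlist so that it covers exactly
 * 'nbytes' (marking the final entry with sg_mark_end()) and returns the
 * number of bytes that could not be placed on the list.
 */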

static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too little data, save for next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					 hash_later, 0);
	}

	/* nbytes should now be multiple of blocksize */
	req->nbytes = req->nbytes - hash_later;

	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

		scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);

		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;

		rctx->in_sg_chained = true;
		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
		rctx->in_sg_chained = false;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
		rctx->in_sg_chained = false;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}
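
/*
 * Note the inverted return convention above: 0 means the data was merely
 * buffered and no hardware pass is needed yet, while -EINPROGRESS tells the
 * caller to go ahead and build descriptors for rctx->total bytes.
 */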

static void sahara_sha_unmap_sg(struct sahara_dev *dev,
				struct sahara_sha_reqctx *rctx)
{
	struct scatterlist *sg;

	if (rctx->in_sg_chained) {
		sg = dev->in_sg;
		while (sg) {
			dma_unmap_sg(dev->device, sg, 1, DMA_TO_DEVICE);
			sg = sg_next(sg);
		}
	} else {
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			DMA_TO_DEVICE);
	}
}

static int sahara_sha_process(struct ahash_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	int ret;
	unsigned long timeout;

	ret = sahara_sha_prepare_request(req);
	if (!ret)
		return ret;

	if (rctx->first) {
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = 0;
		rctx->first = 0;
	} else {
		memcpy(dev->context_base, rctx->context, rctx->context_size);

		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
		dev->hw_desc[1]->next = 0;
	}

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	reinit_completion(&dev->dma_completion);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "SHA timeout\n");
		return -ETIMEDOUT;
	}

	if (rctx->sg_in_idx)
		sahara_sha_unmap_sg(dev, rctx);

	memcpy(rctx->context, dev->context_base, rctx->context_size);

	if (req->result)
		memcpy(req->result, rctx->context, rctx->digest_size);

	return 0;
}

static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct ablkcipher_request *req =
					ablkcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}
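
/*
 * All requests are serialized through this single kthread because the
 * SAHARA unit can only process one descriptor chain at a time
 * (SAHARA_QUEUE_LENGTH is 1); the thread sleeps until sahara_aes_crypt()
 * or sahara_sha_enqueue() wakes it after queueing a request.
 */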

static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	if (!req->nbytes && !last)
		return 0;

	mutex_lock(&rctx->mutex);
	rctx->last = last;

	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);
	mutex_unlock(&rctx->mutex);

	return ret;
}

static int sahara_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
		rctx->digest_size = SHA1_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
		rctx->digest_size = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	rctx->context_size = rctx->digest_size + 4;
	rctx->active = 0;

	mutex_init(&rctx->mutex);

	return 0;
}

static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
	sahara_sha_init(req);

	return sahara_sha_finup(req);
}

static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct sahara_ctx));
	memcpy(out + sizeof(struct sahara_ctx), rctx,
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct sahara_ctx));
	memcpy(rctx, in + sizeof(struct sahara_ctx),
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->shash_fallback = crypto_alloc_shash(name, 0,
					CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->shash_fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->shash_fallback);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}

static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->shash_fallback);
	ctx->shash_fallback = NULL;
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "sahara-ecb-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_ecb_encrypt,
		.decrypt	= sahara_aes_ecb_decrypt,
	}
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "sahara-cbc-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_cbc_encrypt,
		.decrypt	= sahara_aes_cbc_decrypt,
	}
}
};
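
/*
 * A minimal sketch of how a kernel-side user would reach the
 * "sahara-ecb-aes" implementation through the ablkcipher API once this
 * driver is bound (names and buffers are illustrative, error handling
 * trimmed):
 *
 *	struct crypto_ablkcipher *tfm;
 *
 *	tfm = crypto_alloc_ablkcipher("ecb(aes)", 0, 0);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	...queue ablkcipher_request objects against tfm...
 *	crypto_free_ablkcipher(tfm);
 */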

static struct ahash_alg sha_v3_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sahara-sha1",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
		.cra_exit		= sahara_sha_cra_exit,
	}
},
};

static struct ahash_alg sha_v4_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sahara-sha256",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
		.cra_exit		= sahara_sha_cra_exit,
	}
},
};
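
/*
 * A minimal sketch of the matching ahash usage, assuming the SHA-1
 * implementation above is registered (illustrative only; a real caller
 * must also handle the -EINPROGRESS async completion path):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_crypt(req, sg, digest, nbytes);
 *	crypto_ahash_digest(req);
 *	...
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */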

static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
		return IRQ_NONE;
	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
		dev->error = 0;
	} else {
		sahara_decode_error(dev, err);
		dev->error = -EINVAL;
	}

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}

static int sahara_register_algs(struct sahara_dev *dev)
{
	int err;
	unsigned int i, j, k, l;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		INIT_LIST_HEAD(&aes_algs[i].cra_list);
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
		err = crypto_register_ahash(&sha_v3_algs[k]);
		if (err)
			goto err_sha_v3_algs;
	}

	if (dev->version > SAHARA_VERSION_3)
		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
			err = crypto_register_ahash(&sha_v4_algs[l]);
			if (err)
				goto err_sha_v4_algs;
		}

	return 0;

err_sha_v4_algs:
	for (j = 0; j < l; j++)
		crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
	for (j = 0; j < k; j++)
		crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);

	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
		crypto_unregister_ahash(&sha_v3_algs[i]);

	if (dev->version > SAHARA_VERSION_3)
		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
			crypto_unregister_ahash(&sha_v4_algs[i]);
}

static struct platform_device_id sahara_platform_ids[] = {
	{ .name = "sahara-imx27" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);

static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	struct resource *res;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
	if (dev == NULL) {
		dev_err(&pdev->dev, "unable to alloc data struct.\n");
		return -ENOMEM;
	}

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		return irq;
	}

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return err;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dma_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
			       sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dma_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
				&dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		err = -ENOMEM;
		goto err_desc;
	}
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dma_alloc_coherent(&pdev->dev,
					SHA256_DIGEST_SIZE + 4,
					&dev->context_phys_base, GFP_KERNEL);
	if (!dev->context_base) {
		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
		err = -ENOMEM;
		goto err_key;
	}

	/* Allocate space for HW links */
	dev->hw_link[0] = dma_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		err = -ENOMEM;
		goto err_context;
	}
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
				       sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	spin_lock_init(&dev->lock);
	mutex_init(&dev->queue_mutex);

	dev_ptr = dev;

	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
	if (IS_ERR(dev->kthread)) {
		err = PTR_ERR(dev->kthread);
		goto err_link;
	}

	init_completion(&dev->dma_completion);

	clk_prepare_enable(dev->clk_ipg);
	clk_prepare_enable(dev->clk_ahb);

	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
			"fsl,imx53-sahara")) {
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
			version);
		goto err_algs;
	}

	dev->version = version;

	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	kthread_stop(dev->kthread);
	dev_ptr = NULL;
	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);
err_link:
	dma_free_coherent(&pdev->dev,
			  SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			  dev->hw_link[0], dev->hw_phys_link[0]);
err_context:
	dma_free_coherent(&pdev->dev,
			  SHA256_DIGEST_SIZE + 4,
			  dev->context_base, dev->context_phys_base);
err_key:
	dma_free_coherent(&pdev->dev,
			  2 * AES_KEYSIZE_128,
			  dev->key_base, dev->key_phys_base);
err_desc:
	dma_free_coherent(&pdev->dev,
			  SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			  dev->hw_desc[0], dev->hw_phys_desc[0]);

	return err;
}

static int sahara_remove(struct platform_device *pdev)
{
	struct sahara_dev *dev = platform_get_drvdata(pdev);

	dma_free_coherent(&pdev->dev,
			  SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			  dev->hw_link[0], dev->hw_phys_link[0]);
	dma_free_coherent(&pdev->dev,
			  SHA256_DIGEST_SIZE + 4,
			  dev->context_base, dev->context_phys_base);
	dma_free_coherent(&pdev->dev,
			  2 * AES_KEYSIZE_128,
			  dev->key_base, dev->key_phys_base);
	dma_free_coherent(&pdev->dev,
			  SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			  dev->hw_desc[0], dev->hw_phys_desc[0]);

	kthread_stop(dev->kthread);

	sahara_unregister_algs(dev);

	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);

	dev_ptr = NULL;

	return 0;
}

static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
	.id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");