/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define SHA_BUFFER_LEN		PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)

#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES		0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
#define SAHARA_HDR_PARITY_BIT		(1 << 31)

#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
#define SAHARA_HDR_MDHA_HASH		0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1	0
#define SAHARA_HDR_MDHA_ALG_MD5		1
#define SAHARA_HDR_MDHA_ALG_SHA256	2
#define SAHARA_HDR_MDHA_ALG_SHA224	3
#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
#define SAHARA_HDR_MDHA_INIT		(1 << 5)
#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
#define SAHARA_HDR_MDHA_SSL		(1 << 10)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1

#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define SAHARA_CMD_RESET	(1 << 0)
#define SAHARA_CMD_CLEAR_INT	(1 << 8)
#define SAHARA_CMD_CLEAR_ERR	(1 << 9)
#define SAHARA_CMD_SINGLE_STEP	(1 << 10)
#define SAHARA_CMD_MODE_BATCH	(1 << 16)
#define SAHARA_CMD_MODE_DEBUG	(1 << 18)
#define SAHARA_REG_STATUS	0x10
#define SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define SAHARA_STATE_IDLE	0
#define SAHARA_STATE_BUSY	1
#define SAHARA_STATE_ERR	2
#define SAHARA_STATE_FAULT	3
#define SAHARA_STATE_COMPLETE	4
#define SAHARA_STATE_COMP_FLAG	(1 << 2)
#define SAHARA_STATUS_DAR_FULL	(1 << 3)
#define SAHARA_STATUS_ERROR	(1 << 4)
#define SAHARA_STATUS_SECURE	(1 << 5)
#define SAHARA_STATUS_FAIL	(1 << 6)
#define SAHARA_STATUS_INIT	(1 << 7)
#define SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define SAHARA_ERRSOURCE_CHA	14
#define SAHARA_ERRSOURCE_DMA	15
#define SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR		0x1C
#define SAHARA_REG_IDAR		0x20

struct sahara_hw_desc {
	u32	hdr;
	u32	len1;
	u32	p1;
	u32	len2;
	u32	p2;
	u32	next;
};

struct sahara_hw_link {
	u32	len;
	u32	p;
	u32	next;
};

struct sahara_ctx {
	unsigned long flags;

	/* AES-specific context */
	int keylen;
	u8 key[AES_KEYSIZE_128];
	struct crypto_sync_skcipher *fallback;
};

struct sahara_aes_reqctx {
	unsigned long mode;
};

/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			context[SHA256_DIGEST_SIZE + 4];
	unsigned int		mode;
	unsigned int		digest_size;
	unsigned int		context_size;
	unsigned int		buf_cnt;
	unsigned int		sg_in_idx;
	struct scatterlist	*in_sg;
	struct scatterlist	in_sg_chain[2];
	size_t			total;
	unsigned int		last;
	unsigned int		first;
	unsigned int		active;
};

struct sahara_dev {
	struct device		*device;
	unsigned int		version;
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;
	struct mutex		queue_mutex;
	struct task_struct	*kthread;
	struct completion	dma_completion;

	struct sahara_ctx	*ctx;
	struct crypto_queue	queue;
	unsigned long		flags;

	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	u8			*key_base;
	dma_addr_t		key_phys_base;

	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	u8			*context_base;
	dma_addr_t		context_phys_base;

	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	size_t			total;
	struct scatterlist	*in_sg;
	int			nb_in_sg;
	struct scatterlist	*out_sg;
	int			nb_out_sg;

	u32			error;
};

static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}

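/*
 * Descriptor headers are kept at odd parity: SAHARA_HDR_PARITY_BIT is
 * part of the base header, and each extra mode bit OR'd in below flips
 * the overall parity, so the parity bit is toggled back to restore it.
 * sahara_sha_init_hdr() computes the same property with hweight_long().
 */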
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
			SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

	if (dev->flags & FLAGS_CBC) {
		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	if (dev->flags & FLAGS_ENCRYPT) {
		hdr |= SAHARA_HDR_SKHA_OP_ENC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	return hdr;
}

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
			SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}

static const char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

static const char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

static const char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

static const char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };

static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, " - %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, " * DMA read.\n");
		else
			dev_err(dev->device, " * DMA write.\n");

		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, " * %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, " * %s.\n",
			sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}

static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!IS_ENABLED(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, " - State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, " * %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, " - DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, " - Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, " - Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, " - Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, " - RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, " - RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, " - MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, " - SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, " - Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, " - Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, " - Debug Mode.\n");

	dev_dbg(dev->device, " - Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}

static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
			i, &dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (%pad):\n",
			i, &dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

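/*
 * Build the AES descriptor chain: when a new key (and IV, for CBC) has
 * to be loaded, a key descriptor is emitted first and chained to the
 * data descriptor. The data descriptor's p1/p2 point to lists of
 * hardware links that scatter-gather the source and destination buffers.
 */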
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;
	int idx = 0;

	/* Copy new key if necessary */
	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dev->key_base, ctx->key, ctx->keylen);
		ctx->flags &= ~FLAGS_NEW_KEY;

		if (dev->flags & FLAGS_CBC) {
			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
		} else {
			dev->hw_desc[idx]->len1 = 0;
			dev->hw_desc[idx]->p1 = 0;
		}
		dev->hw_desc[idx]->len2 = ctx->keylen;
		dev->hw_desc[idx]->p2 = dev->key_phys_base;
		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];

		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

		idx++;
	}

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid numbers of src SG.\n");
		return dev->nb_in_sg;
	}
	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
	if (dev->nb_out_sg < 0) {
		dev_err(dev->device, "Invalid numbers of dst SG.\n");
		return dev->nb_out_sg;
	}
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		goto unmap_in;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_out;
	}

	/* Create input links */
	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = sg->length;
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[idx]->len1 = dev->total;
	dev->hw_desc[idx]->len2 = dev->total;
	dev->hw_desc[idx]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_out:
	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_FROM_DEVICE);
unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return -EINVAL;
}

static int sahara_aes_process(struct ablkcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long timeout;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->nbytes, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->nbytes;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->info)
		memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return -EINVAL;

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		DMA_TO_DEVICE);

	return 0;
}

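/*
 * Only 128-bit AES keys are handled by the hardware; 192/256-bit keys
 * are passed on to the software fallback tfm allocated in
 * sahara_aes_cra_init().
 */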
static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	ctx->keylen = keylen;

	/* SAHARA only supports 128bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
		ctx->flags |= FLAGS_NEW_KEY;
		return 0;
	}

	if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW, do a fallback.
	 */
	crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
						      CRYPTO_TFM_REQ_MASK);

	ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);

	tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) &
			       CRYPTO_TFM_RES_MASK;
	return ret;
}

static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return err;
}

static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_encrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_encrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

		skcipher_request_set_sync_tfm(subreq, ctx->fallback);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		err = crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
					       CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->fallback);
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

	return 0;
}

static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(ctx->fallback);
}

static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
			       struct sahara_sha_reqctx *rctx)
{
	u32 hdr = 0;

	hdr = rctx->mode;

	if (rctx->first) {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
		hdr |= SAHARA_HDR_MDHA_INIT;
	} else {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
	}

	if (rctx->last)
		hdr |= SAHARA_HDR_MDHA_PDATA;

	if (hweight_long(hdr) % 2 == 0)
		hdr |= SAHARA_HDR_PARITY_BIT;

	return hdr;
}

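/*
 * Map the request's input scatterlist onto hardware link entries,
 * starting at index 'start'. Returns the index of the first link entry
 * after the chain, or a negative errno if counting or DMA-mapping the
 * scatterlist fails.
 */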
static int sahara_sha_hw_links_create(struct sahara_dev *dev,
				      struct sahara_sha_reqctx *rctx,
				      int start)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	dev->in_sg = rctx->in_sg;

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
	if (dev->nb_in_sg < 0) {
		dev_err(dev->device, "Invalid numbers of src SG.\n");
		return dev->nb_in_sg;
	}
	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	sg = dev->in_sg;
	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
	if (!ret)
		return -EFAULT;

	for (i = start; i < dev->nb_in_sg + start; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg + start - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	return i;
}

static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8*/
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}

/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
						   struct sahara_sha_reqctx *rctx,
						   struct ahash_request *req,
						   int index)
{
	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

	dev->hw_desc[index]->len1 = rctx->context_size;
	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
	dev->hw_desc[index]->len2 = 0;
	dev->hw_desc[index]->p2 = 0;

	dev->hw_link[index]->len = rctx->context_size;
	dev->hw_link[index]->p = dev->context_phys_base;
	dev->hw_link[index]->next = 0;

	return 0;
}

static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
	if (!sg || !sg->length)
		return nbytes;

	while (nbytes && sg) {
		if (nbytes <= sg->length) {
			sg->length = nbytes;
			sg_mark_end(sg);
			break;
		}
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return nbytes;
}

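/*
 * Hardware padding is only possible on the final transfer, so every
 * intermediate update must be a whole number of blocks: short requests
 * are accumulated in rctx->buf, and any tail beyond a block multiple is
 * saved for later and chained in front of the next request's
 * scatterlist via rctx->rembuf.
 */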
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too few data, save for next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					hash_later, 0);
	}

	/* nbytes should now be multiple of blocksize */
	req->nbytes = req->nbytes - hash_later;

	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

		sg_chain(rctx->in_sg_chain, 2, req->src);

		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;

		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}

static int sahara_sha_process(struct ahash_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	int ret;
	unsigned long timeout;

	ret = sahara_sha_prepare_request(req);
	if (!ret)
		return ret;

	if (rctx->first) {
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = 0;
		rctx->first = 0;
	} else {
		memcpy(dev->context_base, rctx->context, rctx->context_size);

		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
		dev->hw_desc[1]->next = 0;
	}

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	reinit_completion(&dev->dma_completion);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "SHA timeout\n");
		return -ETIMEDOUT;
	}

	if (rctx->sg_in_idx)
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			     DMA_TO_DEVICE);

	memcpy(rctx->context, dev->context_base, rctx->context_size);

	if (req->result)
		memcpy(req->result, rctx->context, rctx->digest_size);

	return 0;
}

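/*
 * Dispatcher thread: the engine processes a single request at a time
 * (SAHARA_QUEUE_LENGTH is 1), so one kthread drains the crypto queue,
 * runs each AES or SHA request against the hardware synchronously and
 * reports completion (and backlog admission) back to the crypto API.
 */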
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct ablkcipher_request *req =
					ablkcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	if (!req->nbytes && !last)
		return 0;

	rctx->last = last;

	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return ret;
}

static int sahara_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
		rctx->digest_size = SHA1_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
		rctx->digest_size = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	rctx->context_size = rctx->digest_size + 4;
	rctx->active = 0;

	return 0;
}

static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
	sahara_sha_init(req);

	return sahara_sha_finup(req);
}

static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}

static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "sahara-ecb-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_ecb_encrypt,
		.decrypt	= sahara_aes_ecb_decrypt,
	}
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "sahara-cbc-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_cbc_encrypt,
		.decrypt	= sahara_aes_cbc_decrypt,
	}
}
};

static struct ahash_alg sha_v3_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sahara-sha1",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};

static struct ahash_alg sha_v4_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.statesize		= sizeof(struct sahara_sha_reqctx),
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sahara-sha256",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
	}
},
};

static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
		return IRQ_NONE;
	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
		dev->error = 0;
	} else {
		sahara_decode_error(dev, err);
		dev->error = -EINVAL;
	}

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}

static int sahara_register_algs(struct sahara_dev *dev)
{
	int err;
	unsigned int i, j, k, l;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
		err = crypto_register_ahash(&sha_v3_algs[k]);
		if (err)
			goto err_sha_v3_algs;
	}

	if (dev->version > SAHARA_VERSION_3)
		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
			err = crypto_register_ahash(&sha_v4_algs[l]);
			if (err)
				goto err_sha_v4_algs;
		}

	return 0;

err_sha_v4_algs:
	for (j = 0; j < l; j++)
		crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
	for (j = 0; j < k; j++)
		crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);

	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
		crypto_unregister_ahash(&sha_v3_algs[i]);

	if (dev->version > SAHARA_VERSION_3)
		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
			crypto_unregister_ahash(&sha_v4_algs[i]);
}

static const struct platform_device_id sahara_platform_ids[] = {
	{ .name = "sahara-imx27" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static const struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);

static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	struct resource *res;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		return irq;
	}

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return err;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
				sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
				&dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		return -ENOMEM;
	}
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dmam_alloc_coherent(&pdev->dev,
					SHA256_DIGEST_SIZE + 4,
					&dev->context_phys_base, GFP_KERNEL);
	if (!dev->context_base) {
		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
		return -ENOMEM;
	}

	/* Allocate space for HW links */
	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		return -ENOMEM;
	}
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
					sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	mutex_init(&dev->queue_mutex);

	dev_ptr = dev;

	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
	if (IS_ERR(dev->kthread))
		return PTR_ERR(dev->kthread);

	init_completion(&dev->dma_completion);

	err = clk_prepare_enable(dev->clk_ipg);
	if (err)
		return err;
	err = clk_prepare_enable(dev->clk_ahb);
	if (err)
		goto clk_ipg_disable;

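	/*
	 * The VERSION register layout differs between revisions: on
	 * i.MX27 (SAHARA v3) the version is in the low byte, while on
	 * i.MX53 (SAHARA v4) it sits in bits 15:8.
	 */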
	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
			"fsl,imx53-sahara")) {
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
			version);
		goto err_algs;
	}

	dev->version = version;

	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	kthread_stop(dev->kthread);
	dev_ptr = NULL;
	clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
	clk_disable_unprepare(dev->clk_ipg);

	return err;
}

static int sahara_remove(struct platform_device *pdev)
{
	struct sahara_dev *dev = platform_get_drvdata(pdev);

	kthread_stop(dev->kthread);

	sahara_unregister_algs(dev);

	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);

	dev_ptr = NULL;

	return 0;
}

static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
	.id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");