// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/aes.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define SHA_BUFFER_LEN			PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

#define SAHARA_NAME		"sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)

#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES		0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
#define SAHARA_HDR_PARITY_BIT		(1 << 31)

#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
#define SAHARA_HDR_MDHA_HASH		0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1	0
#define SAHARA_HDR_MDHA_ALG_MD5		1
#define SAHARA_HDR_MDHA_ALG_SHA256	2
#define SAHARA_HDR_MDHA_ALG_SHA224	3
#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
#define SAHARA_HDR_MDHA_INIT		(1 << 5)
#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
#define SAHARA_HDR_MDHA_SSL		(1 << 10)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1

#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define SAHARA_CMD_RESET		(1 << 0)
#define SAHARA_CMD_CLEAR_INT		(1 << 8)
#define SAHARA_CMD_CLEAR_ERR		(1 << 9)
#define SAHARA_CMD_SINGLE_STEP		(1 << 10)
#define SAHARA_CMD_MODE_BATCH		(1 << 16)
#define SAHARA_CMD_MODE_DEBUG		(1 << 18)
#define SAHARA_REG_STATUS	0x10
#define SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define SAHARA_STATE_IDLE	0
#define SAHARA_STATE_BUSY	1
#define SAHARA_STATE_ERR	2
#define SAHARA_STATE_FAULT	3
#define SAHARA_STATE_COMPLETE	4
#define SAHARA_STATE_COMP_FLAG	(1 << 2)
#define SAHARA_STATUS_DAR_FULL	(1 << 3)
#define SAHARA_STATUS_ERROR	(1 << 4)
#define SAHARA_STATUS_SECURE	(1 << 5)
#define SAHARA_STATUS_FAIL	(1 << 6)
#define SAHARA_STATUS_INIT	(1 << 7)
#define SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define SAHARA_ERRSOURCE_CHA	14
#define SAHARA_ERRSOURCE_DMA	15
#define SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR		0x1C
#define SAHARA_REG_IDAR		0x20

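/*
 * In-memory layout of the DMA descriptors and link-list entries the SAHARA
 * unit fetches once a chain is written to the descriptor address register
 * (DAR). All pointer fields (p1/p2/p/next) hold DMA (bus) addresses;
 * descriptors and links are chained through their 'next' fields and are
 * allocated from coherent memory in sahara_probe().
 */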
struct sahara_hw_desc {
        u32 hdr;
        u32 len1;
        u32 p1;
        u32 len2;
        u32 p2;
        u32 next;
};

struct sahara_hw_link {
        u32 len;
        u32 p;
        u32 next;
};

struct sahara_ctx {
        unsigned long flags;

        /* AES-specific context */
        int keylen;
        u8 key[AES_KEYSIZE_128];
        struct crypto_sync_skcipher *fallback;
};

struct sahara_aes_reqctx {
        unsigned long mode;
};

/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
        u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE];
        u8 rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
        u8 context[SHA256_DIGEST_SIZE + 4];
        unsigned int mode;
        unsigned int digest_size;
        unsigned int context_size;
        unsigned int buf_cnt;
        unsigned int sg_in_idx;
        struct scatterlist *in_sg;
        struct scatterlist in_sg_chain[2];
        size_t total;
        unsigned int last;
        unsigned int first;
        unsigned int active;
};

struct sahara_dev {
        struct device *device;
        unsigned int version;
        void __iomem *regs_base;
        struct clk *clk_ipg;
        struct clk *clk_ahb;
        struct mutex queue_mutex;
        struct task_struct *kthread;
        struct completion dma_completion;

        struct sahara_ctx *ctx;
        struct crypto_queue queue;
        unsigned long flags;

        struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC];
        dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC];

        u8 *key_base;
        dma_addr_t key_phys_base;

        u8 *iv_base;
        dma_addr_t iv_phys_base;

        u8 *context_base;
        dma_addr_t context_phys_base;

        struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK];
        dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK];

        size_t total;
        struct scatterlist *in_sg;
        int nb_in_sg;
        struct scatterlist *out_sg;
        int nb_out_sg;

        u32 error;
};

static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
        writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
        return readl(dev->regs_base + reg);
}

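/*
 * Descriptor headers must carry an odd number of set bits (see
 * sahara_sha_init_hdr(), which computes this explicitly with
 * hweight_long()). The base header below already includes
 * SAHARA_HDR_PARITY_BIT, so each single-bit mode flag that is ORed in
 * afterwards toggles the parity bit to preserve odd parity.
 */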
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
        u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
                  SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
                  SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

        if (dev->flags & FLAGS_CBC) {
                hdr |= SAHARA_HDR_SKHA_MODE_CBC;
                hdr ^= SAHARA_HDR_PARITY_BIT;
        }

        if (dev->flags & FLAGS_ENCRYPT) {
                hdr |= SAHARA_HDR_SKHA_OP_ENC;
                hdr ^= SAHARA_HDR_PARITY_BIT;
        }

        return hdr;
}

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
        return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
               SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}

static const char *sahara_err_src[16] = {
        "No error",
        "Header error",
        "Descriptor length error",
        "Descriptor length or pointer error",
        "Link length error",
        "Link pointer error",
        "Input buffer error",
        "Output buffer error",
        "Output buffer starvation",
        "Internal state fault",
        "General descriptor problem",
        "Reserved",
        "Descriptor address error",
        "Link address error",
        "CHA error",
        "DMA error"
};

static const char *sahara_err_dmasize[4] = {
        "Byte transfer",
        "Half-word transfer",
        "Word transfer",
        "Reserved"
};

static const char *sahara_err_dmasrc[8] = {
        "No error",
        "AHB bus error",
        "Internal IP bus error",
        "Parity error",
        "DMA crosses 256 byte boundary",
        "DMA is busy",
        "Reserved",
        "DMA HW error"
};

static const char *sahara_cha_errsrc[12] = {
        "Input buffer non-empty",
        "Illegal address",
        "Illegal mode",
        "Illegal data size",
        "Illegal key size",
        "Write during processing",
        "CTX read during processing",
        "HW error",
        "Input buffer disabled/underflow",
        "Output buffer disabled/overflow",
        "DES key parity error",
        "Reserved"
};

static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };

static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
        u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
        u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

        dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

        dev_err(dev->device, " - %s.\n", sahara_err_src[source]);

        if (source == SAHARA_ERRSOURCE_DMA) {
                if (error & SAHARA_ERRSTATUS_DMA_DIR)
                        dev_err(dev->device, " * DMA read.\n");
                else
                        dev_err(dev->device, " * DMA write.\n");

                dev_err(dev->device, " * %s.\n",
                        sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
                dev_err(dev->device, " * %s.\n",
                        sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
        } else if (source == SAHARA_ERRSOURCE_CHA) {
                dev_err(dev->device, " * %s.\n",
                        sahara_cha_errsrc[chasrc]);
                dev_err(dev->device, " * %s.\n",
                        sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
        }
        dev_err(dev->device, "\n");
}

static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
        u8 state;

        if (!__is_defined(DEBUG))
                return;

        state = SAHARA_STATUS_GET_STATE(status);

        dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
                __func__, status);

        dev_dbg(dev->device, " - State = %d:\n", state);
        if (state & SAHARA_STATE_COMP_FLAG)
                dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");

        dev_dbg(dev->device, " * %s.\n",
                sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

        if (status & SAHARA_STATUS_DAR_FULL)
                dev_dbg(dev->device, " - DAR Full.\n");
        if (status & SAHARA_STATUS_ERROR)
                dev_dbg(dev->device, " - Error.\n");
        if (status & SAHARA_STATUS_SECURE)
                dev_dbg(dev->device, " - Secure.\n");
        if (status & SAHARA_STATUS_FAIL)
                dev_dbg(dev->device, " - Fail.\n");
        if (status & SAHARA_STATUS_RNG_RESEED)
                dev_dbg(dev->device, " - RNG Reseed Request.\n");
        if (status & SAHARA_STATUS_ACTIVE_RNG)
                dev_dbg(dev->device, " - RNG Active.\n");
        if (status & SAHARA_STATUS_ACTIVE_MDHA)
                dev_dbg(dev->device, " - MDHA Active.\n");
        if (status & SAHARA_STATUS_ACTIVE_SKHA)
                dev_dbg(dev->device, " - SKHA Active.\n");

        if (status & SAHARA_STATUS_MODE_BATCH)
                dev_dbg(dev->device, " - Batch Mode.\n");
        else if (status & SAHARA_STATUS_MODE_DEDICATED)
                dev_dbg(dev->device, " - Dedicated Mode.\n");
        else if (status & SAHARA_STATUS_MODE_DEBUG)
                dev_dbg(dev->device, " - Debug Mode.\n");

        dev_dbg(dev->device, " - Internal state = 0x%02x\n",
                SAHARA_STATUS_GET_ISTATE(status));

        dev_dbg(dev->device, "Current DAR: 0x%08x\n",
                sahara_read(dev, SAHARA_REG_CDAR));
        dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
                sahara_read(dev, SAHARA_REG_IDAR));
}

static void sahara_dump_descriptors(struct sahara_dev *dev)
{
        int i;

        if (!__is_defined(DEBUG))
                return;

        for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
                dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
                        i, &dev->hw_phys_desc[i]);
                dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
                dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
                dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
                dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
                dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
                dev_dbg(dev->device, "\tnext = 0x%08x\n",
                        dev->hw_desc[i]->next);
        }
        dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
        int i;

        if (!__is_defined(DEBUG))
                return;

        for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
                dev_dbg(dev->device, "Link (%d) (%pad):\n",
                        i, &dev->hw_phys_link[i]);
                dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
                dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
                dev_dbg(dev->device, "\tnext = 0x%08x\n",
                        dev->hw_link[i]->next);
        }
        dev_dbg(dev->device, "\n");
}

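/*
 * Build the AES descriptor chain for the current request: an optional
 * key/IV descriptor is emitted first whenever a new key (and, for CBC, an
 * IV) must be loaded, followed by a data descriptor whose p1/p2 point to
 * the input and output link lists built from the request scatterlists.
 * Writing the first descriptor's bus address to DAR starts the transfer.
 */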
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
        struct sahara_ctx *ctx = dev->ctx;
        struct scatterlist *sg;
        int ret;
        int i, j;
        int idx = 0;

        /* Copy new key if necessary */
        if (ctx->flags & FLAGS_NEW_KEY) {
                memcpy(dev->key_base, ctx->key, ctx->keylen);
                ctx->flags &= ~FLAGS_NEW_KEY;

                if (dev->flags & FLAGS_CBC) {
                        dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
                        dev->hw_desc[idx]->p1 = dev->iv_phys_base;
                } else {
                        dev->hw_desc[idx]->len1 = 0;
                        dev->hw_desc[idx]->p1 = 0;
                }
                dev->hw_desc[idx]->len2 = ctx->keylen;
                dev->hw_desc[idx]->p2 = dev->key_phys_base;
                dev->hw_desc[idx]->next = dev->hw_phys_desc[1];

                dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

                idx++;
        }

        dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
        if (dev->nb_in_sg < 0) {
                dev_err(dev->device, "Invalid number of src SG entries.\n");
                return dev->nb_in_sg;
        }
        dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
        if (dev->nb_out_sg < 0) {
                dev_err(dev->device, "Invalid number of dst SG entries.\n");
                return dev->nb_out_sg;
        }
        if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
                dev_err(dev->device, "not enough hw links (%d)\n",
                        dev->nb_in_sg + dev->nb_out_sg);
                return -EINVAL;
        }

        ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                         DMA_TO_DEVICE);
        if (ret != dev->nb_in_sg) {
                dev_err(dev->device, "couldn't map in sg\n");
                goto unmap_in;
        }
        ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
                         DMA_FROM_DEVICE);
        if (ret != dev->nb_out_sg) {
                dev_err(dev->device, "couldn't map out sg\n");
                goto unmap_out;
        }

        /* Create input links */
        dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
        sg = dev->in_sg;
        for (i = 0; i < dev->nb_in_sg; i++) {
                dev->hw_link[i]->len = sg->length;
                dev->hw_link[i]->p = sg->dma_address;
                if (i == (dev->nb_in_sg - 1)) {
                        dev->hw_link[i]->next = 0;
                } else {
                        dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
                        sg = sg_next(sg);
                }
        }

        /* Create output links */
        dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
        sg = dev->out_sg;
        for (j = i; j < dev->nb_out_sg + i; j++) {
                dev->hw_link[j]->len = sg->length;
                dev->hw_link[j]->p = sg->dma_address;
                if (j == (dev->nb_out_sg + i - 1)) {
                        dev->hw_link[j]->next = 0;
                } else {
                        dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
                        sg = sg_next(sg);
                }
        }

        /* Fill remaining fields of hw_desc[1] */
        dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
        dev->hw_desc[idx]->len1 = dev->total;
        dev->hw_desc[idx]->len2 = dev->total;
        dev->hw_desc[idx]->next = 0;

        sahara_dump_descriptors(dev);
        sahara_dump_links(dev);

        sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

        return 0;

unmap_out:
        dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
                     DMA_FROM_DEVICE);
unmap_in:
        dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                     DMA_TO_DEVICE);

        return -EINVAL;
}

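/*
 * Runs in the context of the sahara_queue_manage() kthread: program the
 * descriptor chain for one cipher request, then sleep on dma_completion
 * until the IRQ handler signals completion or the timeout expires.
 */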
static int sahara_aes_process(struct ablkcipher_request *req)
{
        struct sahara_dev *dev = dev_ptr;
        struct sahara_ctx *ctx;
        struct sahara_aes_reqctx *rctx;
        int ret;
        unsigned long timeout;

        /* Request is ready to be dispatched by the device */
        dev_dbg(dev->device,
                "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
                req->nbytes, req->src, req->dst);

        /* assign new request to device */
        dev->total = req->nbytes;
        dev->in_sg = req->src;
        dev->out_sg = req->dst;

        rctx = ablkcipher_request_ctx(req);
        ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
        rctx->mode &= FLAGS_MODE_MASK;
        dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

        if ((dev->flags & FLAGS_CBC) && req->info)
                memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);

        /* assign new context to device */
        dev->ctx = ctx;

        reinit_completion(&dev->dma_completion);

        ret = sahara_hw_descriptor_create(dev);
        if (ret)
                return -EINVAL;

        timeout = wait_for_completion_timeout(&dev->dma_completion,
                                msecs_to_jiffies(SAHARA_TIMEOUT_MS));
        if (!timeout) {
                dev_err(dev->device, "AES timeout\n");
                return -ETIMEDOUT;
        }

        dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
                     DMA_FROM_DEVICE);
        dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                     DMA_TO_DEVICE);

        return 0;
}

static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                             unsigned int keylen)
{
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        int ret;

        ctx->keylen = keylen;

        /* SAHARA only supports 128bit keys */
        if (keylen == AES_KEYSIZE_128) {
                memcpy(ctx->key, key, keylen);
                ctx->flags |= FLAGS_NEW_KEY;
                return 0;
        }

        if (keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
                return -EINVAL;

        /*
         * The requested key size is not supported by HW, do a fallback.
         */
        crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
                                                      CRYPTO_TFM_REQ_MASK);

        ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);

        tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) &
                               CRYPTO_TFM_RES_MASK;
        return ret;
}

static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
        struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        struct sahara_dev *dev = dev_ptr;
        int err = 0;

        dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
                req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

        if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
                dev_err(dev->device,
                        "request size is not exact amount of AES blocks\n");
                return -EINVAL;
        }

        rctx->mode = mode;

        mutex_lock(&dev->queue_mutex);
        err = ablkcipher_enqueue_request(&dev->queue, req);
        mutex_unlock(&dev->queue_mutex);

        wake_up_process(dev->kthread);

        return err;
}

static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

                skcipher_request_set_sync_tfm(subreq, ctx->fallback);
                skcipher_request_set_callback(subreq, req->base.flags,
                                              NULL, NULL);
                skcipher_request_set_crypt(subreq, req->src, req->dst,
                                           req->nbytes, req->info);
                err = crypto_skcipher_encrypt(subreq);
                skcipher_request_zero(subreq);
                return err;
        }

        return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

                skcipher_request_set_sync_tfm(subreq, ctx->fallback);
                skcipher_request_set_callback(subreq, req->base.flags,
                                              NULL, NULL);
                skcipher_request_set_crypt(subreq, req->src, req->dst,
                                           req->nbytes, req->info);
                err = crypto_skcipher_decrypt(subreq);
                skcipher_request_zero(subreq);
                return err;
        }

        return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

                skcipher_request_set_sync_tfm(subreq, ctx->fallback);
                skcipher_request_set_callback(subreq, req->base.flags,
                                              NULL, NULL);
                skcipher_request_set_crypt(subreq, req->src, req->dst,
                                           req->nbytes, req->info);
                err = crypto_skcipher_encrypt(subreq);
                skcipher_request_zero(subreq);
                return err;
        }

        return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
                crypto_ablkcipher_reqtfm(req));
        int err;

        if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
                SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

                skcipher_request_set_sync_tfm(subreq, ctx->fallback);
                skcipher_request_set_callback(subreq, req->base.flags,
                                              NULL, NULL);
                skcipher_request_set_crypt(subreq, req->src, req->dst,
                                           req->nbytes, req->info);
                err = crypto_skcipher_decrypt(subreq);
                skcipher_request_zero(subreq);
                return err;
        }

        return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
        const char *name = crypto_tfm_alg_name(tfm);
        struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

        ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
                                                   CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->fallback)) {
                pr_err("Error allocating fallback algo %s\n", name);
                return PTR_ERR(ctx->fallback);
        }

        tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

        return 0;
}

static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
        struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_sync_skcipher(ctx->fallback);
}

static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
                               struct sahara_sha_reqctx *rctx)
{
        u32 hdr = 0;

        hdr = rctx->mode;

        if (rctx->first) {
                hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
                hdr |= SAHARA_HDR_MDHA_INIT;
        } else {
                hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
        }

        if (rctx->last)
                hdr |= SAHARA_HDR_MDHA_PDATA;

        if (hweight_long(hdr) % 2 == 0)
                hdr |= SAHARA_HDR_PARITY_BIT;

        return hdr;
}

static int sahara_sha_hw_links_create(struct sahara_dev *dev,
                                      struct sahara_sha_reqctx *rctx,
                                      int start)
{
        struct scatterlist *sg;
        unsigned int i;
        int ret;

        dev->in_sg = rctx->in_sg;

        dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
        if (dev->nb_in_sg < 0) {
                dev_err(dev->device, "Invalid number of src SG entries.\n");
                return dev->nb_in_sg;
        }
        if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
                dev_err(dev->device, "not enough hw links (%d)\n",
                        dev->nb_in_sg + dev->nb_out_sg);
                return -EINVAL;
        }

        sg = dev->in_sg;
        ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
        if (!ret)
                return -EFAULT;

        for (i = start; i < dev->nb_in_sg + start; i++) {
                dev->hw_link[i]->len = sg->length;
                dev->hw_link[i]->p = sg->dma_address;
                if (i == (dev->nb_in_sg + start - 1)) {
                        dev->hw_link[i]->next = 0;
                } else {
                        dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
                        sg = sg_next(sg);
                }
        }

        return i;
}

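/*
 * Hash data descriptor aka #8 (initial) or #10 (subsequent, must follow a
 * load descriptor #6)
 *
 * Hashes the input links referenced by p1 and stores the resulting MDHA
 * context (digest plus message length word) at the link referenced by p2.
 *
 * p1: Input data
 * p2: Saved context
 */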
static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
                                                struct sahara_sha_reqctx *rctx,
                                                struct ahash_request *req,
                                                int index)
{
        unsigned int result_len;
        int i = index;

        if (rctx->first)
                /* Create initial descriptor: #8 */
                dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
        else
                /* Create hash descriptor: #10. Must follow #6. */
                dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

        dev->hw_desc[index]->len1 = rctx->total;
        if (dev->hw_desc[index]->len1 == 0) {
                /* if len1 is 0, p1 must be 0, too */
                dev->hw_desc[index]->p1 = 0;
                rctx->sg_in_idx = 0;
        } else {
                /* Create input links */
                dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
                i = sahara_sha_hw_links_create(dev, rctx, index);

                rctx->sg_in_idx = index;
                if (i < 0)
                        return i;
        }

        dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

        /* Save the context for the next operation */
        result_len = rctx->context_size;
        dev->hw_link[i]->p = dev->context_phys_base;

        dev->hw_link[i]->len = result_len;
        dev->hw_desc[index]->len2 = result_len;

        dev->hw_link[i]->next = 0;

        return 0;
}

/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
                                                   struct sahara_sha_reqctx *rctx,
                                                   struct ahash_request *req,
                                                   int index)
{
        dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

        dev->hw_desc[index]->len1 = rctx->context_size;
        dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
        dev->hw_desc[index]->len2 = 0;
        dev->hw_desc[index]->p2 = 0;

        dev->hw_link[index]->len = rctx->context_size;
        dev->hw_link[index]->p = dev->context_phys_base;
        dev->hw_link[index]->next = 0;

        return 0;
}

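/*
 * Truncate the scatterlist so that it covers exactly nbytes: the length of
 * the last needed entry is shortened and the list is terminated there.
 * Returns the number of bytes, if any, the list was too short to cover.
 */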
static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
        if (!sg || !sg->length)
                return nbytes;

        while (nbytes && sg) {
                if (nbytes <= sg->length) {
                        sg->length = nbytes;
                        sg_mark_end(sg);
                        break;
                }
                nbytes -= sg->length;
                sg = sg_next(sg);
        }

        return nbytes;
}

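/*
 * Stage the request data for the hardware. Only the final transfer may be
 * padded by the MDHA unit, so any sub-block remainder of an intermediate
 * update is stashed in rctx->buf and prepended (via rctx->rembuf and a
 * chained scatterlist) to the next request. Returns 0 when everything was
 * buffered and no hardware pass is needed, -EINPROGRESS otherwise.
 */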
static int sahara_sha_prepare_request(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
        unsigned int hash_later;
        unsigned int block_size;
        unsigned int len;

        block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        /* append bytes from previous operation */
        len = rctx->buf_cnt + req->nbytes;

        /* only the last transfer can be padded in hardware */
        if (!rctx->last && (len < block_size)) {
                /* too little data, save it for the next operation */
                scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
                                         0, req->nbytes, 0);
                rctx->buf_cnt += req->nbytes;

                return 0;
        }

        /* add data from previous operation first */
        if (rctx->buf_cnt)
                memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

        /* data must always be a multiple of block_size */
        hash_later = rctx->last ? 0 : len & (block_size - 1);
        if (hash_later) {
                unsigned int offset = req->nbytes - hash_later;
                /* Save remaining bytes for later use */
                scatterwalk_map_and_copy(rctx->buf, req->src, offset,
                                         hash_later, 0);
        }

        /* nbytes should now be multiple of blocksize */
        req->nbytes = req->nbytes - hash_later;

        sahara_walk_and_recalc(req->src, req->nbytes);

        /* have data from previous operation and current */
        if (rctx->buf_cnt && req->nbytes) {
                sg_init_table(rctx->in_sg_chain, 2);
                sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

                sg_chain(rctx->in_sg_chain, 2, req->src);

                rctx->total = req->nbytes + rctx->buf_cnt;
                rctx->in_sg = rctx->in_sg_chain;

                req->src = rctx->in_sg_chain;
        /* only data from previous operation */
        } else if (rctx->buf_cnt) {
                if (req->src)
                        rctx->in_sg = req->src;
                else
                        rctx->in_sg = rctx->in_sg_chain;
                /* buf was copied into rembuf above */
                sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
                rctx->total = rctx->buf_cnt;
        /* no data from previous operation */
        } else {
                rctx->in_sg = req->src;
                rctx->total = req->nbytes;
                req->src = rctx->in_sg;
        }

        /* on next call, we only have the remaining data in the buffer */
        rctx->buf_cnt = hash_later;

        return -EINPROGRESS;
}

static int sahara_sha_process(struct ahash_request *req)
{
        struct sahara_dev *dev = dev_ptr;
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
        int ret;
        unsigned long timeout;

        ret = sahara_sha_prepare_request(req);
        if (!ret)
                return ret;

        if (rctx->first) {
                sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
                dev->hw_desc[0]->next = 0;
                rctx->first = 0;
        } else {
                memcpy(dev->context_base, rctx->context, rctx->context_size);

                sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
                dev->hw_desc[0]->next = dev->hw_phys_desc[1];
                sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
                dev->hw_desc[1]->next = 0;
        }

        sahara_dump_descriptors(dev);
        sahara_dump_links(dev);

        reinit_completion(&dev->dma_completion);

        sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

        timeout = wait_for_completion_timeout(&dev->dma_completion,
                                msecs_to_jiffies(SAHARA_TIMEOUT_MS));
        if (!timeout) {
                dev_err(dev->device, "SHA timeout\n");
                return -ETIMEDOUT;
        }

        if (rctx->sg_in_idx)
                dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
                             DMA_TO_DEVICE);

        memcpy(rctx->context, dev->context_base, rctx->context_size);

        if (req->result)
                memcpy(req->result, rctx->context, rctx->digest_size);

        return 0;
}

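/*
 * Single worker kthread: dequeues one request at a time (the hardware can
 * only process one), completes any backlogged request with -EINPROGRESS,
 * dispatches AES and SHA requests to their process functions and sleeps
 * when the queue is empty.
 */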
static int sahara_queue_manage(void *data)
{
        struct sahara_dev *dev = (struct sahara_dev *)data;
        struct crypto_async_request *async_req;
        struct crypto_async_request *backlog;
        int ret = 0;

        do {
                __set_current_state(TASK_INTERRUPTIBLE);

                mutex_lock(&dev->queue_mutex);
                backlog = crypto_get_backlog(&dev->queue);
                async_req = crypto_dequeue_request(&dev->queue);
                mutex_unlock(&dev->queue_mutex);

                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);

                if (async_req) {
                        if (crypto_tfm_alg_type(async_req->tfm) ==
                            CRYPTO_ALG_TYPE_AHASH) {
                                struct ahash_request *req =
                                        ahash_request_cast(async_req);

                                ret = sahara_sha_process(req);
                        } else {
                                struct ablkcipher_request *req =
                                        ablkcipher_request_cast(async_req);

                                ret = sahara_aes_process(req);
                        }

                        async_req->complete(async_req, ret);

                        continue;
                }

                schedule();
        } while (!kthread_should_stop());

        return 0;
}

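/*
 * Queue a hash request for the worker thread. An empty non-final update is
 * a no-op; a final request is always queued, even with no data, so that
 * the hardware can apply padding and produce the digest.
 */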
static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
        struct sahara_dev *dev = dev_ptr;
        int ret;

        if (!req->nbytes && !last)
                return 0;

        rctx->last = last;

        if (!rctx->active) {
                rctx->active = 1;
                rctx->first = 1;
        }

        mutex_lock(&dev->queue_mutex);
        ret = crypto_enqueue_request(&dev->queue, &req->base);
        mutex_unlock(&dev->queue_mutex);

        wake_up_process(dev->kthread);

        return ret;
}

static int sahara_sha_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

        memset(rctx, 0, sizeof(*rctx));

        switch (crypto_ahash_digestsize(tfm)) {
        case SHA1_DIGEST_SIZE:
                rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
                rctx->digest_size = SHA1_DIGEST_SIZE;
                break;
        case SHA256_DIGEST_SIZE:
                rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
                rctx->digest_size = SHA256_DIGEST_SIZE;
                break;
        default:
                return -EINVAL;
        }

        rctx->context_size = rctx->digest_size + 4;
        rctx->active = 0;

        return 0;
}

static int sahara_sha_update(struct ahash_request *req)
{
        return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
        req->nbytes = 0;
        return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
        return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
        sahara_sha_init(req);

        return sahara_sha_finup(req);
}

static int sahara_sha_export(struct ahash_request *req, void *out)
{
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

        memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));

        return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

        memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));

        return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct sahara_sha_reqctx) +
                                 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

        return 0;
}

static struct crypto_alg aes_algs[] = {
{
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "sahara-ecb-aes",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct sahara_ctx),
        .cra_alignmask          = 0x0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = sahara_aes_cra_init,
        .cra_exit               = sahara_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .setkey         = sahara_aes_setkey,
                .encrypt        = sahara_aes_ecb_encrypt,
                .decrypt        = sahara_aes_ecb_decrypt,
        }
}, {
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "sahara-cbc-aes",
        .cra_priority           = 300,
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct sahara_ctx),
        .cra_alignmask          = 0x0,
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = sahara_aes_cra_init,
        .cra_exit               = sahara_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = sahara_aes_setkey,
                .encrypt        = sahara_aes_cbc_encrypt,
                .decrypt        = sahara_aes_cbc_decrypt,
        }
}
};

static struct ahash_alg sha_v3_algs[] = {
{
        .init           = sahara_sha_init,
        .update         = sahara_sha_update,
        .final          = sahara_sha_final,
        .finup          = sahara_sha_finup,
        .digest         = sahara_sha_digest,
        .export         = sahara_sha_export,
        .import         = sahara_sha_import,
        .halg.digestsize = SHA1_DIGEST_SIZE,
        .halg.statesize = sizeof(struct sahara_sha_reqctx),
        .halg.base = {
                .cra_name               = "sha1",
                .cra_driver_name        = "sahara-sha1",
                .cra_priority           = 300,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA1_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct sahara_ctx),
                .cra_alignmask          = 0,
                .cra_module             = THIS_MODULE,
                .cra_init               = sahara_sha_cra_init,
        }
},
};

static struct ahash_alg sha_v4_algs[] = {
{
        .init           = sahara_sha_init,
        .update         = sahara_sha_update,
        .final          = sahara_sha_final,
        .finup          = sahara_sha_finup,
        .digest         = sahara_sha_digest,
        .export         = sahara_sha_export,
        .import         = sahara_sha_import,
        .halg.digestsize = SHA256_DIGEST_SIZE,
        .halg.statesize = sizeof(struct sahara_sha_reqctx),
        .halg.base = {
                .cra_name               = "sha256",
                .cra_driver_name        = "sahara-sha256",
                .cra_priority           = 300,
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_NEED_FALLBACK,
                .cra_blocksize          = SHA256_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(struct sahara_ctx),
                .cra_alignmask          = 0,
                .cra_module             = THIS_MODULE,
                .cra_init               = sahara_sha_cra_init,
        }
},
};

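/*
 * Acknowledge and clear the interrupt, then decode the status register: a
 * still-busy state means the IRQ was not for a finished descriptor chain,
 * a complete state clears dev->error, and anything else records -EINVAL
 * after decoding the error register. Finally wake the process function
 * waiting on dma_completion.
 */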
static irqreturn_t sahara_irq_handler(int irq, void *data)
{
        struct sahara_dev *dev = (struct sahara_dev *)data;
        unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
        unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

        sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
                     SAHARA_REG_CMD);

        sahara_decode_status(dev, stat);

        if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
                return IRQ_NONE;
        } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
                dev->error = 0;
        } else {
                sahara_decode_error(dev, err);
                dev->error = -EINVAL;
        }

        complete(&dev->dma_completion);

        return IRQ_HANDLED;
}

static int sahara_register_algs(struct sahara_dev *dev)
{
        int err;
        unsigned int i, j, k, l;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
                err = crypto_register_alg(&aes_algs[i]);
                if (err)
                        goto err_aes_algs;
        }

        for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
                err = crypto_register_ahash(&sha_v3_algs[k]);
                if (err)
                        goto err_sha_v3_algs;
        }

        if (dev->version > SAHARA_VERSION_3)
                for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
                        err = crypto_register_ahash(&sha_v4_algs[l]);
                        if (err)
                                goto err_sha_v4_algs;
                }

        return 0;

err_sha_v4_algs:
        for (j = 0; j < l; j++)
                crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
        for (j = 0; j < k; j++)
                crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
        for (j = 0; j < i; j++)
                crypto_unregister_alg(&aes_algs[j]);

        return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
                crypto_unregister_alg(&aes_algs[i]);

        for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
                crypto_unregister_ahash(&sha_v3_algs[i]);

        if (dev->version > SAHARA_VERSION_3)
                for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
                        crypto_unregister_ahash(&sha_v4_algs[i]);
}

static const struct platform_device_id sahara_platform_ids[] = {
        { .name = "sahara-imx27" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static const struct of_device_id sahara_dt_ids[] = {
        { .compatible = "fsl,imx53-sahara" },
        { .compatible = "fsl,imx27-sahara" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);

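/*
 * Probe flow: map the register window, install the IRQ handler, look up
 * the ipg/ahb clocks, carve out coherent DMA memory for the descriptors,
 * key/IV, MDHA context and link entries, start the queue-manager kthread,
 * check the hardware version against the compatible string and finally
 * reset the unit and register the algorithms.
 */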
static int sahara_probe(struct platform_device *pdev)
{
        struct sahara_dev *dev;
        u32 version;
        int irq;
        int err;
        int i;

        dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return -ENOMEM;

        dev->device = &pdev->dev;
        platform_set_drvdata(pdev, dev);

        /* Get the base address */
        dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(dev->regs_base))
                return PTR_ERR(dev->regs_base);

        /* Get the IRQ */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "failed to get irq resource\n");
                return irq;
        }

        err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
                               0, dev_name(&pdev->dev), dev);
        if (err) {
                dev_err(&pdev->dev, "failed to request irq\n");
                return err;
        }

        /* clocks */
        dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(dev->clk_ipg)) {
                dev_err(&pdev->dev, "Could not get ipg clock\n");
                return PTR_ERR(dev->clk_ipg);
        }

        dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
        if (IS_ERR(dev->clk_ahb)) {
                dev_err(&pdev->dev, "Could not get ahb clock\n");
                return PTR_ERR(dev->clk_ahb);
        }

        /* Allocate HW descriptors */
        dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
                        SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
                        &dev->hw_phys_desc[0], GFP_KERNEL);
        if (!dev->hw_desc[0]) {
                dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
                return -ENOMEM;
        }
        dev->hw_desc[1] = dev->hw_desc[0] + 1;
        dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
                               sizeof(struct sahara_hw_desc);

        /* Allocate space for iv and key */
        dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
                                            &dev->key_phys_base, GFP_KERNEL);
        if (!dev->key_base) {
                dev_err(&pdev->dev, "Could not allocate memory for key\n");
                return -ENOMEM;
        }
        dev->iv_base = dev->key_base + AES_KEYSIZE_128;
        dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

        /* Allocate space for context: largest digest + message length field */
        dev->context_base = dmam_alloc_coherent(&pdev->dev,
                                                SHA256_DIGEST_SIZE + 4,
                                                &dev->context_phys_base,
                                                GFP_KERNEL);
        if (!dev->context_base) {
                dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
                return -ENOMEM;
        }

        /* Allocate space for HW links */
        dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
                        SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
                        &dev->hw_phys_link[0], GFP_KERNEL);
        if (!dev->hw_link[0]) {
                dev_err(&pdev->dev, "Could not allocate hw links\n");
                return -ENOMEM;
        }
        for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
                dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
                                       sizeof(struct sahara_hw_link);
                dev->hw_link[i] = dev->hw_link[i - 1] + 1;
        }

        crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

        mutex_init(&dev->queue_mutex);

        dev_ptr = dev;

        dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
        if (IS_ERR(dev->kthread))
                return PTR_ERR(dev->kthread);

        init_completion(&dev->dma_completion);

        err = clk_prepare_enable(dev->clk_ipg);
        if (err)
                return err;
        err = clk_prepare_enable(dev->clk_ahb);
        if (err)
                goto clk_ipg_disable;

        version = sahara_read(dev, SAHARA_REG_VERSION);
        if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
                if (version != SAHARA_VERSION_3)
                        err = -ENODEV;
        } else if (of_device_is_compatible(pdev->dev.of_node,
                                           "fsl,imx53-sahara")) {
                if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
                        err = -ENODEV;
                version = (version >> 8) & 0xff;
        }
        if (err == -ENODEV) {
                dev_err(&pdev->dev, "SAHARA version %d not supported\n",
                        version);
                goto err_algs;
        }

        dev->version = version;

        sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
                     SAHARA_REG_CMD);
        sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
                          SAHARA_CONTROL_SET_MAXBURST(8) |
                          SAHARA_CONTROL_RNG_AUTORSD |
                          SAHARA_CONTROL_ENABLE_INT,
                     SAHARA_REG_CONTROL);

        err = sahara_register_algs(dev);
        if (err)
                goto err_algs;

        dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

        return 0;

err_algs:
        kthread_stop(dev->kthread);
        dev_ptr = NULL;
        clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
        clk_disable_unprepare(dev->clk_ipg);

        return err;
}

static int sahara_remove(struct platform_device *pdev)
{
        struct sahara_dev *dev = platform_get_drvdata(pdev);

        kthread_stop(dev->kthread);

        sahara_unregister_algs(dev);

        clk_disable_unprepare(dev->clk_ipg);
        clk_disable_unprepare(dev->clk_ahb);

        dev_ptr = NULL;

        return 0;
}

static struct platform_driver sahara_driver = {
        .probe          = sahara_probe,
        .remove         = sahara_remove,
        .driver         = {
                .name   = SAHARA_NAME,
                .of_match_table = sahara_dt_ids,
        },
        .id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");