1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * talitos - Freescale Integrated Security Engine (SEC) device driver
5 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
7 * Scatterlist Crypto API glue code copied from files with the following:
8 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
10 * Crypto algorithm registration code copied from hifn driver:
11 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
12 * All rights reserved.
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/mod_devicetable.h>
18 #include <linux/device.h>
19 #include <linux/interrupt.h>
20 #include <linux/crypto.h>
21 #include <linux/hw_random.h>
22 #include <linux/of_address.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_platform.h>
25 #include <linux/dma-mapping.h>
27 #include <linux/spinlock.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/slab.h>
31 #include <crypto/algapi.h>
32 #include <crypto/aes.h>
33 #include <crypto/des.h>
34 #include <crypto/sha.h>
35 #include <crypto/md5.h>
36 #include <crypto/internal/aead.h>
37 #include <crypto/authenc.h>
38 #include <crypto/skcipher.h>
39 #include <crypto/hash.h>
40 #include <crypto/internal/hash.h>
41 #include <crypto/scatterwalk.h>
/*
 * Fill a talitos h/w descriptor pointer with a DMA address and length.
 * NOTE(review): extract is missing lines (in-line numbering skips 46->48->50);
 * the is_sec1 branch selecting len1 vs len/eptr layout appears elided —
 * code left byte-identical pending the full source.
 */
45 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
46 unsigned int len, bool is_sec1)
48 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
/* SEC1-style 16-bit length field */
50 ptr->len1 = cpu_to_be16(len);
/* SEC2+-style length plus extended-address byte for upper 32 bits */
52 ptr->len = cpu_to_be16(len);
53 ptr->eptr = upper_32_bits(dma_addr);
/*
 * Field-by-field copy of one h/w descriptor pointer into another.
 * NOTE(review): lines elided in this extract (numbering skips 60->62->64);
 * the layout-selecting conditional is presumably missing — verify upstream.
 */
57 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
58 struct talitos_ptr *src_ptr, bool is_sec1)
60 dst_ptr->ptr = src_ptr->ptr;
62 dst_ptr->len1 = src_ptr->len1;
64 dst_ptr->len = src_ptr->len;
65 dst_ptr->eptr = src_ptr->eptr;
/*
 * Read back the length stored in a h/w descriptor pointer (CPU byte order).
 * NOTE(review): bodies of to_talitos_ptr_ext_set()/to_talitos_ptr_ext_or()
 * below are entirely elided in this extract; only their signatures remain.
 */
69 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
73 return be16_to_cpu(ptr->len1);
75 return be16_to_cpu(ptr->len);
/* set the extent/ext byte of a descriptor pointer (body elided) */
78 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
/* OR bits into the extent/ext byte of a descriptor pointer (body elided) */
85 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
92 * map virtual single (contiguous) pointer to h/w descriptor pointer
94 static void __map_single_talitos_ptr(struct device *dev,
95 struct talitos_ptr *ptr,
96 unsigned int len, void *data,
97 enum dma_data_direction dir,
/* DMA-map @data with the given attrs, then record address+len in @ptr */
100 dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
101 struct talitos_private *priv = dev_get_drvdata(dev);
102 bool is_sec1 = has_ftr_sec1(priv);
104 to_talitos_ptr(ptr, dma_addr, len, is_sec1);
/* Convenience wrapper: map with default DMA attributes (0). */
107 static void map_single_talitos_ptr(struct device *dev,
108 struct talitos_ptr *ptr,
109 unsigned int len, void *data,
110 enum dma_data_direction dir)
112 __map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
/*
 * Same as map_single_talitos_ptr() but skips the CPU cache sync —
 * caller is responsible for syncing before the device reads the buffer.
 */
115 static void map_single_talitos_ptr_nosync(struct device *dev,
116 struct talitos_ptr *ptr,
117 unsigned int len, void *data,
118 enum dma_data_direction dir)
120 __map_single_talitos_ptr(dev, ptr, len, data, dir,
121 DMA_ATTR_SKIP_CPU_SYNC);
125 * unmap bus single (contiguous) h/w descriptor pointer
127 static void unmap_single_talitos_ptr(struct device *dev,
128 struct talitos_ptr *ptr,
129 enum dma_data_direction dir)
131 struct talitos_private *priv = dev_get_drvdata(dev);
132 bool is_sec1 = has_ftr_sec1(priv);
/* address and length are recovered from the descriptor pointer itself */
134 dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
135 from_talitos_ptr_len(ptr, is_sec1), dir);
/*
 * Reset one SEC channel and re-arm its config bits.
 * SEC1 resets via CCCR_LO, SEC2+ via CCCR; both poll with a bounded timeout.
 * NOTE(review): the is_sec1 selector and timeout-check lines are elided in
 * this extract (numbering skips) — code left byte-identical.
 */
138 static int reset_channel(struct device *dev, int ch)
140 struct talitos_private *priv = dev_get_drvdata(dev);
141 unsigned int timeout = TALITOS_TIMEOUT;
142 bool is_sec1 = has_ftr_sec1(priv);
/* SEC1 path: self-clearing reset bit in CCCR_LO */
145 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
146 TALITOS1_CCCR_LO_RESET);
148 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
149 TALITOS1_CCCR_LO_RESET) && --timeout)
/* SEC2+ path: reset bit lives in CCCR */
152 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
153 TALITOS2_CCCR_RESET);
155 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
156 TALITOS2_CCCR_RESET) && --timeout)
161 dev_err(dev, "failed to reset channel %d\n", ch);
165 /* set 36-bit addressing, done writeback enable and done IRQ enable */
166 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
167 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
168 /* enable chaining descriptors */
170 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
173 /* and ICCR writeback, if available */
174 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
175 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
176 TALITOS_CCCR_LO_IWSE)
/*
 * Global software reset of the SEC block via MCR, polled with a timeout,
 * then (SEC2+ path, per upstream) re-enable cache lines via RCA bits.
 * NOTE(review): timeout-expiry check lines are elided in this extract.
 */
181 static int reset_device(struct device *dev)
183 struct talitos_private *priv = dev_get_drvdata(dev);
184 unsigned int timeout = TALITOS_TIMEOUT;
185 bool is_sec1 = has_ftr_sec1(priv);
186 u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
188 setbits32(priv->reg + TALITOS_MCR, mcr);
/* wait for the self-clearing SWR bit to drop */
190 while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
195 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
196 setbits32(priv->reg + TALITOS_MCR, mcr);
200 dev_err(dev, "failed to reset device\n");
208 * Reset and initialize the device
210 static int init_device(struct device *dev)
212 struct talitos_private *priv = dev_get_drvdata(dev);
214 bool is_sec1 = has_ftr_sec1(priv);
218 * errata documentation: warning: certain SEC interrupts
219 * are not fully cleared by writing the MCR:SWR bit,
220 * set bit twice to completely reset
/* first reset pass (see errata note above) */
222 err = reset_device(dev);
/* second reset pass */
226 err = reset_device(dev);
/* reset every channel individually */
231 for (ch = 0; ch < priv->num_channels; ch++) {
232 err = reset_channel(dev, ch);
237 /* enable channel done and error interrupts */
/* SEC1: interrupts are unmasked by CLEARING IMR bits */
239 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
240 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
241 /* disable parity error check in DEU (erroneous? test vect.) */
242 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
/* SEC2+: interrupts are unmasked by SETTING IMR bits */
244 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
245 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
248 /* disable integrity check error interrupts (use writeback instead) */
249 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
250 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
251 TALITOS_MDEUICR_LO_ICE)
257 * talitos_submit - submits a descriptor to the device for processing
258 * @dev: the SEC device to be used
259 * @ch: the SEC device channel to be used
260 * @desc: the descriptor to be processed by the device
261 * @callback: whom to call when processing is complete
262 * @context: a handle for use by caller (optional)
264 * desc must contain valid dma-mapped (bus physical) address pointers.
265 * callback must check err and feedback in descriptor header
266 * for device processing status.
268 int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
269 void (*callback)(struct device *dev,
270 struct talitos_desc *desc,
271 void *context, int error),
274 struct talitos_private *priv = dev_get_drvdata(dev);
275 struct talitos_request *request;
278 bool is_sec1 = has_ftr_sec1(priv);
/* head_lock serializes producers on this channel's s/w fifo */
280 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
/* submit_count doubles as a fifo-full gate: 0 means no slots left */
282 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
283 /* h/w fifo is full */
284 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
288 head = priv->chan[ch].head;
289 request = &priv->chan[ch].fifo[head];
291 /* map descriptor and save caller data */
/* SEC1 variant: h/w consumes the descriptor starting at hdr1 */
293 desc->hdr1 = desc->hdr;
294 request->dma_desc = dma_map_single(dev, &desc->hdr1,
/* SEC2+ variant: map the whole descriptor */
298 request->dma_desc = dma_map_single(dev, desc,
302 request->callback = callback;
303 request->context = context;
305 /* increment fifo head */
/* fifo_len is a power of two, so masking wraps the index */
306 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
309 request->desc = desc;
/* kick the channel: write descriptor bus address to the fetch fifo */
313 out_be32(priv->chan[ch].reg + TALITOS_FF,
314 upper_32_bits(request->dma_desc));
315 out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
316 lower_32_bits(request->dma_desc));
318 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
322 EXPORT_SYMBOL(talitos_submit);
325 * process what was done, notify callback of error if not
/*
 * Drain completed (or errored) requests from a channel's s/w fifo, invoking
 * each saved callback outside the tail lock.  @error is reported to
 * descriptors that do not have their DONE bit set; @reset_ch forces the
 * error on all of them.  NOTE(review): several lines (status assignment,
 * loop braces) are elided in this extract — left byte-identical.
 */
327 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
329 struct talitos_private *priv = dev_get_drvdata(dev);
330 struct talitos_request *request, saved_req;
333 bool is_sec1 = has_ftr_sec1(priv);
335 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
337 tail = priv->chan[ch].tail;
338 while (priv->chan[ch].fifo[tail].desc) {
341 request = &priv->chan[ch].fifo[tail];
343 /* descriptors with their done bits set don't get the error */
346 hdr = request->desc->hdr;
/* chained descriptor: done bit lives in the next descriptor's hdr1 */
347 else if (request->desc->next_desc)
348 hdr = (request->desc + 1)->hdr1;
350 hdr = request->desc->hdr1;
352 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
360 dma_unmap_single(dev, request->dma_desc,
364 /* copy entries so we can call callback outside lock */
365 saved_req.desc = request->desc;
366 saved_req.callback = request->callback;
367 saved_req.context = request->context;
369 /* release request entry in fifo */
371 request->desc = NULL;
373 /* increment fifo tail */
374 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
376 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
/* free a submit slot before the callback can resubmit */
378 atomic_dec(&priv->chan[ch].submit_count);
380 saved_req.callback(dev, saved_req.desc, saved_req.context,
382 /* channel may resume processing in single desc error case */
383 if (error && !reset_ch && status == error)
385 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
386 tail = priv->chan[ch].tail;
389 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
393 * process completed requests for channels that have done status
/*
 * Tasklet body generator for SEC1 done processing: flush each channel whose
 * done bit is in @ch_done_mask, then unmask done interrupts (SEC1 unmasks
 * by CLEARING IMR bits).  The magic constants are SEC1 per-channel done
 * bit positions in ISR.  Comments are kept outside the macro body because
 * the backslash continuations must stay byte-identical.
 */
395 #define DEF_TALITOS1_DONE(name, ch_done_mask) \
396 static void talitos1_done_##name(unsigned long data) \
398 struct device *dev = (struct device *)data; \
399 struct talitos_private *priv = dev_get_drvdata(dev); \
400 unsigned long flags; \
402 if (ch_done_mask & 0x10000000) \
403 flush_channel(dev, 0, 0, 0); \
404 if (ch_done_mask & 0x40000000) \
405 flush_channel(dev, 1, 0, 0); \
406 if (ch_done_mask & 0x00010000) \
407 flush_channel(dev, 2, 0, 0); \
408 if (ch_done_mask & 0x00040000) \
409 flush_channel(dev, 3, 0, 0); \
411 /* At this point, all completed channels have been processed */ \
412 /* Unmask done interrupts for channels completed later on. */ \
413 spin_lock_irqsave(&priv->reg_lock, flags); \
414 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
415 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
416 spin_unlock_irqrestore(&priv->reg_lock, flags); \
419 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
420 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
/*
 * Tasklet body generator for SEC2+ done processing.  Mirrors the SEC1
 * variant above, but SEC2 done bits sit at even positions (1 << 2*ch) and
 * interrupts are unmasked by SETTING IMR bits.  Comments kept outside the
 * macro body so the backslash continuations stay byte-identical.
 */
422 #define DEF_TALITOS2_DONE(name, ch_done_mask) \
423 static void talitos2_done_##name(unsigned long data) \
425 struct device *dev = (struct device *)data; \
426 struct talitos_private *priv = dev_get_drvdata(dev); \
427 unsigned long flags; \
429 if (ch_done_mask & 1) \
430 flush_channel(dev, 0, 0, 0); \
431 if (ch_done_mask & (1 << 2)) \
432 flush_channel(dev, 1, 0, 0); \
433 if (ch_done_mask & (1 << 4)) \
434 flush_channel(dev, 2, 0, 0); \
435 if (ch_done_mask & (1 << 6)) \
436 flush_channel(dev, 3, 0, 0); \
438 /* At this point, all completed channels have been processed */ \
439 /* Unmask done interrupts for channels completed later on. */ \
440 spin_lock_irqsave(&priv->reg_lock, flags); \
441 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
442 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
443 spin_unlock_irqrestore(&priv->reg_lock, flags); \
446 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
447 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
448 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
449 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
452 * locate current (offending) descriptor
/*
 * Walk the channel's s/w fifo looking for the entry whose DMA address (or
 * whose chained next_desc) matches the channel's current descriptor
 * pointer register (CDPR), and return that descriptor's header.  Returns
 * the chained descriptor's hdr when next_desc matched.
 */
454 static u32 current_desc_hdr(struct device *dev, int ch)
456 struct talitos_private *priv = dev_get_drvdata(dev);
/* CDPR/CDPR_LO together form the 64-bit bus address the h/w is on */
460 cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
461 cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
464 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
468 tail = priv->chan[ch].tail;
471 while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
472 priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
473 iter = (iter + 1) & (priv->fifo_len - 1);
/* wrapped back to tail without a match */
475 dev_err(dev, "couldn't locate current descriptor\n");
480 if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
481 return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
483 return priv->chan[ch].fifo[iter].desc->hdr;
487 * user diagnostics; report root cause of error based on execution unit status
/*
 * Dump the interrupt status registers of the execution unit(s) selected by
 * the offending descriptor header (primary EU via SEL0, secondary via
 * SEL1), then dump the channel's descriptor buffer.  NOTE(review): break
 * statements between cases are elided in this extract — upstream each case
 * ends with a break; left byte-identical.
 */
489 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
491 struct talitos_private *priv = dev_get_drvdata(dev);
/* no header supplied: read it back from the channel's descriptor buffer */
495 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
/* primary execution unit */
497 switch (desc_hdr & DESC_HDR_SEL0_MASK) {
498 case DESC_HDR_SEL0_AFEU:
499 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
500 in_be32(priv->reg_afeu + TALITOS_EUISR),
501 in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
503 case DESC_HDR_SEL0_DEU:
504 dev_err(dev, "DEUISR 0x%08x_%08x\n",
505 in_be32(priv->reg_deu + TALITOS_EUISR),
506 in_be32(priv->reg_deu + TALITOS_EUISR_LO));
508 case DESC_HDR_SEL0_MDEUA:
509 case DESC_HDR_SEL0_MDEUB:
510 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
511 in_be32(priv->reg_mdeu + TALITOS_EUISR),
512 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
514 case DESC_HDR_SEL0_RNG:
515 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
516 in_be32(priv->reg_rngu + TALITOS_ISR),
517 in_be32(priv->reg_rngu + TALITOS_ISR_LO));
519 case DESC_HDR_SEL0_PKEU:
520 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
521 in_be32(priv->reg_pkeu + TALITOS_EUISR),
522 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
524 case DESC_HDR_SEL0_AESU:
525 dev_err(dev, "AESUISR 0x%08x_%08x\n",
526 in_be32(priv->reg_aesu + TALITOS_EUISR),
527 in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
529 case DESC_HDR_SEL0_CRCU:
530 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
531 in_be32(priv->reg_crcu + TALITOS_EUISR),
532 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
534 case DESC_HDR_SEL0_KEU:
/* KEU shares the PKEU register block here */
535 dev_err(dev, "KEUISR 0x%08x_%08x\n",
536 in_be32(priv->reg_pkeu + TALITOS_EUISR),
537 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
/* secondary execution unit */
541 switch (desc_hdr & DESC_HDR_SEL1_MASK) {
542 case DESC_HDR_SEL1_MDEUA:
543 case DESC_HDR_SEL1_MDEUB:
544 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
545 in_be32(priv->reg_mdeu + TALITOS_EUISR),
546 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
548 case DESC_HDR_SEL1_CRCU:
549 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
550 in_be32(priv->reg_crcu + TALITOS_EUISR),
551 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
/* dump the 8 dwords of the channel descriptor buffer */
555 for (i = 0; i < 8; i++)
556 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
557 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
558 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
562 * recover from error interrupts
/*
 * Per-channel error triage and recovery.  For each channel flagged in @isr,
 * decode CCPSR_LO status bits into diagnostics, flush the channel's queue
 * with the chosen error code, then either reset the channel (SEC1 / fatal
 * errors) or set CCCR continue to resume (SEC2 only).  Device-level
 * errors at the bottom flush all channels.  NOTE(review): many lines
 * (error assignments, braces, reset_dev handling) are elided in this
 * extract — left byte-identical.
 */
564 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
566 struct talitos_private *priv = dev_get_drvdata(dev);
567 unsigned int timeout = TALITOS_TIMEOUT;
568 int ch, error, reset_dev = 0;
570 bool is_sec1 = has_ftr_sec1(priv);
571 int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
573 for (ch = 0; ch < priv->num_channels; ch++) {
574 /* skip channels without errors */
576 /* bits 29, 31, 17, 19 */
/* SEC1 error-bit position formula for channels 0..3 */
577 if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
/* SEC2+: error bits at odd positions (2*ch + 1) */
580 if (!(isr & (1 << (ch * 2 + 1))))
/* read the channel pointer status for the detailed cause */
586 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
588 if (v_lo & TALITOS_CCPSR_LO_DOF) {
589 dev_err(dev, "double fetch fifo overflow error\n");
593 if (v_lo & TALITOS_CCPSR_LO_SOF) {
594 /* h/w dropped descriptor */
595 dev_err(dev, "single fetch fifo overflow error\n");
598 if (v_lo & TALITOS_CCPSR_LO_MDTE)
599 dev_err(dev, "master data transfer error\n");
600 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
601 dev_err(dev, is_sec1 ? "pointer not complete error\n"
602 : "s/g data length zero error\n");
603 if (v_lo & TALITOS_CCPSR_LO_FPZ)
604 dev_err(dev, is_sec1 ? "parity error\n"
605 : "fetch pointer zero error\n");
606 if (v_lo & TALITOS_CCPSR_LO_IDH)
607 dev_err(dev, "illegal descriptor header error\n");
608 if (v_lo & TALITOS_CCPSR_LO_IEU)
609 dev_err(dev, is_sec1 ? "static assignment error\n"
610 : "invalid exec unit error\n");
611 if (v_lo & TALITOS_CCPSR_LO_EU)
612 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
614 if (v_lo & TALITOS_CCPSR_LO_GB)
615 dev_err(dev, "gather boundary error\n");
616 if (v_lo & TALITOS_CCPSR_LO_GRL)
617 dev_err(dev, "gather return/length error\n");
618 if (v_lo & TALITOS_CCPSR_LO_SB)
619 dev_err(dev, "scatter boundary error\n");
620 if (v_lo & TALITOS_CCPSR_LO_SRL)
621 dev_err(dev, "scatter return/length error\n");
/* hand the decoded error to all pending requests on this channel */
624 flush_channel(dev, ch, error, reset_ch);
627 reset_channel(dev, ch);
/* SEC2 continuation path: set CCCR continue and wait for it to clear */
629 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
631 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
632 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
633 TALITOS2_CCCR_CONT) && --timeout)
636 dev_err(dev, "failed to restart channel %d\n",
/* device-level (non-channel) errors */
642 if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
643 (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
644 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
645 dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
648 dev_err(dev, "done overflow, internal time out, or "
649 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
651 /* purge request queues */
652 for (ch = 0; ch < priv->num_channels; ch++)
653 flush_channel(dev, ch, -EIO, 1);
655 /* reset and reinitialize the device */
/*
 * Hard-IRQ handler generator for SEC1: snapshot and acknowledge ISR under
 * reg_lock, divert to talitos_error() on error bits, otherwise mask done
 * interrupts and defer channel flushing to the tasklet (which unmasks on
 * exit).  Comments kept outside the macro body so the backslash
 * continuations stay byte-identical.
 */
660 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
661 static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
663 struct device *dev = data; \
664 struct talitos_private *priv = dev_get_drvdata(dev); \
666 unsigned long flags; \
668 spin_lock_irqsave(&priv->reg_lock, flags); \
669 isr = in_be32(priv->reg + TALITOS_ISR); \
670 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
671 /* Acknowledge interrupt */ \
672 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
673 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
675 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
676 spin_unlock_irqrestore(&priv->reg_lock, flags); \
677 talitos_error(dev, isr & ch_err_mask, isr_lo); \
680 if (likely(isr & ch_done_mask)) { \
681 /* mask further done interrupts. */ \
682 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
683 /* done_task will unmask done interrupts at exit */ \
684 tasklet_schedule(&priv->done_task[tlet]); \
686 spin_unlock_irqrestore(&priv->reg_lock, flags); \
689 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
693 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
/*
 * Hard-IRQ handler generator for SEC2+.  Same flow as the SEC1 variant
 * above except done interrupts are masked by CLEARING IMR bits, and any
 * non-zero isr_lo is treated as an error.  Comments kept outside the
 * macro body so the backslash continuations stay byte-identical.
 */
695 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
696 static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
698 struct device *dev = data; \
699 struct talitos_private *priv = dev_get_drvdata(dev); \
701 unsigned long flags; \
703 spin_lock_irqsave(&priv->reg_lock, flags); \
704 isr = in_be32(priv->reg + TALITOS_ISR); \
705 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
706 /* Acknowledge interrupt */ \
707 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
708 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
710 if (unlikely(isr & ch_err_mask || isr_lo)) { \
711 spin_unlock_irqrestore(&priv->reg_lock, flags); \
712 talitos_error(dev, isr & ch_err_mask, isr_lo); \
715 if (likely(isr & ch_done_mask)) { \
716 /* mask further done interrupts. */ \
717 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
718 /* done_task will unmask done interrupts at exit */ \
719 tasklet_schedule(&priv->done_task[tlet]); \
721 spin_unlock_irqrestore(&priv->reg_lock, flags); \
724 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
728 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
729 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
731 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
/*
 * hwrng .data_present hook: poll the RNGU output-fifo-level field up to
 * 20 times.  NOTE(review): the wait/udelay and return lines are elided in
 * this extract — left byte-identical.
 */
737 static int talitos_rng_data_present(struct hwrng *rng, int wait)
739 struct device *dev = (struct device *)rng->priv;
740 struct talitos_private *priv = dev_get_drvdata(dev);
744 for (i = 0; i < 20; i++) {
745 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
746 TALITOS_RNGUSR_LO_OFL;
/* hwrng .data_read hook: the second 32-bit read overwrites the first by
 * design — see the comment below about the fifo's 64-bit access rule. */
755 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
757 struct device *dev = (struct device *)rng->priv;
758 struct talitos_private *priv = dev_get_drvdata(dev);
760 /* rng fifo requires 64-bit accesses */
761 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
762 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
/*
 * hwrng .init hook: software-reset the RNGU, wait (bounded) for the
 * reset-done bit, then start generation by touching EUDSR_LO.
 */
767 static int talitos_rng_init(struct hwrng *rng)
769 struct device *dev = (struct device *)rng->priv;
770 struct talitos_private *priv = dev_get_drvdata(dev);
771 unsigned int timeout = TALITOS_TIMEOUT;
773 setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
774 while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
775 & TALITOS_RNGUSR_LO_RD)
779 dev_err(dev, "failed to reset rng hw\n");
783 /* start generating */
784 setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
/*
 * Fill in the hwrng ops and register with the hw_random core; records
 * success in rng_registered so unregister is safe to call unconditionally.
 * Note: the trailing commas on the assignments are comma operators in the
 * upstream source, not a transcription artifact.
 */
789 static int talitos_register_rng(struct device *dev)
791 struct talitos_private *priv = dev_get_drvdata(dev);
794 priv->rng.name = dev_driver_string(dev),
795 priv->rng.init = talitos_rng_init,
796 priv->rng.data_present = talitos_rng_data_present,
797 priv->rng.data_read = talitos_rng_data_read,
798 priv->rng.priv = (unsigned long)dev;
800 err = hwrng_register(&priv->rng);
802 priv->rng_registered = true;
/* Unregister the hwrng only if talitos_register_rng() succeeded. */
807 static void talitos_unregister_rng(struct device *dev)
809 struct talitos_private *priv = dev_get_drvdata(dev);
811 if (!priv->rng_registered)
814 hwrng_unregister(&priv->rng);
815 priv->rng_registered = false;
/* crypto-layer constants and per-tfm / per-request context structures.
 * NOTE(review): the struct talitos_ctx opening line and several members
 * (dma_key, keylen, closing braces) are elided in this extract. */
821 #define TALITOS_CRA_PRIORITY 3000
823 * Defines a priority for doing AEAD with descriptors type
824 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
826 #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
827 #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
828 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
/* per-tfm context (struct header elided above this line in the extract) */
833 __be32 desc_hdr_template;
834 u8 key[TALITOS_MAX_KEY_SIZE];
835 u8 iv[TALITOS_MAX_IV_LENGTH];
/* split key lengths for authenc(hmac, cipher) keys */
838 unsigned int enckeylen;
839 unsigned int authkeylen;
842 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
843 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
/* per-request ahash state: h/w context plus double-buffered partial block */
845 struct talitos_ahash_req_ctx {
846 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
847 unsigned int hw_context_size;
848 u8 buf[2][HASH_MAX_BLOCK_SIZE];
853 unsigned int to_hash_later;
855 struct scatterlist bufsl[2];
856 struct scatterlist *psrc;
/* serialized ahash state for export()/import() */
859 struct talitos_export_state {
860 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
861 u8 buf[HASH_MAX_BLOCK_SIZE];
865 unsigned int to_hash_later;
/*
 * AEAD .setkey: split the authenc() blob into auth and enc halves, store
 * them contiguously in ctx->key, and (re)map the key for DMA.  The local
 * split-key copy is wiped with memzero_explicit() on both paths.
 * NOTE(review): braces and the badkey label/return lines are elided.
 */
869 static int aead_setkey(struct crypto_aead *authenc,
870 const u8 *key, unsigned int keylen)
872 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
873 struct device *dev = ctx->dev;
874 struct crypto_authenc_keys keys;
876 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
879 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
/* drop the previous DMA mapping before overwriting the key */
883 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
885 memcpy(ctx->key, keys.authkey, keys.authkeylen);
886 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
888 ctx->keylen = keys.authkeylen + keys.enckeylen;
889 ctx->enckeylen = keys.enckeylen;
890 ctx->authkeylen = keys.authkeylen;
891 ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
894 memzero_explicit(&keys, sizeof(keys));
/* bad-key path: flag the tfm and wipe the stack copy of the key */
898 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
899 memzero_explicit(&keys, sizeof(keys));
/*
 * AEAD .setkey for 3DES suites: like aead_setkey() but additionally
 * enforces DES3_EDE_KEY_SIZE and runs the key through
 * __des3_verify_key() (weak-key check) before accepting it.
 */
903 static int aead_des3_setkey(struct crypto_aead *authenc,
904 const u8 *key, unsigned int keylen)
906 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
907 struct device *dev = ctx->dev;
908 struct crypto_authenc_keys keys;
912 err = crypto_authenc_extractkeys(&keys, key, keylen);
917 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
920 if (keys.enckeylen != DES3_EDE_KEY_SIZE)
923 flags = crypto_aead_get_flags(authenc);
924 err = __des3_verify_key(&flags, keys.enckey);
/* propagate any weak-key flag __des3_verify_key() set */
926 crypto_aead_set_flags(authenc, flags);
/* unmap the previous key before overwriting it */
931 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
933 memcpy(ctx->key, keys.authkey, keys.authkeylen);
934 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
936 ctx->keylen = keys.authkeylen + keys.enckeylen;
937 ctx->enckeylen = keys.enckeylen;
938 ctx->authkeylen = keys.authkeylen;
939 ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
943 memzero_explicit(&keys, sizeof(keys));
947 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
952 * talitos_edesc - s/w-extended descriptor
953 * @src_nents: number of segments in input scatterlist
954 * @dst_nents: number of segments in output scatterlist
955 * @icv_ool: whether ICV is out-of-line
956 * @iv_dma: dma address of iv for checking continuity and link table
957 * @dma_len: length of dma mapped link_tbl space
958 * @dma_link_tbl: bus physical address of link_tbl/buf
959 * @desc: h/w descriptor
960 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
961 * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
963 * if decrypting (with authcheck), or either one of src_nents or dst_nents
964 * is greater than 1, an integrity check value is concatenated to the end
/* NOTE(review): several members documented above (src_nents, dst_nents,
 * icv_ool, iv_dma, dma_len, the buf[] union arm, closing brace) are elided
 * from the struct body in this extract. */
967 struct talitos_edesc {
973 dma_addr_t dma_link_tbl;
974 struct talitos_desc desc;
/* trailing flexible storage: link table (SEC2) or flat buffer (SEC1) */
976 struct talitos_ptr link_tbl[0];
/*
 * Undo the scatterlist DMA mappings for a request.  On SEC1 with a
 * multi-segment destination, the h/w wrote into the contiguous bounce
 * buffer (edesc->buf), so the result is synced and copied back into the
 * caller's dst scatterlist first.  NOTE(review): the src != dst branch
 * header and closing braces are elided in this extract.
 */
981 static void talitos_sg_unmap(struct device *dev,
982 struct talitos_edesc *edesc,
983 struct scatterlist *src,
984 struct scatterlist *dst,
985 unsigned int len, unsigned int offset)
987 struct talitos_private *priv = dev_get_drvdata(dev);
988 bool is_sec1 = has_ftr_sec1(priv);
989 unsigned int src_nents = edesc->src_nents ? : 1;
990 unsigned int dst_nents = edesc->dst_nents ? : 1;
992 if (is_sec1 && dst && dst_nents > 1) {
993 dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
994 len, DMA_FROM_DEVICE);
995 sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
/* distinct src/dst: unmap each in its own direction */
999 if (src_nents == 1 || !is_sec1)
1000 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
1002 if (dst && (dst_nents == 1 || !is_sec1))
1003 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
/* in-place operation: single bidirectional mapping */
1004 } else if (src_nents == 1 || !is_sec1) {
1005 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
/*
 * Common unmap path for AEAD (ipsec_esp / hsna) requests: release the
 * per-descriptor single mappings, the src/dst scatterlists, and the link
 * table, then for the HSNA (non-IPSEC_ESP) flavor save the last cipher
 * block back into ctx->iv for IV chaining.
 */
1009 static void ipsec_esp_unmap(struct device *dev,
1010 struct talitos_edesc *edesc,
1011 struct aead_request *areq)
1013 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1014 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1015 unsigned int ivsize = crypto_aead_ivsize(aead);
1016 bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
/* IV pointer slot differs between the two descriptor layouts */
1017 struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
1020 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
1022 unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
1024 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
1028 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1031 if (!is_ipsec_esp) {
1032 unsigned int dst_nents = edesc->dst_nents ? : 1;
/* stash last cipher block as the next IV (CBC chaining) */
1034 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1035 areq->assoclen + areq->cryptlen - ivsize);
1040 * ipsec_esp descriptor callbacks
/*
 * Completion callback for AEAD encrypt: unmap everything, copy the
 * generated ICV to the tail of dst when it was produced out-of-line
 * (SEC1 bounce buffer vs SEC2 link-table overflow area), then complete
 * the request.
 */
1042 static void ipsec_esp_encrypt_done(struct device *dev,
1043 struct talitos_desc *desc, void *context,
1046 struct talitos_private *priv = dev_get_drvdata(dev);
1047 bool is_sec1 = has_ftr_sec1(priv);
1048 struct aead_request *areq = context;
1049 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1050 unsigned int authsize = crypto_aead_authsize(authenc);
1051 unsigned int ivsize = crypto_aead_ivsize(authenc);
1052 struct talitos_edesc *edesc;
1053 struct scatterlist *sg;
/* recover the extended descriptor wrapping this h/w descriptor */
1056 edesc = container_of(desc, struct talitos_edesc, desc);
1058 ipsec_esp_unmap(dev, edesc, areq);
1060 /* copy the generated ICV to dst */
1061 if (edesc->icv_ool) {
/* SEC1: ICV sits in the bounce buffer after assoc+payload */
1063 icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
/* SEC2: ICV sits after the link-table entries */
1065 icvdata = &edesc->link_tbl[edesc->src_nents +
1066 edesc->dst_nents + 2];
1067 sg = sg_last(areq->dst, edesc->dst_nents);
1068 memcpy((char *)sg_virt(sg) + sg->length - authsize,
1072 dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1076 aead_request_complete(areq, err);
/*
 * Completion callback for AEAD decrypt with software ICV verification:
 * after unmapping, compare the ICV at the tail of dst against the one the
 * h/w deposited (location depends on whether a link table was used), using
 * constant-time crypto_memneq(); mismatch yields -EBADMSG.
 * NOTE(review): free/complete ordering lines are elided in this extract.
 */
1079 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1080 struct talitos_desc *desc,
1081 void *context, int err)
1083 struct aead_request *req = context;
1084 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1085 unsigned int authsize = crypto_aead_authsize(authenc);
1086 struct talitos_edesc *edesc;
1087 struct scatterlist *sg;
1089 struct talitos_private *priv = dev_get_drvdata(dev);
1090 bool is_sec1 = has_ftr_sec1(priv);
1092 edesc = container_of(desc, struct talitos_edesc, desc);
1094 ipsec_esp_unmap(dev, edesc, req);
/* received ICV is at the tail of the destination scatterlist */
1098 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
1099 icv = (char *)sg_virt(sg) + sg->length - authsize;
1101 if (edesc->dma_len) {
1103 oicv = (char *)&edesc->dma_link_tbl +
1104 req->assoclen + req->cryptlen;
1107 &edesc->link_tbl[edesc->src_nents +
1108 edesc->dst_nents + 2];
1110 icv = oicv + authsize;
1112 oicv = (char *)&edesc->link_tbl[0];
/* constant-time compare to avoid leaking ICV bytes via timing */
1114 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1119 aead_request_complete(req, err);
/*
 * Completion callback for AEAD decrypt with hardware ICV checking: the
 * SEC reports the comparison result in hdr_lo's ICCR1 field; anything but
 * PASS is surfaced as an authentication failure.
 */
1122 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1123 struct talitos_desc *desc,
1124 void *context, int err)
1126 struct aead_request *req = context;
1127 struct talitos_edesc *edesc;
1129 edesc = container_of(desc, struct talitos_edesc, desc);
1131 ipsec_esp_unmap(dev, edesc, req);
1133 /* check ICV auth status */
1134 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1135 DESC_HDR_LO_ICCR1_PASS))
1140 aead_request_complete(req, err);
1144 * convert scatterlist to SEC h/w link table format
1145 * stop at cryptlen bytes
/*
 * Emit one link-table entry per (clipped) scatterlist segment, starting
 * @offset bytes in, until @cryptlen bytes are covered; the final entry is
 * tagged with LNKTBL_RETURN.  Returns the number of entries written
 * (per upstream).  NOTE(review): the offset-skip and count/cryptlen
 * bookkeeping lines are elided in this extract.
 */
1147 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1148 unsigned int offset, int cryptlen,
1149 struct talitos_ptr *link_tbl_ptr)
1151 int n_sg = sg_count;
1154 while (cryptlen && sg && n_sg--) {
1155 unsigned int len = sg_dma_len(sg);
/* skip whole segments that fall before the requested offset */
1157 if (offset >= len) {
1167 to_talitos_ptr(link_tbl_ptr + count,
1168 sg_dma_address(sg) + offset, len, 0);
1169 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1178 /* tag end of link table */
1180 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1181 DESC_PTR_LNKTBL_RETURN, 0);
/*
 * Point a h/w descriptor pointer at scatterlist data, with optional extent
 * bytes (@elen, e.g. the HMAC appended after ciphertext).  Single-segment
 * input maps directly; SEC1 multi-segment uses the contiguous bounce
 * buffer; SEC2 multi-segment builds a link table at @tbl_off and marks the
 * pointer with LNKTBL_JUMP.  NOTE(review): zero-length early-return and
 * several branch/return lines are elided in this extract.
 */
1186 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1187 unsigned int len, struct talitos_edesc *edesc,
1188 struct talitos_ptr *ptr, int sg_count,
1189 unsigned int offset, int tbl_off, int elen)
1191 struct talitos_private *priv = dev_get_drvdata(dev);
1192 bool is_sec1 = has_ftr_sec1(priv);
/* zero-length case: null pointer entry */
1195 to_talitos_ptr(ptr, 0, 0, is_sec1);
1198 to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1199 if (sg_count == 1) {
1200 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
/* SEC1 multi-segment: data already staged in the bounce buffer */
1204 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1207 sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
1208 &edesc->link_tbl[tbl_off]);
1209 if (sg_count == 1) {
1210 /* Only one segment now, so no link tbl needed*/
1211 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
/* multi-entry table: point at it and flag an indirect (jump) fetch */
1214 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1215 tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1216 to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
/* Convenience wrapper around talitos_sg_map_ext() with no extent bytes. */
1221 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1222 unsigned int len, struct talitos_edesc *edesc,
1223 struct talitos_ptr *ptr, int sg_count,
1224 unsigned int offset, int tbl_off)
1226 return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1231 * fill in and submit ipsec_esp descriptor
/*
 * Fill in and submit a single-pass AEAD (ipsec_esp-style) descriptor.
 * Descriptor pointer usage visible here: ptr[0] = auth key,
 * ptr[1] = associated data, civ/ckey = cipher IV / cipher key (slots 2/3
 * swap depending on descriptor type), ptr[4] = cipher input,
 * ptr[5] = cipher output, ptr[6] = ICV or output IV.
 * @callback runs on completion; returns -EINPROGRESS on successful submit,
 * otherwise unmaps and returns the error.
 * NOTE(review): many lines are elided in this excerpt.
 */
1233 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1234 void (*callback)(struct device *dev,
1235 struct talitos_desc *desc,
1236 void *context, int error))
1238 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1239 unsigned int authsize = crypto_aead_authsize(aead);
1240 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1241 struct device *dev = ctx->dev;
1242 struct talitos_desc *desc = &edesc->desc;
1243 unsigned int cryptlen = areq->cryptlen;
1244 unsigned int ivsize = crypto_aead_ivsize(aead);
1248 bool sync_needed = false;
1249 struct talitos_private *priv = dev_get_drvdata(dev);
1250 bool is_sec1 = has_ftr_sec1(priv);
1251 bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
/* IV and key pointer slots are swapped for HMAC_SNOOP_NO_AFEU descriptors */
1252 struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1253 struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
/* hmac key */
1256 to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1258 sg_count = edesc->src_nents ?: 1;
/* SEC1 cannot scatter/gather here: linearize src into the bounce buffer */
1259 if (is_sec1 && sg_count > 1)
1260 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1261 areq->assoclen + cryptlen)-
1263 sg_count = dma_map_sg(dev, areq->src, sg_count,
1264 (areq->src == areq->dst) ?
1265 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
/* associated data */
1268 ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1269 &desc->ptr[1], sg_count, 0, tbl_off);
/* cipher iv */
1277 to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
/* cipher key (stored contiguously after the auth key) */
1280 to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
1281 ctx->enckeylen, is_sec1);
1285 * map and adjust cipher len to aead request cryptlen.
1286 * extent is bytes of HMAC postpended to ciphertext,
1287 * typically 12 for ipsec
1289 if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
/* cipher in: payload plus optional ICV extent (elen) */
1292 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1293 sg_count, areq->assoclen, tbl_off, elen);
/* cipher out */
1301 if (areq->src != areq->dst) {
1302 sg_count = edesc->dst_nents ? : 1;
1303 if (!is_sec1 || sg_count == 1)
1304 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1307 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1308 sg_count, areq->assoclen, tbl_off);
1311 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
/* ICV lands out-of-line (in the edesc), not in the caller's dst SG */
1316 edesc->icv_ool = true;
1320 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1321 int offset = (edesc->src_nents + edesc->dst_nents + 2) *
1322 sizeof(struct talitos_ptr) + authsize;
1324 /* Add an entry to the link table for ICV data */
1325 to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1326 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
1329 /* icv data follows link tables */
1330 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
1333 dma_addr_t addr = edesc->dma_link_tbl;
1336 addr += areq->assoclen + cryptlen;
1338 addr += sizeof(struct talitos_ptr) * tbl_off;
1340 to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
1342 } else if (!is_ipsec_esp) {
1343 ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
1344 &desc->ptr[6], sg_count, areq->assoclen +
1349 edesc->icv_ool = true;
1352 edesc->icv_ool = false;
1355 edesc->icv_ool = false;
/* for ipsec_esp descriptors, ptr[6] carries the output IV instead */
1360 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
/* flush CPU-built link tables to the device before submitting */
1364 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1368 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1369 if (ret != -EINPROGRESS) {
1370 ipsec_esp_unmap(dev, edesc, areq);
1377 * allocate and map the extended descriptor
/*
 * Allocate and DMA-map an extended descriptor (edesc) sized for this
 * request.  Layout: base edesc, then (optionally) link tables + two ICVs,
 * then for SEC1 ahash a second struct talitos_desc, then the IV copy at
 * the very end.  Returns ERR_PTR() on failure.
 * NOTE(review): some lines are elided in this excerpt (e.g. the kmalloc
 * NULL check body and the sec1 dma_len branch condition).
 */
1379 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1380 struct scatterlist *src,
1381 struct scatterlist *dst,
1383 unsigned int assoclen,
1384 unsigned int cryptlen,
1385 unsigned int authsize,
1386 unsigned int ivsize,
1391 struct talitos_edesc *edesc;
1392 int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1393 dma_addr_t iv_dma = 0;
1394 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1396 struct talitos_private *priv = dev_get_drvdata(dev);
1397 bool is_sec1 = has_ftr_sec1(priv);
/* SEC1 and SEC2+ have different per-pointer length limits */
1398 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1400 if (cryptlen + authsize > max_len) {
1401 dev_err(dev, "length exceeds h/w max limit\n");
1402 return ERR_PTR(-EINVAL);
/* in-place operation: src and dst share the same nents accounting */
1405 if (!dst || dst == src) {
1406 src_len = assoclen + cryptlen + authsize;
1407 src_nents = sg_nents_for_len(src, src_len);
1408 if (src_nents < 0) {
1409 dev_err(dev, "Invalid number of src SG.\n");
1410 return ERR_PTR(-EINVAL);
/* nents == 1 is recorded as 0: "no link table needed" */
1412 src_nents = (src_nents == 1) ? 0 : src_nents;
1413 dst_nents = dst ? src_nents : 0;
1415 } else { /* dst && dst != src*/
/* decrypt reads the ICV from src; encrypt writes it to dst */
1416 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1417 src_nents = sg_nents_for_len(src, src_len);
1418 if (src_nents < 0) {
1419 dev_err(dev, "Invalid number of src SG.\n");
1420 return ERR_PTR(-EINVAL);
1422 src_nents = (src_nents == 1) ? 0 : src_nents;
1423 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1424 dst_nents = sg_nents_for_len(dst, dst_len);
1425 if (dst_nents < 0) {
1426 dev_err(dev, "Invalid number of dst SG.\n");
1427 return ERR_PTR(-EINVAL);
1429 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1433 * allocate space for base edesc plus the link tables,
1434 * allowing for two separate entries for AD and generated ICV (+ 2),
1435 * and space for two sets of ICVs (stashed and generated)
1437 alloc_len = sizeof(struct talitos_edesc);
1438 if (src_nents || dst_nents) {
/* SEC1 bounce buffer: raw data lengths instead of link tables */
1440 dma_len = (src_nents ? src_len : 0) +
1441 (dst_nents ? dst_len : 0);
1443 dma_len = (src_nents + dst_nents + 2) *
1444 sizeof(struct talitos_ptr) + authsize * 2;
1445 alloc_len += dma_len;
1448 alloc_len += icv_stashing ? authsize : 0;
1451 /* if its a ahash, add space for a second desc next to the first one */
1452 if (is_sec1 && !dst)
1453 alloc_len += sizeof(struct talitos_desc);
1454 alloc_len += ivsize;
/* GFP_DMA: descriptor memory must be reachable by the SEC engine */
1456 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1458 return ERR_PTR(-ENOMEM);
/* copy the IV to the tail of the allocation and map it */
1460 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1461 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1463 memset(&edesc->desc, 0, sizeof(edesc->desc));
1465 edesc->src_nents = src_nents;
1466 edesc->dst_nents = dst_nents;
1467 edesc->iv_dma = iv_dma;
1468 edesc->dma_len = dma_len;
1470 void *addr = &edesc->link_tbl[0];
/* skip over the second (chained) descriptor reserved for SEC1 ahash */
1472 if (is_sec1 && !dst)
1473 addr += sizeof(struct talitos_desc);
1474 edesc->dma_link_tbl = dma_map_single(dev, addr,
/*
 * AEAD front end for talitos_edesc_alloc(): derive authsize/ivsize from
 * the transform and forward the request's src/dst/assoclen/cryptlen.
 */
1481 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1482 int icv_stashing, bool encrypt)
1484 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1485 unsigned int authsize = crypto_aead_authsize(authenc);
1486 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1487 unsigned int ivsize = crypto_aead_ivsize(authenc);
1489 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1490 iv, areq->assoclen, areq->cryptlen,
1491 authsize, ivsize, icv_stashing,
1492 areq->base.flags, encrypt);
/*
 * AEAD .encrypt entry point: allocate an edesc, select the encrypt
 * direction in the descriptor header, and hand off to ipsec_esp().
 */
1495 static int aead_encrypt(struct aead_request *req)
1497 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1498 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1499 struct talitos_edesc *edesc;
1501 /* allocate extended descriptor */
1502 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1504 return PTR_ERR(edesc);
/* set encrypt */
1507 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1509 return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
/*
 * AEAD .decrypt entry point.  cryptlen as seen by the engine excludes the
 * trailing ICV (subtracted below).  When the hardware supports inline ICV
 * checking for this layout, let it verify; otherwise stash the incoming
 * ICV so the completion handler can compare it in software.
 */
1512 static int aead_decrypt(struct aead_request *req)
1514 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1515 unsigned int authsize = crypto_aead_authsize(authenc);
1516 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1517 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1518 struct talitos_edesc *edesc;
1519 struct scatterlist *sg;
/* engine operates on ciphertext only; ICV handled separately */
1522 req->cryptlen -= authsize;
1524 /* allocate extended descriptor */
1525 edesc = aead_edesc_alloc(req, req->iv, 1, false);
1527 return PTR_ERR(edesc);
/* h/w ICV check possible only for these feature/layout combinations */
1529 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1530 ((!edesc->src_nents && !edesc->dst_nents) ||
1531 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1533 /* decrypt and check the ICV */
1534 edesc->desc.hdr = ctx->desc_hdr_template |
1535 DESC_HDR_DIR_INBOUND |
1536 DESC_HDR_MODE1_MDEU_CICV;
1538 /* reset integrity check result bits */
1540 return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
1543 /* Have to check the ICV with software */
1544 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1546 /* stash incoming ICV for later cmp with ICV generated by the h/w */
/* stash area lives after the link tables when they exist */
1548 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1549 edesc->dst_nents + 2];
1551 icvdata = &edesc->link_tbl[0];
/* ICV is the tail of the last src segment */
1553 sg = sg_last(req->src, edesc->src_nents ? : 1);
1555 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
1557 return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
/*
 * Cache the cipher key in the tfm context and DMA-map it for the engine.
 * Any previous key mapping is released first (unmap visible below;
 * the guarding condition is elided in this excerpt).
 */
1560 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1561 const u8 *key, unsigned int keylen)
1563 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1564 struct device *dev = ctx->dev;
/* drop the mapping of the old key before overwriting it */
1567 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1569 memcpy(&ctx->key, key, keylen);
1570 ctx->keylen = keylen;
1572 ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
/*
 * DES setkey: reject known-weak DES keys when the caller requested
 * CRYPTO_TFM_REQ_FORBID_WEAK_KEYS, then fall through to the generic
 * setkey.  des_ekey() returns 0 for weak keys.
 */
1577 static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
1578 const u8 *key, unsigned int keylen)
1580 u32 tmp[DES_EXPKEY_WORDS];
1582 if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1583 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
1584 !des_ekey(tmp, key)) {
1585 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1589 return ablkcipher_setkey(cipher, key, keylen);
/*
 * 3DES setkey: validate the key with __des3_verify_key() (rejects e.g.
 * degenerate K1==K2 keys per policy), then defer to the generic setkey.
 */
1592 static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
1593 const u8 *key, unsigned int keylen)
1598 flags = crypto_ablkcipher_get_flags(cipher);
1599 err = __des3_verify_key(&flags, key);
1600 if (unlikely(err)) {
/* propagate the failure reason flags back onto the tfm */
1601 crypto_ablkcipher_set_flags(cipher, flags);
1605 return ablkcipher_setkey(cipher, key, keylen);
/*
 * Undo the DMA mappings set up by common_nonsnoop(): output IV (ptr[5]),
 * the src/dst scatterlists, the input IV (ptr[1]), and (when present —
 * condition elided here) the link-table buffer.
 */
1608 static void common_nonsnoop_unmap(struct device *dev,
1609 struct talitos_edesc *edesc,
1610 struct ablkcipher_request *areq)
1612 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1614 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1615 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1618 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
/*
 * Completion callback for ablkcipher requests: unmap everything, then
 * complete the request toward the crypto API caller with @err.
 * (edesc free, if any, is in the elided lines.)
 */
1622 static void ablkcipher_done(struct device *dev,
1623 struct talitos_desc *desc, void *context,
1626 struct ablkcipher_request *areq = context;
1627 struct talitos_edesc *edesc;
/* the desc is embedded in the edesc; recover the container */
1629 edesc = container_of(desc, struct talitos_edesc, desc);
1631 common_nonsnoop_unmap(dev, edesc, areq);
1635 areq->base.complete(&areq->base, err);
/*
 * Fill in and submit a plain (non-snooping) cipher descriptor.
 * Pointer map: ptr[1] = IV, ptr[2] = key, ptr[3] = cipher in,
 * ptr[4] = cipher out, ptr[5] = IV out; ptr[0] and ptr[6] stay empty.
 * Returns -EINPROGRESS on successful submission.
 */
1638 static int common_nonsnoop(struct talitos_edesc *edesc,
1639 struct ablkcipher_request *areq,
1640 void (*callback) (struct device *dev,
1641 struct talitos_desc *desc,
1642 void *context, int error))
1644 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1645 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1646 struct device *dev = ctx->dev;
1647 struct talitos_desc *desc = &edesc->desc;
1648 unsigned int cryptlen = areq->nbytes;
1649 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1651 bool sync_needed = false;
1652 struct talitos_private *priv = dev_get_drvdata(dev);
1653 bool is_sec1 = has_ftr_sec1(priv);
1655 /* first DWORD empty */
/* cipher iv */
1658 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
/* cipher key */
1661 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1663 sg_count = edesc->src_nents ?: 1;
/* SEC1 has no s/g support: linearize into the bounce buffer */
1664 if (is_sec1 && sg_count > 1)
1665 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1668 sg_count = dma_map_sg(dev, areq->src, sg_count,
1669 (areq->src == areq->dst) ?
1670 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
/* cipher data in */
1674 sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1675 &desc->ptr[3], sg_count, 0, 0);
/* cipher data out */
1680 if (areq->src != areq->dst) {
1681 sg_count = edesc->dst_nents ? : 1;
1682 if (!is_sec1 || sg_count == 1)
1683 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
/* dst link table (if any) starts after the src entries */
1686 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1687 sg_count, 0, (edesc->src_nents + 1));
/* iv out */
1692 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1695 /* last DWORD empty */
/* flush CPU-built link tables before handing off to the engine */
1698 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1699 edesc->dma_len, DMA_BIDIRECTIONAL);
1701 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1702 if (ret != -EINPROGRESS) {
1703 common_nonsnoop_unmap(dev, edesc, areq);
/*
 * ablkcipher front end for talitos_edesc_alloc(): no assoclen, no
 * authsize, no ICV stashing — just payload plus IV.
 */
1709 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1712 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1713 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1714 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1716 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1717 areq->info, 0, areq->nbytes, 0, ivsize, 0,
1718 areq->base.flags, encrypt);
/*
 * ablkcipher .encrypt entry point: allocate an edesc, select encrypt
 * direction, and submit via common_nonsnoop().
 */
1721 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1723 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1724 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1725 struct talitos_edesc *edesc;
1727 /* allocate extended descriptor */
1728 edesc = ablkcipher_edesc_alloc(areq, true);
1730 return PTR_ERR(edesc);
/* set encrypt */
1733 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1735 return common_nonsnoop(edesc, areq, ablkcipher_done);
/*
 * ablkcipher .decrypt entry point: identical to encrypt except the
 * descriptor header requests the inbound (decrypt) direction.
 */
1738 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1740 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1741 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1742 struct talitos_edesc *edesc;
1744 /* allocate extended descriptor */
1745 edesc = ablkcipher_edesc_alloc(areq, false);
1747 return PTR_ERR(edesc);
1749 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1751 return common_nonsnoop(edesc, areq, ablkcipher_done);
/*
 * Undo the mappings set up by common_nonsnoop_hash(), including those of
 * the second chained descriptor (desc2) used on SEC1 when buffered data
 * and new data are hashed in one submission.
 */
1754 static void common_nonsnoop_hash_unmap(struct device *dev,
1755 struct talitos_edesc *edesc,
1756 struct ahash_request *areq)
1758 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1759 struct talitos_private *priv = dev_get_drvdata(dev);
1760 bool is_sec1 = has_ftr_sec1(priv);
1761 struct talitos_desc *desc = &edesc->desc;
/* second descriptor (SEC1 chained case) lives right after the first */
1762 struct talitos_desc *desc2 = desc + 1;
1764 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
/* avoid double-unmap when desc and desc2 share the same out pointer */
1765 if (desc->next_desc &&
1766 desc->ptr[5].ptr != desc2->ptr[5].ptr)
1767 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1769 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1771 /* When using hashctx-in, must unmap it. */
1772 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1773 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1775 else if (desc->next_desc)
1776 unmap_single_talitos_ptr(dev, &desc2->ptr[1],
/* buffered partial-block data mapped into ptr[3] on SEC1 */
1779 if (is_sec1 && req_ctx->nbuf)
1780 unmap_single_talitos_ptr(dev, &desc->ptr[3],
1784 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1787 if (edesc->desc.next_desc)
1788 dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1789 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
/*
 * Hash completion callback: carry any partial block forward for the next
 * update/final/finup, unmap the descriptor, and complete the request.
 */
1792 static void ahash_done(struct device *dev,
1793 struct talitos_desc *desc, void *context,
1796 struct ahash_request *areq = context;
1797 struct talitos_edesc *edesc =
1798 container_of(desc, struct talitos_edesc, desc);
1799 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1801 if (!req_ctx->last && req_ctx->to_hash_later) {
1802 /* Position any partial block for next update/final/finup */
/* double-buffered partial blocks: flip to the other buffer */
1803 req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1804 req_ctx->nbuf = req_ctx->to_hash_later;
1806 common_nonsnoop_hash_unmap(dev, edesc, areq);
1810 areq->base.complete(&areq->base, err);
1814 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1815 * ourself and submit a padded block
/*
 * SEC1 cannot hash a zero-length message: feed it a pre-built padded
 * block (0x80 then zeros — MD5/SHA padding for an empty message) and
 * disable the engine's own padding for this descriptor.
 */
1817 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1818 struct talitos_edesc *edesc,
1819 struct talitos_ptr *ptr)
1821 static u8 padded_hash[64] = {
1822 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1823 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1824 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1825 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1828 pr_err_once("Bug in SEC1, padding ourself\n");
/* we padded by hand, so tell the MDEU not to pad again */
1829 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1830 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1831 (char *)padded_hash, DMA_TO_DEVICE);
/*
 * Fill in and submit a hash descriptor.  Pointer map: ptr[1] = hash
 * context in, ptr[2] = HMAC key, ptr[3] = data in, ptr[5] = digest or
 * context out; ptr[0]/ptr[4]/ptr[6] stay empty.  On SEC1, when previously
 * buffered bytes must be hashed together with new data, a second
 * descriptor (desc2) is chained via desc->next_desc: desc hashes the
 * buffered part, desc2 continues with the scatterlist data.
 * NOTE(review): several lines (conditions/else branches) are elided in
 * this excerpt; the chaining logic depends on them.
 */
1834 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1835 struct ahash_request *areq, unsigned int length,
1836 unsigned int offset,
1837 void (*callback) (struct device *dev,
1838 struct talitos_desc *desc,
1839 void *context, int error))
1841 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1842 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1843 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1844 struct device *dev = ctx->dev;
1845 struct talitos_desc *desc = &edesc->desc;
1847 bool sync_needed = false;
1848 struct talitos_private *priv = dev_get_drvdata(dev);
1849 bool is_sec1 = has_ftr_sec1(priv);
1852 /* first DWORD empty */
1854 /* hash context in */
1855 if (!req_ctx->first || req_ctx->swinit) {
1856 map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1857 req_ctx->hw_context_size,
1858 req_ctx->hw_context,
/* swinit is one-shot: context was seeded by software just once */
1860 req_ctx->swinit = 0;
1862 /* Indicate next op is not the first. */
/* HMAC key (only when this tfm has a key — condition elided) */
1867 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
/* buffered bytes are handled by the first desc, not counted here */
1870 if (is_sec1 && req_ctx->nbuf)
1871 length -= req_ctx->nbuf;
1873 sg_count = edesc->src_nents ?: 1;
/* SEC1: copy s/g data behind the second descriptor in the edesc */
1874 if (is_sec1 && sg_count > 1)
1875 sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
1876 edesc->buf + sizeof(struct talitos_desc),
1877 length, req_ctx->nbuf);
1879 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1884 if (is_sec1 && req_ctx->nbuf) {
/* first desc hashes the previously buffered partial block */
1885 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1886 req_ctx->buf[req_ctx->buf_idx],
1889 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1890 &desc->ptr[3], sg_count, offset, 0);
1895 /* fifth DWORD empty */
1897 /* hash/HMAC out -or- hash context out */
1899 map_single_talitos_ptr(dev, &desc->ptr[5],
1900 crypto_ahash_digestsize(tfm),
1901 areq->result, DMA_FROM_DEVICE);
/* intermediate op: write the running context back instead */
1903 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1904 req_ctx->hw_context_size,
1905 req_ctx->hw_context,
1908 /* last DWORD empty */
/* SEC1 cannot hash an empty input; substitute a pre-padded block */
1910 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1911 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1913 if (is_sec1 && req_ctx->nbuf && length) {
1914 struct talitos_desc *desc2 = desc + 1;
1915 dma_addr_t next_desc;
1917 memset(desc2, 0, sizeof(*desc2));
1918 desc2->hdr = desc->hdr;
/* desc2 continues an already-initialized context */
1919 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1920 desc2->hdr1 = desc2->hdr;
/* first desc: no padding, continue into desc2, no completion irq */
1921 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1922 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1923 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1925 if (desc->ptr[1].ptr)
1926 copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1929 map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1930 req_ctx->hw_context_size,
1931 req_ctx->hw_context,
1933 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
/* desc2 hashes the new scatterlist data */
1934 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1935 &desc2->ptr[3], sg_count, offset, 0);
1938 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
/* NOTE(review): this maps desc->ptr[5], not desc2->ptr[5]; the elided
 * surrounding branch decides which descriptor's out-pointer is meant —
 * verify against the full source. */
1940 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1941 req_ctx->hw_context_size,
1942 req_ctx->hw_context,
1945 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1947 desc->next_desc = cpu_to_be32(next_desc);
1951 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1952 edesc->dma_len, DMA_BIDIRECTIONAL);
1954 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1955 if (ret != -EINPROGRESS) {
1956 common_nonsnoop_hash_unmap(dev, edesc, areq);
/*
 * Hash front end for talitos_edesc_alloc(): hashes have no dst, no IV,
 * no authsize.  On SEC1 (condition elided) nbytes excludes the bytes
 * already buffered in the request context.
 */
1962 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1963 unsigned int nbytes)
1965 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1966 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1967 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1968 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1969 bool is_sec1 = has_ftr_sec1(priv);
1972 nbytes -= req_ctx->nbuf;
1974 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1975 nbytes, 0, 0, 0, areq->base.flags, false);
/*
 * ahash .init: reset the per-request context and pick the h/w context
 * size by digest width (MD5/SHA1/SHA256 vs SHA384/SHA512).  The map/unmap
 * round-trip flushes the CPU view of hw_context so later _nosync mappings
 * see coherent memory.
 */
1978 static int ahash_init(struct ahash_request *areq)
1980 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1981 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1982 struct device *dev = ctx->dev;
1983 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1987 /* Initialize the context */
1988 req_ctx->buf_idx = 0;
1990 req_ctx->first = 1; /* first indicates h/w must init its context */
1991 req_ctx->swinit = 0; /* assume h/w init of context */
1992 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1993 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1994 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1995 req_ctx->hw_context_size = size;
1997 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1999 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2005 * on h/w without explicit sha224 support, we initialize h/w context
2006 * manually with sha224 constants, and tell it to run sha256.
/*
 * SHA-224 init for hardware lacking native sha224: seed the h/w context
 * with the SHA-224 initial hash values in software, zero the 64-bit
 * message counter, and set swinit so the engine runs sha256 over this
 * pre-loaded state instead of re-initializing it.
 */
2008 static int ahash_init_sha224_swinit(struct ahash_request *areq)
2010 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2012 req_ctx->hw_context[0] = SHA224_H0;
2013 req_ctx->hw_context[1] = SHA224_H1;
2014 req_ctx->hw_context[2] = SHA224_H2;
2015 req_ctx->hw_context[3] = SHA224_H3;
2016 req_ctx->hw_context[4] = SHA224_H4;
2017 req_ctx->hw_context[5] = SHA224_H5;
2018 req_ctx->hw_context[6] = SHA224_H6;
2019 req_ctx->hw_context[7] = SHA224_H7;
2021 /* init 64-bit count */
2022 req_ctx->hw_context[8] = 0;
2023 req_ctx->hw_context[9] = 0;
2026 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
/*
 * Core of update/final/finup/digest: decide how much to hash now versus
 * buffer for later.  Short non-final updates are only buffered; otherwise
 * full blocks are hashed and any trailing partial block is stashed in the
 * spare double-buffer for the next call.  SEC2+ chains the buffered bytes
 * in front of areq->src via a two-entry scatterlist; SEC1 instead tops up
 * the buffer to a block boundary by copying (no s/g support).
 * NOTE(review): several branch lines are elided in this excerpt.
 */
2031 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
2033 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2034 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2035 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2036 struct talitos_edesc *edesc;
2037 unsigned int blocksize =
2038 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2039 unsigned int nbytes_to_hash;
2040 unsigned int to_hash_later;
2043 struct device *dev = ctx->dev;
2044 struct talitos_private *priv = dev_get_drvdata(dev);
2045 bool is_sec1 = has_ftr_sec1(priv);
2047 u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
2049 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
2050 /* Buffer up to one whole block */
2051 nents = sg_nents_for_len(areq->src, nbytes);
2053 dev_err(ctx->dev, "Invalid number of src SG.\n");
2056 sg_copy_to_buffer(areq->src, nents,
2057 ctx_buf + req_ctx->nbuf, nbytes);
2058 req_ctx->nbuf += nbytes;
2062 /* At least (blocksize + 1) bytes are available to hash */
2063 nbytes_to_hash = nbytes + req_ctx->nbuf;
2064 to_hash_later = nbytes_to_hash & (blocksize - 1);
2068 else if (to_hash_later)
2069 /* There is a partial block. Hash the full block(s) now */
2070 nbytes_to_hash -= to_hash_later;
2072 /* Keep one block buffered */
/* exact multiple of blocksize and not final: hold one block back */
2073 nbytes_to_hash -= blocksize;
2074 to_hash_later = blocksize;
2077 /* Chain in any previously buffered data */
2078 if (!is_sec1 && req_ctx->nbuf) {
2079 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2080 sg_init_table(req_ctx->bufsl, nsg);
2081 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2083 sg_chain(req_ctx->bufsl, 2, areq->src);
2084 req_ctx->psrc = req_ctx->bufsl;
2085 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
/* SEC1: round the buffer up to a block boundary by copying */
2086 if (nbytes_to_hash > blocksize)
2087 offset = blocksize - req_ctx->nbuf;
2089 offset = nbytes_to_hash - req_ctx->nbuf;
2090 nents = sg_nents_for_len(areq->src, offset);
2092 dev_err(ctx->dev, "Invalid number of src SG.\n");
2095 sg_copy_to_buffer(areq->src, nents,
2096 ctx_buf + req_ctx->nbuf, offset);
2097 req_ctx->nbuf += offset;
2098 req_ctx->psrc = areq->src;
2100 req_ctx->psrc = areq->src;
2102 if (to_hash_later) {
2103 nents = sg_nents_for_len(areq->src, nbytes);
2105 dev_err(ctx->dev, "Invalid number of src SG.\n");
/* stash the tail into the *other* buffer of the double buffer */
2108 sg_pcopy_to_buffer(areq->src, nents,
2109 req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2111 nbytes - to_hash_later);
2113 req_ctx->to_hash_later = to_hash_later;
2115 /* Allocate extended descriptor */
2116 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2118 return PTR_ERR(edesc);
2120 edesc->desc.hdr = ctx->desc_hdr_template;
2122 /* On last one, request SEC to pad; otherwise continue */
2124 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2126 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2128 /* request SEC to INIT hash. */
2129 if (req_ctx->first && !req_ctx->swinit)
2130 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2132 /* When the tfm context has a keylen, it's an HMAC.
2133 * A first or last (ie. not middle) descriptor must request HMAC.
2135 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2136 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2138 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
/* ahash .update: process areq->nbytes more input (last flag cleared in
 * the elided line). */
2142 static int ahash_update(struct ahash_request *areq)
2144 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2148 return ahash_process_req(areq, areq->nbytes);
/* ahash .final: no new input, just finalize what is buffered (last flag
 * set in the elided line). */
2151 static int ahash_final(struct ahash_request *areq)
2153 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2157 return ahash_process_req(areq, 0);
/* ahash .finup: hash the remaining areq->nbytes and finalize in one go. */
2160 static int ahash_finup(struct ahash_request *areq)
2162 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2166 return ahash_process_req(areq, areq->nbytes);
/* ahash .digest: init (via the transform's init, in the elided lines)
 * then hash the whole request and finalize. */
2169 static int ahash_digest(struct ahash_request *areq)
2171 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2172 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2177 return ahash_process_req(areq, areq->nbytes);
/*
 * ahash .export: snapshot the request state (h/w context, buffered bytes,
 * flags) into @out for a later ahash_import().  The map/unmap round-trip
 * syncs the device-written hw_context back to the CPU before copying.
 */
2180 static int ahash_export(struct ahash_request *areq, void *out)
2182 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2183 struct talitos_export_state *export = out;
2184 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2185 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2186 struct device *dev = ctx->dev;
2189 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2191 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2193 memcpy(export->hw_context, req_ctx->hw_context,
2194 req_ctx->hw_context_size);
2195 memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2196 export->swinit = req_ctx->swinit;
2197 export->first = req_ctx->first;
2198 export->last = req_ctx->last;
2199 export->to_hash_later = req_ctx->to_hash_later;
2200 export->nbuf = req_ctx->nbuf;
/*
 * ahash .import: rebuild the request context from an exported snapshot.
 * Mirror of ahash_export(); the trailing map/unmap pushes the restored
 * hw_context out to device-visible memory.
 */
2205 static int ahash_import(struct ahash_request *areq, const void *in)
2207 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2208 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2209 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2210 struct device *dev = ctx->dev;
2211 const struct talitos_export_state *export = in;
2215 memset(req_ctx, 0, sizeof(*req_ctx));
/* same size selection as ahash_init() */
2216 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2217 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2218 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2219 req_ctx->hw_context_size = size;
2220 memcpy(req_ctx->hw_context, export->hw_context, size);
2221 memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2222 req_ctx->swinit = export->swinit;
2223 req_ctx->first = export->first;
2224 req_ctx->last = export->last;
2225 req_ctx->to_hash_later = export->to_hash_later;
2226 req_ctx->nbuf = export->nbuf;
2228 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2230 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
/*
 * Synchronously digest @keylen bytes of @key into @hash using this same
 * tfm — used to shrink over-long HMAC keys to digest size.  Blocks on
 * crypto_wait_req(), so callers must be able to sleep.
 */
2235 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2238 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2240 struct scatterlist sg[1];
2241 struct ahash_request *req;
2242 struct crypto_wait wait;
2245 crypto_init_wait(&wait);
2247 req = ahash_request_alloc(tfm, GFP_KERNEL);
2251 /* Keep tfm keylen == 0 during hash of the long key */
2253 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2254 crypto_req_done, &wait);
2256 sg_init_one(&sg[0], key, keylen);
2258 ahash_request_set_crypt(req, sg, hash, keylen);
2259 ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2261 ahash_request_free(req);
/*
 * HMAC setkey: keys no longer than the block size are used as-is; longer
 * keys are first digested down to digestsize (standard HMAC rule), then
 * the result is cached in the ctx and DMA-mapped for the engine.
 */
2266 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2267 unsigned int keylen)
2269 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2270 struct device *dev = ctx->dev;
2271 unsigned int blocksize =
2272 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2273 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2274 unsigned int keysize = keylen;
2275 u8 hash[SHA512_DIGEST_SIZE];
2278 if (keylen <= blocksize)
2279 memcpy(ctx->key, key, keysize);
2281 /* Must get the hash of the long key */
2282 ret = keyhash(tfm, key, keylen, hash);
2285 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2289 keysize = digestsize;
2290 memcpy(ctx->key, hash, digestsize);
/* release any previous key mapping before installing the new one */
2294 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2296 ctx->keylen = keysize;
2297 ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
/*
 * Registration template for one algorithm: the crypto_alg / ahash_alg /
 * aead_alg variants (presumably a union selected by a type field in the
 * elided lines) plus the pre-built descriptor header for that algorithm.
 */
2303 struct talitos_alg_template {
2307 struct crypto_alg crypto;
2308 struct ahash_alg hash;
2309 struct aead_alg aead;
2311 __be32 desc_hdr_template;
2314 static struct talitos_alg_template driver_algs[] = {
2315 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2316 { .type = CRYPTO_ALG_TYPE_AEAD,
2319 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2320 .cra_driver_name = "authenc-hmac-sha1-"
2322 .cra_blocksize = AES_BLOCK_SIZE,
2323 .cra_flags = CRYPTO_ALG_ASYNC,
2325 .ivsize = AES_BLOCK_SIZE,
2326 .maxauthsize = SHA1_DIGEST_SIZE,
2328 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2329 DESC_HDR_SEL0_AESU |
2330 DESC_HDR_MODE0_AESU_CBC |
2331 DESC_HDR_SEL1_MDEUA |
2332 DESC_HDR_MODE1_MDEU_INIT |
2333 DESC_HDR_MODE1_MDEU_PAD |
2334 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2336 { .type = CRYPTO_ALG_TYPE_AEAD,
2337 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2340 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2341 .cra_driver_name = "authenc-hmac-sha1-"
2343 .cra_blocksize = AES_BLOCK_SIZE,
2344 .cra_flags = CRYPTO_ALG_ASYNC,
2346 .ivsize = AES_BLOCK_SIZE,
2347 .maxauthsize = SHA1_DIGEST_SIZE,
2349 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2350 DESC_HDR_SEL0_AESU |
2351 DESC_HDR_MODE0_AESU_CBC |
2352 DESC_HDR_SEL1_MDEUA |
2353 DESC_HDR_MODE1_MDEU_INIT |
2354 DESC_HDR_MODE1_MDEU_PAD |
2355 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2357 { .type = CRYPTO_ALG_TYPE_AEAD,
2360 .cra_name = "authenc(hmac(sha1),"
2362 .cra_driver_name = "authenc-hmac-sha1-"
2364 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2365 .cra_flags = CRYPTO_ALG_ASYNC,
2367 .ivsize = DES3_EDE_BLOCK_SIZE,
2368 .maxauthsize = SHA1_DIGEST_SIZE,
2369 .setkey = aead_des3_setkey,
2371 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2373 DESC_HDR_MODE0_DEU_CBC |
2374 DESC_HDR_MODE0_DEU_3DES |
2375 DESC_HDR_SEL1_MDEUA |
2376 DESC_HDR_MODE1_MDEU_INIT |
2377 DESC_HDR_MODE1_MDEU_PAD |
2378 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2380 { .type = CRYPTO_ALG_TYPE_AEAD,
2381 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2384 .cra_name = "authenc(hmac(sha1),"
2386 .cra_driver_name = "authenc-hmac-sha1-"
2388 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2389 .cra_flags = CRYPTO_ALG_ASYNC,
2391 .ivsize = DES3_EDE_BLOCK_SIZE,
2392 .maxauthsize = SHA1_DIGEST_SIZE,
2393 .setkey = aead_des3_setkey,
2395 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2397 DESC_HDR_MODE0_DEU_CBC |
2398 DESC_HDR_MODE0_DEU_3DES |
2399 DESC_HDR_SEL1_MDEUA |
2400 DESC_HDR_MODE1_MDEU_INIT |
2401 DESC_HDR_MODE1_MDEU_PAD |
2402 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2404 { .type = CRYPTO_ALG_TYPE_AEAD,
2407 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2408 .cra_driver_name = "authenc-hmac-sha224-"
2410 .cra_blocksize = AES_BLOCK_SIZE,
2411 .cra_flags = CRYPTO_ALG_ASYNC,
2413 .ivsize = AES_BLOCK_SIZE,
2414 .maxauthsize = SHA224_DIGEST_SIZE,
2416 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2417 DESC_HDR_SEL0_AESU |
2418 DESC_HDR_MODE0_AESU_CBC |
2419 DESC_HDR_SEL1_MDEUA |
2420 DESC_HDR_MODE1_MDEU_INIT |
2421 DESC_HDR_MODE1_MDEU_PAD |
2422 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2424 { .type = CRYPTO_ALG_TYPE_AEAD,
2425 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2428 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2429 .cra_driver_name = "authenc-hmac-sha224-"
2431 .cra_blocksize = AES_BLOCK_SIZE,
2432 .cra_flags = CRYPTO_ALG_ASYNC,
2434 .ivsize = AES_BLOCK_SIZE,
2435 .maxauthsize = SHA224_DIGEST_SIZE,
2437 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2438 DESC_HDR_SEL0_AESU |
2439 DESC_HDR_MODE0_AESU_CBC |
2440 DESC_HDR_SEL1_MDEUA |
2441 DESC_HDR_MODE1_MDEU_INIT |
2442 DESC_HDR_MODE1_MDEU_PAD |
2443 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2445 { .type = CRYPTO_ALG_TYPE_AEAD,
2448 .cra_name = "authenc(hmac(sha224),"
2450 .cra_driver_name = "authenc-hmac-sha224-"
2452 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2453 .cra_flags = CRYPTO_ALG_ASYNC,
2455 .ivsize = DES3_EDE_BLOCK_SIZE,
2456 .maxauthsize = SHA224_DIGEST_SIZE,
2457 .setkey = aead_des3_setkey,
2459 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2461 DESC_HDR_MODE0_DEU_CBC |
2462 DESC_HDR_MODE0_DEU_3DES |
2463 DESC_HDR_SEL1_MDEUA |
2464 DESC_HDR_MODE1_MDEU_INIT |
2465 DESC_HDR_MODE1_MDEU_PAD |
2466 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2468 { .type = CRYPTO_ALG_TYPE_AEAD,
2469 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2472 .cra_name = "authenc(hmac(sha224),"
2474 .cra_driver_name = "authenc-hmac-sha224-"
2476 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2477 .cra_flags = CRYPTO_ALG_ASYNC,
2479 .ivsize = DES3_EDE_BLOCK_SIZE,
2480 .maxauthsize = SHA224_DIGEST_SIZE,
2481 .setkey = aead_des3_setkey,
2483 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2485 DESC_HDR_MODE0_DEU_CBC |
2486 DESC_HDR_MODE0_DEU_3DES |
2487 DESC_HDR_SEL1_MDEUA |
2488 DESC_HDR_MODE1_MDEU_INIT |
2489 DESC_HDR_MODE1_MDEU_PAD |
2490 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2492 { .type = CRYPTO_ALG_TYPE_AEAD,
2495 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2496 .cra_driver_name = "authenc-hmac-sha256-"
2498 .cra_blocksize = AES_BLOCK_SIZE,
2499 .cra_flags = CRYPTO_ALG_ASYNC,
2501 .ivsize = AES_BLOCK_SIZE,
2502 .maxauthsize = SHA256_DIGEST_SIZE,
2504 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2505 DESC_HDR_SEL0_AESU |
2506 DESC_HDR_MODE0_AESU_CBC |
2507 DESC_HDR_SEL1_MDEUA |
2508 DESC_HDR_MODE1_MDEU_INIT |
2509 DESC_HDR_MODE1_MDEU_PAD |
2510 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2512 { .type = CRYPTO_ALG_TYPE_AEAD,
2513 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2516 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2517 .cra_driver_name = "authenc-hmac-sha256-"
2519 .cra_blocksize = AES_BLOCK_SIZE,
2520 .cra_flags = CRYPTO_ALG_ASYNC,
2522 .ivsize = AES_BLOCK_SIZE,
2523 .maxauthsize = SHA256_DIGEST_SIZE,
2525 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2526 DESC_HDR_SEL0_AESU |
2527 DESC_HDR_MODE0_AESU_CBC |
2528 DESC_HDR_SEL1_MDEUA |
2529 DESC_HDR_MODE1_MDEU_INIT |
2530 DESC_HDR_MODE1_MDEU_PAD |
2531 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2533 { .type = CRYPTO_ALG_TYPE_AEAD,
2536 .cra_name = "authenc(hmac(sha256),"
2538 .cra_driver_name = "authenc-hmac-sha256-"
2540 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2541 .cra_flags = CRYPTO_ALG_ASYNC,
2543 .ivsize = DES3_EDE_BLOCK_SIZE,
2544 .maxauthsize = SHA256_DIGEST_SIZE,
2545 .setkey = aead_des3_setkey,
2547 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2549 DESC_HDR_MODE0_DEU_CBC |
2550 DESC_HDR_MODE0_DEU_3DES |
2551 DESC_HDR_SEL1_MDEUA |
2552 DESC_HDR_MODE1_MDEU_INIT |
2553 DESC_HDR_MODE1_MDEU_PAD |
2554 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2556 { .type = CRYPTO_ALG_TYPE_AEAD,
2557 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2560 .cra_name = "authenc(hmac(sha256),"
2562 .cra_driver_name = "authenc-hmac-sha256-"
2564 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2565 .cra_flags = CRYPTO_ALG_ASYNC,
2567 .ivsize = DES3_EDE_BLOCK_SIZE,
2568 .maxauthsize = SHA256_DIGEST_SIZE,
2569 .setkey = aead_des3_setkey,
2571 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2573 DESC_HDR_MODE0_DEU_CBC |
2574 DESC_HDR_MODE0_DEU_3DES |
2575 DESC_HDR_SEL1_MDEUA |
2576 DESC_HDR_MODE1_MDEU_INIT |
2577 DESC_HDR_MODE1_MDEU_PAD |
2578 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2580 { .type = CRYPTO_ALG_TYPE_AEAD,
2583 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2584 .cra_driver_name = "authenc-hmac-sha384-"
2586 .cra_blocksize = AES_BLOCK_SIZE,
2587 .cra_flags = CRYPTO_ALG_ASYNC,
2589 .ivsize = AES_BLOCK_SIZE,
2590 .maxauthsize = SHA384_DIGEST_SIZE,
2592 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2593 DESC_HDR_SEL0_AESU |
2594 DESC_HDR_MODE0_AESU_CBC |
2595 DESC_HDR_SEL1_MDEUB |
2596 DESC_HDR_MODE1_MDEU_INIT |
2597 DESC_HDR_MODE1_MDEU_PAD |
2598 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2600 { .type = CRYPTO_ALG_TYPE_AEAD,
2603 .cra_name = "authenc(hmac(sha384),"
2605 .cra_driver_name = "authenc-hmac-sha384-"
2607 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2608 .cra_flags = CRYPTO_ALG_ASYNC,
2610 .ivsize = DES3_EDE_BLOCK_SIZE,
2611 .maxauthsize = SHA384_DIGEST_SIZE,
2612 .setkey = aead_des3_setkey,
2614 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2616 DESC_HDR_MODE0_DEU_CBC |
2617 DESC_HDR_MODE0_DEU_3DES |
2618 DESC_HDR_SEL1_MDEUB |
2619 DESC_HDR_MODE1_MDEU_INIT |
2620 DESC_HDR_MODE1_MDEU_PAD |
2621 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2623 { .type = CRYPTO_ALG_TYPE_AEAD,
2626 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2627 .cra_driver_name = "authenc-hmac-sha512-"
2629 .cra_blocksize = AES_BLOCK_SIZE,
2630 .cra_flags = CRYPTO_ALG_ASYNC,
2632 .ivsize = AES_BLOCK_SIZE,
2633 .maxauthsize = SHA512_DIGEST_SIZE,
2635 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2636 DESC_HDR_SEL0_AESU |
2637 DESC_HDR_MODE0_AESU_CBC |
2638 DESC_HDR_SEL1_MDEUB |
2639 DESC_HDR_MODE1_MDEU_INIT |
2640 DESC_HDR_MODE1_MDEU_PAD |
2641 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2643 { .type = CRYPTO_ALG_TYPE_AEAD,
2646 .cra_name = "authenc(hmac(sha512),"
2648 .cra_driver_name = "authenc-hmac-sha512-"
2650 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2651 .cra_flags = CRYPTO_ALG_ASYNC,
2653 .ivsize = DES3_EDE_BLOCK_SIZE,
2654 .maxauthsize = SHA512_DIGEST_SIZE,
2655 .setkey = aead_des3_setkey,
2657 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2659 DESC_HDR_MODE0_DEU_CBC |
2660 DESC_HDR_MODE0_DEU_3DES |
2661 DESC_HDR_SEL1_MDEUB |
2662 DESC_HDR_MODE1_MDEU_INIT |
2663 DESC_HDR_MODE1_MDEU_PAD |
2664 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2666 { .type = CRYPTO_ALG_TYPE_AEAD,
2669 .cra_name = "authenc(hmac(md5),cbc(aes))",
2670 .cra_driver_name = "authenc-hmac-md5-"
2672 .cra_blocksize = AES_BLOCK_SIZE,
2673 .cra_flags = CRYPTO_ALG_ASYNC,
2675 .ivsize = AES_BLOCK_SIZE,
2676 .maxauthsize = MD5_DIGEST_SIZE,
2678 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2679 DESC_HDR_SEL0_AESU |
2680 DESC_HDR_MODE0_AESU_CBC |
2681 DESC_HDR_SEL1_MDEUA |
2682 DESC_HDR_MODE1_MDEU_INIT |
2683 DESC_HDR_MODE1_MDEU_PAD |
2684 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2686 { .type = CRYPTO_ALG_TYPE_AEAD,
2687 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2690 .cra_name = "authenc(hmac(md5),cbc(aes))",
2691 .cra_driver_name = "authenc-hmac-md5-"
2693 .cra_blocksize = AES_BLOCK_SIZE,
2694 .cra_flags = CRYPTO_ALG_ASYNC,
2696 .ivsize = AES_BLOCK_SIZE,
2697 .maxauthsize = MD5_DIGEST_SIZE,
2699 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2700 DESC_HDR_SEL0_AESU |
2701 DESC_HDR_MODE0_AESU_CBC |
2702 DESC_HDR_SEL1_MDEUA |
2703 DESC_HDR_MODE1_MDEU_INIT |
2704 DESC_HDR_MODE1_MDEU_PAD |
2705 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2707 { .type = CRYPTO_ALG_TYPE_AEAD,
2710 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2711 .cra_driver_name = "authenc-hmac-md5-"
2713 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2714 .cra_flags = CRYPTO_ALG_ASYNC,
2716 .ivsize = DES3_EDE_BLOCK_SIZE,
2717 .maxauthsize = MD5_DIGEST_SIZE,
2718 .setkey = aead_des3_setkey,
2720 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2722 DESC_HDR_MODE0_DEU_CBC |
2723 DESC_HDR_MODE0_DEU_3DES |
2724 DESC_HDR_SEL1_MDEUA |
2725 DESC_HDR_MODE1_MDEU_INIT |
2726 DESC_HDR_MODE1_MDEU_PAD |
2727 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2729 { .type = CRYPTO_ALG_TYPE_AEAD,
2730 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2733 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2734 .cra_driver_name = "authenc-hmac-md5-"
2736 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2737 .cra_flags = CRYPTO_ALG_ASYNC,
2739 .ivsize = DES3_EDE_BLOCK_SIZE,
2740 .maxauthsize = MD5_DIGEST_SIZE,
2741 .setkey = aead_des3_setkey,
2743 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2745 DESC_HDR_MODE0_DEU_CBC |
2746 DESC_HDR_MODE0_DEU_3DES |
2747 DESC_HDR_SEL1_MDEUA |
2748 DESC_HDR_MODE1_MDEU_INIT |
2749 DESC_HDR_MODE1_MDEU_PAD |
2750 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2752 /* ABLKCIPHER algorithms. */
2753 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2755 .cra_name = "ecb(aes)",
2756 .cra_driver_name = "ecb-aes-talitos",
2757 .cra_blocksize = AES_BLOCK_SIZE,
2758 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2761 .min_keysize = AES_MIN_KEY_SIZE,
2762 .max_keysize = AES_MAX_KEY_SIZE,
2763 .ivsize = AES_BLOCK_SIZE,
2766 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2769 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2771 .cra_name = "cbc(aes)",
2772 .cra_driver_name = "cbc-aes-talitos",
2773 .cra_blocksize = AES_BLOCK_SIZE,
2774 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2777 .min_keysize = AES_MIN_KEY_SIZE,
2778 .max_keysize = AES_MAX_KEY_SIZE,
2779 .ivsize = AES_BLOCK_SIZE,
2782 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2783 DESC_HDR_SEL0_AESU |
2784 DESC_HDR_MODE0_AESU_CBC,
2786 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2788 .cra_name = "ctr(aes)",
2789 .cra_driver_name = "ctr-aes-talitos",
2790 .cra_blocksize = AES_BLOCK_SIZE,
2791 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2794 .min_keysize = AES_MIN_KEY_SIZE,
2795 .max_keysize = AES_MAX_KEY_SIZE,
2796 .ivsize = AES_BLOCK_SIZE,
2799 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2800 DESC_HDR_SEL0_AESU |
2801 DESC_HDR_MODE0_AESU_CTR,
2803 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2805 .cra_name = "ecb(des)",
2806 .cra_driver_name = "ecb-des-talitos",
2807 .cra_blocksize = DES_BLOCK_SIZE,
2808 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2811 .min_keysize = DES_KEY_SIZE,
2812 .max_keysize = DES_KEY_SIZE,
2813 .ivsize = DES_BLOCK_SIZE,
2814 .setkey = ablkcipher_des_setkey,
2817 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2820 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2822 .cra_name = "cbc(des)",
2823 .cra_driver_name = "cbc-des-talitos",
2824 .cra_blocksize = DES_BLOCK_SIZE,
2825 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2828 .min_keysize = DES_KEY_SIZE,
2829 .max_keysize = DES_KEY_SIZE,
2830 .ivsize = DES_BLOCK_SIZE,
2831 .setkey = ablkcipher_des_setkey,
2834 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2836 DESC_HDR_MODE0_DEU_CBC,
2838 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2840 .cra_name = "ecb(des3_ede)",
2841 .cra_driver_name = "ecb-3des-talitos",
2842 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2843 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2846 .min_keysize = DES3_EDE_KEY_SIZE,
2847 .max_keysize = DES3_EDE_KEY_SIZE,
2848 .ivsize = DES3_EDE_BLOCK_SIZE,
2849 .setkey = ablkcipher_des3_setkey,
2852 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2854 DESC_HDR_MODE0_DEU_3DES,
2856 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2858 .cra_name = "cbc(des3_ede)",
2859 .cra_driver_name = "cbc-3des-talitos",
2860 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2861 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2864 .min_keysize = DES3_EDE_KEY_SIZE,
2865 .max_keysize = DES3_EDE_KEY_SIZE,
2866 .ivsize = DES3_EDE_BLOCK_SIZE,
2867 .setkey = ablkcipher_des3_setkey,
2870 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2872 DESC_HDR_MODE0_DEU_CBC |
2873 DESC_HDR_MODE0_DEU_3DES,
2875 /* AHASH algorithms. */
2876 { .type = CRYPTO_ALG_TYPE_AHASH,
2878 .halg.digestsize = MD5_DIGEST_SIZE,
2879 .halg.statesize = sizeof(struct talitos_export_state),
2882 .cra_driver_name = "md5-talitos",
2883 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2884 .cra_flags = CRYPTO_ALG_ASYNC,
2887 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2888 DESC_HDR_SEL0_MDEUA |
2889 DESC_HDR_MODE0_MDEU_MD5,
2891 { .type = CRYPTO_ALG_TYPE_AHASH,
2893 .halg.digestsize = SHA1_DIGEST_SIZE,
2894 .halg.statesize = sizeof(struct talitos_export_state),
2897 .cra_driver_name = "sha1-talitos",
2898 .cra_blocksize = SHA1_BLOCK_SIZE,
2899 .cra_flags = CRYPTO_ALG_ASYNC,
2902 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2903 DESC_HDR_SEL0_MDEUA |
2904 DESC_HDR_MODE0_MDEU_SHA1,
2906 { .type = CRYPTO_ALG_TYPE_AHASH,
2908 .halg.digestsize = SHA224_DIGEST_SIZE,
2909 .halg.statesize = sizeof(struct talitos_export_state),
2911 .cra_name = "sha224",
2912 .cra_driver_name = "sha224-talitos",
2913 .cra_blocksize = SHA224_BLOCK_SIZE,
2914 .cra_flags = CRYPTO_ALG_ASYNC,
2917 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2918 DESC_HDR_SEL0_MDEUA |
2919 DESC_HDR_MODE0_MDEU_SHA224,
2921 { .type = CRYPTO_ALG_TYPE_AHASH,
2923 .halg.digestsize = SHA256_DIGEST_SIZE,
2924 .halg.statesize = sizeof(struct talitos_export_state),
2926 .cra_name = "sha256",
2927 .cra_driver_name = "sha256-talitos",
2928 .cra_blocksize = SHA256_BLOCK_SIZE,
2929 .cra_flags = CRYPTO_ALG_ASYNC,
2932 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2933 DESC_HDR_SEL0_MDEUA |
2934 DESC_HDR_MODE0_MDEU_SHA256,
2936 { .type = CRYPTO_ALG_TYPE_AHASH,
2938 .halg.digestsize = SHA384_DIGEST_SIZE,
2939 .halg.statesize = sizeof(struct talitos_export_state),
2941 .cra_name = "sha384",
2942 .cra_driver_name = "sha384-talitos",
2943 .cra_blocksize = SHA384_BLOCK_SIZE,
2944 .cra_flags = CRYPTO_ALG_ASYNC,
2947 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2948 DESC_HDR_SEL0_MDEUB |
2949 DESC_HDR_MODE0_MDEUB_SHA384,
2951 { .type = CRYPTO_ALG_TYPE_AHASH,
2953 .halg.digestsize = SHA512_DIGEST_SIZE,
2954 .halg.statesize = sizeof(struct talitos_export_state),
2956 .cra_name = "sha512",
2957 .cra_driver_name = "sha512-talitos",
2958 .cra_blocksize = SHA512_BLOCK_SIZE,
2959 .cra_flags = CRYPTO_ALG_ASYNC,
2962 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2963 DESC_HDR_SEL0_MDEUB |
2964 DESC_HDR_MODE0_MDEUB_SHA512,
2966 { .type = CRYPTO_ALG_TYPE_AHASH,
2968 .halg.digestsize = MD5_DIGEST_SIZE,
2969 .halg.statesize = sizeof(struct talitos_export_state),
2971 .cra_name = "hmac(md5)",
2972 .cra_driver_name = "hmac-md5-talitos",
2973 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2974 .cra_flags = CRYPTO_ALG_ASYNC,
2977 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2978 DESC_HDR_SEL0_MDEUA |
2979 DESC_HDR_MODE0_MDEU_MD5,
2981 { .type = CRYPTO_ALG_TYPE_AHASH,
2983 .halg.digestsize = SHA1_DIGEST_SIZE,
2984 .halg.statesize = sizeof(struct talitos_export_state),
2986 .cra_name = "hmac(sha1)",
2987 .cra_driver_name = "hmac-sha1-talitos",
2988 .cra_blocksize = SHA1_BLOCK_SIZE,
2989 .cra_flags = CRYPTO_ALG_ASYNC,
2992 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2993 DESC_HDR_SEL0_MDEUA |
2994 DESC_HDR_MODE0_MDEU_SHA1,
2996 { .type = CRYPTO_ALG_TYPE_AHASH,
2998 .halg.digestsize = SHA224_DIGEST_SIZE,
2999 .halg.statesize = sizeof(struct talitos_export_state),
3001 .cra_name = "hmac(sha224)",
3002 .cra_driver_name = "hmac-sha224-talitos",
3003 .cra_blocksize = SHA224_BLOCK_SIZE,
3004 .cra_flags = CRYPTO_ALG_ASYNC,
3007 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3008 DESC_HDR_SEL0_MDEUA |
3009 DESC_HDR_MODE0_MDEU_SHA224,
3011 { .type = CRYPTO_ALG_TYPE_AHASH,
3013 .halg.digestsize = SHA256_DIGEST_SIZE,
3014 .halg.statesize = sizeof(struct talitos_export_state),
3016 .cra_name = "hmac(sha256)",
3017 .cra_driver_name = "hmac-sha256-talitos",
3018 .cra_blocksize = SHA256_BLOCK_SIZE,
3019 .cra_flags = CRYPTO_ALG_ASYNC,
3022 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3023 DESC_HDR_SEL0_MDEUA |
3024 DESC_HDR_MODE0_MDEU_SHA256,
3026 { .type = CRYPTO_ALG_TYPE_AHASH,
3028 .halg.digestsize = SHA384_DIGEST_SIZE,
3029 .halg.statesize = sizeof(struct talitos_export_state),
3031 .cra_name = "hmac(sha384)",
3032 .cra_driver_name = "hmac-sha384-talitos",
3033 .cra_blocksize = SHA384_BLOCK_SIZE,
3034 .cra_flags = CRYPTO_ALG_ASYNC,
3037 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3038 DESC_HDR_SEL0_MDEUB |
3039 DESC_HDR_MODE0_MDEUB_SHA384,
3041 { .type = CRYPTO_ALG_TYPE_AHASH,
3043 .halg.digestsize = SHA512_DIGEST_SIZE,
3044 .halg.statesize = sizeof(struct talitos_export_state),
3046 .cra_name = "hmac(sha512)",
3047 .cra_driver_name = "hmac-sha512-talitos",
3048 .cra_blocksize = SHA512_BLOCK_SIZE,
3049 .cra_flags = CRYPTO_ALG_ASYNC,
3052 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3053 DESC_HDR_SEL0_MDEUB |
3054 DESC_HDR_MODE0_MDEUB_SHA512,
/*
 * talitos_crypto_alg - per-device instantiation of one driver_algs[]
 * template entry, linked into the device's alg_list at probe time so
 * talitos_remove() can unregister everything.
 *
 * NOTE(review): this excerpt is missing lines here — at minimum the
 * closing "};" and, judging by the use of talitos_alg->dev in
 * talitos_init_common() below, a "struct device *dev;" member.
 */
struct talitos_crypto_alg {
	struct list_head entry;			/* node in talitos_private alg_list */
	struct talitos_alg_template algt;	/* copied template, incl. desc_hdr_template */
/*
 * Common tfm-context initialization shared by the ablkcipher, AEAD and
 * ahash cra_init paths: bind the context to the owning device, pick a
 * SEC channel, and seed the descriptor header template.
 *
 * NOTE(review): the opening brace and the trailing "return 0; }" lines
 * are not visible in this excerpt.
 */
static int talitos_init_common(struct talitos_ctx *ctx,
			       struct talitos_crypto_alg *talitos_alg)
	struct talitos_private *priv;

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	/*
	 * Masking works as round-robin only because talitos_probe()
	 * rejects a non-power-of-2 fsl,num-channels value.
	 */
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
/*
 * cra_init hook for ablkcipher (and, via talitos_cra_init_ahash, hash)
 * transforms: recover the enclosing talitos_crypto_alg from the generic
 * crypto_alg and delegate to talitos_init_common().
 *
 * NOTE(review): lines are missing from this excerpt — the two
 * container_of() calls are truncated (their final member argument and
 * the intervening "else" are not visible).
 */
static int talitos_cra_init(struct crypto_tfm *tfm)
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	/* ahash algs embed the crypto_alg inside ahash_alg's halg.base */
	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
	talitos_alg = container_of(alg, struct talitos_crypto_alg,

	return talitos_init_common(ctx, talitos_alg);
/*
 * init hook for AEAD transforms: same job as talitos_cra_init() but
 * starting from the aead_alg embedded in the template.
 *
 * NOTE(review): the container_of() member argument and closing brace
 * are not visible in this excerpt.
 */
static int talitos_cra_init_aead(struct crypto_aead *tfm)
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);

	talitos_alg = container_of(alg, struct talitos_crypto_alg,

	return talitos_init_common(ctx, talitos_alg);
/*
 * init hook for ahash transforms: common init plus per-request context
 * sizing for the ahash request private area.
 *
 * NOTE(review): the "return 0; }" tail is not visible in this excerpt.
 */
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	/* every ahash request carries a talitos_ahash_req_ctx */
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));
/*
 * cra_exit hook: release the DMA mapping of the key that setkey created.
 *
 * NOTE(review): a guard line (presumably "if (ctx->dma_key)") appears to
 * have been dropped from this excerpt before the dma_unmap_single() call
 * — confirm against the full source.
 */
static void talitos_cra_exit(struct crypto_tfm *tfm)
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct device *dev = ctx->dev;

	dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 *
 * Returns nonzero (true) when the desc type bit is in priv->desc_types
 * and all required EU bits are in priv->exec_units.
 * NOTE(review): the "int ret;" declaration, "return ret;" and braces are
 * not visible in this excerpt.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* desc type and primary EU must both be advertised by the h/w */
	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	/* secondary EU (e.g. MDEU for AEAD) is only checked if requested */
	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);
/*
 * Platform-driver remove: unregister every algorithm registered at probe
 * time, the RNG if present, then tear down IRQs and tasklets.  Also used
 * as the error-unwind path at the end of talitos_probe().
 *
 * NOTE(review): lines are missing from this excerpt — the ABLKCIPHER
 * unregister call, the "break;" statements between switch cases, the
 * "int i;" declaration, the "if (priv->irq[i])" guard around the IRQ
 * teardown, and the SEC1-vs-SEC2 condition before the second
 * tasklet_kill().  Do not read the visible fallthrough as intentional.
 */
static int talitos_remove(struct platform_device *ofdev)
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&t_alg->algt.alg.aead);
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
		list_del(&t_alg->entry);

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < 2; i++)
		free_irq(priv->irq[i], dev);
		irq_dispose_mapping(priv->irq[i]);

	tasklet_kill(&priv->done_task[0]);
	tasklet_kill(&priv->done_task[1]);
/*
 * Allocate and populate a talitos_crypto_alg from one driver_algs[]
 * template: copy the template, wire up the type-specific ops (init/exit,
 * setkey, encrypt/decrypt or hash ops), reject algorithms the hardware
 * feature bits cannot support, and fill in the common crypto_alg fields.
 *
 * Returns the new object, or ERR_PTR(-ENOTSUPP) for unsupported algs,
 * ERR_PTR(-ENOMEM) on allocation failure, ERR_PTR(-EINVAL) for an
 * unknown template type.  Memory is devm-managed but explicitly freed
 * with devm_kfree() on the error paths.
 *
 * NOTE(review): this excerpt is missing lines throughout — among them
 * the "*template" parameter name/GFP argument continuation, "if (!t_alg)",
 * the default setkey fallbacks after the "?:" operators, "break;"
 * statements between switch cases, the "default:" label, and the
 * "return t_alg; }" tail.
 */
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
		return ERR_PTR(-ENOMEM);

	/* start from the static template, then specialize below */
	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_exit = talitos_cra_exit;
		alg->cra_type = &crypto_ablkcipher_type;
		/* template-provided setkey (e.g. DES weak-key check) wins */
		alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		alg->cra_exit = talitos_cra_exit;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		/* sha224 AEADs need h/w SHA-224 init support */
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
			devm_kfree(dev, t_alg);
			return ERR_PTR(-ENOTSUPP);
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_exit = talitos_cra_exit;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		/* only the hmac(...) variants take a key */
		if (!strncmp(alg->cra_name, "hmac", 4))
			t_alg->algt.alg.hash.setkey = ahash_setkey;
		t_alg->algt.alg.hash.import = ahash_import;
		t_alg->algt.alg.hash.export = ahash_export;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			devm_kfree(dev, t_alg);
			return ERR_PTR(-ENOTSUPP);
		/*
		 * Without h/w SHA-224 init, run sha224 as SHA-256 with a
		 * software-initialized state (ahash_init_sha224_swinit).
		 */
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		devm_kfree(dev, t_alg);
		return ERR_PTR(-EINVAL);

	/* common crypto_alg fields shared by all three types */
	alg->cra_module = THIS_MODULE;
	if (t_alg->algt.priority)
		alg->cra_priority = t_alg->algt.priority;
		alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
/*
 * Map and request the device's IRQ line(s) from the device tree.
 * SEC1 uses a single 4-channel handler; SEC2+ uses either one 4-channel
 * handler (single IRQ) or a ch0/2 + ch1/3 pair when a second IRQ line
 * exists.
 *
 * NOTE(review): this excerpt is missing lines — among them the "int err;"
 * declaration, the is_sec1/else branching that selects between the SEC1
 * and SEC2 request_irq() calls, "goto" statements into the error tails,
 * intermediate "} else {" lines, and the "return err; }" tail.  The
 * visible sequence is therefore not the real control flow.
 */
static int talitos_probe_irq(struct platform_device *ofdev)
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");

	/* SEC1: one handler covers all four channels */
	err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
			  dev_driver_string(dev), dev);

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);

	dev_err(dev, "failed to request primary irq\n");
	irq_dispose_mapping(priv->irq[0]);
/*
 * Platform-driver probe: allocate the private state, map registers, read
 * SEC capabilities and feature bits from the device tree, set up IRQs,
 * tasklets, per-channel state, DMA mask and the hardware itself, then
 * register the RNG and every supported algorithm from driver_algs[].
 * On failure past IRQ setup, unwinds through talitos_remove().
 *
 * NOTE(review): this excerpt is missing many lines — declarations
 * ("int i;", "int err;", "int stride;"), NULL/error checks and their
 * "return"/"goto err_out" bodies, several "} else {" lines, "break;"
 * statements in the registration switch, the "if (err) {" guards, and
 * the "return 0;" / "err_out:" tail.  Visible statements therefore do
 * not reflect the complete control flow.
 */
static int talitos_probe(struct platform_device *ofdev)
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	struct resource *res;

	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);

	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
		dev_err(dev, "failed to of_iomap\n");

	/* get SEC version capabilities from device tree */
	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
	of_property_read_u32(np, "fsl,descriptor-types-mask",

	/*
	 * num_channels must be a power of 2: talitos_init_common() uses
	 * (num_channels - 1) as a round-robin mask.
	 */
	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	/* per-generation execution-unit register offsets and channel stride */
	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;

	err = talitos_probe_irq(ofdev);

	/* done-tasklets mirror the IRQ topology chosen above */
	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		if (priv->num_channels == 1)
			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
				     (unsigned long)dev);
		tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
			     (unsigned long)dev);
		tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
			     (unsigned long)dev);
	} else if (priv->num_channels == 1) {
		tasklet_init(&priv->done_task[0], talitos2_done_ch0,
			     (unsigned long)dev);
		tasklet_init(&priv->done_task[0], talitos2_done_4ch,
			     (unsigned long)dev);

	priv->chan = devm_kcalloc(dev,
				  sizeof(struct talitos_channel),
		dev_err(dev, "failed to allocate channel management space\n");

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		/* channels serviced on the primary IRQ use the base offset */
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = devm_kcalloc(dev,
						  sizeof(struct talitos_request),
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);

		/* submit_count counts up toward 0 as requests are queued */
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
		dev_err(dev, "failed to initialize device\n");

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
			dev_err(dev, "failed to register hwrng: %d\n", err);
		dev_info(dev, "hwrng\n");

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				/* -ENOTSUPP just skips this template */
				if (err == -ENOTSUPP)

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
					&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				devm_kfree(dev, t_alg);
			list_add_tail(&t_alg->entry, &priv->alg_list);
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	/* error unwind: talitos_remove() tears down whatever succeeded */
	talitos_remove(ofdev);
/*
 * Device-tree match table: SEC1 and SEC2 families, each gated on its own
 * Kconfig option.
 * NOTE(review): entry braces, "#endif" lines and the terminating empty
 * entry / "};" are not visible in this excerpt.
 */
static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
		.compatible = "fsl,sec1.0",
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
		.compatible = "fsl,sec2.0",
MODULE_DEVICE_TABLE(of, talitos_match);
/*
 * Platform driver glue binding probe/remove to the match table above.
 * NOTE(review): the ".driver = { .name = ..." lines and closing "};"
 * are not visible in this excerpt.
 */
static struct platform_driver talitos_driver = {
		.of_match_table = talitos_match,
	.probe = talitos_probe,
	.remove = talitos_remove,
/* Module registration boilerplate. */
module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");