// SPDX-License-Identifier: GPL-2.0+
/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 */
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/mod_devicetable.h>
18 #include <linux/device.h>
19 #include <linux/interrupt.h>
20 #include <linux/crypto.h>
21 #include <linux/hw_random.h>
22 #include <linux/of_address.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_platform.h>
25 #include <linux/dma-mapping.h>
27 #include <linux/spinlock.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/slab.h>
31 #include <crypto/algapi.h>
32 #include <crypto/aes.h>
33 #include <crypto/des.h>
34 #include <crypto/sha.h>
35 #include <crypto/md5.h>
36 #include <crypto/internal/aead.h>
37 #include <crypto/authenc.h>
38 #include <crypto/skcipher.h>
39 #include <crypto/hash.h>
40 #include <crypto/internal/hash.h>
41 #include <crypto/scatterwalk.h>
45 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
46 unsigned int len, bool is_sec1)
48 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
50 ptr->len1 = cpu_to_be16(len);
52 ptr->len = cpu_to_be16(len);
53 ptr->eptr = upper_32_bits(dma_addr);
57 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
58 struct talitos_ptr *src_ptr, bool is_sec1)
60 dst_ptr->ptr = src_ptr->ptr;
62 dst_ptr->len1 = src_ptr->len1;
64 dst_ptr->len = src_ptr->len;
65 dst_ptr->eptr = src_ptr->eptr;
69 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
73 return be16_to_cpu(ptr->len1);
75 return be16_to_cpu(ptr->len);
78 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
85 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
92 * map virtual single (contiguous) pointer to h/w descriptor pointer
94 static void __map_single_talitos_ptr(struct device *dev,
95 struct talitos_ptr *ptr,
96 unsigned int len, void *data,
97 enum dma_data_direction dir,
100 dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
101 struct talitos_private *priv = dev_get_drvdata(dev);
102 bool is_sec1 = has_ftr_sec1(priv);
104 to_talitos_ptr(ptr, dma_addr, len, is_sec1);
107 static void map_single_talitos_ptr(struct device *dev,
108 struct talitos_ptr *ptr,
109 unsigned int len, void *data,
110 enum dma_data_direction dir)
112 __map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
115 static void map_single_talitos_ptr_nosync(struct device *dev,
116 struct talitos_ptr *ptr,
117 unsigned int len, void *data,
118 enum dma_data_direction dir)
120 __map_single_talitos_ptr(dev, ptr, len, data, dir,
121 DMA_ATTR_SKIP_CPU_SYNC);
125 * unmap bus single (contiguous) h/w descriptor pointer
127 static void unmap_single_talitos_ptr(struct device *dev,
128 struct talitos_ptr *ptr,
129 enum dma_data_direction dir)
131 struct talitos_private *priv = dev_get_drvdata(dev);
132 bool is_sec1 = has_ftr_sec1(priv);
134 dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
135 from_talitos_ptr_len(ptr, is_sec1), dir);
138 static int reset_channel(struct device *dev, int ch)
140 struct talitos_private *priv = dev_get_drvdata(dev);
141 unsigned int timeout = TALITOS_TIMEOUT;
142 bool is_sec1 = has_ftr_sec1(priv);
145 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
146 TALITOS1_CCCR_LO_RESET);
148 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
149 TALITOS1_CCCR_LO_RESET) && --timeout)
152 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
153 TALITOS2_CCCR_RESET);
155 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
156 TALITOS2_CCCR_RESET) && --timeout)
161 dev_err(dev, "failed to reset channel %d\n", ch);
165 /* set 36-bit addressing, done writeback enable and done IRQ enable */
166 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
167 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
168 /* enable chaining descriptors */
170 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
173 /* and ICCR writeback, if available */
174 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
175 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
176 TALITOS_CCCR_LO_IWSE);
181 static int reset_device(struct device *dev)
183 struct talitos_private *priv = dev_get_drvdata(dev);
184 unsigned int timeout = TALITOS_TIMEOUT;
185 bool is_sec1 = has_ftr_sec1(priv);
186 u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
188 setbits32(priv->reg + TALITOS_MCR, mcr);
190 while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
195 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
196 setbits32(priv->reg + TALITOS_MCR, mcr);
200 dev_err(dev, "failed to reset device\n");
208 * Reset and initialize the device
210 static int init_device(struct device *dev)
212 struct talitos_private *priv = dev_get_drvdata(dev);
214 bool is_sec1 = has_ftr_sec1(priv);
218 * errata documentation: warning: certain SEC interrupts
219 * are not fully cleared by writing the MCR:SWR bit,
220 * set bit twice to completely reset
222 err = reset_device(dev);
226 err = reset_device(dev);
231 for (ch = 0; ch < priv->num_channels; ch++) {
232 err = reset_channel(dev, ch);
237 /* enable channel done and error interrupts */
239 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
240 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
241 /* disable parity error check in DEU (erroneous? test vect.) */
242 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
244 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
245 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
248 /* disable integrity check error interrupts (use writeback instead) */
249 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
250 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
251 TALITOS_MDEUICR_LO_ICE);
257 * talitos_submit - submits a descriptor to the device for processing
258 * @dev: the SEC device to be used
259 * @ch: the SEC device channel to be used
260 * @desc: the descriptor to be processed by the device
261 * @callback: whom to call when processing is complete
262 * @context: a handle for use by caller (optional)
264 * desc must contain valid dma-mapped (bus physical) address pointers.
265 * callback must check err and feedback in descriptor header
266 * for device processing status.
268 static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
269 void (*callback)(struct device *dev,
270 struct talitos_desc *desc,
271 void *context, int error),
274 struct talitos_private *priv = dev_get_drvdata(dev);
275 struct talitos_request *request;
278 bool is_sec1 = has_ftr_sec1(priv);
280 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
282 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
283 /* h/w fifo is full */
284 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
288 head = priv->chan[ch].head;
289 request = &priv->chan[ch].fifo[head];
291 /* map descriptor and save caller data */
293 desc->hdr1 = desc->hdr;
294 request->dma_desc = dma_map_single(dev, &desc->hdr1,
298 request->dma_desc = dma_map_single(dev, desc,
302 request->callback = callback;
303 request->context = context;
305 /* increment fifo head */
306 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
309 request->desc = desc;
313 out_be32(priv->chan[ch].reg + TALITOS_FF,
314 upper_32_bits(request->dma_desc));
315 out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
316 lower_32_bits(request->dma_desc));
318 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
324 * process what was done, notify callback of error if not
326 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
328 struct talitos_private *priv = dev_get_drvdata(dev);
329 struct talitos_request *request, saved_req;
332 bool is_sec1 = has_ftr_sec1(priv);
334 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
336 tail = priv->chan[ch].tail;
337 while (priv->chan[ch].fifo[tail].desc) {
340 request = &priv->chan[ch].fifo[tail];
342 /* descriptors with their done bits set don't get the error */
345 hdr = request->desc->hdr;
346 else if (request->desc->next_desc)
347 hdr = (request->desc + 1)->hdr1;
349 hdr = request->desc->hdr1;
351 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
359 dma_unmap_single(dev, request->dma_desc,
363 /* copy entries so we can call callback outside lock */
364 saved_req.desc = request->desc;
365 saved_req.callback = request->callback;
366 saved_req.context = request->context;
368 /* release request entry in fifo */
370 request->desc = NULL;
372 /* increment fifo tail */
373 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
375 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
377 atomic_dec(&priv->chan[ch].submit_count);
379 saved_req.callback(dev, saved_req.desc, saved_req.context,
381 /* channel may resume processing in single desc error case */
382 if (error && !reset_ch && status == error)
384 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
385 tail = priv->chan[ch].tail;
388 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
392 * process completed requests for channels that have done status
394 #define DEF_TALITOS1_DONE(name, ch_done_mask) \
395 static void talitos1_done_##name(unsigned long data) \
397 struct device *dev = (struct device *)data; \
398 struct talitos_private *priv = dev_get_drvdata(dev); \
399 unsigned long flags; \
401 if (ch_done_mask & 0x10000000) \
402 flush_channel(dev, 0, 0, 0); \
403 if (ch_done_mask & 0x40000000) \
404 flush_channel(dev, 1, 0, 0); \
405 if (ch_done_mask & 0x00010000) \
406 flush_channel(dev, 2, 0, 0); \
407 if (ch_done_mask & 0x00040000) \
408 flush_channel(dev, 3, 0, 0); \
410 /* At this point, all completed channels have been processed */ \
411 /* Unmask done interrupts for channels completed later on. */ \
412 spin_lock_irqsave(&priv->reg_lock, flags); \
413 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
414 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
415 spin_unlock_irqrestore(&priv->reg_lock, flags); \
418 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
419 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
421 #define DEF_TALITOS2_DONE(name, ch_done_mask) \
422 static void talitos2_done_##name(unsigned long data) \
424 struct device *dev = (struct device *)data; \
425 struct talitos_private *priv = dev_get_drvdata(dev); \
426 unsigned long flags; \
428 if (ch_done_mask & 1) \
429 flush_channel(dev, 0, 0, 0); \
430 if (ch_done_mask & (1 << 2)) \
431 flush_channel(dev, 1, 0, 0); \
432 if (ch_done_mask & (1 << 4)) \
433 flush_channel(dev, 2, 0, 0); \
434 if (ch_done_mask & (1 << 6)) \
435 flush_channel(dev, 3, 0, 0); \
437 /* At this point, all completed channels have been processed */ \
438 /* Unmask done interrupts for channels completed later on. */ \
439 spin_lock_irqsave(&priv->reg_lock, flags); \
440 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
441 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
442 spin_unlock_irqrestore(&priv->reg_lock, flags); \
445 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
446 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
447 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
448 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
451 * locate current (offending) descriptor
453 static u32 current_desc_hdr(struct device *dev, int ch)
455 struct talitos_private *priv = dev_get_drvdata(dev);
459 cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
460 cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
463 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
467 tail = priv->chan[ch].tail;
470 while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
471 priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
472 iter = (iter + 1) & (priv->fifo_len - 1);
474 dev_err(dev, "couldn't locate current descriptor\n");
479 if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
480 return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
482 return priv->chan[ch].fifo[iter].desc->hdr;
486 * user diagnostics; report root cause of error based on execution unit status
488 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
490 struct talitos_private *priv = dev_get_drvdata(dev);
494 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
496 switch (desc_hdr & DESC_HDR_SEL0_MASK) {
497 case DESC_HDR_SEL0_AFEU:
498 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
499 in_be32(priv->reg_afeu + TALITOS_EUISR),
500 in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
502 case DESC_HDR_SEL0_DEU:
503 dev_err(dev, "DEUISR 0x%08x_%08x\n",
504 in_be32(priv->reg_deu + TALITOS_EUISR),
505 in_be32(priv->reg_deu + TALITOS_EUISR_LO));
507 case DESC_HDR_SEL0_MDEUA:
508 case DESC_HDR_SEL0_MDEUB:
509 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
510 in_be32(priv->reg_mdeu + TALITOS_EUISR),
511 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
513 case DESC_HDR_SEL0_RNG:
514 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
515 in_be32(priv->reg_rngu + TALITOS_ISR),
516 in_be32(priv->reg_rngu + TALITOS_ISR_LO));
518 case DESC_HDR_SEL0_PKEU:
519 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
520 in_be32(priv->reg_pkeu + TALITOS_EUISR),
521 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
523 case DESC_HDR_SEL0_AESU:
524 dev_err(dev, "AESUISR 0x%08x_%08x\n",
525 in_be32(priv->reg_aesu + TALITOS_EUISR),
526 in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
528 case DESC_HDR_SEL0_CRCU:
529 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
530 in_be32(priv->reg_crcu + TALITOS_EUISR),
531 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
533 case DESC_HDR_SEL0_KEU:
534 dev_err(dev, "KEUISR 0x%08x_%08x\n",
535 in_be32(priv->reg_pkeu + TALITOS_EUISR),
536 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
540 switch (desc_hdr & DESC_HDR_SEL1_MASK) {
541 case DESC_HDR_SEL1_MDEUA:
542 case DESC_HDR_SEL1_MDEUB:
543 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
544 in_be32(priv->reg_mdeu + TALITOS_EUISR),
545 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
547 case DESC_HDR_SEL1_CRCU:
548 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
549 in_be32(priv->reg_crcu + TALITOS_EUISR),
550 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
554 for (i = 0; i < 8; i++)
555 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
556 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
557 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
561 * recover from error interrupts
563 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
565 struct talitos_private *priv = dev_get_drvdata(dev);
566 unsigned int timeout = TALITOS_TIMEOUT;
567 int ch, error, reset_dev = 0;
569 bool is_sec1 = has_ftr_sec1(priv);
570 int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
572 for (ch = 0; ch < priv->num_channels; ch++) {
573 /* skip channels without errors */
575 /* bits 29, 31, 17, 19 */
576 if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
579 if (!(isr & (1 << (ch * 2 + 1))))
585 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
587 if (v_lo & TALITOS_CCPSR_LO_DOF) {
588 dev_err(dev, "double fetch fifo overflow error\n");
592 if (v_lo & TALITOS_CCPSR_LO_SOF) {
593 /* h/w dropped descriptor */
594 dev_err(dev, "single fetch fifo overflow error\n");
597 if (v_lo & TALITOS_CCPSR_LO_MDTE)
598 dev_err(dev, "master data transfer error\n");
599 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
600 dev_err(dev, is_sec1 ? "pointer not complete error\n"
601 : "s/g data length zero error\n");
602 if (v_lo & TALITOS_CCPSR_LO_FPZ)
603 dev_err(dev, is_sec1 ? "parity error\n"
604 : "fetch pointer zero error\n");
605 if (v_lo & TALITOS_CCPSR_LO_IDH)
606 dev_err(dev, "illegal descriptor header error\n");
607 if (v_lo & TALITOS_CCPSR_LO_IEU)
608 dev_err(dev, is_sec1 ? "static assignment error\n"
609 : "invalid exec unit error\n");
610 if (v_lo & TALITOS_CCPSR_LO_EU)
611 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
613 if (v_lo & TALITOS_CCPSR_LO_GB)
614 dev_err(dev, "gather boundary error\n");
615 if (v_lo & TALITOS_CCPSR_LO_GRL)
616 dev_err(dev, "gather return/length error\n");
617 if (v_lo & TALITOS_CCPSR_LO_SB)
618 dev_err(dev, "scatter boundary error\n");
619 if (v_lo & TALITOS_CCPSR_LO_SRL)
620 dev_err(dev, "scatter return/length error\n");
623 flush_channel(dev, ch, error, reset_ch);
626 reset_channel(dev, ch);
628 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
630 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
631 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
632 TALITOS2_CCCR_CONT) && --timeout)
635 dev_err(dev, "failed to restart channel %d\n",
641 if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
642 (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
643 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
644 dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
647 dev_err(dev, "done overflow, internal time out, or "
648 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
650 /* purge request queues */
651 for (ch = 0; ch < priv->num_channels; ch++)
652 flush_channel(dev, ch, -EIO, 1);
654 /* reset and reinitialize the device */
659 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
660 static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
662 struct device *dev = data; \
663 struct talitos_private *priv = dev_get_drvdata(dev); \
665 unsigned long flags; \
667 spin_lock_irqsave(&priv->reg_lock, flags); \
668 isr = in_be32(priv->reg + TALITOS_ISR); \
669 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
670 /* Acknowledge interrupt */ \
671 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
672 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
674 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
675 spin_unlock_irqrestore(&priv->reg_lock, flags); \
676 talitos_error(dev, isr & ch_err_mask, isr_lo); \
679 if (likely(isr & ch_done_mask)) { \
680 /* mask further done interrupts. */ \
681 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
682 /* done_task will unmask done interrupts at exit */ \
683 tasklet_schedule(&priv->done_task[tlet]); \
685 spin_unlock_irqrestore(&priv->reg_lock, flags); \
688 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
692 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
694 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
695 static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
697 struct device *dev = data; \
698 struct talitos_private *priv = dev_get_drvdata(dev); \
700 unsigned long flags; \
702 spin_lock_irqsave(&priv->reg_lock, flags); \
703 isr = in_be32(priv->reg + TALITOS_ISR); \
704 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
705 /* Acknowledge interrupt */ \
706 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
707 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
709 if (unlikely(isr & ch_err_mask || isr_lo)) { \
710 spin_unlock_irqrestore(&priv->reg_lock, flags); \
711 talitos_error(dev, isr & ch_err_mask, isr_lo); \
714 if (likely(isr & ch_done_mask)) { \
715 /* mask further done interrupts. */ \
716 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
717 /* done_task will unmask done interrupts at exit */ \
718 tasklet_schedule(&priv->done_task[tlet]); \
720 spin_unlock_irqrestore(&priv->reg_lock, flags); \
723 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
727 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
728 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
730 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
736 static int talitos_rng_data_present(struct hwrng *rng, int wait)
738 struct device *dev = (struct device *)rng->priv;
739 struct talitos_private *priv = dev_get_drvdata(dev);
743 for (i = 0; i < 20; i++) {
744 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
745 TALITOS_RNGUSR_LO_OFL;
754 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
756 struct device *dev = (struct device *)rng->priv;
757 struct talitos_private *priv = dev_get_drvdata(dev);
759 /* rng fifo requires 64-bit accesses */
760 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
761 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
766 static int talitos_rng_init(struct hwrng *rng)
768 struct device *dev = (struct device *)rng->priv;
769 struct talitos_private *priv = dev_get_drvdata(dev);
770 unsigned int timeout = TALITOS_TIMEOUT;
772 setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
773 while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
774 & TALITOS_RNGUSR_LO_RD)
778 dev_err(dev, "failed to reset rng hw\n");
782 /* start generating */
783 setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
788 static int talitos_register_rng(struct device *dev)
790 struct talitos_private *priv = dev_get_drvdata(dev);
793 priv->rng.name = dev_driver_string(dev),
794 priv->rng.init = talitos_rng_init,
795 priv->rng.data_present = talitos_rng_data_present,
796 priv->rng.data_read = talitos_rng_data_read,
797 priv->rng.priv = (unsigned long)dev;
799 err = hwrng_register(&priv->rng);
801 priv->rng_registered = true;
806 static void talitos_unregister_rng(struct device *dev)
808 struct talitos_private *priv = dev_get_drvdata(dev);
810 if (!priv->rng_registered)
813 hwrng_unregister(&priv->rng);
814 priv->rng_registered = false;
820 #define TALITOS_CRA_PRIORITY 3000
822 * Defines a priority for doing AEAD with descriptors type
823 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
825 #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
826 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
827 #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
829 #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
831 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
836 __be32 desc_hdr_template;
837 u8 key[TALITOS_MAX_KEY_SIZE];
838 u8 iv[TALITOS_MAX_IV_LENGTH];
841 unsigned int enckeylen;
842 unsigned int authkeylen;
845 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
846 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
848 struct talitos_ahash_req_ctx {
849 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
850 unsigned int hw_context_size;
851 u8 buf[2][HASH_MAX_BLOCK_SIZE];
856 unsigned int to_hash_later;
858 struct scatterlist bufsl[2];
859 struct scatterlist *psrc;
862 struct talitos_export_state {
863 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
864 u8 buf[HASH_MAX_BLOCK_SIZE];
868 unsigned int to_hash_later;
872 static int aead_setkey(struct crypto_aead *authenc,
873 const u8 *key, unsigned int keylen)
875 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
876 struct device *dev = ctx->dev;
877 struct crypto_authenc_keys keys;
879 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
882 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
886 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
888 memcpy(ctx->key, keys.authkey, keys.authkeylen);
889 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
891 ctx->keylen = keys.authkeylen + keys.enckeylen;
892 ctx->enckeylen = keys.enckeylen;
893 ctx->authkeylen = keys.authkeylen;
894 ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
897 memzero_explicit(&keys, sizeof(keys));
901 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
902 memzero_explicit(&keys, sizeof(keys));
906 static int aead_des3_setkey(struct crypto_aead *authenc,
907 const u8 *key, unsigned int keylen)
909 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
910 struct device *dev = ctx->dev;
911 struct crypto_authenc_keys keys;
915 err = crypto_authenc_extractkeys(&keys, key, keylen);
920 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
923 if (keys.enckeylen != DES3_EDE_KEY_SIZE)
926 flags = crypto_aead_get_flags(authenc);
927 err = __des3_verify_key(&flags, keys.enckey);
929 crypto_aead_set_flags(authenc, flags);
934 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
936 memcpy(ctx->key, keys.authkey, keys.authkeylen);
937 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
939 ctx->keylen = keys.authkeylen + keys.enckeylen;
940 ctx->enckeylen = keys.enckeylen;
941 ctx->authkeylen = keys.authkeylen;
942 ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
946 memzero_explicit(&keys, sizeof(keys));
950 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
954 static void talitos_sg_unmap(struct device *dev,
955 struct talitos_edesc *edesc,
956 struct scatterlist *src,
957 struct scatterlist *dst,
958 unsigned int len, unsigned int offset)
960 struct talitos_private *priv = dev_get_drvdata(dev);
961 bool is_sec1 = has_ftr_sec1(priv);
962 unsigned int src_nents = edesc->src_nents ? : 1;
963 unsigned int dst_nents = edesc->dst_nents ? : 1;
965 if (is_sec1 && dst && dst_nents > 1) {
966 dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
967 len, DMA_FROM_DEVICE);
968 sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
972 if (src_nents == 1 || !is_sec1)
973 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
975 if (dst && (dst_nents == 1 || !is_sec1))
976 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
977 } else if (src_nents == 1 || !is_sec1) {
978 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
982 static void ipsec_esp_unmap(struct device *dev,
983 struct talitos_edesc *edesc,
984 struct aead_request *areq, bool encrypt)
986 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
987 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
988 unsigned int ivsize = crypto_aead_ivsize(aead);
989 unsigned int authsize = crypto_aead_authsize(aead);
990 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
991 bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
992 struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
995 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
997 unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
999 talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
1000 cryptlen + authsize, areq->assoclen);
1003 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1006 if (!is_ipsec_esp) {
1007 unsigned int dst_nents = edesc->dst_nents ? : 1;
1009 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1010 areq->assoclen + cryptlen - ivsize);
1015 * ipsec_esp descriptor callbacks
1017 static void ipsec_esp_encrypt_done(struct device *dev,
1018 struct talitos_desc *desc, void *context,
1021 struct aead_request *areq = context;
1022 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1023 unsigned int ivsize = crypto_aead_ivsize(authenc);
1024 struct talitos_edesc *edesc;
1026 edesc = container_of(desc, struct talitos_edesc, desc);
1028 ipsec_esp_unmap(dev, edesc, areq, true);
1030 dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1034 aead_request_complete(areq, err);
1037 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1038 struct talitos_desc *desc,
1039 void *context, int err)
1041 struct aead_request *req = context;
1042 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1043 unsigned int authsize = crypto_aead_authsize(authenc);
1044 struct talitos_edesc *edesc;
1047 edesc = container_of(desc, struct talitos_edesc, desc);
1049 ipsec_esp_unmap(dev, edesc, req, false);
1053 oicv = edesc->buf + edesc->dma_len;
1054 icv = oicv - authsize;
1056 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1061 aead_request_complete(req, err);
1064 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1065 struct talitos_desc *desc,
1066 void *context, int err)
1068 struct aead_request *req = context;
1069 struct talitos_edesc *edesc;
1071 edesc = container_of(desc, struct talitos_edesc, desc);
1073 ipsec_esp_unmap(dev, edesc, req, false);
1075 /* check ICV auth status */
1076 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1077 DESC_HDR_LO_ICCR1_PASS))
1082 aead_request_complete(req, err);
1086 * convert scatterlist to SEC h/w link table format
1087 * stop at cryptlen bytes
1089 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1090 unsigned int offset, int datalen, int elen,
1091 struct talitos_ptr *link_tbl_ptr)
1093 int n_sg = elen ? sg_count + 1 : sg_count;
1095 int cryptlen = datalen + elen;
1097 while (cryptlen && sg && n_sg--) {
1098 unsigned int len = sg_dma_len(sg);
1100 if (offset >= len) {
1110 if (datalen > 0 && len > datalen) {
1111 to_talitos_ptr(link_tbl_ptr + count,
1112 sg_dma_address(sg) + offset, datalen, 0);
1113 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1118 to_talitos_ptr(link_tbl_ptr + count,
1119 sg_dma_address(sg) + offset, len, 0);
1120 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1130 /* tag end of link table */
1132 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1133 DESC_PTR_LNKTBL_RET, 0);
1138 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1139 unsigned int len, struct talitos_edesc *edesc,
1140 struct talitos_ptr *ptr, int sg_count,
1141 unsigned int offset, int tbl_off, int elen,
1144 struct talitos_private *priv = dev_get_drvdata(dev);
1145 bool is_sec1 = has_ftr_sec1(priv);
1148 to_talitos_ptr(ptr, 0, 0, is_sec1);
1151 to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1152 if (sg_count == 1 && !force) {
1153 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1157 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1160 sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
1161 &edesc->link_tbl[tbl_off]);
1162 if (sg_count == 1 && !force) {
1163 /* Only one segment now, so no link tbl needed*/
1164 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1167 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1168 tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1169 to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1174 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1175 unsigned int len, struct talitos_edesc *edesc,
1176 struct talitos_ptr *ptr, int sg_count,
1177 unsigned int offset, int tbl_off)
1179 return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1184  * fill in and submit ipsec_esp descriptor
1186 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1188 void (*callback)(struct device *dev,
1189 struct talitos_desc *desc,
1190 void *context, int error))
1192 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1193 unsigned int authsize = crypto_aead_authsize(aead);
1194 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1195 struct device *dev = ctx->dev;
1196 struct talitos_desc *desc = &edesc->desc;
/* On decrypt, the trailing ICV is not part of the payload to cipher */
1197 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1198 unsigned int ivsize = crypto_aead_ivsize(aead);
1202 bool sync_needed = false;
1203 struct talitos_private *priv = dev_get_drvdata(dev);
1204 bool is_sec1 = has_ftr_sec1(priv);
1205 bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
/* IPSEC_ESP descriptors swap the cipher-IV and cipher-key pointer slots */
1206 struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1207 struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
/* ICV scratch area lives at the very end of the mapped link table */
1208 dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
/* ptr[0]: HMAC key */
1211 to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
/* SEC1 cannot walk a multi-entry S/G list here; linearize into edesc->buf */
1213 sg_count = edesc->src_nents ?: 1;
1214 if (is_sec1 && sg_count > 1)
1215 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1216 areq->assoclen + cryptlen);
1218 sg_count = dma_map_sg(dev, areq->src, sg_count,
1219 (areq->src == areq->dst) ?
1220 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
/* ptr[1]: associated data */
1223 ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1224 &desc->ptr[1], sg_count, 0, tbl_off);
/* cipher IV and cipher key (key blob holds auth key then enc key) */
1232 to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1235 to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
1236 ctx->enckeylen, is_sec1);
1240  * map and adjust cipher len to aead request cryptlen.
1241  * extent is bytes of HMAC postpended to ciphertext,
1242  * typically 12 for ipsec
1244 if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
/* ptr[4]: cipher input */
1247 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1248 sg_count, areq->assoclen, tbl_off, elen,
/* ptr[5]: cipher output (separate dst mapping only when src != dst) */
1257 if (areq->src != areq->dst) {
1258 sg_count = edesc->dst_nents ? : 1;
1259 if (!is_sec1 || sg_count == 1)
1260 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1263 if (is_ipsec_esp && encrypt)
1267 ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1268 sg_count, areq->assoclen, tbl_off, elen,
1269 is_ipsec_esp && !encrypt);
/* remember whether the generated ICV lands out-of-line (link table) */
1273 edesc->icv_ool = !encrypt;
1275 if (!encrypt && is_ipsec_esp) {
1276 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1278 /* Add an entry to the link table for ICV data */
1279 to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1280 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
1282 /* icv data follows link tables */
1283 to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
1284 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1286 } else if (!encrypt) {
/* non-ESP decrypt: h/w writes the computed ICV via ptr[6] */
1287 to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
1289 } else if (!is_ipsec_esp) {
1290 talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
1291 sg_count, areq->assoclen + cryptlen, tbl_off);
/* ptr[6] (ESP): IV out */
1296 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1300 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1304 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1305 if (ret != -EINPROGRESS) {
/* submission failed: undo all DMA mappings before returning */
1306 ipsec_esp_unmap(dev, edesc, areq, encrypt);
1313  * allocate and map the extended descriptor
1315 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1316 struct scatterlist *src,
1317 struct scatterlist *dst,
1319 unsigned int assoclen,
1320 unsigned int cryptlen,
1321 unsigned int authsize,
1322 unsigned int ivsize,
1327 struct talitos_edesc *edesc;
1328 int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1329 dma_addr_t iv_dma = 0;
/* may be called from atomic context; only sleep when the request allows */
1330 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1332 struct talitos_private *priv = dev_get_drvdata(dev);
1333 bool is_sec1 = has_ftr_sec1(priv);
1334 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1336 if (cryptlen + authsize > max_len) {
1337 dev_err(dev, "length exceeds h/w max limit\n");
1338 return ERR_PTR(-EINVAL);
/* in-place operation: one nents count covers both directions */
1341 if (!dst || dst == src) {
1342 src_len = assoclen + cryptlen + authsize;
1343 src_nents = sg_nents_for_len(src, src_len);
1344 if (src_nents < 0) {
1345 dev_err(dev, "Invalid number of src SG.\n");
1346 return ERR_PTR(-EINVAL);
/* nents == 1 is recorded as 0: "no link table needed" */
1348 src_nents = (src_nents == 1) ? 0 : src_nents;
1349 dst_nents = dst ? src_nents : 0;
1351 } else { /* dst && dst != src*/
/* src carries the ICV only on decrypt; dst grows by it on encrypt */
1352 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1353 src_nents = sg_nents_for_len(src, src_len);
1354 if (src_nents < 0) {
1355 dev_err(dev, "Invalid number of src SG.\n");
1356 return ERR_PTR(-EINVAL);
1358 src_nents = (src_nents == 1) ? 0 : src_nents;
1359 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1360 dst_nents = sg_nents_for_len(dst, dst_len);
1361 if (dst_nents < 0) {
1362 dev_err(dev, "Invalid number of dst SG.\n");
1363 return ERR_PTR(-EINVAL);
1365 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1369 * allocate space for base edesc plus the link tables,
1370 * allowing for two separate entries for AD and generated ICV (+ 2),
1371 * and space for two sets of ICVs (stashed and generated)
1373 alloc_len = sizeof(struct talitos_edesc);
1374 if (src_nents || dst_nents || !encrypt) {
/* SEC1 uses a flat bounce buffer instead of link tables */
1376 dma_len = (src_nents ? src_len : 0) +
1377 (dst_nents ? dst_len : 0) + authsize;
1379 dma_len = (src_nents + dst_nents + 2) *
1380 sizeof(struct talitos_ptr) + authsize;
1381 alloc_len += dma_len;
1385 alloc_len += icv_stashing ? authsize : 0;
1387 /* if its a ahash, add space for a second desc next to the first one */
1388 if (is_sec1 && !dst)
1389 alloc_len += sizeof(struct talitos_desc);
/* IV is copied to the tail of the allocation so it can be DMA-mapped */
1390 alloc_len += ivsize;
1392 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1394 return ERR_PTR(-ENOMEM);
1396 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1397 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1399 memset(&edesc->desc, 0, sizeof(edesc->desc));
1401 edesc->src_nents = src_nents;
1402 edesc->dst_nents = dst_nents;
1403 edesc->iv_dma = iv_dma;
1404 edesc->dma_len = dma_len;
1406 void *addr = &edesc->link_tbl[0];
/* skip over the extra ahash descriptor reserved above */
1408 if (is_sec1 && !dst)
1409 addr += sizeof(struct talitos_desc);
1410 edesc->dma_link_tbl = dma_map_single(dev, addr,
/*
 * AEAD front end for talitos_edesc_alloc(): derives lengths from the
 * aead_request and its transform. On decrypt the ICV bytes are excluded
 * from cryptlen (they are verified, not deciphered).
 */
1417 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1418 int icv_stashing, bool encrypt)
1420 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1421 unsigned int authsize = crypto_aead_authsize(authenc);
1422 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1423 unsigned int ivsize = crypto_aead_ivsize(authenc);
1424 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1426 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1427 iv, areq->assoclen, cryptlen,
1428 authsize, ivsize, icv_stashing,
1429 areq->base.flags, encrypt);
/*
 * AEAD .encrypt entry point: build an extended descriptor, stamp the
 * per-algorithm header template with the encrypt direction, and hand
 * off to ipsec_esp() for submission.
 */
1432 static int aead_encrypt(struct aead_request *req)
1434 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1435 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1436 struct talitos_edesc *edesc;
1438 /* allocate extended descriptor */
1439 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1441 return PTR_ERR(edesc);
1444 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1446 return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
/*
 * AEAD .decrypt entry point. Prefers hardware ICV checking (CICV) when
 * the engine supports it for this request layout; otherwise stashes the
 * incoming ICV so the completion handler can compare it in software.
 */
1449 static int aead_decrypt(struct aead_request *req)
1451 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1452 unsigned int authsize = crypto_aead_authsize(authenc);
1453 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1454 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1455 struct talitos_edesc *edesc;
1458 /* allocate extended descriptor */
1459 edesc = aead_edesc_alloc(req, req->iv, 1, false);
1461 return PTR_ERR(edesc);
/* h/w auth check is usable only for ESP-type descriptors and layouts
 * the engine can handle (contiguous buffers, or extent-capable h/w) */
1463 if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1464 (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1465 ((!edesc->src_nents && !edesc->dst_nents) ||
1466 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1468 /* decrypt and check the ICV */
1469 edesc->desc.hdr = ctx->desc_hdr_template |
1470 DESC_HDR_DIR_INBOUND |
1471 DESC_HDR_MODE1_MDEU_CICV;
1473 /* reset integrity check result bits */
1475 return ipsec_esp(edesc, req, false,
1476 ipsec_esp_decrypt_hwauth_done);
1479 /* Have to check the ICV with software */
1480 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1482 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1483 icvdata = edesc->buf + edesc->dma_len;
1485 sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1486 req->assoclen + req->cryptlen - authsize);
1488 return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
/*
 * Generic ablkcipher .setkey: cache the key in the tfm context and
 * (re)map it for DMA, unmapping any previously mapped key first.
 */
1491 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1492 const u8 *key, unsigned int keylen)
1494 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1495 struct device *dev = ctx->dev;
1498 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1500 memcpy(&ctx->key, key, keylen);
1501 ctx->keylen = keylen;
1503 ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
/*
 * DES .setkey: reject weak keys when the caller asked for that check
 * (CRYPTO_TFM_REQ_FORBID_WEAK_KEYS), then fall through to the generic
 * setkey.
 */
1508 static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
1509 const u8 *key, unsigned int keylen)
1511 u32 tmp[DES_EXPKEY_WORDS];
1513 if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1514 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
1515 !des_ekey(tmp, key)) {
1516 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1520 return ablkcipher_setkey(cipher, key, keylen);
/*
 * 3DES .setkey: verify the triple-DES key (distinct sub-keys etc.) via
 * __des3_verify_key() before delegating to the generic setkey.
 */
1523 static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
1524 const u8 *key, unsigned int keylen)
1529 flags = crypto_ablkcipher_get_flags(cipher);
1530 err = __des3_verify_key(&flags, key);
1531 if (unlikely(err)) {
1532 crypto_ablkcipher_set_flags(cipher, flags);
1536 return ablkcipher_setkey(cipher, key, keylen);
/*
 * AES .setkey: accept only the three valid AES key sizes (128/192/256
 * bits); anything else is flagged as a bad key length.
 */
1539 static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
1540 const u8 *key, unsigned int keylen)
1542 if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1543 keylen == AES_KEYSIZE_256)
1544 return ablkcipher_setkey(cipher, key, keylen);
1546 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
/*
 * Undo all DMA mappings set up by common_nonsnoop(): IV-out (ptr[5]),
 * the src/dst scatterlists, IV-in (ptr[1]) and the link table.
 */
1551 static void common_nonsnoop_unmap(struct device *dev,
1552 struct talitos_edesc *edesc,
1553 struct ablkcipher_request *areq)
1555 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1557 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1558 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1561 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
/*
 * Completion callback for ablkcipher requests: unmap DMA resources,
 * copy the output IV back to the request (chaining support), and
 * complete the request toward the crypto API.
 */
1565 static void ablkcipher_done(struct device *dev,
1566 struct talitos_desc *desc, void *context,
1569 struct ablkcipher_request *areq = context;
1570 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1571 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1572 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1573 struct talitos_edesc *edesc;
1575 edesc = container_of(desc, struct talitos_edesc, desc);
1577 common_nonsnoop_unmap(dev, edesc, areq);
1578 memcpy(areq->info, ctx->iv, ivsize);
1582 areq->base.complete(&areq->base, err);
/*
 * Fill in and submit a plain (non-snooping) cipher descriptor:
 * ptr[1] = IV in, ptr[2] = key, ptr[3] = data in, ptr[4] = data out,
 * ptr[5] = IV out; ptr[0] and ptr[6] stay empty.
 */
1585 static int common_nonsnoop(struct talitos_edesc *edesc,
1586 struct ablkcipher_request *areq,
1587 void (*callback) (struct device *dev,
1588 struct talitos_desc *desc,
1589 void *context, int error))
1591 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1592 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1593 struct device *dev = ctx->dev;
1594 struct talitos_desc *desc = &edesc->desc;
1595 unsigned int cryptlen = areq->nbytes;
1596 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1598 bool sync_needed = false;
1599 struct talitos_private *priv = dev_get_drvdata(dev);
1600 bool is_sec1 = has_ftr_sec1(priv);
1602 /* first DWORD empty */
1605 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1608 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
/* SEC1 with multiple SG entries: linearize input into the bounce buffer */
1610 sg_count = edesc->src_nents ?: 1;
1611 if (is_sec1 && sg_count > 1)
1612 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1615 sg_count = dma_map_sg(dev, areq->src, sg_count,
1616 (areq->src == areq->dst) ?
1617 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1621 sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1622 &desc->ptr[3], sg_count, 0, 0);
/* cipher out: map dst separately only when operating out-of-place */
1627 if (areq->src != areq->dst) {
1628 sg_count = edesc->dst_nents ? : 1;
1629 if (!is_sec1 || sg_count == 1)
1630 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
/* dst link-table entries start after the src ones (src_nents + 1) */
1633 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1634 sg_count, 0, (edesc->src_nents + 1));
1639 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1642 /* last DWORD empty */
1645 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1646 edesc->dma_len, DMA_BIDIRECTIONAL);
1648 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1649 if (ret != -EINPROGRESS) {
/* submission failed: roll back all DMA mappings */
1650 common_nonsnoop_unmap(dev, edesc, areq);
/*
 * Ablkcipher front end for talitos_edesc_alloc(): no assoclen, no
 * authsize, no ICV stashing — just payload plus IV.
 */
1656 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1659 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1660 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1661 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1663 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1664 areq->info, 0, areq->nbytes, 0, ivsize, 0,
1665 areq->base.flags, encrypt);
/*
 * Ablkcipher .encrypt entry point: reject lengths that are not a
 * multiple of the cipher block size, then build and submit the
 * descriptor via common_nonsnoop().
 */
1668 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1670 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1671 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1672 struct talitos_edesc *edesc;
1673 unsigned int blocksize =
1674 crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1679 if (areq->nbytes % blocksize)
1682 /* allocate extended descriptor */
1683 edesc = ablkcipher_edesc_alloc(areq, true);
1685 return PTR_ERR(edesc);
1688 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1690 return common_nonsnoop(edesc, areq, ablkcipher_done);
/*
 * Ablkcipher .decrypt entry point: mirror of ablkcipher_encrypt() with
 * the inbound direction bit set in the descriptor header.
 */
1693 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1695 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1696 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1697 struct talitos_edesc *edesc;
1698 unsigned int blocksize =
1699 crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1704 if (areq->nbytes % blocksize)
1707 /* allocate extended descriptor */
1708 edesc = ablkcipher_edesc_alloc(areq, false);
1710 return PTR_ERR(edesc);
1712 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1714 return common_nonsnoop(edesc, areq, ablkcipher_done);
/*
 * Undo all DMA mappings set up by common_nonsnoop_hash(), including
 * those of the optional second (chained) descriptor used on SEC1.
 */
1717 static void common_nonsnoop_hash_unmap(struct device *dev,
1718 struct talitos_edesc *edesc,
1719 struct ahash_request *areq)
1721 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1722 struct talitos_private *priv = dev_get_drvdata(dev);
1723 bool is_sec1 = has_ftr_sec1(priv);
1724 struct talitos_desc *desc = &edesc->desc;
/* second descriptor lives immediately after the first (SEC1 chaining) */
1725 struct talitos_desc *desc2 = desc + 1;
1727 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
/* only unmap desc2's out-pointer when it wasn't shared with desc's */
1728 if (desc->next_desc &&
1729 desc->ptr[5].ptr != desc2->ptr[5].ptr)
1730 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1732 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1734 /* When using hashctx-in, must unmap it. */
1735 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1736 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1738 else if (desc->next_desc)
1739 unmap_single_talitos_ptr(dev, &desc2->ptr[1],
/* SEC1 maps the buffered partial block into ptr[3] — see the map path */
1742 if (is_sec1 && req_ctx->nbuf)
1743 unmap_single_talitos_ptr(dev, &desc->ptr[3],
1747 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1750 if (edesc->desc.next_desc)
1751 dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1752 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
/*
 * Completion callback for hash requests: rotate the double buffer so a
 * saved partial block becomes the pending data for the next operation,
 * unmap DMA resources, and complete the request.
 */
1755 static void ahash_done(struct device *dev,
1756 struct talitos_desc *desc, void *context,
1759 struct ahash_request *areq = context;
1760 struct talitos_edesc *edesc =
1761 container_of(desc, struct talitos_edesc, desc);
1762 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1764 if (!req_ctx->last && req_ctx->to_hash_later) {
1765 /* Position any partial block for next update/final/finup */
1766 req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1767 req_ctx->nbuf = req_ctx->to_hash_later;
1769 common_nonsnoop_hash_unmap(dev, edesc, areq);
1773 areq->base.complete(&areq->base, err);
1777  * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1778  * ourself and submit a padded block
1780 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1781 struct talitos_edesc *edesc,
1782 struct talitos_ptr *ptr)
/* 0x80 followed by zeros = MD5/SHA padding of an empty message
 * (length field zero) — one full 64-byte block */
1784 static u8 padded_hash[64] = {
1785 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1786 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1787 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1788 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1791 pr_err_once("Bug in SEC1, padding ourself\n");
/* padding supplied by us — tell the MDEU not to pad again */
1792 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1793 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1794 (char *)padded_hash, DMA_TO_DEVICE);
/*
 * Fill in and submit a hash descriptor: ptr[1] = hash context in,
 * ptr[2] = HMAC key, ptr[3] = data in, ptr[5] = digest/context out.
 * On SEC1, when both a buffered partial block and new data must be
 * hashed, a second chained descriptor is built right after the first.
 */
1797 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1798 struct ahash_request *areq, unsigned int length,
1799 unsigned int offset,
1800 void (*callback) (struct device *dev,
1801 struct talitos_desc *desc,
1802 void *context, int error))
1804 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1805 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1806 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1807 struct device *dev = ctx->dev;
1808 struct talitos_desc *desc = &edesc->desc;
1810 bool sync_needed = false;
1811 struct talitos_private *priv = dev_get_drvdata(dev);
1812 bool is_sec1 = has_ftr_sec1(priv);
1815 /* first DWORD empty */
1817 /* hash context in */
1818 if (!req_ctx->first || req_ctx->swinit) {
1819 map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1820 req_ctx->hw_context_size,
1821 req_ctx->hw_context,
/* swinit only forces context load once (e.g. sha224 soft init) */
1823 req_ctx->swinit = 0;
1825 /* Indicate next op is not the first. */
1830 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
/* on SEC1 the buffered bytes go via ptr[3] directly, not via psrc */
1833 if (is_sec1 && req_ctx->nbuf)
1834 length -= req_ctx->nbuf;
1836 sg_count = edesc->src_nents ?: 1;
1837 if (is_sec1 && sg_count > 1)
1838 sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
1839 edesc->buf + sizeof(struct talitos_desc),
1840 length, req_ctx->nbuf);
1842 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1847 if (is_sec1 && req_ctx->nbuf) {
1848 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1849 req_ctx->buf[req_ctx->buf_idx],
1852 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1853 &desc->ptr[3], sg_count, offset, 0);
1858 /* fifth DWORD empty */
1860 /* hash/HMAC out -or- hash context out */
1862 map_single_talitos_ptr(dev, &desc->ptr[5],
1863 crypto_ahash_digestsize(tfm),
1864 areq->result, DMA_FROM_DEVICE);
1866 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1867 req_ctx->hw_context_size,
1868 req_ctx->hw_context,
1871 /* last DWORD empty */
/* zero-length data in: work around SEC1 empty-message bug */
1873 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1874 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
/* SEC1 two-pass: desc hashes the buffered block (CONT, no pad, no irq),
 * desc2 continues over the new data and performs the final padding */
1876 if (is_sec1 && req_ctx->nbuf && length) {
1877 struct talitos_desc *desc2 = desc + 1;
1878 dma_addr_t next_desc;
1880 memset(desc2, 0, sizeof(*desc2));
1881 desc2->hdr = desc->hdr;
1882 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1883 desc2->hdr1 = desc2->hdr;
1884 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1885 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1886 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1888 if (desc->ptr[1].ptr)
1889 copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1892 map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1893 req_ctx->hw_context_size,
1894 req_ctx->hw_context,
1896 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1897 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1898 &desc2->ptr[3], sg_count, offset, 0);
1901 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
/* NOTE(review): desc (not desc2) ptr[5] remapped to hw_context here —
 * presumably the first pass must dump its context; confirm vs upstream */
1903 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1904 req_ctx->hw_context_size,
1905 req_ctx->hw_context,
1908 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1910 desc->next_desc = cpu_to_be32(next_desc);
1914 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1915 edesc->dma_len, DMA_BIDIRECTIONAL);
1917 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1918 if (ret != -EINPROGRESS) {
1919 common_nonsnoop_hash_unmap(dev, edesc, areq);
/*
 * Hash front end for talitos_edesc_alloc(): source only (dst == NULL),
 * no IV, no authsize. On SEC1 the previously buffered bytes are fed
 * separately through ptr[3], so they are excluded from nbytes here.
 */
1925 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1926 unsigned int nbytes)
1928 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1929 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1930 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1931 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1932 bool is_sec1 = has_ftr_sec1(priv);
1935 nbytes -= req_ctx->nbuf;
1937 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1938 nbytes, 0, 0, 0, areq->base.flags, false);
/*
 * Hash .init: reset the per-request context; context size depends on
 * digest width (MD5/SHA1/SHA256 vs SHA384/SHA512). The map/unmap pair
 * at the end flushes the (zeroed) context to memory for the device.
 */
1941 static int ahash_init(struct ahash_request *areq)
1943 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1944 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1945 struct device *dev = ctx->dev;
1946 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1950 /* Initialize the context */
1951 req_ctx->buf_idx = 0;
1953 req_ctx->first = 1; /* first indicates h/w must init its context */
1954 req_ctx->swinit = 0; /* assume h/w init of context */
1955 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1956 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1957 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1958 req_ctx->hw_context_size = size;
1960 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1962 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1968  * on h/w without explicit sha224 support, we initialize h/w context
1969  * manually with sha224 constants, and tell it to run sha256.
1971 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1973 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
/* standard SHA-224 initial hash values (FIPS 180-4) */
1975 req_ctx->hw_context[0] = SHA224_H0;
1976 req_ctx->hw_context[1] = SHA224_H1;
1977 req_ctx->hw_context[2] = SHA224_H2;
1978 req_ctx->hw_context[3] = SHA224_H3;
1979 req_ctx->hw_context[4] = SHA224_H4;
1980 req_ctx->hw_context[5] = SHA224_H5;
1981 req_ctx->hw_context[6] = SHA224_H6;
1982 req_ctx->hw_context[7] = SHA224_H7;
1984 /* init 64-bit count */
1985 req_ctx->hw_context[8] = 0;
1986 req_ctx->hw_context[9] = 0;
1989 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
/*
 * Core of update/final/finup/digest: buffer sub-blocksize input, split
 * the rest into a whole-blocks portion to hash now and a remainder
 * saved (double-buffered) for later, then build and submit the
 * descriptor via common_nonsnoop_hash().
 */
1994 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1996 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1997 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1998 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1999 struct talitos_edesc *edesc;
2000 unsigned int blocksize =
2001 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2002 unsigned int nbytes_to_hash;
2003 unsigned int to_hash_later;
2006 struct device *dev = ctx->dev;
2007 struct talitos_private *priv = dev_get_drvdata(dev);
2008 bool is_sec1 = has_ftr_sec1(priv);
2010 u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
2012 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
2013 /* Buffer up to one whole block */
2014 nents = sg_nents_for_len(areq->src, nbytes);
2016 dev_err(ctx->dev, "Invalid number of src SG.\n");
2019 sg_copy_to_buffer(areq->src, nents,
2020 ctx_buf + req_ctx->nbuf, nbytes);
2021 req_ctx->nbuf += nbytes;
2025 /* At least (blocksize + 1) bytes are available to hash */
2026 nbytes_to_hash = nbytes + req_ctx->nbuf;
/* blocksize is a power of two, so this is nbytes_to_hash % blocksize */
2027 to_hash_later = nbytes_to_hash & (blocksize - 1);
2031 else if (to_hash_later)
2032 /* There is a partial block. Hash the full block(s) now */
2033 nbytes_to_hash -= to_hash_later;
2035 /* Keep one block buffered */
2036 nbytes_to_hash -= blocksize;
2037 to_hash_later = blocksize;
2040 /* Chain in any previously buffered data */
2041 if (!is_sec1 && req_ctx->nbuf) {
2042 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2043 sg_init_table(req_ctx->bufsl, nsg);
2044 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2046 sg_chain(req_ctx->bufsl, 2, areq->src);
2047 req_ctx->psrc = req_ctx->bufsl;
2048 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
/* SEC1 can't chain SGs: top up the buffer to a block boundary instead */
2049 if (nbytes_to_hash > blocksize)
2050 offset = blocksize - req_ctx->nbuf;
2052 offset = nbytes_to_hash - req_ctx->nbuf;
2053 nents = sg_nents_for_len(areq->src, offset);
2055 dev_err(ctx->dev, "Invalid number of src SG.\n");
2058 sg_copy_to_buffer(areq->src, nents,
2059 ctx_buf + req_ctx->nbuf, offset);
2060 req_ctx->nbuf += offset;
2061 req_ctx->psrc = areq->src;
2063 req_ctx->psrc = areq->src;
2065 if (to_hash_later) {
2066 nents = sg_nents_for_len(areq->src, nbytes);
2068 dev_err(ctx->dev, "Invalid number of src SG.\n");
/* stash the tail in the *other* buffer; swapped in by ahash_done() */
2071 sg_pcopy_to_buffer(areq->src, nents,
2072 req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2074 nbytes - to_hash_later);
2076 req_ctx->to_hash_later = to_hash_later;
2078 /* Allocate extended descriptor */
2079 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2081 return PTR_ERR(edesc);
2083 edesc->desc.hdr = ctx->desc_hdr_template;
2085 /* On last one, request SEC to pad; otherwise continue */
2087 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2089 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2091 /* request SEC to INIT hash. */
2092 if (req_ctx->first && !req_ctx->swinit)
2093 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2095 /* When the tfm context has a keylen, it's an HMAC.
2096 * A first or last (ie. not middle) descriptor must request HMAC.
2098 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2099 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2101 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
/* Hash .update: feed areq->nbytes more data (not the last chunk). */
2105 static int ahash_update(struct ahash_request *areq)
2107 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2111 return ahash_process_req(areq, areq->nbytes);
/* Hash .final: no new data; flush buffered bytes and emit the digest. */
2114 static int ahash_final(struct ahash_request *areq)
2116 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2120 return ahash_process_req(areq, 0);
/* Hash .finup: process the final areq->nbytes and emit the digest. */
2123 static int ahash_finup(struct ahash_request *areq)
2125 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2129 return ahash_process_req(areq, areq->nbytes);
/* Hash .digest: one-shot init + finup over areq->nbytes bytes. */
2132 static int ahash_digest(struct ahash_request *areq)
2134 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2135 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2140 return ahash_process_req(areq, areq->nbytes);
/*
 * Hash .export: snapshot the request state (h/w context, buffered data
 * and bookkeeping flags) into a talitos_export_state. The map/unmap
 * pair syncs the device-written context back to the CPU first.
 */
2143 static int ahash_export(struct ahash_request *areq, void *out)
2145 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2146 struct talitos_export_state *export = out;
2147 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2148 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2149 struct device *dev = ctx->dev;
2152 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2154 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2156 memcpy(export->hw_context, req_ctx->hw_context,
2157 req_ctx->hw_context_size);
2158 memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2159 export->swinit = req_ctx->swinit;
2160 export->first = req_ctx->first;
2161 export->last = req_ctx->last;
2162 export->to_hash_later = req_ctx->to_hash_later;
2163 export->nbuf = req_ctx->nbuf;
/*
 * Hash .import: restore state previously captured by ahash_export()
 * into a fresh request context, then flush the restored h/w context to
 * memory for the device (map/unmap pair).
 */
2168 static int ahash_import(struct ahash_request *areq, const void *in)
2170 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2171 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2172 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2173 struct device *dev = ctx->dev;
2174 const struct talitos_export_state *export = in;
2178 memset(req_ctx, 0, sizeof(*req_ctx));
2179 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2180 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2181 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2182 req_ctx->hw_context_size = size;
2183 memcpy(req_ctx->hw_context, export->hw_context, size);
/* buffered data always lands in buffer 0 (buf_idx was zeroed above) */
2184 memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2185 req_ctx->swinit = export->swinit;
2186 req_ctx->first = export->first;
2187 req_ctx->last = export->last;
2188 req_ctx->to_hash_later = export->to_hash_later;
2189 req_ctx->nbuf = export->nbuf;
2191 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2193 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
/*
 * Synchronously digest a too-long HMAC key with this same tfm, using
 * crypto_wait_req() to block until the async request completes. Used by
 * ahash_setkey() when keylen exceeds the block size.
 */
2198 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2201 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2203 struct scatterlist sg[1];
2204 struct ahash_request *req;
2205 struct crypto_wait wait;
2208 crypto_init_wait(&wait);
2210 req = ahash_request_alloc(tfm, GFP_KERNEL);
2214 /* Keep tfm keylen == 0 during hash of the long key */
2216 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2217 crypto_req_done, &wait);
2219 sg_init_one(&sg[0], key, keylen);
2221 ahash_request_set_crypt(req, sg, hash, keylen);
2222 ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2224 ahash_request_free(req);
/*
 * HMAC .setkey: keys up to one block are used as-is; longer keys are
 * first reduced to their digest (RFC 2104). The resulting key is cached
 * in the tfm context and DMA-mapped, replacing any previous mapping.
 */
2229 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2230 unsigned int keylen)
2232 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2233 struct device *dev = ctx->dev;
2234 unsigned int blocksize =
2235 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2236 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2237 unsigned int keysize = keylen;
2238 u8 hash[SHA512_DIGEST_SIZE];
2241 if (keylen <= blocksize)
2242 memcpy(ctx->key, key, keysize);
2244 /* Must get the hash of the long key */
2245 ret = keyhash(tfm, key, keylen, hash);
2248 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2252 keysize = digestsize;
2253 memcpy(ctx->key, hash, digestsize);
2257 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2259 ctx->keylen = keysize;
2260 ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
/*
 * Template used to build the driver_algs[] registration table: one
 * crypto_alg/ahash_alg/aead_alg variant plus the ready-made descriptor
 * header for that algorithm.
 */
2266 struct talitos_alg_template {
2270 struct crypto_alg crypto;
2271 struct ahash_alg hash;
2272 struct aead_alg aead;
2274 __be32 desc_hdr_template;
2277 static struct talitos_alg_template driver_algs[] = {
2278 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2279 { .type = CRYPTO_ALG_TYPE_AEAD,
2282 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2283 .cra_driver_name = "authenc-hmac-sha1-"
2285 .cra_blocksize = AES_BLOCK_SIZE,
2286 .cra_flags = CRYPTO_ALG_ASYNC,
2288 .ivsize = AES_BLOCK_SIZE,
2289 .maxauthsize = SHA1_DIGEST_SIZE,
2291 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2292 DESC_HDR_SEL0_AESU |
2293 DESC_HDR_MODE0_AESU_CBC |
2294 DESC_HDR_SEL1_MDEUA |
2295 DESC_HDR_MODE1_MDEU_INIT |
2296 DESC_HDR_MODE1_MDEU_PAD |
2297 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2299 { .type = CRYPTO_ALG_TYPE_AEAD,
2300 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2303 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2304 .cra_driver_name = "authenc-hmac-sha1-"
2305 "cbc-aes-talitos-hsna",
2306 .cra_blocksize = AES_BLOCK_SIZE,
2307 .cra_flags = CRYPTO_ALG_ASYNC,
2309 .ivsize = AES_BLOCK_SIZE,
2310 .maxauthsize = SHA1_DIGEST_SIZE,
2312 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2313 DESC_HDR_SEL0_AESU |
2314 DESC_HDR_MODE0_AESU_CBC |
2315 DESC_HDR_SEL1_MDEUA |
2316 DESC_HDR_MODE1_MDEU_INIT |
2317 DESC_HDR_MODE1_MDEU_PAD |
2318 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2320 { .type = CRYPTO_ALG_TYPE_AEAD,
2323 .cra_name = "authenc(hmac(sha1),"
2325 .cra_driver_name = "authenc-hmac-sha1-"
2327 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2328 .cra_flags = CRYPTO_ALG_ASYNC,
2330 .ivsize = DES3_EDE_BLOCK_SIZE,
2331 .maxauthsize = SHA1_DIGEST_SIZE,
2332 .setkey = aead_des3_setkey,
2334 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2336 DESC_HDR_MODE0_DEU_CBC |
2337 DESC_HDR_MODE0_DEU_3DES |
2338 DESC_HDR_SEL1_MDEUA |
2339 DESC_HDR_MODE1_MDEU_INIT |
2340 DESC_HDR_MODE1_MDEU_PAD |
2341 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2343 { .type = CRYPTO_ALG_TYPE_AEAD,
2344 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2347 .cra_name = "authenc(hmac(sha1),"
2349 .cra_driver_name = "authenc-hmac-sha1-"
2350 "cbc-3des-talitos-hsna",
2351 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2352 .cra_flags = CRYPTO_ALG_ASYNC,
2354 .ivsize = DES3_EDE_BLOCK_SIZE,
2355 .maxauthsize = SHA1_DIGEST_SIZE,
2356 .setkey = aead_des3_setkey,
2358 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2360 DESC_HDR_MODE0_DEU_CBC |
2361 DESC_HDR_MODE0_DEU_3DES |
2362 DESC_HDR_SEL1_MDEUA |
2363 DESC_HDR_MODE1_MDEU_INIT |
2364 DESC_HDR_MODE1_MDEU_PAD |
2365 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2367 { .type = CRYPTO_ALG_TYPE_AEAD,
2370 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2371 .cra_driver_name = "authenc-hmac-sha224-"
2373 .cra_blocksize = AES_BLOCK_SIZE,
2374 .cra_flags = CRYPTO_ALG_ASYNC,
2376 .ivsize = AES_BLOCK_SIZE,
2377 .maxauthsize = SHA224_DIGEST_SIZE,
2379 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2380 DESC_HDR_SEL0_AESU |
2381 DESC_HDR_MODE0_AESU_CBC |
2382 DESC_HDR_SEL1_MDEUA |
2383 DESC_HDR_MODE1_MDEU_INIT |
2384 DESC_HDR_MODE1_MDEU_PAD |
2385 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2387 { .type = CRYPTO_ALG_TYPE_AEAD,
2388 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2391 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2392 .cra_driver_name = "authenc-hmac-sha224-"
2393 "cbc-aes-talitos-hsna",
2394 .cra_blocksize = AES_BLOCK_SIZE,
2395 .cra_flags = CRYPTO_ALG_ASYNC,
2397 .ivsize = AES_BLOCK_SIZE,
2398 .maxauthsize = SHA224_DIGEST_SIZE,
2400 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2401 DESC_HDR_SEL0_AESU |
2402 DESC_HDR_MODE0_AESU_CBC |
2403 DESC_HDR_SEL1_MDEUA |
2404 DESC_HDR_MODE1_MDEU_INIT |
2405 DESC_HDR_MODE1_MDEU_PAD |
2406 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2408 { .type = CRYPTO_ALG_TYPE_AEAD,
2411 .cra_name = "authenc(hmac(sha224),"
2413 .cra_driver_name = "authenc-hmac-sha224-"
2415 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2416 .cra_flags = CRYPTO_ALG_ASYNC,
2418 .ivsize = DES3_EDE_BLOCK_SIZE,
2419 .maxauthsize = SHA224_DIGEST_SIZE,
2420 .setkey = aead_des3_setkey,
2422 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2424 DESC_HDR_MODE0_DEU_CBC |
2425 DESC_HDR_MODE0_DEU_3DES |
2426 DESC_HDR_SEL1_MDEUA |
2427 DESC_HDR_MODE1_MDEU_INIT |
2428 DESC_HDR_MODE1_MDEU_PAD |
2429 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2431 { .type = CRYPTO_ALG_TYPE_AEAD,
2432 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2435 .cra_name = "authenc(hmac(sha224),"
2437 .cra_driver_name = "authenc-hmac-sha224-"
2438 "cbc-3des-talitos-hsna",
2439 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2440 .cra_flags = CRYPTO_ALG_ASYNC,
2442 .ivsize = DES3_EDE_BLOCK_SIZE,
2443 .maxauthsize = SHA224_DIGEST_SIZE,
2444 .setkey = aead_des3_setkey,
2446 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2448 DESC_HDR_MODE0_DEU_CBC |
2449 DESC_HDR_MODE0_DEU_3DES |
2450 DESC_HDR_SEL1_MDEUA |
2451 DESC_HDR_MODE1_MDEU_INIT |
2452 DESC_HDR_MODE1_MDEU_PAD |
2453 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2455 { .type = CRYPTO_ALG_TYPE_AEAD,
2458 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2459 .cra_driver_name = "authenc-hmac-sha256-"
2461 .cra_blocksize = AES_BLOCK_SIZE,
2462 .cra_flags = CRYPTO_ALG_ASYNC,
2464 .ivsize = AES_BLOCK_SIZE,
2465 .maxauthsize = SHA256_DIGEST_SIZE,
2467 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2468 DESC_HDR_SEL0_AESU |
2469 DESC_HDR_MODE0_AESU_CBC |
2470 DESC_HDR_SEL1_MDEUA |
2471 DESC_HDR_MODE1_MDEU_INIT |
2472 DESC_HDR_MODE1_MDEU_PAD |
2473 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2475 { .type = CRYPTO_ALG_TYPE_AEAD,
2476 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2479 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2480 .cra_driver_name = "authenc-hmac-sha256-"
2481 "cbc-aes-talitos-hsna",
2482 .cra_blocksize = AES_BLOCK_SIZE,
2483 .cra_flags = CRYPTO_ALG_ASYNC,
2485 .ivsize = AES_BLOCK_SIZE,
2486 .maxauthsize = SHA256_DIGEST_SIZE,
2488 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2489 DESC_HDR_SEL0_AESU |
2490 DESC_HDR_MODE0_AESU_CBC |
2491 DESC_HDR_SEL1_MDEUA |
2492 DESC_HDR_MODE1_MDEU_INIT |
2493 DESC_HDR_MODE1_MDEU_PAD |
2494 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2496 { .type = CRYPTO_ALG_TYPE_AEAD,
2499 .cra_name = "authenc(hmac(sha256),"
2501 .cra_driver_name = "authenc-hmac-sha256-"
2503 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2504 .cra_flags = CRYPTO_ALG_ASYNC,
2506 .ivsize = DES3_EDE_BLOCK_SIZE,
2507 .maxauthsize = SHA256_DIGEST_SIZE,
2508 .setkey = aead_des3_setkey,
2510 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2512 DESC_HDR_MODE0_DEU_CBC |
2513 DESC_HDR_MODE0_DEU_3DES |
2514 DESC_HDR_SEL1_MDEUA |
2515 DESC_HDR_MODE1_MDEU_INIT |
2516 DESC_HDR_MODE1_MDEU_PAD |
2517 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2519 { .type = CRYPTO_ALG_TYPE_AEAD,
2520 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2523 .cra_name = "authenc(hmac(sha256),"
2525 .cra_driver_name = "authenc-hmac-sha256-"
2526 "cbc-3des-talitos-hsna",
2527 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2528 .cra_flags = CRYPTO_ALG_ASYNC,
2530 .ivsize = DES3_EDE_BLOCK_SIZE,
2531 .maxauthsize = SHA256_DIGEST_SIZE,
2532 .setkey = aead_des3_setkey,
2534 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2536 DESC_HDR_MODE0_DEU_CBC |
2537 DESC_HDR_MODE0_DEU_3DES |
2538 DESC_HDR_SEL1_MDEUA |
2539 DESC_HDR_MODE1_MDEU_INIT |
2540 DESC_HDR_MODE1_MDEU_PAD |
2541 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2543 { .type = CRYPTO_ALG_TYPE_AEAD,
2546 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2547 .cra_driver_name = "authenc-hmac-sha384-"
2549 .cra_blocksize = AES_BLOCK_SIZE,
2550 .cra_flags = CRYPTO_ALG_ASYNC,
2552 .ivsize = AES_BLOCK_SIZE,
2553 .maxauthsize = SHA384_DIGEST_SIZE,
2555 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2556 DESC_HDR_SEL0_AESU |
2557 DESC_HDR_MODE0_AESU_CBC |
2558 DESC_HDR_SEL1_MDEUB |
2559 DESC_HDR_MODE1_MDEU_INIT |
2560 DESC_HDR_MODE1_MDEU_PAD |
2561 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2563 { .type = CRYPTO_ALG_TYPE_AEAD,
2566 .cra_name = "authenc(hmac(sha384),"
2568 .cra_driver_name = "authenc-hmac-sha384-"
2570 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2571 .cra_flags = CRYPTO_ALG_ASYNC,
2573 .ivsize = DES3_EDE_BLOCK_SIZE,
2574 .maxauthsize = SHA384_DIGEST_SIZE,
2575 .setkey = aead_des3_setkey,
2577 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2579 DESC_HDR_MODE0_DEU_CBC |
2580 DESC_HDR_MODE0_DEU_3DES |
2581 DESC_HDR_SEL1_MDEUB |
2582 DESC_HDR_MODE1_MDEU_INIT |
2583 DESC_HDR_MODE1_MDEU_PAD |
2584 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2586 { .type = CRYPTO_ALG_TYPE_AEAD,
2589 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2590 .cra_driver_name = "authenc-hmac-sha512-"
2592 .cra_blocksize = AES_BLOCK_SIZE,
2593 .cra_flags = CRYPTO_ALG_ASYNC,
2595 .ivsize = AES_BLOCK_SIZE,
2596 .maxauthsize = SHA512_DIGEST_SIZE,
2598 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2599 DESC_HDR_SEL0_AESU |
2600 DESC_HDR_MODE0_AESU_CBC |
2601 DESC_HDR_SEL1_MDEUB |
2602 DESC_HDR_MODE1_MDEU_INIT |
2603 DESC_HDR_MODE1_MDEU_PAD |
2604 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2606 { .type = CRYPTO_ALG_TYPE_AEAD,
2609 .cra_name = "authenc(hmac(sha512),"
2611 .cra_driver_name = "authenc-hmac-sha512-"
2613 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2614 .cra_flags = CRYPTO_ALG_ASYNC,
2616 .ivsize = DES3_EDE_BLOCK_SIZE,
2617 .maxauthsize = SHA512_DIGEST_SIZE,
2618 .setkey = aead_des3_setkey,
2620 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2622 DESC_HDR_MODE0_DEU_CBC |
2623 DESC_HDR_MODE0_DEU_3DES |
2624 DESC_HDR_SEL1_MDEUB |
2625 DESC_HDR_MODE1_MDEU_INIT |
2626 DESC_HDR_MODE1_MDEU_PAD |
2627 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2629 { .type = CRYPTO_ALG_TYPE_AEAD,
2632 .cra_name = "authenc(hmac(md5),cbc(aes))",
2633 .cra_driver_name = "authenc-hmac-md5-"
2635 .cra_blocksize = AES_BLOCK_SIZE,
2636 .cra_flags = CRYPTO_ALG_ASYNC,
2638 .ivsize = AES_BLOCK_SIZE,
2639 .maxauthsize = MD5_DIGEST_SIZE,
2641 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2642 DESC_HDR_SEL0_AESU |
2643 DESC_HDR_MODE0_AESU_CBC |
2644 DESC_HDR_SEL1_MDEUA |
2645 DESC_HDR_MODE1_MDEU_INIT |
2646 DESC_HDR_MODE1_MDEU_PAD |
2647 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2649 { .type = CRYPTO_ALG_TYPE_AEAD,
2650 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2653 .cra_name = "authenc(hmac(md5),cbc(aes))",
2654 .cra_driver_name = "authenc-hmac-md5-"
2655 "cbc-aes-talitos-hsna",
2656 .cra_blocksize = AES_BLOCK_SIZE,
2657 .cra_flags = CRYPTO_ALG_ASYNC,
2659 .ivsize = AES_BLOCK_SIZE,
2660 .maxauthsize = MD5_DIGEST_SIZE,
2662 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2663 DESC_HDR_SEL0_AESU |
2664 DESC_HDR_MODE0_AESU_CBC |
2665 DESC_HDR_SEL1_MDEUA |
2666 DESC_HDR_MODE1_MDEU_INIT |
2667 DESC_HDR_MODE1_MDEU_PAD |
2668 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2670 { .type = CRYPTO_ALG_TYPE_AEAD,
2673 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2674 .cra_driver_name = "authenc-hmac-md5-"
2676 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2677 .cra_flags = CRYPTO_ALG_ASYNC,
2679 .ivsize = DES3_EDE_BLOCK_SIZE,
2680 .maxauthsize = MD5_DIGEST_SIZE,
2681 .setkey = aead_des3_setkey,
2683 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2685 DESC_HDR_MODE0_DEU_CBC |
2686 DESC_HDR_MODE0_DEU_3DES |
2687 DESC_HDR_SEL1_MDEUA |
2688 DESC_HDR_MODE1_MDEU_INIT |
2689 DESC_HDR_MODE1_MDEU_PAD |
2690 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2692 { .type = CRYPTO_ALG_TYPE_AEAD,
2693 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2696 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2697 .cra_driver_name = "authenc-hmac-md5-"
2698 "cbc-3des-talitos-hsna",
2699 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2700 .cra_flags = CRYPTO_ALG_ASYNC,
2702 .ivsize = DES3_EDE_BLOCK_SIZE,
2703 .maxauthsize = MD5_DIGEST_SIZE,
2704 .setkey = aead_des3_setkey,
2706 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2708 DESC_HDR_MODE0_DEU_CBC |
2709 DESC_HDR_MODE0_DEU_3DES |
2710 DESC_HDR_SEL1_MDEUA |
2711 DESC_HDR_MODE1_MDEU_INIT |
2712 DESC_HDR_MODE1_MDEU_PAD |
2713 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2715 /* ABLKCIPHER algorithms. */
2716 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2718 .cra_name = "ecb(aes)",
2719 .cra_driver_name = "ecb-aes-talitos",
2720 .cra_blocksize = AES_BLOCK_SIZE,
2721 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2724 .min_keysize = AES_MIN_KEY_SIZE,
2725 .max_keysize = AES_MAX_KEY_SIZE,
2726 .setkey = ablkcipher_aes_setkey,
2729 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2732 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2734 .cra_name = "cbc(aes)",
2735 .cra_driver_name = "cbc-aes-talitos",
2736 .cra_blocksize = AES_BLOCK_SIZE,
2737 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2740 .min_keysize = AES_MIN_KEY_SIZE,
2741 .max_keysize = AES_MAX_KEY_SIZE,
2742 .ivsize = AES_BLOCK_SIZE,
2743 .setkey = ablkcipher_aes_setkey,
2746 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2747 DESC_HDR_SEL0_AESU |
2748 DESC_HDR_MODE0_AESU_CBC,
2750 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2752 .cra_name = "ctr(aes)",
2753 .cra_driver_name = "ctr-aes-talitos",
2755 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2758 .min_keysize = AES_MIN_KEY_SIZE,
2759 .max_keysize = AES_MAX_KEY_SIZE,
2760 .ivsize = AES_BLOCK_SIZE,
2761 .setkey = ablkcipher_aes_setkey,
2764 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2765 DESC_HDR_SEL0_AESU |
2766 DESC_HDR_MODE0_AESU_CTR,
2768 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2770 .cra_name = "ecb(des)",
2771 .cra_driver_name = "ecb-des-talitos",
2772 .cra_blocksize = DES_BLOCK_SIZE,
2773 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2776 .min_keysize = DES_KEY_SIZE,
2777 .max_keysize = DES_KEY_SIZE,
2778 .setkey = ablkcipher_des_setkey,
2781 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2784 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2786 .cra_name = "cbc(des)",
2787 .cra_driver_name = "cbc-des-talitos",
2788 .cra_blocksize = DES_BLOCK_SIZE,
2789 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2792 .min_keysize = DES_KEY_SIZE,
2793 .max_keysize = DES_KEY_SIZE,
2794 .ivsize = DES_BLOCK_SIZE,
2795 .setkey = ablkcipher_des_setkey,
2798 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2800 DESC_HDR_MODE0_DEU_CBC,
2802 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2804 .cra_name = "ecb(des3_ede)",
2805 .cra_driver_name = "ecb-3des-talitos",
2806 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2807 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2810 .min_keysize = DES3_EDE_KEY_SIZE,
2811 .max_keysize = DES3_EDE_KEY_SIZE,
2812 .setkey = ablkcipher_des3_setkey,
2815 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2817 DESC_HDR_MODE0_DEU_3DES,
2819 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2821 .cra_name = "cbc(des3_ede)",
2822 .cra_driver_name = "cbc-3des-talitos",
2823 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2824 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2827 .min_keysize = DES3_EDE_KEY_SIZE,
2828 .max_keysize = DES3_EDE_KEY_SIZE,
2829 .ivsize = DES3_EDE_BLOCK_SIZE,
2830 .setkey = ablkcipher_des3_setkey,
2833 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2835 DESC_HDR_MODE0_DEU_CBC |
2836 DESC_HDR_MODE0_DEU_3DES,
2838 /* AHASH algorithms. */
2839 { .type = CRYPTO_ALG_TYPE_AHASH,
2841 .halg.digestsize = MD5_DIGEST_SIZE,
2842 .halg.statesize = sizeof(struct talitos_export_state),
2845 .cra_driver_name = "md5-talitos",
2846 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2847 .cra_flags = CRYPTO_ALG_ASYNC,
2850 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2851 DESC_HDR_SEL0_MDEUA |
2852 DESC_HDR_MODE0_MDEU_MD5,
2854 { .type = CRYPTO_ALG_TYPE_AHASH,
2856 .halg.digestsize = SHA1_DIGEST_SIZE,
2857 .halg.statesize = sizeof(struct talitos_export_state),
2860 .cra_driver_name = "sha1-talitos",
2861 .cra_blocksize = SHA1_BLOCK_SIZE,
2862 .cra_flags = CRYPTO_ALG_ASYNC,
2865 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2866 DESC_HDR_SEL0_MDEUA |
2867 DESC_HDR_MODE0_MDEU_SHA1,
2869 { .type = CRYPTO_ALG_TYPE_AHASH,
2871 .halg.digestsize = SHA224_DIGEST_SIZE,
2872 .halg.statesize = sizeof(struct talitos_export_state),
2874 .cra_name = "sha224",
2875 .cra_driver_name = "sha224-talitos",
2876 .cra_blocksize = SHA224_BLOCK_SIZE,
2877 .cra_flags = CRYPTO_ALG_ASYNC,
2880 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2881 DESC_HDR_SEL0_MDEUA |
2882 DESC_HDR_MODE0_MDEU_SHA224,
2884 { .type = CRYPTO_ALG_TYPE_AHASH,
2886 .halg.digestsize = SHA256_DIGEST_SIZE,
2887 .halg.statesize = sizeof(struct talitos_export_state),
2889 .cra_name = "sha256",
2890 .cra_driver_name = "sha256-talitos",
2891 .cra_blocksize = SHA256_BLOCK_SIZE,
2892 .cra_flags = CRYPTO_ALG_ASYNC,
2895 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2896 DESC_HDR_SEL0_MDEUA |
2897 DESC_HDR_MODE0_MDEU_SHA256,
2899 { .type = CRYPTO_ALG_TYPE_AHASH,
2901 .halg.digestsize = SHA384_DIGEST_SIZE,
2902 .halg.statesize = sizeof(struct talitos_export_state),
2904 .cra_name = "sha384",
2905 .cra_driver_name = "sha384-talitos",
2906 .cra_blocksize = SHA384_BLOCK_SIZE,
2907 .cra_flags = CRYPTO_ALG_ASYNC,
2910 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2911 DESC_HDR_SEL0_MDEUB |
2912 DESC_HDR_MODE0_MDEUB_SHA384,
2914 { .type = CRYPTO_ALG_TYPE_AHASH,
2916 .halg.digestsize = SHA512_DIGEST_SIZE,
2917 .halg.statesize = sizeof(struct talitos_export_state),
2919 .cra_name = "sha512",
2920 .cra_driver_name = "sha512-talitos",
2921 .cra_blocksize = SHA512_BLOCK_SIZE,
2922 .cra_flags = CRYPTO_ALG_ASYNC,
2925 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2926 DESC_HDR_SEL0_MDEUB |
2927 DESC_HDR_MODE0_MDEUB_SHA512,
2929 { .type = CRYPTO_ALG_TYPE_AHASH,
2931 .halg.digestsize = MD5_DIGEST_SIZE,
2932 .halg.statesize = sizeof(struct talitos_export_state),
2934 .cra_name = "hmac(md5)",
2935 .cra_driver_name = "hmac-md5-talitos",
2936 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2937 .cra_flags = CRYPTO_ALG_ASYNC,
2940 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2941 DESC_HDR_SEL0_MDEUA |
2942 DESC_HDR_MODE0_MDEU_MD5,
2944 { .type = CRYPTO_ALG_TYPE_AHASH,
2946 .halg.digestsize = SHA1_DIGEST_SIZE,
2947 .halg.statesize = sizeof(struct talitos_export_state),
2949 .cra_name = "hmac(sha1)",
2950 .cra_driver_name = "hmac-sha1-talitos",
2951 .cra_blocksize = SHA1_BLOCK_SIZE,
2952 .cra_flags = CRYPTO_ALG_ASYNC,
2955 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2956 DESC_HDR_SEL0_MDEUA |
2957 DESC_HDR_MODE0_MDEU_SHA1,
2959 { .type = CRYPTO_ALG_TYPE_AHASH,
2961 .halg.digestsize = SHA224_DIGEST_SIZE,
2962 .halg.statesize = sizeof(struct talitos_export_state),
2964 .cra_name = "hmac(sha224)",
2965 .cra_driver_name = "hmac-sha224-talitos",
2966 .cra_blocksize = SHA224_BLOCK_SIZE,
2967 .cra_flags = CRYPTO_ALG_ASYNC,
2970 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2971 DESC_HDR_SEL0_MDEUA |
2972 DESC_HDR_MODE0_MDEU_SHA224,
2974 { .type = CRYPTO_ALG_TYPE_AHASH,
2976 .halg.digestsize = SHA256_DIGEST_SIZE,
2977 .halg.statesize = sizeof(struct talitos_export_state),
2979 .cra_name = "hmac(sha256)",
2980 .cra_driver_name = "hmac-sha256-talitos",
2981 .cra_blocksize = SHA256_BLOCK_SIZE,
2982 .cra_flags = CRYPTO_ALG_ASYNC,
2985 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2986 DESC_HDR_SEL0_MDEUA |
2987 DESC_HDR_MODE0_MDEU_SHA256,
2989 { .type = CRYPTO_ALG_TYPE_AHASH,
2991 .halg.digestsize = SHA384_DIGEST_SIZE,
2992 .halg.statesize = sizeof(struct talitos_export_state),
2994 .cra_name = "hmac(sha384)",
2995 .cra_driver_name = "hmac-sha384-talitos",
2996 .cra_blocksize = SHA384_BLOCK_SIZE,
2997 .cra_flags = CRYPTO_ALG_ASYNC,
3000 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3001 DESC_HDR_SEL0_MDEUB |
3002 DESC_HDR_MODE0_MDEUB_SHA384,
3004 { .type = CRYPTO_ALG_TYPE_AHASH,
3006 .halg.digestsize = SHA512_DIGEST_SIZE,
3007 .halg.statesize = sizeof(struct talitos_export_state),
3009 .cra_name = "hmac(sha512)",
3010 .cra_driver_name = "hmac-sha512-talitos",
3011 .cra_blocksize = SHA512_BLOCK_SIZE,
3012 .cra_flags = CRYPTO_ALG_ASYNC,
3015 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3016 DESC_HDR_SEL0_MDEUB |
3017 DESC_HDR_MODE0_MDEUB_SHA512,
/*
 * Per-registered-algorithm wrapper: links an instantiated algorithm
 * template into the device's alg_list (see talitos_probe/talitos_remove).
 * NOTE(review): this excerpt elides member line(s) (talitos_init_common
 * reads talitos_alg->dev, so a struct device pointer presumably lives
 * here) and the closing brace — confirm against the full source.
 */
3021 struct talitos_crypto_alg {
	/* node in talitos_private::alg_list */
3022 	struct list_head entry;
	/* private copy of the template this algorithm was created from */
3024 	struct talitos_alg_template algt;
/*
 * Common per-transform (tfm) context initialization shared by all
 * algorithm types: bind the context to the owning device, assign a SEC
 * channel, and seed the per-tfm descriptor header template.
 */
3027 static int talitos_init_common(struct talitos_ctx *ctx,
3028 struct talitos_crypto_alg *talitos_alg)
3030 	struct talitos_private *priv;
3032 	/* update context with ptr to dev */
3033 	ctx->dev = talitos_alg->dev;
3035 	/* assign SEC channel to tfm in round-robin fashion */
3036 	priv = dev_get_drvdata(ctx->dev);
	/*
	 * num_channels is validated as a power of two at probe time, so
	 * masking with (num_channels - 1) is an inexpensive modulo.
	 */
3037 	ctx->ch = atomic_inc_return(&priv->last_chan) &
3038 		  (priv->num_channels - 1);
3040 	/* copy descriptor header template value */
3041 	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3043 	/* select done notification */
3044 	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
/*
 * Generic tfm init hook: recover the talitos_crypto_alg wrapper from the
 * generic crypto_alg and delegate to talitos_init_common().  AHASH
 * algorithms embed their crypto_alg one level deeper (inside
 * hash.halg.base), hence the __crypto_ahash_alg() container_of path.
 */
3049 static int talitos_cra_init(struct crypto_tfm *tfm)
3051 	struct crypto_alg *alg = tfm->__crt_alg;
3052 	struct talitos_crypto_alg *talitos_alg;
3053 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3055 	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3056 		talitos_alg = container_of(__crypto_ahash_alg(alg),
3057 					   struct talitos_crypto_alg,
	/* NOTE(review): the member argument / else keyword between the two
	 * container_of calls is elided in this excerpt. */
3060 		talitos_alg = container_of(alg, struct talitos_crypto_alg,
3063 	return talitos_init_common(ctx, talitos_alg);
/*
 * AEAD-specific init hook: AEAD tfms carry their own alg type, so the
 * wrapper is recovered straight from the aead_alg before delegating to
 * talitos_init_common().
 */
3066 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3068 	struct aead_alg *alg = crypto_aead_alg(tfm);
3069 	struct talitos_crypto_alg *talitos_alg;
3070 	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3072 	talitos_alg = container_of(alg, struct talitos_crypto_alg,
3075 	return talitos_init_common(ctx, talitos_alg);
/*
 * AHASH-specific init hook: perform the common init, then size the
 * per-request context so the core allocates room for
 * talitos_ahash_req_ctx with every ahash request.
 */
3078 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3080 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3082 	talitos_cra_init(tfm);
3085 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3086 				 sizeof(struct talitos_ahash_req_ctx));
/*
 * tfm teardown: release the DMA mapping of the key that setkey created.
 * NOTE(review): a guard (presumably on ctx->keylen being non-zero) is
 * elided in this excerpt — confirm against the full source before
 * assuming the unmap runs unconditionally.
 */
3091 static void talitos_cra_exit(struct crypto_tfm *tfm)
3093 	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3094 	struct device *dev = ctx->dev;
3097 		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
/*
3101  * given the alg's descriptor header template, determine whether descriptor
3102  * type and primary/secondary execution units required match the hw
3103  * capabilities description provided in the device tree node.
 *
 * Returns non-zero (true) when the descriptor type AND every required
 * execution unit appear in the capability masks read from the DT.
 */
3105 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3107 	struct talitos_private *priv = dev_get_drvdata(dev);
	/* descriptor type and primary EU must both be advertised */
3110 	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3111 	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
	/* a secondary EU is optional in the template; if named, it must exist */
3113 	if (SECONDARY_EU(desc_hdr_template))
3114 		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3115 			      & priv->exec_units);
/*
 * Device teardown, also used as the error-unwind path of talitos_probe():
 * unregister every crypto algorithm that was registered, then the RNG,
 * free/dispose both IRQ lines, and kill the completion tasklets.
 * Memory allocated with devm_* is released by the driver core.
 */
3120 static int talitos_remove(struct platform_device *ofdev)
3122 	struct device *dev = &ofdev->dev;
3123 	struct talitos_private *priv = dev_get_drvdata(dev);
3124 	struct talitos_crypto_alg *t_alg, *n;
	/* _safe iteration: each node is deleted from the list as we go */
3127 	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3128 		switch (t_alg->algt.type) {
3129 		case CRYPTO_ALG_TYPE_ABLKCIPHER:
	/* NOTE(review): the unregister call for ABLKCIPHER and the break
	 * statements between cases are elided in this excerpt — the cases
	 * below are presumably not fallthrough in the full source. */
3131 		case CRYPTO_ALG_TYPE_AEAD:
3132 			crypto_unregister_aead(&t_alg->algt.alg.aead);
3133 		case CRYPTO_ALG_TYPE_AHASH:
3134 			crypto_unregister_ahash(&t_alg->algt.alg.hash);
3137 		list_del(&t_alg->entry);
	/* RNG was only registered when the hardware advertises an RNG EU */
3140 	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3141 		talitos_unregister_rng(dev);
	/* up to two IRQ lines (primary + optional secondary) */
3143 	for (i = 0; i < 2; i++)
3145 			free_irq(priv->irq[i], dev);
3146 			irq_dispose_mapping(priv->irq[i]);
3149 	tasklet_kill(&priv->done_task[0]);
	/* second tasklet only exists for the dual-IRQ configuration */
3151 		tasklet_kill(&priv->done_task[1]);
/*
 * Instantiate one driver_algs[] template for this device: copy the
 * template, wire up the type-specific crypto API callbacks, filter out
 * algorithms the hardware revision cannot support, and fill in the
 * fields common to all types (priority, alignmask, ctxsize, flags).
 *
 * Returns the new wrapper on success, ERR_PTR(-ENOTSUPP) for algorithms
 * unsupported by this SEC revision, ERR_PTR(-ENOMEM)/-EINVAL on failure.
 * The allocation is devm-managed; explicit devm_kfree() on the error
 * paths keeps memory bounded while the device lives on.
 */
3156 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3157 						    struct talitos_alg_template
3160 	struct talitos_private *priv = dev_get_drvdata(dev);
3161 	struct talitos_crypto_alg *t_alg;
3162 	struct crypto_alg *alg;
3164 	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3167 		return ERR_PTR(-ENOMEM);
	/* private copy: per-device tweaks below must not touch the template */
3169 	t_alg->algt = *template;
3171 	switch (t_alg->algt.type) {
3172 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
3173 		alg = &t_alg->algt.alg.crypto;
3174 		alg->cra_init = talitos_cra_init;
3175 		alg->cra_exit = talitos_cra_exit;
3176 		alg->cra_type = &crypto_ablkcipher_type;
		/* keep a template-provided setkey; fall back to the default
		 * otherwise (?: — GNU elvis operator) */
3177 		alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
3179 		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3180 		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3182 	case CRYPTO_ALG_TYPE_AEAD:
3183 		alg = &t_alg->algt.alg.aead.base;
3184 		alg->cra_exit = talitos_cra_exit;
3185 		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3186 		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3188 		t_alg->algt.alg.aead.encrypt = aead_encrypt;
3189 		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		/* sha224 AEADs need HW init support; reject otherwise */
3190 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3191 		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3192 			devm_kfree(dev, t_alg);
3193 			return ERR_PTR(-ENOTSUPP);
3196 	case CRYPTO_ALG_TYPE_AHASH:
3197 		alg = &t_alg->algt.alg.hash.halg.base;
3198 		alg->cra_init = talitos_cra_init_ahash;
3199 		alg->cra_exit = talitos_cra_exit;
3200 		t_alg->algt.alg.hash.init = ahash_init;
3201 		t_alg->algt.alg.hash.update = ahash_update;
3202 		t_alg->algt.alg.hash.final = ahash_final;
3203 		t_alg->algt.alg.hash.finup = ahash_finup;
3204 		t_alg->algt.alg.hash.digest = ahash_digest;
		/* only keyed (hmac) hashes get a setkey hook */
3205 		if (!strncmp(alg->cra_name, "hmac", 4))
3206 			t_alg->algt.alg.hash.setkey = ahash_setkey;
3207 		t_alg->algt.alg.hash.import = ahash_import;
3208 		t_alg->algt.alg.hash.export = ahash_export;
		/* hardware without HMAC support cannot offer hmac(*) hashes */
3210 		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3211 		    !strncmp(alg->cra_name, "hmac", 4)) {
3212 			devm_kfree(dev, t_alg);
3213 			return ERR_PTR(-ENOTSUPP);
		/*
		 * No SHA224 hardware-init: emulate via a software-seeded
		 * init and run the engine in SHA256 mode (SHA224 is SHA256
		 * with different initial state and a truncated digest).
		 */
3215 		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3216 		    (!strcmp(alg->cra_name, "sha224") ||
3217 		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
3218 			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3219 			t_alg->algt.desc_hdr_template =
3220 					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3221 					DESC_HDR_SEL0_MDEUA |
3222 					DESC_HDR_MODE0_MDEU_SHA256;
3226 		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3227 		devm_kfree(dev, t_alg);
3228 		return ERR_PTR(-EINVAL);
3231 	alg->cra_module = THIS_MODULE;
	/* template may override the default driver priority */
3232 	if (t_alg->algt.priority)
3233 		alg->cra_priority = t_alg->algt.priority;
3235 		alg->cra_priority = TALITOS_CRA_PRIORITY;
	/* SEC1 requires 4-byte aligned buffers; SEC2+ has no constraint */
3236 	if (has_ftr_sec1(priv))
3237 		alg->cra_alignmask = 3;
3239 		alg->cra_alignmask = 0;
3240 	alg->cra_ctxsize = sizeof(struct talitos_ctx);
3241 	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
/*
 * Map and request the device's interrupt line(s) from the device tree.
 * SEC1 uses a single IRQ serving all 4 channels; SEC2+ may provide a
 * second line, in which case channels 0/2 go to irq[0] and channels 1/3
 * to irq[1].  On failure the mapped IRQs are disposed before returning.
 */
3248 static int talitos_probe_irq(struct platform_device *ofdev)
3250 	struct device *dev = &ofdev->dev;
3251 	struct device_node *np = ofdev->dev.of_node;
3252 	struct talitos_private *priv = dev_get_drvdata(dev);
3254 	bool is_sec1 = has_ftr_sec1(priv);
3256 	priv->irq[0] = irq_of_parse_and_map(np, 0);
3257 	if (!priv->irq[0]) {
3258 		dev_err(dev, "failed to map irq\n");
	/* SEC1 path: one handler covers all four channels */
3262 		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3263 				  dev_driver_string(dev), dev);
	/* SEC2+ may expose an optional second interrupt line */
3267 	priv->irq[1] = irq_of_parse_and_map(np, 1);
3269 	/* get the primary irq line */
3270 	if (!priv->irq[1]) {
3271 		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3272 				  dev_driver_string(dev), dev);
	/* dual-IRQ: irq[0] handles channels 0 and 2 */
3276 		err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3277 				  dev_driver_string(dev), dev);
3281 	/* get the secondary irq line */
3282 	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3283 			  dev_driver_string(dev), dev);
3285 		dev_err(dev, "failed to request secondary irq\n");
3286 		irq_dispose_mapping(priv->irq[1]);
	/* shared error path for a failed primary request */
3294 	dev_err(dev, "failed to request primary irq\n");
3295 	irq_dispose_mapping(priv->irq[0]);
/*
 * Platform driver probe: allocate the per-device state, map registers,
 * read capability masks from the device tree, detect the SEC revision
 * (feature flags + per-EU register layout), set up IRQs and completion
 * tasklets, initialize the channels and hardware, then register the RNG
 * and every supported crypto algorithm.  On any failure after IRQ setup
 * the unwind is delegated to talitos_remove().
 */
3302 static int talitos_probe(struct platform_device *ofdev)
3304 	struct device *dev = &ofdev->dev;
3305 	struct device_node *np = ofdev->dev.of_node;
3306 	struct talitos_private *priv;
3309 	struct resource *res;
3311 	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3315 	INIT_LIST_HEAD(&priv->alg_list);
3317 	dev_set_drvdata(dev, priv);
3319 	priv->ofdev = ofdev;
3321 	spin_lock_init(&priv->reg_lock);
3323 	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3326 	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3328 		dev_err(dev, "failed to of_iomap\n");
3333 	/* get SEC version capabilities from device tree */
3334 	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3335 	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3336 	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3337 	of_property_read_u32(np, "fsl,descriptor-types-mask",
	/* num_channels must be a power of two: channel selection in
	 * talitos_init_common() masks with (num_channels - 1) */
3340 	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3341 	    !priv->exec_units || !priv->desc_types) {
3342 		dev_err(dev, "invalid property data in device tree node\n");
	/* feature flags keyed off the compatible string */
3347 	if (of_device_is_compatible(np, "fsl,sec3.0"))
3348 		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3350 	if (of_device_is_compatible(np, "fsl,sec2.1"))
3351 		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3352 				  TALITOS_FTR_SHA224_HWINIT |
3353 				  TALITOS_FTR_HMAC_OK;
3355 	if (of_device_is_compatible(np, "fsl,sec1.0"))
3356 		priv->features |= TALITOS_FTR_SEC1;
	/* per-revision execution-unit register offsets and channel stride */
3358 	if (of_device_is_compatible(np, "fsl,sec1.2")) {
3359 		priv->reg_deu = priv->reg + TALITOS12_DEU;
3360 		priv->reg_aesu = priv->reg + TALITOS12_AESU;
3361 		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3362 		stride = TALITOS1_CH_STRIDE;
3363 	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3364 		priv->reg_deu = priv->reg + TALITOS10_DEU;
3365 		priv->reg_aesu = priv->reg + TALITOS10_AESU;
3366 		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3367 		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3368 		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3369 		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3370 		stride = TALITOS1_CH_STRIDE;
	/* default: SEC2/SEC3 register layout */
3372 		priv->reg_deu = priv->reg + TALITOS2_DEU;
3373 		priv->reg_aesu = priv->reg + TALITOS2_AESU;
3374 		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3375 		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3376 		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3377 		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3378 		priv->reg_keu = priv->reg + TALITOS2_KEU;
3379 		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3380 		stride = TALITOS2_CH_STRIDE;
3383 	err = talitos_probe_irq(ofdev);
	/* pick done-tasklet(s) matching the SEC revision and IRQ layout */
3387 	if (has_ftr_sec1(priv)) {
3388 		if (priv->num_channels == 1)
3389 			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3390 				     (unsigned long)dev);
3392 			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3393 				     (unsigned long)dev);
	/* dual-IRQ SEC2: one tasklet per interrupt line */
3396 			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3397 				     (unsigned long)dev);
3398 			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3399 				     (unsigned long)dev);
3400 		} else if (priv->num_channels == 1) {
3401 			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3402 				     (unsigned long)dev);
3404 			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3405 				     (unsigned long)dev);
3409 	priv->chan = devm_kcalloc(dev,
3411 				  sizeof(struct talitos_channel),
3414 		dev_err(dev, "failed to allocate channel management space\n");
	/* fifo_len rounded up so head/tail indices can wrap with a mask */
3419 	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3421 	for (i = 0; i < priv->num_channels; i++) {
3422 		priv->chan[i].reg = priv->reg + stride * (i + 1);
		/* single-IRQ mode, or even channels in dual-IRQ mode, use
		 * the alternate channel register base */
3423 		if (!priv->irq[1] || !(i & 1))
3424 			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3426 		spin_lock_init(&priv->chan[i].head_lock);
3427 		spin_lock_init(&priv->chan[i].tail_lock);
3429 		priv->chan[i].fifo = devm_kcalloc(dev,
3431 						  sizeof(struct talitos_request),
3433 		if (!priv->chan[i].fifo) {
3434 			dev_err(dev, "failed to allocate request fifo %d\n", i);
		/* negative bias: counts up to 0 as the fifo fills */
3439 		atomic_set(&priv->chan[i].submit_count,
3440 			   -(priv->chfifo_len - 1));
	/* SEC uses 36-bit DMA addressing (eptr carries the upper bits) */
3443 	dma_set_mask(dev, DMA_BIT_MASK(36));
3445 	/* reset and initialize the h/w */
3446 	err = init_device(dev);
3448 		dev_err(dev, "failed to initialize device\n");
3452 	/* register the RNG, if available */
3453 	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3454 		err = talitos_register_rng(dev);
3456 			dev_err(dev, "failed to register hwrng: %d\n", err);
3459 			dev_info(dev, "hwrng\n");
3462 	/* register crypto algorithms the device supports */
3463 	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3464 		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3465 			struct talitos_crypto_alg *t_alg;
3466 			struct crypto_alg *alg = NULL;
3468 			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3469 			if (IS_ERR(t_alg)) {
3470 				err = PTR_ERR(t_alg);
				/* -ENOTSUPP: alg filtered for this SEC
				 * revision — skip it, not a fatal error */
3471 				if (err == -ENOTSUPP)
3476 			switch (t_alg->algt.type) {
3477 			case CRYPTO_ALG_TYPE_ABLKCIPHER:
3478 				err = crypto_register_alg(
3479 						&t_alg->algt.alg.crypto);
3480 				alg = &t_alg->algt.alg.crypto;
3483 			case CRYPTO_ALG_TYPE_AEAD:
3484 				err = crypto_register_aead(
3485 					&t_alg->algt.alg.aead);
3486 				alg = &t_alg->algt.alg.aead.base;
3489 			case CRYPTO_ALG_TYPE_AHASH:
3490 				err = crypto_register_ahash(
3491 						&t_alg->algt.alg.hash);
3492 				alg = &t_alg->algt.alg.hash.halg.base;
			/* registration failure: log and drop this alg only */
3496 				dev_err(dev, "%s alg registration failed\n",
3497 					alg->cra_driver_name);
3498 				devm_kfree(dev, t_alg);
3500 				list_add_tail(&t_alg->entry, &priv->alg_list);
3503 	if (!list_empty(&priv->alg_list))
3504 		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3505 			 (char *)of_get_property(np, "compatible", NULL));
	/* common error unwind: talitos_remove() tears down what was set up */
3510 	talitos_remove(ofdev);
/*
 * Device-tree match table; SEC1 and SEC2 entries are each gated on the
 * corresponding Kconfig option so unused support can be compiled out.
 */
3515 static const struct of_device_id talitos_match[] = {
3516 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3518 		.compatible = "fsl,sec1.0",
3521 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3523 		.compatible = "fsl,sec2.0",
3528 MODULE_DEVICE_TABLE(of, talitos_match);
/* Platform driver glue: probe/remove plus the OF match table above. */
3530 static struct platform_driver talitos_driver = {
3533 		.of_match_table = talitos_match,
3535 	.probe = talitos_probe,
3536 	.remove = talitos_remove,
/* registers the driver and generates module init/exit boilerplate */
3539 module_platform_driver(talitos_driver);
3541 MODULE_LICENSE("GPL");
3542 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3543 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");