1 // SPDX-License-Identifier: GPL-2.0+
3 * talitos - Freescale Integrated Security Engine (SEC) device driver
5 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
7 * Scatterlist Crypto API glue code copied from files with the following:
8 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
10 * Crypto algorithm registration code copied from hifn driver:
11 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
12 * All rights reserved.
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/mod_devicetable.h>
18 #include <linux/device.h>
19 #include <linux/interrupt.h>
20 #include <linux/crypto.h>
21 #include <linux/hw_random.h>
22 #include <linux/of_address.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_platform.h>
25 #include <linux/dma-mapping.h>
27 #include <linux/spinlock.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/slab.h>
31 #include <crypto/algapi.h>
32 #include <crypto/aes.h>
33 #include <crypto/des.h>
34 #include <crypto/sha.h>
35 #include <crypto/md5.h>
36 #include <crypto/internal/aead.h>
37 #include <crypto/authenc.h>
38 #include <crypto/skcipher.h>
39 #include <crypto/hash.h>
40 #include <crypto/internal/hash.h>
41 #include <crypto/scatterwalk.h>
/*
 * Write a DMA address + length into a hardware descriptor pointer.
 * Both a 16-bit len1 and a len/eptr pair are written below; the
 * is_sec1 selection branches appear to have been lost in extraction —
 * NOTE(review): verify the SEC1 vs SEC2+ conditionals against upstream.
 */
45 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
46 unsigned int len, bool is_sec1)
48 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
50 ptr->len1 = cpu_to_be16(len);
52 ptr->len = cpu_to_be16(len);
53 ptr->eptr = upper_32_bits(dma_addr); /* extended (upper 32-bit) address */
/*
 * Field-by-field copy of one h/w descriptor pointer into another.
 * NOTE(review): is_sec1 branches (len1 vs len/eptr) look dropped by
 * extraction; only the assignments themselves are visible here.
 */
57 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
58 struct talitos_ptr *src_ptr, bool is_sec1)
60 dst_ptr->ptr = src_ptr->ptr;
62 dst_ptr->len1 = src_ptr->len1;
64 dst_ptr->len = src_ptr->len;
65 dst_ptr->eptr = src_ptr->eptr;
/*
 * Read back the byte length stored in a h/w pointer: len1 on one
 * hardware generation, len on the other (selecting conditional not
 * visible in this chunk — presumably keyed on is_sec1; confirm).
 */
69 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
73 return be16_to_cpu(ptr->len1);
75 return be16_to_cpu(ptr->len);
/* Set the extent field of a h/w pointer (body lost in extraction). */
78 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
/* OR bits into the extent field of a h/w pointer (body lost in extraction). */
85 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
92 * map virtual single (contiguous) pointer to h/w descriptor pointer
94 static void __map_single_talitos_ptr(struct device *dev,
95 struct talitos_ptr *ptr,
96 unsigned int len, void *data,
97 enum dma_data_direction dir,
/* DMA-map 'data' with the given attrs, then record address+len in ptr */
100 dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
101 struct talitos_private *priv = dev_get_drvdata(dev);
102 bool is_sec1 = has_ftr_sec1(priv);
104 to_talitos_ptr(ptr, dma_addr, len, is_sec1);
/* Convenience wrapper: map with default (0) DMA attributes. */
107 static void map_single_talitos_ptr(struct device *dev,
108 struct talitos_ptr *ptr,
109 unsigned int len, void *data,
110 enum dma_data_direction dir)
112 __map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
/* Wrapper that skips the CPU cache sync at map time (caller syncs later). */
115 static void map_single_talitos_ptr_nosync(struct device *dev,
116 struct talitos_ptr *ptr,
117 unsigned int len, void *data,
118 enum dma_data_direction dir)
120 __map_single_talitos_ptr(dev, ptr, len, data, dir,
121 DMA_ATTR_SKIP_CPU_SYNC);
125 * unmap bus single (contiguous) h/w descriptor pointer
127 static void unmap_single_talitos_ptr(struct device *dev,
128 struct talitos_ptr *ptr,
129 enum dma_data_direction dir)
131 struct talitos_private *priv = dev_get_drvdata(dev);
132 bool is_sec1 = has_ftr_sec1(priv);
/* address and length are recovered from the descriptor pointer itself */
134 dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
135 from_talitos_ptr_len(ptr, is_sec1), dir);
/*
 * Reset one SEC channel and re-enable its standard config bits.
 * Two reset sequences are visible (TALITOS1 via CCCR_LO, TALITOS2 via
 * CCCR); the if/else selecting between them and the busy-wait bodies
 * look dropped by extraction — NOTE(review): confirm against upstream.
 * Returns 0 on success (error path only partially visible).
 */
138 static int reset_channel(struct device *dev, int ch)
140 struct talitos_private *priv = dev_get_drvdata(dev);
141 unsigned int timeout = TALITOS_TIMEOUT;
142 bool is_sec1 = has_ftr_sec1(priv);
145 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
146 TALITOS1_CCCR_LO_RESET);
/* poll until the self-clearing reset bit drops, bounded by timeout */
148 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
149 TALITOS1_CCCR_LO_RESET) && --timeout)
152 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
153 TALITOS2_CCCR_RESET);
155 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
156 TALITOS2_CCCR_RESET) && --timeout)
161 dev_err(dev, "failed to reset channel %d\n", ch);
165 /* set 36-bit addressing, done writeback enable and done IRQ enable */
166 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
167 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
168 /* enable chaining descriptors */
170 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
173 /* and ICCR writeback, if available */
174 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
175 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
176 TALITOS_CCCR_LO_IWSE);
/*
 * Software-reset the whole SEC block via MCR, polling until the SWR
 * bit clears, then (on some parts — conditional not visible) request
 * cache re-association via RCA bits. Returns 0 on success.
 */
181 static int reset_device(struct device *dev)
183 struct talitos_private *priv = dev_get_drvdata(dev);
184 unsigned int timeout = TALITOS_TIMEOUT;
185 bool is_sec1 = has_ftr_sec1(priv);
186 u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
188 setbits32(priv->reg + TALITOS_MCR, mcr);
190 while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
/* NOTE(review): loop body / --timeout line lost in extraction */
195 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
196 setbits32(priv->reg + TALITOS_MCR, mcr);
200 dev_err(dev, "failed to reset device\n");
208 * Reset and initialize the device
210 static int init_device(struct device *dev)
212 struct talitos_private *priv = dev_get_drvdata(dev);
214 bool is_sec1 = has_ftr_sec1(priv);
218 * errata documentation: warning: certain SEC interrupts
219 * are not fully cleared by writing the MCR:SWR bit,
220 * set bit twice to completely reset
/* reset twice per the erratum above; error-return checks not visible */
222 err = reset_device(dev);
226 err = reset_device(dev);
/* then reset every channel individually */
231 for (ch = 0; ch < priv->num_channels; ch++) {
232 err = reset_channel(dev, ch);
237 /* enable channel done and error interrupts */
239 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
240 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
241 /* disable parity error check in DEU (erroneous? test vect.) */
242 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
244 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
245 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
248 /* disable integrity check error interrupts (use writeback instead) */
249 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
250 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
251 TALITOS_MDEUICR_LO_ICE);
257 * talitos_submit - submits a descriptor to the device for processing
258 * @dev: the SEC device to be used
259 * @ch: the SEC device channel to be used
260 * @desc: the descriptor to be processed by the device
261 * @callback: whom to call when processing is complete
262 * @context: a handle for use by caller (optional)
264 * desc must contain valid dma-mapped (bus physical) address pointers.
265 * callback must check err and feedback in descriptor header
266 * for device processing status.
268 static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
269 void (*callback)(struct device *dev,
270 struct talitos_desc *desc,
271 void *context, int error),
274 struct talitos_private *priv = dev_get_drvdata(dev);
275 struct talitos_request *request;
278 bool is_sec1 = has_ftr_sec1(priv);
/* serialize against other submitters on this channel's head */
280 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
282 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
283 /* h/w fifo is full */
284 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
288 head = priv->chan[ch].head;
289 request = &priv->chan[ch].fifo[head];
291 /* map descriptor and save caller data */
/* SEC1 path: hdr is mirrored into hdr1 and mapping starts at hdr1 */
293 desc->hdr1 = desc->hdr;
294 request->dma_desc = dma_map_single(dev, &desc->hdr1,
298 request->dma_desc = dma_map_single(dev, desc,
302 request->callback = callback;
303 request->context = context;
305 /* increment fifo head */
306 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
309 request->desc = desc;
/* write descriptor bus address to the channel fetch FIFO: GO */
313 out_be32(priv->chan[ch].reg + TALITOS_FF,
314 upper_32_bits(request->dma_desc));
315 out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
316 lower_32_bits(request->dma_desc));
318 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
324 * process what was done, notify callback of error if not
326 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
328 struct talitos_private *priv = dev_get_drvdata(dev);
329 struct talitos_request *request, saved_req;
332 bool is_sec1 = has_ftr_sec1(priv);
334 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
/* walk the fifo from tail while there are outstanding requests */
336 tail = priv->chan[ch].tail;
337 while (priv->chan[ch].fifo[tail].desc) {
340 request = &priv->chan[ch].fifo[tail];
342 /* descriptors with their done bits set don't get the error */
/* pick the header the h/w wrote back: hdr, next chained desc's
 * hdr1, or this desc's hdr1 (selection conditionals partly lost) */
345 hdr = request->desc->hdr;
346 else if (request->desc->next_desc)
347 hdr = (request->desc + 1)->hdr1;
349 hdr = request->desc->hdr1;
351 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
/* NOTE(review): status assignment / break lines lost in extraction */
359 dma_unmap_single(dev, request->dma_desc,
363 /* copy entries so we can call callback outside lock */
364 saved_req.desc = request->desc;
365 saved_req.callback = request->callback;
366 saved_req.context = request->context;
368 /* release request entry in fifo */
370 request->desc = NULL;
372 /* increment fifo tail */
373 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
375 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
377 atomic_dec(&priv->chan[ch].submit_count);
379 saved_req.callback(dev, saved_req.desc, saved_req.context,
381 /* channel may resume processing in single desc error case */
382 if (error && !reset_ch && status == error)
384 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
385 tail = priv->chan[ch].tail;
388 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
392 * process completed requests for channels that have done status
/*
 * Tasklet body generator for SEC1 "done" processing: flush each
 * channel whose done bit is in ch_done_mask, then unmask done IRQs.
 * On SEC1, IMR bits are active-low, hence clrbits32 to unmask.
 */
394 #define DEF_TALITOS1_DONE(name, ch_done_mask) \
395 static void talitos1_done_##name(unsigned long data) \
397 struct device *dev = (struct device *)data; \
398 struct talitos_private *priv = dev_get_drvdata(dev); \
399 unsigned long flags; \
401 if (ch_done_mask & 0x10000000) \
402 flush_channel(dev, 0, 0, 0); \
403 if (ch_done_mask & 0x40000000) \
404 flush_channel(dev, 1, 0, 0); \
405 if (ch_done_mask & 0x00010000) \
406 flush_channel(dev, 2, 0, 0); \
407 if (ch_done_mask & 0x00040000) \
408 flush_channel(dev, 3, 0, 0); \
410 /* At this point, all completed channels have been processed */ \
411 /* Unmask done interrupts for channels completed later on. */ \
412 spin_lock_irqsave(&priv->reg_lock, flags); \
413 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
414 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
415 spin_unlock_irqrestore(&priv->reg_lock, flags); \
418 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
419 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
/*
 * SEC2+ counterpart of DEF_TALITOS1_DONE: different done-bit layout
 * (bits 0/2/4/6), and IMR bits are set (not cleared) to unmask.
 */
421 #define DEF_TALITOS2_DONE(name, ch_done_mask) \
422 static void talitos2_done_##name(unsigned long data) \
424 struct device *dev = (struct device *)data; \
425 struct talitos_private *priv = dev_get_drvdata(dev); \
426 unsigned long flags; \
428 if (ch_done_mask & 1) \
429 flush_channel(dev, 0, 0, 0); \
430 if (ch_done_mask & (1 << 2)) \
431 flush_channel(dev, 1, 0, 0); \
432 if (ch_done_mask & (1 << 4)) \
433 flush_channel(dev, 2, 0, 0); \
434 if (ch_done_mask & (1 << 6)) \
435 flush_channel(dev, 3, 0, 0); \
437 /* At this point, all completed channels have been processed */ \
438 /* Unmask done interrupts for channels completed later on. */ \
439 spin_lock_irqsave(&priv->reg_lock, flags); \
440 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
441 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
442 spin_unlock_irqrestore(&priv->reg_lock, flags); \
445 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
446 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
447 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
448 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
451 * locate current (offending) descriptor
/* Read the channel's current-descriptor pointer register and search the
 * s/w fifo for the matching request; returns that descriptor's header
 * (or the chained descriptor's) so the caller can decode the error. */
453 static u32 current_desc_hdr(struct device *dev, int ch)
455 struct talitos_private *priv = dev_get_drvdata(dev);
/* 64-bit bus address assembled from CDPR (hi) and CDPR_LO */
459 cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
460 cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
463 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
467 tail = priv->chan[ch].tail;
/* scan fifo entries; bail if we wrap back to tail without a match
 * (wrap check lines not visible in this chunk) */
470 while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
471 priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
472 iter = (iter + 1) & (priv->fifo_len - 1);
474 dev_err(dev, "couldn't locate current descriptor\n");
479 if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
480 return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
482 return priv->chan[ch].fifo[iter].desc->hdr;
486 * user diagnostics; report root cause of error based on execution unit status
/* Decode desc_hdr's primary (SEL0) and secondary (SEL1) execution-unit
 * selectors and dump the corresponding EU interrupt status registers,
 * then dump the channel's descriptor buffer.  The case bodies' break
 * statements are not visible in this chunk. */
488 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
490 struct talitos_private *priv = dev_get_drvdata(dev);
/* fall back to the header saved in the channel descriptor buffer */
494 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
496 switch (desc_hdr & DESC_HDR_SEL0_MASK) {
497 case DESC_HDR_SEL0_AFEU:
498 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
499 in_be32(priv->reg_afeu + TALITOS_EUISR),
500 in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
502 case DESC_HDR_SEL0_DEU:
503 dev_err(dev, "DEUISR 0x%08x_%08x\n",
504 in_be32(priv->reg_deu + TALITOS_EUISR),
505 in_be32(priv->reg_deu + TALITOS_EUISR_LO));
507 case DESC_HDR_SEL0_MDEUA:
508 case DESC_HDR_SEL0_MDEUB:
509 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
510 in_be32(priv->reg_mdeu + TALITOS_EUISR),
511 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
513 case DESC_HDR_SEL0_RNG:
514 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
515 in_be32(priv->reg_rngu + TALITOS_ISR),
516 in_be32(priv->reg_rngu + TALITOS_ISR_LO));
518 case DESC_HDR_SEL0_PKEU:
519 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
520 in_be32(priv->reg_pkeu + TALITOS_EUISR),
521 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
523 case DESC_HDR_SEL0_AESU:
524 dev_err(dev, "AESUISR 0x%08x_%08x\n",
525 in_be32(priv->reg_aesu + TALITOS_EUISR),
526 in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
528 case DESC_HDR_SEL0_CRCU:
529 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
530 in_be32(priv->reg_crcu + TALITOS_EUISR),
531 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
533 case DESC_HDR_SEL0_KEU:
/* NOTE(review): KEU case reads reg_pkeu — looks intentional upstream
 * (KEU shares the PKEU register block); confirm before "fixing" */
534 dev_err(dev, "KEUISR 0x%08x_%08x\n",
535 in_be32(priv->reg_pkeu + TALITOS_EUISR),
536 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
540 switch (desc_hdr & DESC_HDR_SEL1_MASK) {
541 case DESC_HDR_SEL1_MDEUA:
542 case DESC_HDR_SEL1_MDEUB:
543 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
544 in_be32(priv->reg_mdeu + TALITOS_EUISR),
545 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
547 case DESC_HDR_SEL1_CRCU:
548 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
549 in_be32(priv->reg_crcu + TALITOS_EUISR),
550 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
/* dump all 8 descriptor-buffer dwords pairs for post-mortem */
554 for (i = 0; i < 8; i++)
555 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
556 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
557 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
561 * recover from error interrupts
563 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
565 struct talitos_private *priv = dev_get_drvdata(dev);
566 unsigned int timeout = TALITOS_TIMEOUT;
567 int ch, error, reset_dev = 0;
569 bool is_sec1 = has_ftr_sec1(priv);
570 int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
572 for (ch = 0; ch < priv->num_channels; ch++) {
573 /* skip channels without errors */
575 /* bits 29, 31, 17, 19 */
576 if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
/* SEC2+ error bits are the odd bits 1,3,5,7 */
579 if (!(isr & (1 << (ch * 2 + 1))))
/* read per-channel pointer status to classify the fault */
585 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
587 if (v_lo & TALITOS_CCPSR_LO_DOF) {
588 dev_err(dev, "double fetch fifo overflow error\n");
592 if (v_lo & TALITOS_CCPSR_LO_SOF) {
593 /* h/w dropped descriptor */
594 dev_err(dev, "single fetch fifo overflow error\n");
597 if (v_lo & TALITOS_CCPSR_LO_MDTE)
598 dev_err(dev, "master data transfer error\n");
599 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
600 dev_err(dev, is_sec1 ? "pointer not complete error\n"
601 : "s/g data length zero error\n");
602 if (v_lo & TALITOS_CCPSR_LO_FPZ)
603 dev_err(dev, is_sec1 ? "parity error\n"
604 : "fetch pointer zero error\n");
605 if (v_lo & TALITOS_CCPSR_LO_IDH)
606 dev_err(dev, "illegal descriptor header error\n");
607 if (v_lo & TALITOS_CCPSR_LO_IEU)
608 dev_err(dev, is_sec1 ? "static assignment error\n"
609 : "invalid exec unit error\n");
610 if (v_lo & TALITOS_CCPSR_LO_EU)
611 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
613 if (v_lo & TALITOS_CCPSR_LO_GB)
614 dev_err(dev, "gather boundary error\n");
615 if (v_lo & TALITOS_CCPSR_LO_GRL)
616 dev_err(dev, "gather return/length error\n");
617 if (v_lo & TALITOS_CCPSR_LO_SB)
618 dev_err(dev, "scatter boundary error\n");
619 if (v_lo & TALITOS_CCPSR_LO_SRL)
620 dev_err(dev, "scatter return/length error\n");
/* error out outstanding requests; reset the channel if needed,
 * otherwise (SEC2) nudge it to continue */
623 flush_channel(dev, ch, error, reset_ch);
626 reset_channel(dev, ch);
628 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
630 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
631 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
632 TALITOS2_CCCR_CONT) && --timeout)
635 dev_err(dev, "failed to restart channel %d\n",
/* global (non-channel) errors force a full device reset */
641 if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
642 (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
643 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
644 dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
647 dev_err(dev, "done overflow, internal time out, or "
648 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
650 /* purge request queues */
651 for (ch = 0; ch < priv->num_channels; ch++)
652 flush_channel(dev, ch, -EIO, 1);
654 /* reset and reinitialize the device */
/*
 * Hard-IRQ handler generator (SEC1): ack the interrupt, hand errors to
 * talitos_error(), mask further done IRQs and defer completion work to
 * the tasklet.  SEC1 masks by SETTING IMR bits (active-low scheme).
 */
659 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
660 static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
662 struct device *dev = data; \
663 struct talitos_private *priv = dev_get_drvdata(dev); \
665 unsigned long flags; \
667 spin_lock_irqsave(&priv->reg_lock, flags); \
668 isr = in_be32(priv->reg + TALITOS_ISR); \
669 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
670 /* Acknowledge interrupt */ \
671 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
672 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
674 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
675 spin_unlock_irqrestore(&priv->reg_lock, flags); \
676 talitos_error(dev, isr & ch_err_mask, isr_lo); \
679 if (likely(isr & ch_done_mask)) { \
680 /* mask further done interrupts. */ \
681 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
682 /* done_task will unmask done interrupts at exit */ \
683 tasklet_schedule(&priv->done_task[tlet]); \
685 spin_unlock_irqrestore(&priv->reg_lock, flags); \
688 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
692 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
/*
 * Hard-IRQ handler generator (SEC2+): same shape as the SEC1 variant
 * but done IRQs are masked by CLEARING IMR bits, and any isr_lo bit is
 * treated as an error condition.
 */
694 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
695 static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
697 struct device *dev = data; \
698 struct talitos_private *priv = dev_get_drvdata(dev); \
700 unsigned long flags; \
702 spin_lock_irqsave(&priv->reg_lock, flags); \
703 isr = in_be32(priv->reg + TALITOS_ISR); \
704 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
705 /* Acknowledge interrupt */ \
706 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
707 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
709 if (unlikely(isr & ch_err_mask || isr_lo)) { \
710 spin_unlock_irqrestore(&priv->reg_lock, flags); \
711 talitos_error(dev, isr & ch_err_mask, isr_lo); \
714 if (likely(isr & ch_done_mask)) { \
715 /* mask further done interrupts. */ \
716 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
717 /* done_task will unmask done interrupts at exit */ \
718 tasklet_schedule(&priv->done_task[tlet]); \
720 spin_unlock_irqrestore(&priv->reg_lock, flags); \
723 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
727 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
728 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
730 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
/*
 * hwrng .data_present callback: poll the RNGU output-FIFO-level field
 * up to 20 times (delay/return lines not visible in this chunk).
 */
736 static int talitos_rng_data_present(struct hwrng *rng, int wait)
738 struct device *dev = (struct device *)rng->priv;
739 struct talitos_private *priv = dev_get_drvdata(dev);
743 for (i = 0; i < 20; i++) {
744 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
745 TALITOS_RNGUSR_LO_OFL;
/*
 * hwrng .data_read callback: the h/w FIFO must be read as a 64-bit
 * pair, so the high word read is discarded and only the low word is
 * returned in *data.
 */
754 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
756 struct device *dev = (struct device *)rng->priv;
757 struct talitos_private *priv = dev_get_drvdata(dev);
759 /* rng fifo requires 64-bit accesses */
760 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
761 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
/*
 * hwrng .init callback: software-reset the RNGU, wait (bounded) for
 * reset-done, then kick continuous generation via the data-size reg.
 */
766 static int talitos_rng_init(struct hwrng *rng)
768 struct device *dev = (struct device *)rng->priv;
769 struct talitos_private *priv = dev_get_drvdata(dev);
770 unsigned int timeout = TALITOS_TIMEOUT;
772 setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
773 while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
774 & TALITOS_RNGUSR_LO_RD)
778 dev_err(dev, "failed to reset rng hw\n");
782 /* start generating */
783 setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
/*
 * Fill in the hwrng ops and register with the hw_random core.
 * Note the comma operators on the assignments below — unusual style
 * but equivalent to semicolons here.
 */
788 static int talitos_register_rng(struct device *dev)
790 struct talitos_private *priv = dev_get_drvdata(dev);
793 priv->rng.name = dev_driver_string(dev),
794 priv->rng.init = talitos_rng_init,
795 priv->rng.data_present = talitos_rng_data_present,
796 priv->rng.data_read = talitos_rng_data_read,
797 priv->rng.priv = (unsigned long)dev;
799 err = hwrng_register(&priv->rng);
/* only mark registered on success (err check line not visible) */
801 priv->rng_registered = true;
/* Unregister the hwrng if (and only if) it was registered. */
806 static void talitos_unregister_rng(struct device *dev)
808 struct talitos_private *priv = dev_get_drvdata(dev);
810 if (!priv->rng_registered)
813 hwrng_unregister(&priv->rng);
814 priv->rng_registered = false;
/* crypto algorithm registration priority for this driver */
820 #define TALITOS_CRA_PRIORITY 3000
822 * Defines a priority for doing AEAD with descriptors type
823 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
825 #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
/* max key = cipher key + HMAC block; SHA512 block only on SEC2
 * (the #else/#endif of this conditional are not visible here) */
826 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
827 #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
829 #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
831 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
/* Per-tfm context fields (struct header line lost in extraction;
 * presumably struct talitos_ctx — verify against original file). */
836 __be32 desc_hdr_template; /* descriptor header preset for this alg */
837 u8 key[TALITOS_MAX_KEY_SIZE];
838 u8 iv[TALITOS_MAX_IV_LENGTH];
841 unsigned int enckeylen; /* cipher portion of key[] */
842 unsigned int authkeylen; /* auth (HMAC) portion of key[] */
845 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
846 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
/* per-request ahash state (several members not visible in this chunk) */
848 struct talitos_ahash_req_ctx {
849 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
850 unsigned int hw_context_size;
851 u8 buf[2][HASH_MAX_BLOCK_SIZE]; /* double buffer for partial blocks */
856 unsigned int to_hash_later;
858 struct scatterlist bufsl[2];
859 struct scatterlist *psrc;
/* serialized hash state for ahash export/import (partial view) */
862 struct talitos_export_state {
863 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
864 u8 buf[HASH_MAX_BLOCK_SIZE];
868 unsigned int to_hash_later;
/*
 * AEAD setkey: split the authenc() blob into auth + enc keys, store
 * them back-to-back in ctx->key and (re)map it for DMA.  Rejects
 * oversized keys; keys struct is zeroized on all exits.
 */
872 static int aead_setkey(struct crypto_aead *authenc,
873 const u8 *key, unsigned int keylen)
875 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
876 struct device *dev = ctx->dev;
877 struct crypto_authenc_keys keys;
879 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
882 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
/* drop any previous DMA mapping before overwriting the key */
886 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
888 memcpy(ctx->key, keys.authkey, keys.authkeylen);
889 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
891 ctx->keylen = keys.authkeylen + keys.enckeylen;
892 ctx->enckeylen = keys.enckeylen;
893 ctx->authkeylen = keys.authkeylen;
894 ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
897 memzero_explicit(&keys, sizeof(keys));
/* badkey exit path: flag the tfm and scrub the extracted keys */
901 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
902 memzero_explicit(&keys, sizeof(keys));
/*
 * DES3 AEAD setkey: like aead_setkey() but additionally enforces the
 * 3DES key size and runs the weak-key check (__des3_verify_key).
 */
906 static int aead_des3_setkey(struct crypto_aead *authenc,
907 const u8 *key, unsigned int keylen)
909 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
910 struct device *dev = ctx->dev;
911 struct crypto_authenc_keys keys;
915 err = crypto_authenc_extractkeys(&keys, key, keylen);
920 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
923 if (keys.enckeylen != DES3_EDE_KEY_SIZE)
926 flags = crypto_aead_get_flags(authenc);
927 err = __des3_verify_key(&flags, keys.enckey);
/* propagate weak-key flags back to the tfm (on the error path) */
929 crypto_aead_set_flags(authenc, flags);
934 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
936 memcpy(ctx->key, keys.authkey, keys.authkeylen);
937 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
939 ctx->keylen = keys.authkeylen + keys.enckeylen;
940 ctx->enckeylen = keys.enckeylen;
941 ctx->authkeylen = keys.authkeylen;
942 ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
946 memzero_explicit(&keys, sizeof(keys));
950 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
955 * talitos_edesc - s/w-extended descriptor
956 * @src_nents: number of segments in input scatterlist
957 * @dst_nents: number of segments in output scatterlist
958 * @icv_ool: whether ICV is out-of-line
959 * @iv_dma: dma address of iv for checking continuity and link table
960 * @dma_len: length of dma mapped link_tbl space
961 * @dma_link_tbl: bus physical address of link_tbl/buf
962 * @desc: h/w descriptor
963 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
964 * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
966 * if decrypting (with authcheck), or either one of src_nents or dst_nents
967 * is greater than 1, an integrity check value is concatenated to the end
/* several members documented above are not visible in this chunk */
970 struct talitos_edesc {
976 dma_addr_t dma_link_tbl;
977 struct talitos_desc desc;
/* [0]-sized trailing array: kept (not []) — upstream places this in a
 * union with buf[], where a flexible array member is not allowed */
979 struct talitos_ptr link_tbl[0];
/*
 * Undo the scatterlist DMA mappings for a request.  On SEC1 with a
 * multi-segment dst, data was bounced through edesc->buf, so it is
 * synced and copied back to the destination sg first.  The src==dst
 * branching is only partially visible in this chunk.
 */
984 static void talitos_sg_unmap(struct device *dev,
985 struct talitos_edesc *edesc,
986 struct scatterlist *src,
987 struct scatterlist *dst,
988 unsigned int len, unsigned int offset)
990 struct talitos_private *priv = dev_get_drvdata(dev);
991 bool is_sec1 = has_ftr_sec1(priv);
992 unsigned int src_nents = edesc->src_nents ? : 1;
993 unsigned int dst_nents = edesc->dst_nents ? : 1;
995 if (is_sec1 && dst && dst_nents > 1) {
996 dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
997 len, DMA_FROM_DEVICE);
998 sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
1002 if (src_nents == 1 || !is_sec1)
1003 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
1005 if (dst && (dst_nents == 1 || !is_sec1))
1006 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
1007 } else if (src_nents == 1 || !is_sec1) {
/* in-place (src == dst) case uses a bidirectional mapping */
1008 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
/*
 * Tear down all DMA state for an AEAD request: single-mapped HMAC-out
 * and cipher-IV pointers, the src/dst scatterlists, and the link
 * table.  For non-IPSEC-ESP (HSNA) descriptors, the next IV is copied
 * back out of the ciphertext tail into ctx->iv.
 */
1012 static void ipsec_esp_unmap(struct device *dev,
1013 struct talitos_edesc *edesc,
1014 struct aead_request *areq, bool encrypt)
1016 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1017 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1018 unsigned int ivsize = crypto_aead_ivsize(aead);
1019 unsigned int authsize = crypto_aead_authsize(aead);
/* on decrypt, cryptlen includes the ICV — strip it */
1020 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1021 bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
1022 struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
1025 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
1027 unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
1029 talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
1030 cryptlen + authsize, areq->assoclen);
1033 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1036 if (!is_ipsec_esp) {
1037 unsigned int dst_nents = edesc->dst_nents ? : 1;
1039 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1040 areq->assoclen + cryptlen - ivsize);
1045 * ipsec_esp descriptor callbacks
/* completion callback for AEAD encrypt: unmap everything (including
 * the separately mapped IV) and complete the crypto request */
1047 static void ipsec_esp_encrypt_done(struct device *dev,
1048 struct talitos_desc *desc, void *context,
1051 struct aead_request *areq = context;
1052 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1053 unsigned int ivsize = crypto_aead_ivsize(authenc);
1054 struct talitos_edesc *edesc;
1056 edesc = container_of(desc, struct talitos_edesc, desc);
1058 ipsec_esp_unmap(dev, edesc, areq, true);
1060 dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1064 aead_request_complete(areq, err);
/*
 * Decrypt completion with software ICV check: compare the ICV the
 * hardware computed (stashed after the link table in edesc->buf)
 * against the received one using constant-time crypto_memneq().
 */
1067 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1068 struct talitos_desc *desc,
1069 void *context, int err)
1071 struct aead_request *req = context;
1072 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1073 unsigned int authsize = crypto_aead_authsize(authenc);
1074 struct talitos_edesc *edesc;
1077 edesc = container_of(desc, struct talitos_edesc, desc);
1079 ipsec_esp_unmap(dev, edesc, req, false);
/* oicv = h/w-computed ICV; icv = received ICV just before it */
1083 oicv = edesc->buf + edesc->dma_len;
1084 icv = oicv - authsize;
1086 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1091 aead_request_complete(req, err);
/*
 * Decrypt completion with hardware ICV check: the SEC reports the
 * comparison result in the descriptor header's ICCR1 field; anything
 * other than PASS becomes -EBADMSG (assignment line not visible).
 */
1094 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1095 struct talitos_desc *desc,
1096 void *context, int err)
1098 struct aead_request *req = context;
1099 struct talitos_edesc *edesc;
1101 edesc = container_of(desc, struct talitos_edesc, desc);
1103 ipsec_esp_unmap(dev, edesc, req, false);
1105 /* check ICV auth status */
1106 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1107 DESC_HDR_LO_ICCR1_PASS))
1112 aead_request_complete(req, err);
1116 * convert scatterlist to SEC h/w link table format
1117 * stop at cryptlen bytes
/* Walks sg entries starting at 'offset', emitting one link-table entry
 * per segment; a segment straddling the datalen boundary is split so
 * the extent (elen) region gets its own entry.  Returns the number of
 * entries written (count tracking lines partly lost in extraction). */
1119 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1120 unsigned int offset, int datalen, int elen,
1121 struct talitos_ptr *link_tbl_ptr)
1123 int n_sg = elen ? sg_count + 1 : sg_count;
1125 int cryptlen = datalen + elen;
1127 while (cryptlen && sg && n_sg--) {
1128 unsigned int len = sg_dma_len(sg);
/* skip whole segments that lie before the requested offset */
1130 if (offset >= len) {
1140 if (datalen > 0 && len > datalen) {
1141 to_talitos_ptr(link_tbl_ptr + count,
1142 sg_dma_address(sg) + offset, datalen, 0);
1143 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1148 to_talitos_ptr(link_tbl_ptr + count,
1149 sg_dma_address(sg) + offset, len, 0);
1150 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1160 /* tag end of link table */
1162 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1163 DESC_PTR_LNKTBL_RET, 0);
/*
 * Point a descriptor entry at scatterlist data: directly for a single
 * segment (unless 'force'), via edesc->buf on SEC1, or via a link
 * table (tagged LNKTBL_JUMP) otherwise.  Returns the updated segment
 * count (some branch/return lines not visible in this chunk).
 */
1168 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1169 unsigned int len, struct talitos_edesc *edesc,
1170 struct talitos_ptr *ptr, int sg_count,
1171 unsigned int offset, int tbl_off, int elen,
1174 struct talitos_private *priv = dev_get_drvdata(dev);
1175 bool is_sec1 = has_ftr_sec1(priv);
/* zero-length request: clear the pointer */
1178 to_talitos_ptr(ptr, 0, 0, is_sec1);
1181 to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1182 if (sg_count == 1 && !force) {
1183 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
/* SEC1 multi-segment: data was bounced into the contiguous buf */
1187 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1190 sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
1191 &edesc->link_tbl[tbl_off]);
1192 if (sg_count == 1 && !force) {
1193 /* Only one segment now, so no link tbl needed*/
1194 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1197 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1198 tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1199 to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
/* Common case wrapper: no extent bytes, no forced link table. */
1204 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1205 unsigned int len, struct talitos_edesc *edesc,
1206 struct talitos_ptr *ptr, int sg_count,
1207 unsigned int offset, int tbl_off)
1209 return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1214 * fill in and submit ipsec_esp descriptor
/*
 * Builds the 7-pointer SEC descriptor for an AEAD (authenc) request and
 * submits it on the context's channel.  On anything other than
 * -EINPROGRESS from talitos_submit(), all DMA mappings are torn down
 * again before returning.
 */
1216 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1218 void (*callback)(struct device *dev,
1219 struct talitos_desc *desc,
1220 void *context, int error))
1222 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1223 unsigned int authsize = crypto_aead_authsize(aead);
1224 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1225 struct device *dev = ctx->dev;
1226 struct talitos_desc *desc = &edesc->desc;
/* on decrypt, req->cryptlen includes the ICV; strip it for the cipher */
1227 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1228 unsigned int ivsize = crypto_aead_ivsize(aead);
1232 bool sync_needed = false;
1233 struct talitos_private *priv = dev_get_drvdata(dev);
1234 bool is_sec1 = has_ftr_sec1(priv);
1235 bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
/* cipher IV and cipher key pointer slots are swapped between the two
 * descriptor types (IPSEC_ESP vs HMAC_SNOOP_NO_AFEU)
 */
1236 struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1237 struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
/* ICV scratch area lives at the tail of the mapped link-table buffer */
1238 dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
/* ptr[0]: HMAC (auth) key */
1241 to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1243 sg_count = edesc->src_nents ?: 1;
/* SEC1 cannot walk multi-segment S/G lists; linearize into edesc->buf */
1244 if (is_sec1 && sg_count > 1)
1245 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1246 areq->assoclen + cryptlen);
1248 sg_count = dma_map_sg(dev, areq->src, sg_count,
1249 (areq->src == areq->dst) ?
1250 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
/* ptr[1]: associated data */
1253 ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1254 &desc->ptr[1], sg_count, 0, tbl_off);
/* cipher IV */
1262 to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
/* cipher key follows the auth key in the ctx key buffer */
1265 to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
1266 ctx->enckeylen, is_sec1);
1270 * map and adjust cipher len to aead request cryptlen.
1271 * extent is bytes of HMAC postpended to ciphertext,
1272 * typically 12 for ipsec
1274 if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
/* ptr[4]: cipher input (payload after assoclen) */
1277 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1278 sg_count, areq->assoclen, tbl_off, elen,
/* ptr[5]: cipher output */
1287 if (areq->src != areq->dst) {
1288 sg_count = edesc->dst_nents ? : 1;
1289 if (!is_sec1 || sg_count == 1)
1290 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1293 if (is_ipsec_esp && encrypt)
1297 ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1298 sg_count, areq->assoclen, tbl_off, elen,
1299 is_ipsec_esp && !encrypt);
/* on decrypt the generated ICV lands out-of-line for later compare */
1303 edesc->icv_ool = !encrypt;
1305 if (!encrypt && is_ipsec_esp) {
1306 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1308 /* Add an entry to the link table for ICV data */
1309 to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1310 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
1312 /* icv data follows link tables */
1313 to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
1314 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1316 } else if (!encrypt) {
1317 to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
1319 } else if (!is_ipsec_esp) {
/* HMAC_SNOOP_NO_AFEU: ICV is written out via ptr[6] after the payload */
1320 talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
1321 sg_count, areq->assoclen + cryptlen, tbl_off);
/* ptr[6] (IPsec ESP encrypt): outgoing IV */
1326 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
/* flush CPU-written link tables before the engine reads them */
1330 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1334 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1335 if (ret != -EINPROGRESS) {
1336 ipsec_esp_unmap(dev, edesc, areq, encrypt);
1343 * allocate and map the extended descriptor
/*
 * Allocates a talitos_edesc sized for the request: the base structure,
 * optional link-table / bounce-buffer space (dma_len), optional second
 * descriptor (SEC1 ahash), optional stashed-ICV space, and a trailing
 * copy of the IV which is DMA-mapped.  Returns ERR_PTR() on failure.
 */
1345 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1346 struct scatterlist *src,
1347 struct scatterlist *dst,
1349 unsigned int assoclen,
1350 unsigned int cryptlen,
1351 unsigned int authsize,
1352 unsigned int ivsize,
1357 struct talitos_edesc *edesc;
1358 int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1359 dma_addr_t iv_dma = 0;
1360 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1362 struct talitos_private *priv = dev_get_drvdata(dev);
1363 bool is_sec1 = has_ftr_sec1(priv);
/* h/w length fields differ between SEC1 and SEC2+ */
1364 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1366 if (cryptlen + authsize > max_len) {
1367 dev_err(dev, "length exceeds h/w max limit\n");
1368 return ERR_PTR(-EINVAL);
/* in-place operation: one S/G count serves both directions */
1371 if (!dst || dst == src) {
1372 src_len = assoclen + cryptlen + authsize;
1373 src_nents = sg_nents_for_len(src, src_len);
1374 if (src_nents < 0) {
1375 dev_err(dev, "Invalid number of src SG.\n");
1376 return ERR_PTR(-EINVAL);
/* nents == 1 is recorded as 0: "no link table needed" */
1378 src_nents = (src_nents == 1) ? 0 : src_nents;
1379 dst_nents = dst ? src_nents : 0;
1381 } else { /* dst && dst != src*/
/* ICV is read from src on decrypt, written to dst on encrypt */
1382 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1383 src_nents = sg_nents_for_len(src, src_len);
1384 if (src_nents < 0) {
1385 dev_err(dev, "Invalid number of src SG.\n");
1386 return ERR_PTR(-EINVAL);
1388 src_nents = (src_nents == 1) ? 0 : src_nents;
1389 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1390 dst_nents = sg_nents_for_len(dst, dst_len);
1391 if (dst_nents < 0) {
1392 dev_err(dev, "Invalid number of dst SG.\n");
1393 return ERR_PTR(-EINVAL);
1395 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1399 * allocate space for base edesc plus the link tables,
1400 * allowing for two separate entries for AD and generated ICV (+ 2),
1401 * and space for two sets of ICVs (stashed and generated)
1403 alloc_len = sizeof(struct talitos_edesc);
1404 if (src_nents || dst_nents || !encrypt) {
/* SEC1 path uses a flat bounce buffer; SEC2+ uses link tables */
1406 dma_len = (src_nents ? src_len : 0) +
1407 (dst_nents ? dst_len : 0) + authsize;
1409 dma_len = (src_nents + dst_nents + 2) *
1410 sizeof(struct talitos_ptr) + authsize;
1411 alloc_len += dma_len;
1415 alloc_len += icv_stashing ? authsize : 0;
1417 /* if its a ahash, add space for a second desc next to the first one */
1418 if (is_sec1 && !dst)
1419 alloc_len += sizeof(struct talitos_desc);
1420 alloc_len += ivsize;
1422 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1424 return ERR_PTR(-ENOMEM);
/* IV is copied to the tail of the allocation so it can be DMA-mapped */
1426 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1427 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1429 memset(&edesc->desc, 0, sizeof(edesc->desc));
1431 edesc->src_nents = src_nents;
1432 edesc->dst_nents = dst_nents;
1433 edesc->iv_dma = iv_dma;
1434 edesc->dma_len = dma_len;
1436 void *addr = &edesc->link_tbl[0];
/* skip over the second (SEC1 ahash) descriptor if one was reserved */
1438 if (is_sec1 && !dst)
1439 addr += sizeof(struct talitos_desc);
1440 edesc->dma_link_tbl = dma_map_single(dev, addr,
/*
 * AEAD front end for talitos_edesc_alloc(): derives sizes from the aead
 * request/tfm and strips the ICV from cryptlen on decrypt.
 */
1447 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1448 int icv_stashing, bool encrypt)
1450 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1451 unsigned int authsize = crypto_aead_authsize(authenc);
1452 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1453 unsigned int ivsize = crypto_aead_ivsize(authenc);
1454 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1456 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1457 iv, areq->assoclen, cryptlen,
1458 authsize, ivsize, icv_stashing,
1459 areq->base.flags, encrypt);
/*
 * crypto_aead .encrypt entry point: allocate the edesc, set the ENCRYPT
 * direction in the descriptor header, and hand off to ipsec_esp().
 */
1462 static int aead_encrypt(struct aead_request *req)
1464 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1465 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1466 struct talitos_edesc *edesc;
1468 /* allocate extended descriptor */
1469 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1471 return PTR_ERR(edesc);
1474 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1476 return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
/*
 * crypto_aead .decrypt entry point.  Uses the engine's h/w ICV check
 * (CICV) when the descriptor type and device features allow it;
 * otherwise stashes the incoming ICV and compares in software from the
 * completion callback.
 */
1479 static int aead_decrypt(struct aead_request *req)
1481 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1482 unsigned int authsize = crypto_aead_authsize(authenc);
1483 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1484 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1485 struct talitos_edesc *edesc;
1488 /* allocate extended descriptor */
1489 edesc = aead_edesc_alloc(req, req->iv, 1, false);
1491 return PTR_ERR(edesc);
/* h/w auth check only works for single-segment or when the link-table
 * length includes the extent (feature-dependent)
 */
1493 if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1494 (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1495 ((!edesc->src_nents && !edesc->dst_nents) ||
1496 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1498 /* decrypt and check the ICV */
1499 edesc->desc.hdr = ctx->desc_hdr_template |
1500 DESC_HDR_DIR_INBOUND |
1501 DESC_HDR_MODE1_MDEU_CICV;
1503 /* reset integrity check result bits */
1505 return ipsec_esp(edesc, req, false,
1506 ipsec_esp_decrypt_hwauth_done);
1509 /* Have to check the ICV with software */
1510 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1512 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1513 icvdata = edesc->buf + edesc->dma_len;
1515 sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1516 req->assoclen + req->cryptlen - authsize);
1518 return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
/*
 * Generic ablkcipher .setkey: unmap any previously mapped key, copy the
 * new key into the ctx and DMA-map it for the engine.
 */
1521 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1522 const u8 *key, unsigned int keylen)
1524 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1525 struct device *dev = ctx->dev;
1528 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1530 memcpy(&ctx->key, key, keylen);
1531 ctx->keylen = keylen;
1533 ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
/*
 * DES .setkey: reject weak keys (via des_ekey()) when the tfm requests
 * CRYPTO_TFM_REQ_FORBID_WEAK_KEYS, then defer to ablkcipher_setkey().
 */
1538 static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
1539 const u8 *key, unsigned int keylen)
1541 u32 tmp[DES_EXPKEY_WORDS];
1543 if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1544 CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
1545 !des_ekey(tmp, key)) {
1546 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1550 return ablkcipher_setkey(cipher, key, keylen);
/*
 * 3DES .setkey: validate the key with __des3_verify_key() (rejects
 * degenerate K1==K2 etc. per the tfm flags), then defer to
 * ablkcipher_setkey().
 */
1553 static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
1554 const u8 *key, unsigned int keylen)
1559 flags = crypto_ablkcipher_get_flags(cipher);
1560 err = __des3_verify_key(&flags, key);
1561 if (unlikely(err)) {
1562 crypto_ablkcipher_set_flags(cipher, flags);
1566 return ablkcipher_setkey(cipher, key, keylen);
/*
 * AES .setkey: only the three standard AES key sizes are accepted;
 * anything else sets CRYPTO_TFM_RES_BAD_KEY_LEN and fails.
 */
1569 static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
1570 const u8 *key, unsigned int keylen)
1572 if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1573 keylen == AES_KEYSIZE_256)
1574 return ablkcipher_setkey(cipher, key, keylen);
1576 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
/*
 * Undo the DMA mappings made by common_nonsnoop(): IV out (ptr[5]),
 * src/dst S/G lists, IV in (ptr[1]), and the link-table buffer.
 */
1581 static void common_nonsnoop_unmap(struct device *dev,
1582 struct talitos_edesc *edesc,
1583 struct ablkcipher_request *areq)
1585 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1587 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1588 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1591 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
/*
 * Completion callback for ablkcipher requests: unmap everything, copy
 * the output IV back into the request (chaining support), and complete
 * the base request.
 */
1595 static void ablkcipher_done(struct device *dev,
1596 struct talitos_desc *desc, void *context,
1599 struct ablkcipher_request *areq = context;
1600 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1601 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1602 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1603 struct talitos_edesc *edesc;
/* edesc embeds the h/w descriptor, so recover it from desc */
1605 edesc = container_of(desc, struct talitos_edesc, desc);
1607 common_nonsnoop_unmap(dev, edesc, areq);
1608 memcpy(areq->info, ctx->iv, ivsize);
1612 areq->base.complete(&areq->base, err);
/*
 * Build and submit the descriptor for a plain (non-snooping) cipher
 * operation: ptr[1]=IV in, ptr[2]=key, ptr[3]=cipher in, ptr[4]=cipher
 * out, ptr[5]=IV out.  Cleans up on submission failure.
 */
1615 static int common_nonsnoop(struct talitos_edesc *edesc,
1616 struct ablkcipher_request *areq,
1617 void (*callback) (struct device *dev,
1618 struct talitos_desc *desc,
1619 void *context, int error))
1621 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1622 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1623 struct device *dev = ctx->dev;
1624 struct talitos_desc *desc = &edesc->desc;
1625 unsigned int cryptlen = areq->nbytes;
1626 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1628 bool sync_needed = false;
1629 struct talitos_private *priv = dev_get_drvdata(dev);
1630 bool is_sec1 = has_ftr_sec1(priv);
1632 /* first DWORD empty */
/* cipher iv */
1635 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
/* cipher key */
1638 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1640 sg_count = edesc->src_nents ?: 1;
/* SEC1 cannot walk multi-segment S/G lists; linearize into edesc->buf */
1641 if (is_sec1 && sg_count > 1)
1642 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1645 sg_count = dma_map_sg(dev, areq->src, sg_count,
1646 (areq->src == areq->dst) ?
1647 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
/* cipher data in */
1651 sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1652 &desc->ptr[3], sg_count, 0, 0);
/* cipher data out */
1657 if (areq->src != areq->dst) {
1658 sg_count = edesc->dst_nents ? : 1;
1659 if (!is_sec1 || sg_count == 1)
1660 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
/* dst link table entries start after the src entries (src_nents + 1) */
1663 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1664 sg_count, 0, (edesc->src_nents + 1));
/* iv out */
1669 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1672 /* last DWORD empty */
1675 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1676 edesc->dma_len, DMA_BIDIRECTIONAL);
1678 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1679 if (ret != -EINPROGRESS) {
1680 common_nonsnoop_unmap(dev, edesc, areq);
/*
 * ablkcipher front end for talitos_edesc_alloc(): no assoclen, no
 * authsize, no ICV stashing -- just the payload and the IV.
 */
1686 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1689 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1690 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1691 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1693 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1694 areq->info, 0, areq->nbytes, 0, ivsize, 0,
1695 areq->base.flags, encrypt);
/*
 * ablkcipher .encrypt entry point: block-aligned lengths only, then
 * build the descriptor with the ENCRYPT direction and submit.
 */
1698 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1700 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1701 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1702 struct talitos_edesc *edesc;
1703 unsigned int blocksize =
1704 crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
/* partial blocks are rejected before any allocation happens */
1709 if (areq->nbytes % blocksize)
1712 /* allocate extended descriptor */
1713 edesc = ablkcipher_edesc_alloc(areq, true);
1715 return PTR_ERR(edesc);
1718 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1720 return common_nonsnoop(edesc, areq, ablkcipher_done);
/*
 * ablkcipher .decrypt entry point: same as encrypt but with the
 * INBOUND direction bit in the descriptor header.
 */
1723 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1725 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1726 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1727 struct talitos_edesc *edesc;
1728 unsigned int blocksize =
1729 crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
/* partial blocks are rejected before any allocation happens */
1734 if (areq->nbytes % blocksize)
1737 /* allocate extended descriptor */
1738 edesc = ablkcipher_edesc_alloc(areq, false);
1740 return PTR_ERR(edesc);
1742 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1744 return common_nonsnoop(edesc, areq, ablkcipher_done);
/*
 * Undo the DMA mappings made by common_nonsnoop_hash(), including those
 * of the optional chained second descriptor (SEC1 buffered-data path).
 */
1747 static void common_nonsnoop_hash_unmap(struct device *dev,
1748 struct talitos_edesc *edesc,
1749 struct ahash_request *areq)
1751 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1752 struct talitos_private *priv = dev_get_drvdata(dev);
1753 bool is_sec1 = has_ftr_sec1(priv);
1754 struct talitos_desc *desc = &edesc->desc;
/* the second descriptor, when used, sits right after the first */
1755 struct talitos_desc *desc2 = desc + 1;
1757 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
/* only unmap desc2's out-ptr if it wasn't just a copy of desc's */
1758 if (desc->next_desc &&
1759 desc->ptr[5].ptr != desc2->ptr[5].ptr)
1760 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1762 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1764 /* When using hashctx-in, must unmap it. */
1765 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1766 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1768 else if (desc->next_desc)
1769 unmap_single_talitos_ptr(dev, &desc2->ptr[1],
/* SEC1 maps the buffered partial block via ptr[3] */
1772 if (is_sec1 && req_ctx->nbuf)
1773 unmap_single_talitos_ptr(dev, &desc->ptr[3],
1777 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1780 if (edesc->desc.next_desc)
1781 dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1782 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
/*
 * Completion callback for hash requests: rotate the double buffer so
 * any bytes held back for the next update become the current buffer,
 * then unmap and complete.
 */
1785 static void ahash_done(struct device *dev,
1786 struct talitos_desc *desc, void *context,
1789 struct ahash_request *areq = context;
1790 struct talitos_edesc *edesc =
1791 container_of(desc, struct talitos_edesc, desc);
1792 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1794 if (!req_ctx->last && req_ctx->to_hash_later) {
1795 /* Position any partial block for next update/final/finup */
1796 req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1797 req_ctx->nbuf = req_ctx->to_hash_later;
1799 common_nonsnoop_hash_unmap(dev, edesc, areq);
1803 areq->base.complete(&areq->base, err);
1807 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1808 * ourself and submit a padded block
/*
 * Substitutes a pre-padded single block (0x80 then zeros, i.e. the
 * MD5/SHA padding of an empty message, minus the length words which
 * stay zero) for the data pointer, and clears the PAD mode bit so the
 * engine doesn't pad again.
 */
1810 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1811 struct talitos_edesc *edesc,
1812 struct talitos_ptr *ptr)
1814 static u8 padded_hash[64] = {
1815 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1816 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1817 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1818 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1821 pr_err_once("Bug in SEC1, padding ourself\n");
1822 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1823 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1824 (char *)padded_hash, DMA_TO_DEVICE);
/*
 * Build and submit the hash descriptor: ptr[1]=hash context in (unless
 * first op with h/w init), ptr[2]=HMAC key, ptr[3]=data in, ptr[5]=
 * digest or context out.  On SEC1, when buffered bytes and new data
 * must both be hashed, a second chained descriptor is built (the SEC1
 * engine processes the buffered block and the S/G data separately).
 */
1827 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1828 struct ahash_request *areq, unsigned int length,
1829 unsigned int offset,
1830 void (*callback) (struct device *dev,
1831 struct talitos_desc *desc,
1832 void *context, int error))
1834 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1835 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1836 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1837 struct device *dev = ctx->dev;
1838 struct talitos_desc *desc = &edesc->desc;
1840 bool sync_needed = false;
1841 struct talitos_private *priv = dev_get_drvdata(dev);
1842 bool is_sec1 = has_ftr_sec1(priv);
1845 /* first DWORD empty */
1847 /* hash context in */
1848 if (!req_ctx->first || req_ctx->swinit) {
1849 map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1850 req_ctx->hw_context_size,
1851 req_ctx->hw_context,
1853 req_ctx->swinit = 0;
1855 /* Indicate next op is not the first. */
/* HMAC key (keylen == 0 for plain hashes) */
1860 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
/* on SEC1 the buffered bytes go through ptr[3] directly, so they are
 * not part of the S/G-mapped length
 */
1863 if (is_sec1 && req_ctx->nbuf)
1864 length -= req_ctx->nbuf;
1866 sg_count = edesc->src_nents ?: 1;
/* SEC1: linearize multi-segment input past the embedded second desc */
1867 if (is_sec1 && sg_count > 1)
1868 sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
1869 edesc->buf + sizeof(struct talitos_desc),
1870 length, req_ctx->nbuf);
1872 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1877 if (is_sec1 && req_ctx->nbuf) {
1878 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1879 req_ctx->buf[req_ctx->buf_idx],
1882 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1883 &desc->ptr[3], sg_count, offset, 0);
1888 /* fifth DWORD empty */
1890 /* hash/HMAC out -or- hash context out */
1892 map_single_talitos_ptr(dev, &desc->ptr[5],
1893 crypto_ahash_digestsize(tfm),
1894 areq->result, DMA_FROM_DEVICE);
1896 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1897 req_ctx->hw_context_size,
1898 req_ctx->hw_context,
1901 /* last DWORD empty */
/* SEC1 zero-length workaround: substitute a pre-padded empty block */
1903 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1904 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1906 if (is_sec1 && req_ctx->nbuf && length) {
1907 struct talitos_desc *desc2 = desc + 1;
1908 dma_addr_t next_desc;
1910 memset(desc2, 0, sizeof(*desc2));
1911 desc2->hdr = desc->hdr;
1912 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1913 desc2->hdr1 = desc2->hdr;
/* first desc hashes the buffered block: continue, no pad, no IRQ */
1914 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1915 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1916 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1918 if (desc->ptr[1].ptr)
1919 copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1922 map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1923 req_ctx->hw_context_size,
1924 req_ctx->hw_context,
1926 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1927 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1928 &desc2->ptr[3], sg_count, offset, 0);
/* desc2 produces the final output; desc's out becomes the
 * intermediate context
 */
1931 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1933 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1934 req_ctx->hw_context_size,
1935 req_ctx->hw_context,
1938 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1940 desc->next_desc = cpu_to_be32(next_desc);
1944 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1945 edesc->dma_len, DMA_BIDIRECTIONAL);
1947 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1948 if (ret != -EINPROGRESS) {
1949 common_nonsnoop_hash_unmap(dev, edesc, areq);
/*
 * ahash front end for talitos_edesc_alloc(): hashes have no dst S/G,
 * no IV and no ICV.  NOTE(review): the visible "nbytes -= req_ctx->nbuf"
 * appears to be conditional on elided context -- confirm against the
 * full source before relying on it.
 */
1955 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1956 unsigned int nbytes)
1958 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1959 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1960 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1961 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1962 bool is_sec1 = has_ftr_sec1(priv);
1965 nbytes -= req_ctx->nbuf;
1967 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1968 nbytes, 0, 0, 0, areq->base.flags, false);
/*
 * ahash .init: reset the per-request context, pick the h/w context size
 * from the digest size, and prime the cache/DMA state of hw_context
 * with a map/unmap round trip.
 */
1971 static int ahash_init(struct ahash_request *areq)
1973 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1974 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1975 struct device *dev = ctx->dev;
1976 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1980 /* Initialize the context */
1981 req_ctx->buf_idx = 0;
1983 req_ctx->first = 1; /* first indicates h/w must init its context */
1984 req_ctx->swinit = 0; /* assume h/w init of context */
/* MD5/SHA1/SHA256 share one context size; SHA384/512 need the larger */
1985 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1986 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1987 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1988 req_ctx->hw_context_size = size;
1990 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1992 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1998 * on h/w without explicit sha224 support, we initialize h/w context
1999 * manually with sha224 constants, and tell it to run sha256.
2001 static int ahash_init_sha224_swinit(struct ahash_request *areq)
2003 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
/* seed the context with the SHA-224 initial hash values H0..H7 */
2005 req_ctx->hw_context[0] = SHA224_H0;
2006 req_ctx->hw_context[1] = SHA224_H1;
2007 req_ctx->hw_context[2] = SHA224_H2;
2008 req_ctx->hw_context[3] = SHA224_H3;
2009 req_ctx->hw_context[4] = SHA224_H4;
2010 req_ctx->hw_context[5] = SHA224_H5;
2011 req_ctx->hw_context[6] = SHA224_H6;
2012 req_ctx->hw_context[7] = SHA224_H7;
2014 /* init 64-bit count */
2015 req_ctx->hw_context[8] = 0;
2016 req_ctx->hw_context[9] = 0;
2019 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
/*
 * Core of update/final/finup/digest: buffer sub-block tails, decide how
 * much to hash now vs. hold for later, set the descriptor-header mode
 * bits (PAD/CONT/INIT/HMAC), and dispatch to common_nonsnoop_hash().
 */
2024 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
2026 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2027 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2028 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2029 struct talitos_edesc *edesc;
2030 unsigned int blocksize =
2031 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2032 unsigned int nbytes_to_hash;
2033 unsigned int to_hash_later;
2036 struct device *dev = ctx->dev;
2037 struct talitos_private *priv = dev_get_drvdata(dev);
2038 bool is_sec1 = has_ftr_sec1(priv);
2040 u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
/* not enough for a full block and not the final op: just buffer it */
2042 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
2043 /* Buffer up to one whole block */
2044 nents = sg_nents_for_len(areq->src, nbytes);
2046 dev_err(ctx->dev, "Invalid number of src SG.\n");
2049 sg_copy_to_buffer(areq->src, nents,
2050 ctx_buf + req_ctx->nbuf, nbytes);
2051 req_ctx->nbuf += nbytes;
2055 /* At least (blocksize + 1) bytes are available to hash */
2056 nbytes_to_hash = nbytes + req_ctx->nbuf;
/* NOTE(review): relies on blocksize being a power of two */
2057 to_hash_later = nbytes_to_hash & (blocksize - 1);
2061 else if (to_hash_later)
2062 /* There is a partial block. Hash the full block(s) now */
2063 nbytes_to_hash -= to_hash_later;
2065 /* Keep one block buffered */
2066 nbytes_to_hash -= blocksize;
2067 to_hash_later = blocksize;
2070 /* Chain in any previously buffered data */
2071 if (!is_sec1 && req_ctx->nbuf) {
2072 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2073 sg_init_table(req_ctx->bufsl, nsg);
2074 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2076 sg_chain(req_ctx->bufsl, 2, areq->src);
2077 req_ctx->psrc = req_ctx->bufsl;
2078 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
/* SEC1 can't chain S/G; top the buffer up to a full block instead */
2079 if (nbytes_to_hash > blocksize)
2080 offset = blocksize - req_ctx->nbuf;
2082 offset = nbytes_to_hash - req_ctx->nbuf;
2083 nents = sg_nents_for_len(areq->src, offset);
2085 dev_err(ctx->dev, "Invalid number of src SG.\n");
2088 sg_copy_to_buffer(areq->src, nents,
2089 ctx_buf + req_ctx->nbuf, offset);
2090 req_ctx->nbuf += offset;
2091 req_ctx->psrc = areq->src;
2093 req_ctx->psrc = areq->src;
2095 if (to_hash_later) {
2096 nents = sg_nents_for_len(areq->src, nbytes);
2098 dev_err(ctx->dev, "Invalid number of src SG.\n");
/* stash the tail in the *other* half of the double buffer */
2101 sg_pcopy_to_buffer(areq->src, nents,
2102 req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2104 nbytes - to_hash_later);
2106 req_ctx->to_hash_later = to_hash_later;
2108 /* Allocate extended descriptor */
2109 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2111 return PTR_ERR(edesc);
2113 edesc->desc.hdr = ctx->desc_hdr_template;
2115 /* On last one, request SEC to pad; otherwise continue */
2117 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2119 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2121 /* request SEC to INIT hash. */
2122 if (req_ctx->first && !req_ctx->swinit)
2123 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2125 /* When the tfm context has a keylen, it's an HMAC.
2126 * A first or last (ie. not middle) descriptor must request HMAC.
2128 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2129 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2131 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
/* ahash .update: hash areq->nbytes more bytes; not the last operation */
2135 static int ahash_update(struct ahash_request *areq)
2137 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2141 return ahash_process_req(areq, areq->nbytes);
/* ahash .final: no new data; flush buffered bytes and emit the digest */
2144 static int ahash_final(struct ahash_request *areq)
2146 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2150 return ahash_process_req(areq, 0);
2153 static int ahash_finup(struct ahash_request *areq)
2155 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2159 return ahash_process_req(areq, areq->nbytes);
/* ahash .digest: one-shot init + finup over areq->nbytes */
2162 static int ahash_digest(struct ahash_request *areq)
2164 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2165 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2170 return ahash_process_req(areq, areq->nbytes);
/*
 * ahash .export: serialize the partial-hash state (h/w context, buffered
 * bytes, bookkeeping flags) into a talitos_export_state.  The map/unmap
 * round trip makes the CPU view of hw_context coherent first.
 */
2173 static int ahash_export(struct ahash_request *areq, void *out)
2175 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2176 struct talitos_export_state *export = out;
2177 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2178 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2179 struct device *dev = ctx->dev;
2182 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2184 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2186 memcpy(export->hw_context, req_ctx->hw_context,
2187 req_ctx->hw_context_size);
2188 memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2189 export->swinit = req_ctx->swinit;
2190 export->first = req_ctx->first;
2191 export->last = req_ctx->last;
2192 export->to_hash_later = req_ctx->to_hash_later;
2193 export->nbuf = req_ctx->nbuf;
/*
 * ahash .import: inverse of ahash_export() -- rebuild the request
 * context from an exported state, then flush hw_context toward the
 * device with a map/unmap round trip.
 */
2198 static int ahash_import(struct ahash_request *areq, const void *in)
2200 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2201 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2202 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2203 struct device *dev = ctx->dev;
2204 const struct talitos_export_state *export = in;
2208 memset(req_ctx, 0, sizeof(*req_ctx));
/* context size re-derived from the digest size, as in ahash_init() */
2209 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2210 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2211 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2212 req_ctx->hw_context_size = size;
2213 memcpy(req_ctx->hw_context, export->hw_context, size);
2214 memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2215 req_ctx->swinit = export->swinit;
2216 req_ctx->first = export->first;
2217 req_ctx->last = export->last;
2218 req_ctx->to_hash_later = export->to_hash_later;
2219 req_ctx->nbuf = export->nbuf;
2221 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2223 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
/*
 * Synchronously digest @key with this same tfm (used by ahash_setkey()
 * to shrink over-long HMAC keys).  Uses crypto_wait_req() to block on
 * the async digest.
 */
2228 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2231 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2233 struct scatterlist sg[1];
2234 struct ahash_request *req;
2235 struct crypto_wait wait;
2238 crypto_init_wait(&wait);
2240 req = ahash_request_alloc(tfm, GFP_KERNEL);
2244 /* Keep tfm keylen == 0 during hash of the long key */
2246 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2247 crypto_req_done, &wait);
2249 sg_init_one(&sg[0], key, keylen);
2251 ahash_request_set_crypt(req, sg, hash, keylen);
2252 ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2254 ahash_request_free(req);
/*
 * HMAC .setkey: keys longer than the block size are first reduced to
 * their digest (RFC 2104 rule); the resulting key is copied into the
 * ctx and DMA-mapped, replacing any previous mapping.
 */
2259 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2260 unsigned int keylen)
2262 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2263 struct device *dev = ctx->dev;
2264 unsigned int blocksize =
2265 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2266 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2267 unsigned int keysize = keylen;
2268 u8 hash[SHA512_DIGEST_SIZE];
2271 if (keylen <= blocksize)
2272 memcpy(ctx->key, key, keysize);
2274 /* Must get the hash of the long key */
2275 ret = keyhash(tfm, key, keylen, hash);
2278 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2282 keysize = digestsize;
2283 memcpy(ctx->key, hash, digestsize);
2287 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2289 ctx->keylen = keysize;
2290 ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
/*
 * Template entry for driver_algs[]: one crypto_alg/ahash_alg/aead_alg
 * (NOTE(review): presumably in a union, selected by an elided .type
 * field -- confirm) plus the SEC descriptor header template used for
 * requests on that algorithm.
 */
2296 struct talitos_alg_template {
2300 struct crypto_alg crypto;
2301 struct ahash_alg hash;
2302 struct aead_alg aead;
2304 __be32 desc_hdr_template;
2307 static struct talitos_alg_template driver_algs[] = {
2308 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2309 { .type = CRYPTO_ALG_TYPE_AEAD,
2312 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2313 .cra_driver_name = "authenc-hmac-sha1-"
2315 .cra_blocksize = AES_BLOCK_SIZE,
2316 .cra_flags = CRYPTO_ALG_ASYNC,
2318 .ivsize = AES_BLOCK_SIZE,
2319 .maxauthsize = SHA1_DIGEST_SIZE,
2321 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2322 DESC_HDR_SEL0_AESU |
2323 DESC_HDR_MODE0_AESU_CBC |
2324 DESC_HDR_SEL1_MDEUA |
2325 DESC_HDR_MODE1_MDEU_INIT |
2326 DESC_HDR_MODE1_MDEU_PAD |
2327 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2329 { .type = CRYPTO_ALG_TYPE_AEAD,
2330 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2333 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2334 .cra_driver_name = "authenc-hmac-sha1-"
2335 "cbc-aes-talitos-hsna",
2336 .cra_blocksize = AES_BLOCK_SIZE,
2337 .cra_flags = CRYPTO_ALG_ASYNC,
2339 .ivsize = AES_BLOCK_SIZE,
2340 .maxauthsize = SHA1_DIGEST_SIZE,
2342 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2343 DESC_HDR_SEL0_AESU |
2344 DESC_HDR_MODE0_AESU_CBC |
2345 DESC_HDR_SEL1_MDEUA |
2346 DESC_HDR_MODE1_MDEU_INIT |
2347 DESC_HDR_MODE1_MDEU_PAD |
2348 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2350 { .type = CRYPTO_ALG_TYPE_AEAD,
2353 .cra_name = "authenc(hmac(sha1),"
2355 .cra_driver_name = "authenc-hmac-sha1-"
2357 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2358 .cra_flags = CRYPTO_ALG_ASYNC,
2360 .ivsize = DES3_EDE_BLOCK_SIZE,
2361 .maxauthsize = SHA1_DIGEST_SIZE,
2362 .setkey = aead_des3_setkey,
2364 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2366 DESC_HDR_MODE0_DEU_CBC |
2367 DESC_HDR_MODE0_DEU_3DES |
2368 DESC_HDR_SEL1_MDEUA |
2369 DESC_HDR_MODE1_MDEU_INIT |
2370 DESC_HDR_MODE1_MDEU_PAD |
2371 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2373 { .type = CRYPTO_ALG_TYPE_AEAD,
2374 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2377 .cra_name = "authenc(hmac(sha1),"
2379 .cra_driver_name = "authenc-hmac-sha1-"
2380 "cbc-3des-talitos-hsna",
2381 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2382 .cra_flags = CRYPTO_ALG_ASYNC,
2384 .ivsize = DES3_EDE_BLOCK_SIZE,
2385 .maxauthsize = SHA1_DIGEST_SIZE,
2386 .setkey = aead_des3_setkey,
2388 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2390 DESC_HDR_MODE0_DEU_CBC |
2391 DESC_HDR_MODE0_DEU_3DES |
2392 DESC_HDR_SEL1_MDEUA |
2393 DESC_HDR_MODE1_MDEU_INIT |
2394 DESC_HDR_MODE1_MDEU_PAD |
2395 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2397 { .type = CRYPTO_ALG_TYPE_AEAD,
2400 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2401 .cra_driver_name = "authenc-hmac-sha224-"
2403 .cra_blocksize = AES_BLOCK_SIZE,
2404 .cra_flags = CRYPTO_ALG_ASYNC,
2406 .ivsize = AES_BLOCK_SIZE,
2407 .maxauthsize = SHA224_DIGEST_SIZE,
2409 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2410 DESC_HDR_SEL0_AESU |
2411 DESC_HDR_MODE0_AESU_CBC |
2412 DESC_HDR_SEL1_MDEUA |
2413 DESC_HDR_MODE1_MDEU_INIT |
2414 DESC_HDR_MODE1_MDEU_PAD |
2415 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2417 { .type = CRYPTO_ALG_TYPE_AEAD,
2418 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2421 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2422 .cra_driver_name = "authenc-hmac-sha224-"
2423 "cbc-aes-talitos-hsna",
2424 .cra_blocksize = AES_BLOCK_SIZE,
2425 .cra_flags = CRYPTO_ALG_ASYNC,
2427 .ivsize = AES_BLOCK_SIZE,
2428 .maxauthsize = SHA224_DIGEST_SIZE,
2430 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2431 DESC_HDR_SEL0_AESU |
2432 DESC_HDR_MODE0_AESU_CBC |
2433 DESC_HDR_SEL1_MDEUA |
2434 DESC_HDR_MODE1_MDEU_INIT |
2435 DESC_HDR_MODE1_MDEU_PAD |
2436 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2438 { .type = CRYPTO_ALG_TYPE_AEAD,
2441 .cra_name = "authenc(hmac(sha224),"
2443 .cra_driver_name = "authenc-hmac-sha224-"
2445 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2446 .cra_flags = CRYPTO_ALG_ASYNC,
2448 .ivsize = DES3_EDE_BLOCK_SIZE,
2449 .maxauthsize = SHA224_DIGEST_SIZE,
2450 .setkey = aead_des3_setkey,
2452 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2454 DESC_HDR_MODE0_DEU_CBC |
2455 DESC_HDR_MODE0_DEU_3DES |
2456 DESC_HDR_SEL1_MDEUA |
2457 DESC_HDR_MODE1_MDEU_INIT |
2458 DESC_HDR_MODE1_MDEU_PAD |
2459 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2461 { .type = CRYPTO_ALG_TYPE_AEAD,
2462 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2465 .cra_name = "authenc(hmac(sha224),"
2467 .cra_driver_name = "authenc-hmac-sha224-"
2468 "cbc-3des-talitos-hsna",
2469 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2470 .cra_flags = CRYPTO_ALG_ASYNC,
2472 .ivsize = DES3_EDE_BLOCK_SIZE,
2473 .maxauthsize = SHA224_DIGEST_SIZE,
2474 .setkey = aead_des3_setkey,
2476 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2478 DESC_HDR_MODE0_DEU_CBC |
2479 DESC_HDR_MODE0_DEU_3DES |
2480 DESC_HDR_SEL1_MDEUA |
2481 DESC_HDR_MODE1_MDEU_INIT |
2482 DESC_HDR_MODE1_MDEU_PAD |
2483 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2485 { .type = CRYPTO_ALG_TYPE_AEAD,
2488 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2489 .cra_driver_name = "authenc-hmac-sha256-"
2491 .cra_blocksize = AES_BLOCK_SIZE,
2492 .cra_flags = CRYPTO_ALG_ASYNC,
2494 .ivsize = AES_BLOCK_SIZE,
2495 .maxauthsize = SHA256_DIGEST_SIZE,
2497 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2498 DESC_HDR_SEL0_AESU |
2499 DESC_HDR_MODE0_AESU_CBC |
2500 DESC_HDR_SEL1_MDEUA |
2501 DESC_HDR_MODE1_MDEU_INIT |
2502 DESC_HDR_MODE1_MDEU_PAD |
2503 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2505 { .type = CRYPTO_ALG_TYPE_AEAD,
2506 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2509 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2510 .cra_driver_name = "authenc-hmac-sha256-"
2511 "cbc-aes-talitos-hsna",
2512 .cra_blocksize = AES_BLOCK_SIZE,
2513 .cra_flags = CRYPTO_ALG_ASYNC,
2515 .ivsize = AES_BLOCK_SIZE,
2516 .maxauthsize = SHA256_DIGEST_SIZE,
2518 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2519 DESC_HDR_SEL0_AESU |
2520 DESC_HDR_MODE0_AESU_CBC |
2521 DESC_HDR_SEL1_MDEUA |
2522 DESC_HDR_MODE1_MDEU_INIT |
2523 DESC_HDR_MODE1_MDEU_PAD |
2524 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2526 { .type = CRYPTO_ALG_TYPE_AEAD,
2529 .cra_name = "authenc(hmac(sha256),"
2531 .cra_driver_name = "authenc-hmac-sha256-"
2533 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2534 .cra_flags = CRYPTO_ALG_ASYNC,
2536 .ivsize = DES3_EDE_BLOCK_SIZE,
2537 .maxauthsize = SHA256_DIGEST_SIZE,
2538 .setkey = aead_des3_setkey,
2540 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2542 DESC_HDR_MODE0_DEU_CBC |
2543 DESC_HDR_MODE0_DEU_3DES |
2544 DESC_HDR_SEL1_MDEUA |
2545 DESC_HDR_MODE1_MDEU_INIT |
2546 DESC_HDR_MODE1_MDEU_PAD |
2547 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2549 { .type = CRYPTO_ALG_TYPE_AEAD,
2550 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2553 .cra_name = "authenc(hmac(sha256),"
2555 .cra_driver_name = "authenc-hmac-sha256-"
2556 "cbc-3des-talitos-hsna",
2557 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2558 .cra_flags = CRYPTO_ALG_ASYNC,
2560 .ivsize = DES3_EDE_BLOCK_SIZE,
2561 .maxauthsize = SHA256_DIGEST_SIZE,
2562 .setkey = aead_des3_setkey,
2564 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2566 DESC_HDR_MODE0_DEU_CBC |
2567 DESC_HDR_MODE0_DEU_3DES |
2568 DESC_HDR_SEL1_MDEUA |
2569 DESC_HDR_MODE1_MDEU_INIT |
2570 DESC_HDR_MODE1_MDEU_PAD |
2571 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2573 { .type = CRYPTO_ALG_TYPE_AEAD,
2576 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2577 .cra_driver_name = "authenc-hmac-sha384-"
2579 .cra_blocksize = AES_BLOCK_SIZE,
2580 .cra_flags = CRYPTO_ALG_ASYNC,
2582 .ivsize = AES_BLOCK_SIZE,
2583 .maxauthsize = SHA384_DIGEST_SIZE,
2585 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2586 DESC_HDR_SEL0_AESU |
2587 DESC_HDR_MODE0_AESU_CBC |
2588 DESC_HDR_SEL1_MDEUB |
2589 DESC_HDR_MODE1_MDEU_INIT |
2590 DESC_HDR_MODE1_MDEU_PAD |
2591 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2593 { .type = CRYPTO_ALG_TYPE_AEAD,
2596 .cra_name = "authenc(hmac(sha384),"
2598 .cra_driver_name = "authenc-hmac-sha384-"
2600 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2601 .cra_flags = CRYPTO_ALG_ASYNC,
2603 .ivsize = DES3_EDE_BLOCK_SIZE,
2604 .maxauthsize = SHA384_DIGEST_SIZE,
2605 .setkey = aead_des3_setkey,
2607 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2609 DESC_HDR_MODE0_DEU_CBC |
2610 DESC_HDR_MODE0_DEU_3DES |
2611 DESC_HDR_SEL1_MDEUB |
2612 DESC_HDR_MODE1_MDEU_INIT |
2613 DESC_HDR_MODE1_MDEU_PAD |
2614 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2616 { .type = CRYPTO_ALG_TYPE_AEAD,
2619 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2620 .cra_driver_name = "authenc-hmac-sha512-"
2622 .cra_blocksize = AES_BLOCK_SIZE,
2623 .cra_flags = CRYPTO_ALG_ASYNC,
2625 .ivsize = AES_BLOCK_SIZE,
2626 .maxauthsize = SHA512_DIGEST_SIZE,
2628 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2629 DESC_HDR_SEL0_AESU |
2630 DESC_HDR_MODE0_AESU_CBC |
2631 DESC_HDR_SEL1_MDEUB |
2632 DESC_HDR_MODE1_MDEU_INIT |
2633 DESC_HDR_MODE1_MDEU_PAD |
2634 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2636 { .type = CRYPTO_ALG_TYPE_AEAD,
2639 .cra_name = "authenc(hmac(sha512),"
2641 .cra_driver_name = "authenc-hmac-sha512-"
2643 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2644 .cra_flags = CRYPTO_ALG_ASYNC,
2646 .ivsize = DES3_EDE_BLOCK_SIZE,
2647 .maxauthsize = SHA512_DIGEST_SIZE,
2648 .setkey = aead_des3_setkey,
2650 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2652 DESC_HDR_MODE0_DEU_CBC |
2653 DESC_HDR_MODE0_DEU_3DES |
2654 DESC_HDR_SEL1_MDEUB |
2655 DESC_HDR_MODE1_MDEU_INIT |
2656 DESC_HDR_MODE1_MDEU_PAD |
2657 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2659 { .type = CRYPTO_ALG_TYPE_AEAD,
2662 .cra_name = "authenc(hmac(md5),cbc(aes))",
2663 .cra_driver_name = "authenc-hmac-md5-"
2665 .cra_blocksize = AES_BLOCK_SIZE,
2666 .cra_flags = CRYPTO_ALG_ASYNC,
2668 .ivsize = AES_BLOCK_SIZE,
2669 .maxauthsize = MD5_DIGEST_SIZE,
2671 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2672 DESC_HDR_SEL0_AESU |
2673 DESC_HDR_MODE0_AESU_CBC |
2674 DESC_HDR_SEL1_MDEUA |
2675 DESC_HDR_MODE1_MDEU_INIT |
2676 DESC_HDR_MODE1_MDEU_PAD |
2677 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2679 { .type = CRYPTO_ALG_TYPE_AEAD,
2680 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2683 .cra_name = "authenc(hmac(md5),cbc(aes))",
2684 .cra_driver_name = "authenc-hmac-md5-"
2685 "cbc-aes-talitos-hsna",
2686 .cra_blocksize = AES_BLOCK_SIZE,
2687 .cra_flags = CRYPTO_ALG_ASYNC,
2689 .ivsize = AES_BLOCK_SIZE,
2690 .maxauthsize = MD5_DIGEST_SIZE,
2692 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2693 DESC_HDR_SEL0_AESU |
2694 DESC_HDR_MODE0_AESU_CBC |
2695 DESC_HDR_SEL1_MDEUA |
2696 DESC_HDR_MODE1_MDEU_INIT |
2697 DESC_HDR_MODE1_MDEU_PAD |
2698 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2700 { .type = CRYPTO_ALG_TYPE_AEAD,
2703 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2704 .cra_driver_name = "authenc-hmac-md5-"
2706 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2707 .cra_flags = CRYPTO_ALG_ASYNC,
2709 .ivsize = DES3_EDE_BLOCK_SIZE,
2710 .maxauthsize = MD5_DIGEST_SIZE,
2711 .setkey = aead_des3_setkey,
2713 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2715 DESC_HDR_MODE0_DEU_CBC |
2716 DESC_HDR_MODE0_DEU_3DES |
2717 DESC_HDR_SEL1_MDEUA |
2718 DESC_HDR_MODE1_MDEU_INIT |
2719 DESC_HDR_MODE1_MDEU_PAD |
2720 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2722 { .type = CRYPTO_ALG_TYPE_AEAD,
2723 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2726 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2727 .cra_driver_name = "authenc-hmac-md5-"
2728 "cbc-3des-talitos-hsna",
2729 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2730 .cra_flags = CRYPTO_ALG_ASYNC,
2732 .ivsize = DES3_EDE_BLOCK_SIZE,
2733 .maxauthsize = MD5_DIGEST_SIZE,
2734 .setkey = aead_des3_setkey,
2736 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2738 DESC_HDR_MODE0_DEU_CBC |
2739 DESC_HDR_MODE0_DEU_3DES |
2740 DESC_HDR_SEL1_MDEUA |
2741 DESC_HDR_MODE1_MDEU_INIT |
2742 DESC_HDR_MODE1_MDEU_PAD |
2743 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2745 /* ABLKCIPHER algorithms. */
2746 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2748 .cra_name = "ecb(aes)",
2749 .cra_driver_name = "ecb-aes-talitos",
2750 .cra_blocksize = AES_BLOCK_SIZE,
2751 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2754 .min_keysize = AES_MIN_KEY_SIZE,
2755 .max_keysize = AES_MAX_KEY_SIZE,
2756 .setkey = ablkcipher_aes_setkey,
2759 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2762 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2764 .cra_name = "cbc(aes)",
2765 .cra_driver_name = "cbc-aes-talitos",
2766 .cra_blocksize = AES_BLOCK_SIZE,
2767 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2770 .min_keysize = AES_MIN_KEY_SIZE,
2771 .max_keysize = AES_MAX_KEY_SIZE,
2772 .ivsize = AES_BLOCK_SIZE,
2773 .setkey = ablkcipher_aes_setkey,
2776 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2777 DESC_HDR_SEL0_AESU |
2778 DESC_HDR_MODE0_AESU_CBC,
2780 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2782 .cra_name = "ctr(aes)",
2783 .cra_driver_name = "ctr-aes-talitos",
2785 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2788 .min_keysize = AES_MIN_KEY_SIZE,
2789 .max_keysize = AES_MAX_KEY_SIZE,
2790 .ivsize = AES_BLOCK_SIZE,
2791 .setkey = ablkcipher_aes_setkey,
2794 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2795 DESC_HDR_SEL0_AESU |
2796 DESC_HDR_MODE0_AESU_CTR,
2798 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2800 .cra_name = "ecb(des)",
2801 .cra_driver_name = "ecb-des-talitos",
2802 .cra_blocksize = DES_BLOCK_SIZE,
2803 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2806 .min_keysize = DES_KEY_SIZE,
2807 .max_keysize = DES_KEY_SIZE,
2808 .setkey = ablkcipher_des_setkey,
2811 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2814 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2816 .cra_name = "cbc(des)",
2817 .cra_driver_name = "cbc-des-talitos",
2818 .cra_blocksize = DES_BLOCK_SIZE,
2819 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2822 .min_keysize = DES_KEY_SIZE,
2823 .max_keysize = DES_KEY_SIZE,
2824 .ivsize = DES_BLOCK_SIZE,
2825 .setkey = ablkcipher_des_setkey,
2828 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2830 DESC_HDR_MODE0_DEU_CBC,
2832 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2834 .cra_name = "ecb(des3_ede)",
2835 .cra_driver_name = "ecb-3des-talitos",
2836 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2837 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2840 .min_keysize = DES3_EDE_KEY_SIZE,
2841 .max_keysize = DES3_EDE_KEY_SIZE,
2842 .setkey = ablkcipher_des3_setkey,
2845 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2847 DESC_HDR_MODE0_DEU_3DES,
2849 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2851 .cra_name = "cbc(des3_ede)",
2852 .cra_driver_name = "cbc-3des-talitos",
2853 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2854 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2857 .min_keysize = DES3_EDE_KEY_SIZE,
2858 .max_keysize = DES3_EDE_KEY_SIZE,
2859 .ivsize = DES3_EDE_BLOCK_SIZE,
2860 .setkey = ablkcipher_des3_setkey,
2863 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2865 DESC_HDR_MODE0_DEU_CBC |
2866 DESC_HDR_MODE0_DEU_3DES,
2868 /* AHASH algorithms. */
2869 { .type = CRYPTO_ALG_TYPE_AHASH,
2871 .halg.digestsize = MD5_DIGEST_SIZE,
2872 .halg.statesize = sizeof(struct talitos_export_state),
2875 .cra_driver_name = "md5-talitos",
2876 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2877 .cra_flags = CRYPTO_ALG_ASYNC,
2880 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2881 DESC_HDR_SEL0_MDEUA |
2882 DESC_HDR_MODE0_MDEU_MD5,
2884 { .type = CRYPTO_ALG_TYPE_AHASH,
2886 .halg.digestsize = SHA1_DIGEST_SIZE,
2887 .halg.statesize = sizeof(struct talitos_export_state),
2890 .cra_driver_name = "sha1-talitos",
2891 .cra_blocksize = SHA1_BLOCK_SIZE,
2892 .cra_flags = CRYPTO_ALG_ASYNC,
2895 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2896 DESC_HDR_SEL0_MDEUA |
2897 DESC_HDR_MODE0_MDEU_SHA1,
2899 { .type = CRYPTO_ALG_TYPE_AHASH,
2901 .halg.digestsize = SHA224_DIGEST_SIZE,
2902 .halg.statesize = sizeof(struct talitos_export_state),
2904 .cra_name = "sha224",
2905 .cra_driver_name = "sha224-talitos",
2906 .cra_blocksize = SHA224_BLOCK_SIZE,
2907 .cra_flags = CRYPTO_ALG_ASYNC,
2910 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2911 DESC_HDR_SEL0_MDEUA |
2912 DESC_HDR_MODE0_MDEU_SHA224,
2914 { .type = CRYPTO_ALG_TYPE_AHASH,
2916 .halg.digestsize = SHA256_DIGEST_SIZE,
2917 .halg.statesize = sizeof(struct talitos_export_state),
2919 .cra_name = "sha256",
2920 .cra_driver_name = "sha256-talitos",
2921 .cra_blocksize = SHA256_BLOCK_SIZE,
2922 .cra_flags = CRYPTO_ALG_ASYNC,
2925 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2926 DESC_HDR_SEL0_MDEUA |
2927 DESC_HDR_MODE0_MDEU_SHA256,
2929 { .type = CRYPTO_ALG_TYPE_AHASH,
2931 .halg.digestsize = SHA384_DIGEST_SIZE,
2932 .halg.statesize = sizeof(struct talitos_export_state),
2934 .cra_name = "sha384",
2935 .cra_driver_name = "sha384-talitos",
2936 .cra_blocksize = SHA384_BLOCK_SIZE,
2937 .cra_flags = CRYPTO_ALG_ASYNC,
2940 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2941 DESC_HDR_SEL0_MDEUB |
2942 DESC_HDR_MODE0_MDEUB_SHA384,
2944 { .type = CRYPTO_ALG_TYPE_AHASH,
2946 .halg.digestsize = SHA512_DIGEST_SIZE,
2947 .halg.statesize = sizeof(struct talitos_export_state),
2949 .cra_name = "sha512",
2950 .cra_driver_name = "sha512-talitos",
2951 .cra_blocksize = SHA512_BLOCK_SIZE,
2952 .cra_flags = CRYPTO_ALG_ASYNC,
2955 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2956 DESC_HDR_SEL0_MDEUB |
2957 DESC_HDR_MODE0_MDEUB_SHA512,
2959 { .type = CRYPTO_ALG_TYPE_AHASH,
2961 .halg.digestsize = MD5_DIGEST_SIZE,
2962 .halg.statesize = sizeof(struct talitos_export_state),
2964 .cra_name = "hmac(md5)",
2965 .cra_driver_name = "hmac-md5-talitos",
2966 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2967 .cra_flags = CRYPTO_ALG_ASYNC,
2970 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2971 DESC_HDR_SEL0_MDEUA |
2972 DESC_HDR_MODE0_MDEU_MD5,
2974 { .type = CRYPTO_ALG_TYPE_AHASH,
2976 .halg.digestsize = SHA1_DIGEST_SIZE,
2977 .halg.statesize = sizeof(struct talitos_export_state),
2979 .cra_name = "hmac(sha1)",
2980 .cra_driver_name = "hmac-sha1-talitos",
2981 .cra_blocksize = SHA1_BLOCK_SIZE,
2982 .cra_flags = CRYPTO_ALG_ASYNC,
2985 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2986 DESC_HDR_SEL0_MDEUA |
2987 DESC_HDR_MODE0_MDEU_SHA1,
2989 { .type = CRYPTO_ALG_TYPE_AHASH,
2991 .halg.digestsize = SHA224_DIGEST_SIZE,
2992 .halg.statesize = sizeof(struct talitos_export_state),
2994 .cra_name = "hmac(sha224)",
2995 .cra_driver_name = "hmac-sha224-talitos",
2996 .cra_blocksize = SHA224_BLOCK_SIZE,
2997 .cra_flags = CRYPTO_ALG_ASYNC,
3000 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3001 DESC_HDR_SEL0_MDEUA |
3002 DESC_HDR_MODE0_MDEU_SHA224,
3004 { .type = CRYPTO_ALG_TYPE_AHASH,
3006 .halg.digestsize = SHA256_DIGEST_SIZE,
3007 .halg.statesize = sizeof(struct talitos_export_state),
3009 .cra_name = "hmac(sha256)",
3010 .cra_driver_name = "hmac-sha256-talitos",
3011 .cra_blocksize = SHA256_BLOCK_SIZE,
3012 .cra_flags = CRYPTO_ALG_ASYNC,
3015 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3016 DESC_HDR_SEL0_MDEUA |
3017 DESC_HDR_MODE0_MDEU_SHA256,
3019 { .type = CRYPTO_ALG_TYPE_AHASH,
3021 .halg.digestsize = SHA384_DIGEST_SIZE,
3022 .halg.statesize = sizeof(struct talitos_export_state),
3024 .cra_name = "hmac(sha384)",
3025 .cra_driver_name = "hmac-sha384-talitos",
3026 .cra_blocksize = SHA384_BLOCK_SIZE,
3027 .cra_flags = CRYPTO_ALG_ASYNC,
3030 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3031 DESC_HDR_SEL0_MDEUB |
3032 DESC_HDR_MODE0_MDEUB_SHA384,
3034 { .type = CRYPTO_ALG_TYPE_AHASH,
3036 .halg.digestsize = SHA512_DIGEST_SIZE,
3037 .halg.statesize = sizeof(struct talitos_export_state),
3039 .cra_name = "hmac(sha512)",
3040 .cra_driver_name = "hmac-sha512-talitos",
3041 .cra_blocksize = SHA512_BLOCK_SIZE,
3042 .cra_flags = CRYPTO_ALG_ASYNC,
3045 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3046 DESC_HDR_SEL0_MDEUB |
3047 DESC_HDR_MODE0_MDEUB_SHA512,
/*
 * talitos_crypto_alg - per-algorithm registration record.
 * Instances live on talitos_private->alg_list (see talitos_probe() /
 * talitos_remove()); 'algt' is a private copy of the driver_algs[]
 * template this algorithm was instantiated from.
 *
 * NOTE(review): talitos_init_common() reads talitos_alg->dev, so this
 * struct appears to have a "struct device *dev" member elided from this
 * view -- confirm against the full source.
 */
3051 struct talitos_crypto_alg {
3052 struct list_head entry;
3054 struct talitos_alg_template algt;
/*
 * Common tfm-context initialisation shared by every transform type:
 * binds the context to the owning SEC device, assigns it a channel and
 * caches the descriptor header template with done-notification enabled.
 */
3057 static int talitos_init_common(struct talitos_ctx *ctx,
3058 struct talitos_crypto_alg *talitos_alg)
3060 struct talitos_private *priv;
3062 /* update context with ptr to dev */
3063 ctx->dev = talitos_alg->dev;
3065 /* assign SEC channel to tfm in round-robin fashion */
/*
 * The mask works as a modulo only because talitos_probe() rejects
 * device trees where num_channels is not a power of two.
 */
3066 priv = dev_get_drvdata(ctx->dev);
3067 ctx->ch = atomic_inc_return(&priv->last_chan) &
3068 (priv->num_channels - 1);
3070 /* copy descriptor header template value */
3071 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3073 /* select done notification */
3074 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
/*
 * Generic crypto_tfm init hook: recover the talitos_crypto_alg wrapper
 * from the crypto_alg pointer and delegate to talitos_init_common().
 * AHASH algorithms embed their crypto_alg deeper (inside ahash_alg), so
 * they need a different container_of() than the plain-crypto_alg case.
 */
3079 static int talitos_cra_init(struct crypto_tfm *tfm)
3081 struct crypto_alg *alg = tfm->__crt_alg;
3082 struct talitos_crypto_alg *talitos_alg;
3083 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3085 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3086 talitos_alg = container_of(__crypto_ahash_alg(alg),
3087 struct talitos_crypto_alg,
3090 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3093 return talitos_init_common(ctx, talitos_alg);
/*
 * AEAD-specific init hook: locate the wrapping talitos_crypto_alg via
 * the aead_alg and run the common context setup.
 */
3096 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3098 struct aead_alg *alg = crypto_aead_alg(tfm);
3099 struct talitos_crypto_alg *talitos_alg;
3100 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3102 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3105 return talitos_init_common(ctx, talitos_alg);
/*
 * AHASH-specific init hook: run the generic init, then reserve room in
 * each request for the driver's per-request hash state.
 * NOTE(review): 'ctx' is declared but not used on any visible line --
 * an initialisation of it (e.g. of the key length) may be elided from
 * this view; confirm against the full source.
 */
3108 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3110 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3112 talitos_cra_init(tfm);
3115 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3116 sizeof(struct talitos_ahash_req_ctx));
/*
 * Transform teardown: release the DMA mapping of the key cached in the
 * tfm context.
 * NOTE(review): a guard such as "if (ctx->dma_key)" around the unmap
 * appears to be elided from this view -- confirm before assuming the
 * unmap is unconditional.
 */
3121 static void talitos_cra_exit(struct crypto_tfm *tfm)
3123 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3124 struct device *dev = ctx->dev;
3127 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3131 * given the alg's descriptor header template, determine whether descriptor
3132 * type and primary/secondary execution units required match the hw
3133 * capabilities description provided in the device tree node.
/*
 * Returns non-zero when the desc type bit and primary EU bit are both
 * present in the device-tree capability masks; when a secondary EU is
 * encoded, it must be present as well.
 */
3135 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3137 struct talitos_private *priv = dev_get_drvdata(dev);
3140 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3141 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3143 if (SECONDARY_EU(desc_hdr_template))
3144 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3145 & priv->exec_units);
/*
 * Device teardown, also used as the error-unwind path at the end of
 * talitos_probe(): unregister every algorithm on alg_list, drop the
 * RNG if it was registered, release both IRQ lines and stop the
 * completion tasklets.
 */
3150 static int talitos_remove(struct platform_device *ofdev)
3152 struct device *dev = &ofdev->dev;
3153 struct talitos_private *priv = dev_get_drvdata(dev);
3154 struct talitos_crypto_alg *t_alg, *n;
3157 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3158 switch (t_alg->algt.type) {
/*
 * NOTE(review): the per-case "break" statements (and the ablkcipher
 * unregister call) appear to be elided from this view -- in the full
 * source each case unregisters its own type and breaks.
 */
3159 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3161 case CRYPTO_ALG_TYPE_AEAD:
3162 crypto_unregister_aead(&t_alg->algt.alg.aead);
3163 case CRYPTO_ALG_TYPE_AHASH:
3164 crypto_unregister_ahash(&t_alg->algt.alg.hash);
3167 list_del(&t_alg->entry);
3170 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3171 talitos_unregister_rng(dev);
/* both IRQ slots: free the handler, then the mapping made at probe */
3173 for (i = 0; i < 2; i++)
3175 free_irq(priv->irq[i], dev);
3176 irq_dispose_mapping(priv->irq[i]);
3179 tasklet_kill(&priv->done_task[0]);
3181 tasklet_kill(&priv->done_task[1]);
/*
 * Build a talitos_crypto_alg from a driver_algs[] template: copy the
 * template, wire in the driver's init/exit and operation callbacks per
 * algorithm type, veto combinations the hardware revision cannot do,
 * and fill in the common crypto_alg fields.  Returns ERR_PTR on
 * failure (-ENOTSUPP for unsupported-by-this-SEC algorithms).
 */
3186 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3187 struct talitos_alg_template
3190 struct talitos_private *priv = dev_get_drvdata(dev);
3191 struct talitos_crypto_alg *t_alg;
3192 struct crypto_alg *alg;
3194 t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3197 return ERR_PTR(-ENOMEM);
3199 t_alg->algt = *template;
3201 switch (t_alg->algt.type) {
3202 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3203 alg = &t_alg->algt.alg.crypto;
3204 alg->cra_init = talitos_cra_init;
3205 alg->cra_exit = talitos_cra_exit;
3206 alg->cra_type = &crypto_ablkcipher_type;
/* keep a template-supplied setkey, fall back to the generic one */
3207 alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
3209 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3210 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3212 case CRYPTO_ALG_TYPE_AEAD:
3213 alg = &t_alg->algt.alg.aead.base;
3214 alg->cra_exit = talitos_cra_exit;
3215 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3216 t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3218 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3219 t_alg->algt.alg.aead.decrypt = aead_decrypt;
/* sha224 AEADs need HW init support; reject on older SECs */
3220 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3221 !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3222 devm_kfree(dev, t_alg);
3223 return ERR_PTR(-ENOTSUPP);
3226 case CRYPTO_ALG_TYPE_AHASH:
3227 alg = &t_alg->algt.alg.hash.halg.base;
3228 alg->cra_init = talitos_cra_init_ahash;
3229 alg->cra_exit = talitos_cra_exit;
3230 t_alg->algt.alg.hash.init = ahash_init;
3231 t_alg->algt.alg.hash.update = ahash_update;
3232 t_alg->algt.alg.hash.final = ahash_final;
3233 t_alg->algt.alg.hash.finup = ahash_finup;
3234 t_alg->algt.alg.hash.digest = ahash_digest;
3235 if (!strncmp(alg->cra_name, "hmac", 4))
3236 t_alg->algt.alg.hash.setkey = ahash_setkey;
3237 t_alg->algt.alg.hash.import = ahash_import;
3238 t_alg->algt.alg.hash.export = ahash_export;
/* HMAC hashes require the HMAC_OK hardware feature */
3240 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3241 !strncmp(alg->cra_name, "hmac", 4)) {
3242 devm_kfree(dev, t_alg);
3243 return ERR_PTR(-ENOTSUPP);
/*
 * Without HW sha224 init, emulate sha224 on the sha256 unit with a
 * software-initialised state.
 */
3245 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3246 (!strcmp(alg->cra_name, "sha224") ||
3247 !strcmp(alg->cra_name, "hmac(sha224)"))) {
3248 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3249 t_alg->algt.desc_hdr_template =
3250 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3251 DESC_HDR_SEL0_MDEUA |
3252 DESC_HDR_MODE0_MDEU_SHA256;
3256 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3257 devm_kfree(dev, t_alg);
3258 return ERR_PTR(-EINVAL);
3261 alg->cra_module = THIS_MODULE;
3262 if (t_alg->algt.priority)
3263 alg->cra_priority = t_alg->algt.priority;
3265 alg->cra_priority = TALITOS_CRA_PRIORITY;
/* SEC1 needs 4-byte alignment of buffers; later SECs do not */
3266 if (has_ftr_sec1(priv))
3267 alg->cra_alignmask = 3;
3269 alg->cra_alignmask = 0;
3270 alg->cra_ctxsize = sizeof(struct talitos_ctx);
3271 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
/*
 * Map and request the device's interrupt line(s) from the device tree.
 * SEC2-style parts may expose two IRQs (channels 0/2 and 1/3); with a
 * single IRQ one combined 4-channel handler is used instead.
 * NOTE(review): the is_sec1/is-sec2 branching that selects between the
 * talitos1_* and talitos2_* handlers appears to be elided from this
 * view -- confirm the exact control flow against the full source.
 */
3278 static int talitos_probe_irq(struct platform_device *ofdev)
3280 struct device *dev = &ofdev->dev;
3281 struct device_node *np = ofdev->dev.of_node;
3282 struct talitos_private *priv = dev_get_drvdata(dev);
3284 bool is_sec1 = has_ftr_sec1(priv);
3286 priv->irq[0] = irq_of_parse_and_map(np, 0);
3287 if (!priv->irq[0]) {
3288 dev_err(dev, "failed to map irq\n");
3292 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3293 dev_driver_string(dev), dev);
3297 priv->irq[1] = irq_of_parse_and_map(np, 1);
3299 /* get the primary irq line */
3300 if (!priv->irq[1]) {
3301 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3302 dev_driver_string(dev), dev);
3306 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3307 dev_driver_string(dev), dev);
3311 /* get the secondary irq line */
3312 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3313 dev_driver_string(dev), dev);
3315 dev_err(dev, "failed to request secondary irq\n");
3316 irq_dispose_mapping(priv->irq[1]);
3324 dev_err(dev, "failed to request primary irq\n");
3325 irq_dispose_mapping(priv->irq[0]);
/*
 * Platform-driver probe: allocate and populate talitos_private, map the
 * register window, read capabilities from the device tree, set up IRQs,
 * tasklets and per-channel FIFOs, reset the hardware, then register the
 * RNG (if present) and every algorithm hw_supports() approves of.
 * Error paths unwind through talitos_remove().
 */
3332 static int talitos_probe(struct platform_device *ofdev)
3334 struct device *dev = &ofdev->dev;
3335 struct device_node *np = ofdev->dev.of_node;
3336 struct talitos_private *priv;
3339 struct resource *res;
3341 priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3345 INIT_LIST_HEAD(&priv->alg_list);
3347 dev_set_drvdata(dev, priv);
3349 priv->ofdev = ofdev;
3351 spin_lock_init(&priv->reg_lock);
3353 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3356 priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3358 dev_err(dev, "failed to of_iomap\n");
3363 /* get SEC version capabilities from device tree */
3364 of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3365 of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3366 of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3367 of_property_read_u32(np, "fsl,descriptor-types-mask",
/* power-of-2 num_channels is relied on by talitos_init_common() */
3370 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3371 !priv->exec_units || !priv->desc_types) {
3372 dev_err(dev, "invalid property data in device tree node\n")
3377 if (of_device_is_compatible(np, "fsl,sec3.0"))
3378 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3380 if (of_device_is_compatible(np, "fsl,sec2.1"))
3381 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3382 TALITOS_FTR_SHA224_HWINIT |
3383 TALITOS_FTR_HMAC_OK;
3385 if (of_device_is_compatible(np, "fsl,sec1.0"))
3386 priv->features |= TALITOS_FTR_SEC1;
/* per-revision execution-unit register offsets and channel stride */
3388 if (of_device_is_compatible(np, "fsl,sec1.2")) {
3389 priv->reg_deu = priv->reg + TALITOS12_DEU;
3390 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3391 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3392 stride = TALITOS1_CH_STRIDE;
3393 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3394 priv->reg_deu = priv->reg + TALITOS10_DEU;
3395 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3396 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3397 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3398 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3399 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3400 stride = TALITOS1_CH_STRIDE;
3402 priv->reg_deu = priv->reg + TALITOS2_DEU;
3403 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3404 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3405 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3406 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3407 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3408 priv->reg_keu = priv->reg + TALITOS2_KEU;
3409 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3410 stride = TALITOS2_CH_STRIDE;
3413 err = talitos_probe_irq(ofdev);
/* pick the completion tasklet(s) matching SEC revision and IRQ count */
3417 if (of_device_is_compatible(np, "fsl,sec1.0")) {
3418 if (priv->num_channels == 1)
3419 tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3420 (unsigned long)dev);
3422 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3423 (unsigned long)dev);
3426 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3427 (unsigned long)dev);
3428 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3429 (unsigned long)dev);
3430 } else if (priv->num_channels == 1) {
3431 tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3432 (unsigned long)dev);
3434 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3435 (unsigned long)dev);
3439 priv->chan = devm_kcalloc(dev,
3441 sizeof(struct talitos_channel),
3444 dev_err(dev, "failed to allocate channel management space\n");
3449 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3451 for (i = 0; i < priv->num_channels; i++) {
3452 priv->chan[i].reg = priv->reg + stride * (i + 1);
3453 if (!priv->irq[1] || !(i & 1))
3454 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3456 spin_lock_init(&priv->chan[i].head_lock);
3457 spin_lock_init(&priv->chan[i].tail_lock);
3459 priv->chan[i].fifo = devm_kcalloc(dev,
3461 sizeof(struct talitos_request),
3463 if (!priv->chan[i].fifo) {
3464 dev_err(dev, "failed to allocate request fifo %d\n", i);
/* start negative so atomic_inc hits 0 when the fifo is full */
3469 atomic_set(&priv->chan[i].submit_count,
3470 -(priv->chfifo_len - 1));
3473 dma_set_mask(dev, DMA_BIT_MASK(36));
3475 /* reset and initialize the h/w */
3476 err = init_device(dev);
3478 dev_err(dev, "failed to initialize device\n");
3482 /* register the RNG, if available */
3483 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3484 err = talitos_register_rng(dev);
3486 dev_err(dev, "failed to register hwrng: %d\n", err);
3489 dev_info(dev, "hwrng\n");
3492 /* register crypto algorithms the device supports */
3493 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3494 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3495 struct talitos_crypto_alg *t_alg;
3496 struct crypto_alg *alg = NULL;
3498 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3499 if (IS_ERR(t_alg)) {
3500 err = PTR_ERR(t_alg);
/* -ENOTSUPP just means this SEC lacks the feature; skip quietly */
3501 if (err == -ENOTSUPP)
3506 switch (t_alg->algt.type) {
3507 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3508 err = crypto_register_alg(
3509 &t_alg->algt.alg.crypto);
3510 alg = &t_alg->algt.alg.crypto;
3513 case CRYPTO_ALG_TYPE_AEAD:
3514 err = crypto_register_aead(
3515 &t_alg->algt.alg.aead);
3516 alg = &t_alg->algt.alg.aead.base;
3519 case CRYPTO_ALG_TYPE_AHASH:
3520 err = crypto_register_ahash(
3521 &t_alg->algt.alg.hash);
3522 alg = &t_alg->algt.alg.hash.halg.base;
3526 dev_err(dev, "%s alg registration failed\n",
3527 alg->cra_driver_name);
3528 devm_kfree(dev, t_alg);
3530 list_add_tail(&t_alg->entry, &priv->alg_list);
3533 if (!list_empty(&priv->alg_list))
3534 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3535 (char *)of_get_property(np, "compatible", NULL));
/* error unwind: talitos_remove() releases everything set up above */
3540 talitos_remove(ofdev);
/*
 * Device-tree match table; SEC1 and SEC2 entries are compiled in only
 * when the corresponding Kconfig option is enabled.
 */
3545 static const struct of_device_id talitos_match[] = {
3546 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3548 .compatible = "fsl,sec1.0",
3551 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3553 .compatible = "fsl,sec2.0",
3558 MODULE_DEVICE_TABLE(of, talitos_match);
/* Platform-driver glue and module metadata. */
3560 static struct platform_driver talitos_driver = {
3563 .of_match_table = talitos_match,
3565 .probe = talitos_probe,
3566 .remove = talitos_remove,
3569 module_platform_driver(talitos_driver);
3571 MODULE_LICENSE("GPL");
3572 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3573 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");