[linux-2.6-block.git] drivers/crypto/talitos.c (commit 3918b3dbe6846960c07529b2ca9d33ef888617ff)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * talitos - Freescale Integrated Security Engine (SEC) device driver
4  *
5  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
6  *
7  * Scatterlist Crypto API glue code copied from files with the following:
8  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
9  *
10  * Crypto algorithm registration code copied from hifn driver:
11  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
12  * All rights reserved.
13  */
14
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/mod_devicetable.h>
18 #include <linux/device.h>
19 #include <linux/interrupt.h>
20 #include <linux/crypto.h>
21 #include <linux/hw_random.h>
22 #include <linux/of_address.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_platform.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/io.h>
27 #include <linux/spinlock.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/slab.h>
30
31 #include <crypto/algapi.h>
32 #include <crypto/aes.h>
33 #include <crypto/des.h>
34 #include <crypto/sha.h>
35 #include <crypto/md5.h>
36 #include <crypto/internal/aead.h>
37 #include <crypto/authenc.h>
38 #include <crypto/skcipher.h>
39 #include <crypto/hash.h>
40 #include <crypto/internal/hash.h>
41 #include <crypto/scatterwalk.h>
42
43 #include "talitos.h"
44
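/*
 * helpers for filling in h/w descriptor pointers: SEC1 keeps the length in
 * len1 and has no extended address byte, while SEC2+ uses len/eptr (upper
 * 32 bits of the bus address) plus the j_extent field
 */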
45 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
46                            unsigned int len, bool is_sec1)
47 {
48         ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
49         if (is_sec1) {
50                 ptr->len1 = cpu_to_be16(len);
51         } else {
52                 ptr->len = cpu_to_be16(len);
53                 ptr->eptr = upper_32_bits(dma_addr);
54         }
55 }
56
57 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
58                              struct talitos_ptr *src_ptr, bool is_sec1)
59 {
60         dst_ptr->ptr = src_ptr->ptr;
61         if (is_sec1) {
62                 dst_ptr->len1 = src_ptr->len1;
63         } else {
64                 dst_ptr->len = src_ptr->len;
65                 dst_ptr->eptr = src_ptr->eptr;
66         }
67 }
68
69 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
70                                            bool is_sec1)
71 {
72         if (is_sec1)
73                 return be16_to_cpu(ptr->len1);
74         else
75                 return be16_to_cpu(ptr->len);
76 }
77
78 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
79                                    bool is_sec1)
80 {
81         if (!is_sec1)
82                 ptr->j_extent = val;
83 }
84
85 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
86 {
87         if (!is_sec1)
88                 ptr->j_extent |= val;
89 }
90
91 /*
92  * map virtual single (contiguous) pointer to h/w descriptor pointer
93  */
94 static void __map_single_talitos_ptr(struct device *dev,
95                                      struct talitos_ptr *ptr,
96                                      unsigned int len, void *data,
97                                      enum dma_data_direction dir,
98                                      unsigned long attrs)
99 {
100         dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
101         struct talitos_private *priv = dev_get_drvdata(dev);
102         bool is_sec1 = has_ftr_sec1(priv);
103
104         to_talitos_ptr(ptr, dma_addr, len, is_sec1);
105 }
106
107 static void map_single_talitos_ptr(struct device *dev,
108                                    struct talitos_ptr *ptr,
109                                    unsigned int len, void *data,
110                                    enum dma_data_direction dir)
111 {
112         __map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
113 }
114
115 static void map_single_talitos_ptr_nosync(struct device *dev,
116                                           struct talitos_ptr *ptr,
117                                           unsigned int len, void *data,
118                                           enum dma_data_direction dir)
119 {
120         __map_single_talitos_ptr(dev, ptr, len, data, dir,
121                                  DMA_ATTR_SKIP_CPU_SYNC);
122 }
123
124 /*
125  * unmap bus single (contiguous) h/w descriptor pointer
126  */
127 static void unmap_single_talitos_ptr(struct device *dev,
128                                      struct talitos_ptr *ptr,
129                                      enum dma_data_direction dir)
130 {
131         struct talitos_private *priv = dev_get_drvdata(dev);
132         bool is_sec1 = has_ftr_sec1(priv);
133
134         dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
135                          from_talitos_ptr_len(ptr, is_sec1), dir);
136 }
137
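/*
 * reset one channel and reprogram its configuration (36-bit addressing,
 * done writeback/IRQ enable, and ICCR writeback where supported)
 */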
138 static int reset_channel(struct device *dev, int ch)
139 {
140         struct talitos_private *priv = dev_get_drvdata(dev);
141         unsigned int timeout = TALITOS_TIMEOUT;
142         bool is_sec1 = has_ftr_sec1(priv);
143
144         if (is_sec1) {
145                 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
146                           TALITOS1_CCCR_LO_RESET);
147
148                 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
149                         TALITOS1_CCCR_LO_RESET) && --timeout)
150                         cpu_relax();
151         } else {
152                 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
153                           TALITOS2_CCCR_RESET);
154
155                 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
156                         TALITOS2_CCCR_RESET) && --timeout)
157                         cpu_relax();
158         }
159
160         if (timeout == 0) {
161                 dev_err(dev, "failed to reset channel %d\n", ch);
162                 return -EIO;
163         }
164
165         /* set 36-bit addressing, done writeback enable and done IRQ enable */
166         setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
167                   TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
168         /* enable chaining descriptors */
169         if (is_sec1)
170                 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
171                           TALITOS_CCCR_LO_NE);
172
173         /* and ICCR writeback, if available */
174         if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
175                 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
176                           TALITOS_CCCR_LO_IWSE);
177
178         return 0;
179 }
180
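/*
 * software-reset the whole SEC block via MCR[SWR]; when a second IRQ line
 * is in use, also set the RCA1/RCA3 channel-reassignment bits for it
 */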
181 static int reset_device(struct device *dev)
182 {
183         struct talitos_private *priv = dev_get_drvdata(dev);
184         unsigned int timeout = TALITOS_TIMEOUT;
185         bool is_sec1 = has_ftr_sec1(priv);
186         u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
187
188         setbits32(priv->reg + TALITOS_MCR, mcr);
189
190         while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
191                && --timeout)
192                 cpu_relax();
193
194         if (priv->irq[1]) {
195                 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
196                 setbits32(priv->reg + TALITOS_MCR, mcr);
197         }
198
199         if (timeout == 0) {
200                 dev_err(dev, "failed to reset device\n");
201                 return -EIO;
202         }
203
204         return 0;
205 }
206
207 /*
208  * Reset and initialize the device
209  */
210 static int init_device(struct device *dev)
211 {
212         struct talitos_private *priv = dev_get_drvdata(dev);
213         int ch, err;
214         bool is_sec1 = has_ftr_sec1(priv);
215
216         /*
217          * Master reset
218          * errata documentation: warning: certain SEC interrupts
219          * are not fully cleared by writing the MCR:SWR bit,
220          * set bit twice to completely reset
221          */
222         err = reset_device(dev);
223         if (err)
224                 return err;
225
226         err = reset_device(dev);
227         if (err)
228                 return err;
229
230         /* reset channels */
231         for (ch = 0; ch < priv->num_channels; ch++) {
232                 err = reset_channel(dev, ch);
233                 if (err)
234                         return err;
235         }
236
237         /* enable channel done and error interrupts */
238         if (is_sec1) {
239                 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
240                 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
241                 /* disable parity error check in DEU (erroneously triggered by test vectors?) */
242                 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
243         } else {
244                 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
245                 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
246         }
247
248         /* disable integrity check error interrupts (use writeback instead) */
249         if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
250                 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
251                           TALITOS_MDEUICR_LO_ICE);
252
253         return 0;
254 }
255
256 /**
257  * talitos_submit - submits a descriptor to the device for processing
258  * @dev:        the SEC device to be used
259  * @ch:         the SEC device channel to be used
260  * @desc:       the descriptor to be processed by the device
261  * @callback:   whom to call when processing is complete
262  * @context:    a handle for use by caller (optional)
263  *
264  * desc must contain valid dma-mapped (bus physical) address pointers.
265  * callback must check err and feedback in descriptor header
266  * for device processing status.
267  */
268 static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
269                           void (*callback)(struct device *dev,
270                                            struct talitos_desc *desc,
271                                            void *context, int error),
272                           void *context)
273 {
274         struct talitos_private *priv = dev_get_drvdata(dev);
275         struct talitos_request *request;
276         unsigned long flags;
277         int head;
278         bool is_sec1 = has_ftr_sec1(priv);
279
280         spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
281
282         if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
283                 /* h/w fifo is full */
284                 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
285                 return -EAGAIN;
286         }
287
288         head = priv->chan[ch].head;
289         request = &priv->chan[ch].fifo[head];
290
291         /* map descriptor and save caller data */
292         if (is_sec1) {
293                 desc->hdr1 = desc->hdr;
294                 request->dma_desc = dma_map_single(dev, &desc->hdr1,
295                                                    TALITOS_DESC_SIZE,
296                                                    DMA_BIDIRECTIONAL);
297         } else {
298                 request->dma_desc = dma_map_single(dev, desc,
299                                                    TALITOS_DESC_SIZE,
300                                                    DMA_BIDIRECTIONAL);
301         }
302         request->callback = callback;
303         request->context = context;
304
305         /* increment fifo head */
306         priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
307
308         smp_wmb();
309         request->desc = desc;
310
311         /* GO! */
312         wmb();
313         out_be32(priv->chan[ch].reg + TALITOS_FF,
314                  upper_32_bits(request->dma_desc));
315         out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
316                  lower_32_bits(request->dma_desc));
317
318         spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
319
320         return -EINPROGRESS;
321 }
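
/*
 * Illustrative call sequence (a sketch only, using hypothetical names;
 * see ipsec_esp() and the other users below for the real callers):
 *
 *	err = talitos_submit(dev, ctx->ch, &edesc->desc, my_done_cb, req);
 *	if (err != -EINPROGRESS) {
 *		undo the DMA mappings, free the edesc, report the error
 *	}
 *
 * completion or failure is then reported asynchronously via my_done_cb().
 */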
322
323 /*
324  * process what was done, notify callback of error if not
325  */
326 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
327 {
328         struct talitos_private *priv = dev_get_drvdata(dev);
329         struct talitos_request *request, saved_req;
330         unsigned long flags;
331         int tail, status;
332         bool is_sec1 = has_ftr_sec1(priv);
333
334         spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
335
336         tail = priv->chan[ch].tail;
337         while (priv->chan[ch].fifo[tail].desc) {
338                 __be32 hdr;
339
340                 request = &priv->chan[ch].fifo[tail];
341
342                 /* descriptors with their done bits set don't get the error */
343                 rmb();
344                 if (!is_sec1)
345                         hdr = request->desc->hdr;
346                 else if (request->desc->next_desc)
347                         hdr = (request->desc + 1)->hdr1;
348                 else
349                         hdr = request->desc->hdr1;
350
351                 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
352                         status = 0;
353                 else
354                         if (!error)
355                                 break;
356                         else
357                                 status = error;
358
359                 dma_unmap_single(dev, request->dma_desc,
360                                  TALITOS_DESC_SIZE,
361                                  DMA_BIDIRECTIONAL);
362
363                 /* copy entries so we can call callback outside lock */
364                 saved_req.desc = request->desc;
365                 saved_req.callback = request->callback;
366                 saved_req.context = request->context;
367
368                 /* release request entry in fifo */
369                 smp_wmb();
370                 request->desc = NULL;
371
372                 /* increment fifo tail */
373                 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
374
375                 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
376
377                 atomic_dec(&priv->chan[ch].submit_count);
378
379                 saved_req.callback(dev, saved_req.desc, saved_req.context,
380                                    status);
381                 /* channel may resume processing in single desc error case */
382                 if (error && !reset_ch && status == error)
383                         return;
384                 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
385                 tail = priv->chan[ch].tail;
386         }
387
388         spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
389 }
390
391 /*
392  * process completed requests for channels that have done status
393  */
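/* on SEC1 the per-channel done bits are ISR bits 28, 30, 16 and 18 (channels 0-3) */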
394 #define DEF_TALITOS1_DONE(name, ch_done_mask)                           \
395 static void talitos1_done_##name(unsigned long data)                    \
396 {                                                                       \
397         struct device *dev = (struct device *)data;                     \
398         struct talitos_private *priv = dev_get_drvdata(dev);            \
399         unsigned long flags;                                            \
400                                                                         \
401         if (ch_done_mask & 0x10000000)                                  \
402                 flush_channel(dev, 0, 0, 0);                    \
403         if (ch_done_mask & 0x40000000)                                  \
404                 flush_channel(dev, 1, 0, 0);                    \
405         if (ch_done_mask & 0x00010000)                                  \
406                 flush_channel(dev, 2, 0, 0);                    \
407         if (ch_done_mask & 0x00040000)                                  \
408                 flush_channel(dev, 3, 0, 0);                    \
409                                                                         \
410         /* At this point, all completed channels have been processed */ \
411         /* Unmask done interrupts for channels completed later on. */   \
412         spin_lock_irqsave(&priv->reg_lock, flags);                      \
413         clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);               \
414         clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);    \
415         spin_unlock_irqrestore(&priv->reg_lock, flags);                 \
416 }
417
418 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
419 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
420
421 #define DEF_TALITOS2_DONE(name, ch_done_mask)                           \
422 static void talitos2_done_##name(unsigned long data)                    \
423 {                                                                       \
424         struct device *dev = (struct device *)data;                     \
425         struct talitos_private *priv = dev_get_drvdata(dev);            \
426         unsigned long flags;                                            \
427                                                                         \
428         if (ch_done_mask & 1)                                           \
429                 flush_channel(dev, 0, 0, 0);                            \
430         if (ch_done_mask & (1 << 2))                                    \
431                 flush_channel(dev, 1, 0, 0);                            \
432         if (ch_done_mask & (1 << 4))                                    \
433                 flush_channel(dev, 2, 0, 0);                            \
434         if (ch_done_mask & (1 << 6))                                    \
435                 flush_channel(dev, 3, 0, 0);                            \
436                                                                         \
437         /* At this point, all completed channels have been processed */ \
438         /* Unmask done interrupts for channels completed later on. */   \
439         spin_lock_irqsave(&priv->reg_lock, flags);                      \
440         setbits32(priv->reg + TALITOS_IMR, ch_done_mask);               \
441         setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);    \
442         spin_unlock_irqrestore(&priv->reg_lock, flags);                 \
443 }
444
445 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
446 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
447 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
448 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
449
450 /*
451  * locate current (offending) descriptor
452  */
453 static u32 current_desc_hdr(struct device *dev, int ch)
454 {
455         struct talitos_private *priv = dev_get_drvdata(dev);
456         int tail, iter;
457         dma_addr_t cur_desc;
458
459         cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
460         cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
461
462         if (!cur_desc) {
463                 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
464                 return 0;
465         }
466
467         tail = priv->chan[ch].tail;
468
469         iter = tail;
470         while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
471                priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
472                 iter = (iter + 1) & (priv->fifo_len - 1);
473                 if (iter == tail) {
474                         dev_err(dev, "couldn't locate current descriptor\n");
475                         return 0;
476                 }
477         }
478
479         if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
480                 return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
481
482         return priv->chan[ch].fifo[iter].desc->hdr;
483 }
484
485 /*
486  * user diagnostics; report root cause of error based on execution unit status
487  */
488 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
489 {
490         struct talitos_private *priv = dev_get_drvdata(dev);
491         int i;
492
493         if (!desc_hdr)
494                 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
495
496         switch (desc_hdr & DESC_HDR_SEL0_MASK) {
497         case DESC_HDR_SEL0_AFEU:
498                 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
499                         in_be32(priv->reg_afeu + TALITOS_EUISR),
500                         in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
501                 break;
502         case DESC_HDR_SEL0_DEU:
503                 dev_err(dev, "DEUISR 0x%08x_%08x\n",
504                         in_be32(priv->reg_deu + TALITOS_EUISR),
505                         in_be32(priv->reg_deu + TALITOS_EUISR_LO));
506                 break;
507         case DESC_HDR_SEL0_MDEUA:
508         case DESC_HDR_SEL0_MDEUB:
509                 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
510                         in_be32(priv->reg_mdeu + TALITOS_EUISR),
511                         in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
512                 break;
513         case DESC_HDR_SEL0_RNG:
514                 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
515                         in_be32(priv->reg_rngu + TALITOS_ISR),
516                         in_be32(priv->reg_rngu + TALITOS_ISR_LO));
517                 break;
518         case DESC_HDR_SEL0_PKEU:
519                 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
520                         in_be32(priv->reg_pkeu + TALITOS_EUISR),
521                         in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
522                 break;
523         case DESC_HDR_SEL0_AESU:
524                 dev_err(dev, "AESUISR 0x%08x_%08x\n",
525                         in_be32(priv->reg_aesu + TALITOS_EUISR),
526                         in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
527                 break;
528         case DESC_HDR_SEL0_CRCU:
529                 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
530                         in_be32(priv->reg_crcu + TALITOS_EUISR),
531                         in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
532                 break;
533         case DESC_HDR_SEL0_KEU:
534                 dev_err(dev, "KEUISR 0x%08x_%08x\n",
535                         in_be32(priv->reg_pkeu + TALITOS_EUISR),
536                         in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
537                 break;
538         }
539
540         switch (desc_hdr & DESC_HDR_SEL1_MASK) {
541         case DESC_HDR_SEL1_MDEUA:
542         case DESC_HDR_SEL1_MDEUB:
543                 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
544                         in_be32(priv->reg_mdeu + TALITOS_EUISR),
545                         in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
546                 break;
547         case DESC_HDR_SEL1_CRCU:
548                 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
549                         in_be32(priv->reg_crcu + TALITOS_EUISR),
550                         in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
551                 break;
552         }
553
554         for (i = 0; i < 8; i++)
555                 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
556                         in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
557                         in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
558 }
559
560 /*
561  * recover from error interrupts
562  */
563 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
564 {
565         struct talitos_private *priv = dev_get_drvdata(dev);
566         unsigned int timeout = TALITOS_TIMEOUT;
567         int ch, error, reset_dev = 0;
568         u32 v_lo;
569         bool is_sec1 = has_ftr_sec1(priv);
570         int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
571
572         for (ch = 0; ch < priv->num_channels; ch++) {
573                 /* skip channels without errors */
574                 if (is_sec1) {
575                         /* bits 29, 31, 17, 19 */
576                         if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
577                                 continue;
578                 } else {
579                         if (!(isr & (1 << (ch * 2 + 1))))
580                                 continue;
581                 }
582
583                 error = -EINVAL;
584
585                 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
586
587                 if (v_lo & TALITOS_CCPSR_LO_DOF) {
588                         dev_err(dev, "double fetch fifo overflow error\n");
589                         error = -EAGAIN;
590                         reset_ch = 1;
591                 }
592                 if (v_lo & TALITOS_CCPSR_LO_SOF) {
593                         /* h/w dropped descriptor */
594                         dev_err(dev, "single fetch fifo overflow error\n");
595                         error = -EAGAIN;
596                 }
597                 if (v_lo & TALITOS_CCPSR_LO_MDTE)
598                         dev_err(dev, "master data transfer error\n");
599                 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
600                         dev_err(dev, is_sec1 ? "pointer not complete error\n"
601                                              : "s/g data length zero error\n");
602                 if (v_lo & TALITOS_CCPSR_LO_FPZ)
603                         dev_err(dev, is_sec1 ? "parity error\n"
604                                              : "fetch pointer zero error\n");
605                 if (v_lo & TALITOS_CCPSR_LO_IDH)
606                         dev_err(dev, "illegal descriptor header error\n");
607                 if (v_lo & TALITOS_CCPSR_LO_IEU)
608                         dev_err(dev, is_sec1 ? "static assignment error\n"
609                                              : "invalid exec unit error\n");
610                 if (v_lo & TALITOS_CCPSR_LO_EU)
611                         report_eu_error(dev, ch, current_desc_hdr(dev, ch));
612                 if (!is_sec1) {
613                         if (v_lo & TALITOS_CCPSR_LO_GB)
614                                 dev_err(dev, "gather boundary error\n");
615                         if (v_lo & TALITOS_CCPSR_LO_GRL)
616                                 dev_err(dev, "gather return/length error\n");
617                         if (v_lo & TALITOS_CCPSR_LO_SB)
618                                 dev_err(dev, "scatter boundary error\n");
619                         if (v_lo & TALITOS_CCPSR_LO_SRL)
620                                 dev_err(dev, "scatter return/length error\n");
621                 }
622
623                 flush_channel(dev, ch, error, reset_ch);
624
625                 if (reset_ch) {
626                         reset_channel(dev, ch);
627                 } else {
628                         setbits32(priv->chan[ch].reg + TALITOS_CCCR,
629                                   TALITOS2_CCCR_CONT);
630                         setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
631                         while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
632                                TALITOS2_CCCR_CONT) && --timeout)
633                                 cpu_relax();
634                         if (timeout == 0) {
635                                 dev_err(dev, "failed to restart channel %d\n",
636                                         ch);
637                                 reset_dev = 1;
638                         }
639                 }
640         }
641         if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
642             (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
643                 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
644                         dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
645                                 isr, isr_lo);
646                 else
647                         dev_err(dev, "done overflow, internal time out, or "
648                                 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
649
650                 /* purge request queues */
651                 for (ch = 0; ch < priv->num_channels; ch++)
652                         flush_channel(dev, ch, -EIO, 1);
653
654                 /* reset and reinitialize the device */
655                 init_device(dev);
656         }
657 }
658
659 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)          \
660 static irqreturn_t talitos1_interrupt_##name(int irq, void *data)              \
661 {                                                                              \
662         struct device *dev = data;                                             \
663         struct talitos_private *priv = dev_get_drvdata(dev);                   \
664         u32 isr, isr_lo;                                                       \
665         unsigned long flags;                                                   \
666                                                                                \
667         spin_lock_irqsave(&priv->reg_lock, flags);                             \
668         isr = in_be32(priv->reg + TALITOS_ISR);                                \
669         isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
670         /* Acknowledge interrupt */                                            \
671         out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
672         out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
673                                                                                \
674         if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
675                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
676                 talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
677         }                                                                      \
678         else {                                                                 \
679                 if (likely(isr & ch_done_mask)) {                              \
680                         /* mask further done interrupts. */                    \
681                         setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
682                         /* done_task will unmask done interrupts at exit */    \
683                         tasklet_schedule(&priv->done_task[tlet]);              \
684                 }                                                              \
685                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
686         }                                                                      \
687                                                                                \
688         return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
689                                                                 IRQ_NONE;      \
690 }
691
692 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
693
694 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)          \
695 static irqreturn_t talitos2_interrupt_##name(int irq, void *data)              \
696 {                                                                              \
697         struct device *dev = data;                                             \
698         struct talitos_private *priv = dev_get_drvdata(dev);                   \
699         u32 isr, isr_lo;                                                       \
700         unsigned long flags;                                                   \
701                                                                                \
702         spin_lock_irqsave(&priv->reg_lock, flags);                             \
703         isr = in_be32(priv->reg + TALITOS_ISR);                                \
704         isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
705         /* Acknowledge interrupt */                                            \
706         out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
707         out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
708                                                                                \
709         if (unlikely(isr & ch_err_mask || isr_lo)) {                           \
710                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
711                 talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
712         }                                                                      \
713         else {                                                                 \
714                 if (likely(isr & ch_done_mask)) {                              \
715                         /* mask further done interrupts. */                    \
716                         clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
717                         /* done_task will unmask done interrupts at exit */    \
718                         tasklet_schedule(&priv->done_task[tlet]);              \
719                 }                                                              \
720                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
721         }                                                                      \
722                                                                                \
723         return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
724                                                                 IRQ_NONE;      \
725 }
726
727 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
728 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
729                        0)
730 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
731                        1)
732
733 /*
734  * hwrng
735  */
736 static int talitos_rng_data_present(struct hwrng *rng, int wait)
737 {
738         struct device *dev = (struct device *)rng->priv;
739         struct talitos_private *priv = dev_get_drvdata(dev);
740         u32 ofl;
741         int i;
742
743         for (i = 0; i < 20; i++) {
744                 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
745                       TALITOS_RNGUSR_LO_OFL;
746                 if (ofl || !wait)
747                         break;
748                 udelay(10);
749         }
750
751         return !!ofl;
752 }
753
754 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
755 {
756         struct device *dev = (struct device *)rng->priv;
757         struct talitos_private *priv = dev_get_drvdata(dev);
758
759         /* rng fifo requires 64-bit accesses */
760         *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
761         *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
762
763         return sizeof(u32);
764 }
765
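/* software-reset the RNGU, wait for reset done, then start generation */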
766 static int talitos_rng_init(struct hwrng *rng)
767 {
768         struct device *dev = (struct device *)rng->priv;
769         struct talitos_private *priv = dev_get_drvdata(dev);
770         unsigned int timeout = TALITOS_TIMEOUT;
771
772         setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
773         while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
774                  & TALITOS_RNGUSR_LO_RD)
775                && --timeout)
776                 cpu_relax();
777         if (timeout == 0) {
778                 dev_err(dev, "failed to reset rng hw\n");
779                 return -ENODEV;
780         }
781
782         /* start generating */
783         setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
784
785         return 0;
786 }
787
788 static int talitos_register_rng(struct device *dev)
789 {
790         struct talitos_private *priv = dev_get_drvdata(dev);
791         int err;
792
793         priv->rng.name          = dev_driver_string(dev);
794         priv->rng.init          = talitos_rng_init;
795         priv->rng.data_present  = talitos_rng_data_present;
796         priv->rng.data_read     = talitos_rng_data_read;
797         priv->rng.priv          = (unsigned long)dev;
798
799         err = hwrng_register(&priv->rng);
800         if (!err)
801                 priv->rng_registered = true;
802
803         return err;
804 }
805
806 static void talitos_unregister_rng(struct device *dev)
807 {
808         struct talitos_private *priv = dev_get_drvdata(dev);
809
810         if (!priv->rng_registered)
811                 return;
812
813         hwrng_unregister(&priv->rng);
814         priv->rng_registered = false;
815 }
816
817 /*
818  * crypto alg
819  */
820 #define TALITOS_CRA_PRIORITY            3000
821 /*
822  * Defines a priority for doing AEAD with descriptors type
823  * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
824  */
825 #define TALITOS_CRA_PRIORITY_AEAD_HSNA  (TALITOS_CRA_PRIORITY - 1)
826 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
827 #define TALITOS_MAX_KEY_SIZE            (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
828 #else
829 #define TALITOS_MAX_KEY_SIZE            (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
830 #endif
831 #define TALITOS_MAX_IV_LENGTH           16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
832
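/*
 * per-tfm context: SEC channel, descriptor header template and the
 * dma-mapped key (auth key followed by cipher key for AEAD)
 */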
833 struct talitos_ctx {
834         struct device *dev;
835         int ch;
836         __be32 desc_hdr_template;
837         u8 key[TALITOS_MAX_KEY_SIZE];
838         u8 iv[TALITOS_MAX_IV_LENGTH];
839         dma_addr_t dma_key;
840         unsigned int keylen;
841         unsigned int enckeylen;
842         unsigned int authkeylen;
843 };
844
845 #define HASH_MAX_BLOCK_SIZE             SHA512_BLOCK_SIZE
846 #define TALITOS_MDEU_MAX_CONTEXT_SIZE   TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
847
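/* per-request ahash state: saved h/w context plus partial-block buffers */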
848 struct talitos_ahash_req_ctx {
849         u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
850         unsigned int hw_context_size;
851         u8 buf[2][HASH_MAX_BLOCK_SIZE];
852         int buf_idx;
853         unsigned int swinit;
854         unsigned int first;
855         unsigned int last;
856         unsigned int to_hash_later;
857         unsigned int nbuf;
858         struct scatterlist bufsl[2];
859         struct scatterlist *psrc;
860 };
861
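/* flat copy of the ahash state used by export()/import() */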
862 struct talitos_export_state {
863         u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
864         u8 buf[HASH_MAX_BLOCK_SIZE];
865         unsigned int swinit;
866         unsigned int first;
867         unsigned int last;
868         unsigned int to_hash_later;
869         unsigned int nbuf;
870 };
871
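/*
 * aead setkey: split the authenc() key blob and store the auth key followed
 * by the cipher key contiguously in ctx->key, then dma-map it
 */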
872 static int aead_setkey(struct crypto_aead *authenc,
873                        const u8 *key, unsigned int keylen)
874 {
875         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
876         struct device *dev = ctx->dev;
877         struct crypto_authenc_keys keys;
878
879         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
880                 goto badkey;
881
882         if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
883                 goto badkey;
884
885         if (ctx->keylen)
886                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
887
888         memcpy(ctx->key, keys.authkey, keys.authkeylen);
889         memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
890
891         ctx->keylen = keys.authkeylen + keys.enckeylen;
892         ctx->enckeylen = keys.enckeylen;
893         ctx->authkeylen = keys.authkeylen;
894         ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
895                                       DMA_TO_DEVICE);
896
897         memzero_explicit(&keys, sizeof(keys));
898         return 0;
899
900 badkey:
901         crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
902         memzero_explicit(&keys, sizeof(keys));
903         return -EINVAL;
904 }
905
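/* as aead_setkey(), but also validate the 3DES key via __des3_verify_key() */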
906 static int aead_des3_setkey(struct crypto_aead *authenc,
907                             const u8 *key, unsigned int keylen)
908 {
909         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
910         struct device *dev = ctx->dev;
911         struct crypto_authenc_keys keys;
912         u32 flags;
913         int err;
914
915         err = crypto_authenc_extractkeys(&keys, key, keylen);
916         if (unlikely(err))
917                 goto badkey;
918
919         err = -EINVAL;
920         if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
921                 goto badkey;
922
923         if (keys.enckeylen != DES3_EDE_KEY_SIZE)
924                 goto badkey;
925
926         flags = crypto_aead_get_flags(authenc);
927         err = __des3_verify_key(&flags, keys.enckey);
928         if (unlikely(err)) {
929                 crypto_aead_set_flags(authenc, flags);
930                 goto out;
931         }
932
933         if (ctx->keylen)
934                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
935
936         memcpy(ctx->key, keys.authkey, keys.authkeylen);
937         memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
938
939         ctx->keylen = keys.authkeylen + keys.enckeylen;
940         ctx->enckeylen = keys.enckeylen;
941         ctx->authkeylen = keys.authkeylen;
942         ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
943                                       DMA_TO_DEVICE);
944
945 out:
946         memzero_explicit(&keys, sizeof(keys));
947         return err;
948
949 badkey:
950         crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
951         goto out;
952 }
953
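/*
 * unmap a request's src/dst scatterlists; on SEC1, multi-segment destinations
 * were bounced through edesc->buf, so copy the result back out first
 */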
954 static void talitos_sg_unmap(struct device *dev,
955                              struct talitos_edesc *edesc,
956                              struct scatterlist *src,
957                              struct scatterlist *dst,
958                              unsigned int len, unsigned int offset)
959 {
960         struct talitos_private *priv = dev_get_drvdata(dev);
961         bool is_sec1 = has_ftr_sec1(priv);
962         unsigned int src_nents = edesc->src_nents ? : 1;
963         unsigned int dst_nents = edesc->dst_nents ? : 1;
964
965         if (is_sec1 && dst && dst_nents > 1) {
966                 dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
967                                            len, DMA_FROM_DEVICE);
968                 sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
969                                      offset);
970         }
971         if (src != dst) {
972                 if (src_nents == 1 || !is_sec1)
973                         dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
974
975                 if (dst && (dst_nents == 1 || !is_sec1))
976                         dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
977         } else if (src_nents == 1 || !is_sec1) {
978                 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
979         }
980 }
981
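/*
 * undo the mappings made by ipsec_esp(); for non-IPSEC_ESP (HSNA) descriptors
 * also stash the last ivsize bytes of the result in ctx->iv
 */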
982 static void ipsec_esp_unmap(struct device *dev,
983                             struct talitos_edesc *edesc,
984                             struct aead_request *areq, bool encrypt)
985 {
986         struct crypto_aead *aead = crypto_aead_reqtfm(areq);
987         struct talitos_ctx *ctx = crypto_aead_ctx(aead);
988         unsigned int ivsize = crypto_aead_ivsize(aead);
989         unsigned int authsize = crypto_aead_authsize(aead);
990         unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
991         bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
992         struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
993
994         if (is_ipsec_esp)
995                 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
996                                          DMA_FROM_DEVICE);
997         unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
998
999         talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
1000                          cryptlen + authsize, areq->assoclen);
1001
1002         if (edesc->dma_len)
1003                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1004                                  DMA_BIDIRECTIONAL);
1005
1006         if (!is_ipsec_esp) {
1007                 unsigned int dst_nents = edesc->dst_nents ? : 1;
1008
1009                 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1010                                    areq->assoclen + cryptlen - ivsize);
1011         }
1012 }
1013
1014 /*
1015  * ipsec_esp descriptor callbacks
1016  */
1017 static void ipsec_esp_encrypt_done(struct device *dev,
1018                                    struct talitos_desc *desc, void *context,
1019                                    int err)
1020 {
1021         struct aead_request *areq = context;
1022         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1023         unsigned int ivsize = crypto_aead_ivsize(authenc);
1024         struct talitos_edesc *edesc;
1025
1026         edesc = container_of(desc, struct talitos_edesc, desc);
1027
1028         ipsec_esp_unmap(dev, edesc, areq, true);
1029
1030         dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1031
1032         kfree(edesc);
1033
1034         aead_request_complete(areq, err);
1035 }
1036
1037 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1038                                           struct talitos_desc *desc,
1039                                           void *context, int err)
1040 {
1041         struct aead_request *req = context;
1042         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1043         unsigned int authsize = crypto_aead_authsize(authenc);
1044         struct talitos_edesc *edesc;
1045         char *oicv, *icv;
1046
1047         edesc = container_of(desc, struct talitos_edesc, desc);
1048
1049         ipsec_esp_unmap(dev, edesc, req, false);
1050
1051         if (!err) {
1052                 /* auth check */
1053                 oicv = edesc->buf + edesc->dma_len;
1054                 icv = oicv - authsize;
1055
1056                 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1057         }
1058
1059         kfree(edesc);
1060
1061         aead_request_complete(req, err);
1062 }
1063
1064 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1065                                           struct talitos_desc *desc,
1066                                           void *context, int err)
1067 {
1068         struct aead_request *req = context;
1069         struct talitos_edesc *edesc;
1070
1071         edesc = container_of(desc, struct talitos_edesc, desc);
1072
1073         ipsec_esp_unmap(dev, edesc, req, false);
1074
1075         /* check ICV auth status */
1076         if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1077                      DESC_HDR_LO_ICCR1_PASS))
1078                 err = -EBADMSG;
1079
1080         kfree(edesc);
1081
1082         aead_request_complete(req, err);
1083 }
1084
1085 /*
1086  * convert scatterlist to SEC h/w link table format
1087  * stop at cryptlen bytes
1088  */
1089 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1090                                  unsigned int offset, int datalen, int elen,
1091                                  struct talitos_ptr *link_tbl_ptr)
1092 {
1093         int n_sg = elen ? sg_count + 1 : sg_count;
1094         int count = 0;
1095         int cryptlen = datalen + elen;
1096
1097         while (cryptlen && sg && n_sg--) {
1098                 unsigned int len = sg_dma_len(sg);
1099
1100                 if (offset >= len) {
1101                         offset -= len;
1102                         goto next;
1103                 }
1104
1105                 len -= offset;
1106
1107                 if (len > cryptlen)
1108                         len = cryptlen;
1109
1110                 if (datalen > 0 && len > datalen) {
1111                         to_talitos_ptr(link_tbl_ptr + count,
1112                                        sg_dma_address(sg) + offset, datalen, 0);
1113                         to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1114                         count++;
1115                         len -= datalen;
1116                         offset += datalen;
1117                 }
1118                 to_talitos_ptr(link_tbl_ptr + count,
1119                                sg_dma_address(sg) + offset, len, 0);
1120                 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1121                 count++;
1122                 cryptlen -= len;
1123                 datalen -= len;
1124                 offset = 0;
1125
1126 next:
1127                 sg = sg_next(sg);
1128         }
1129
1130         /* tag end of link table */
1131         if (count > 0)
1132                 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1133                                        DESC_PTR_LNKTBL_RET, 0);
1134
1135         return count;
1136 }
1137
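/*
 * point a descriptor pointer at scatterlist data: single segments are
 * referenced directly, SEC1 multi-segment data goes through the bounce
 * buffer, and SEC2+ multi-segment data gets a link table (JUMP) entry
 */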
1138 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1139                               unsigned int len, struct talitos_edesc *edesc,
1140                               struct talitos_ptr *ptr, int sg_count,
1141                               unsigned int offset, int tbl_off, int elen,
1142                               bool force)
1143 {
1144         struct talitos_private *priv = dev_get_drvdata(dev);
1145         bool is_sec1 = has_ftr_sec1(priv);
1146
1147         if (!src) {
1148                 to_talitos_ptr(ptr, 0, 0, is_sec1);
1149                 return 1;
1150         }
1151         to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1152         if (sg_count == 1 && !force) {
1153                 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1154                 return sg_count;
1155         }
1156         if (is_sec1) {
1157                 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1158                 return sg_count;
1159         }
1160         sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
1161                                          &edesc->link_tbl[tbl_off]);
1162         if (sg_count == 1 && !force) {
1163                 /* Only one segment now, so no link tbl needed */
1164                 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1165                 return sg_count;
1166         }
1167         to_talitos_ptr(ptr, edesc->dma_link_tbl +
1168                             tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1169         to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1170
1171         return sg_count;
1172 }
1173
1174 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1175                           unsigned int len, struct talitos_edesc *edesc,
1176                           struct talitos_ptr *ptr, int sg_count,
1177                           unsigned int offset, int tbl_off)
1178 {
1179         return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1180                                   tbl_off, 0, false);
1181 }
1182
1183 /*
1184  * fill in and submit ipsec_esp descriptor
1185  */
1186 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1187                      bool encrypt,
1188                      void (*callback)(struct device *dev,
1189                                       struct talitos_desc *desc,
1190                                       void *context, int error))
1191 {
1192         struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1193         unsigned int authsize = crypto_aead_authsize(aead);
1194         struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1195         struct device *dev = ctx->dev;
1196         struct talitos_desc *desc = &edesc->desc;
1197         unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1198         unsigned int ivsize = crypto_aead_ivsize(aead);
1199         int tbl_off = 0;
1200         int sg_count, ret;
1201         int elen = 0;
1202         bool sync_needed = false;
1203         struct talitos_private *priv = dev_get_drvdata(dev);
1204         bool is_sec1 = has_ftr_sec1(priv);
1205         bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1206         struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1207         struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1208         dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
1209
1210         /* hmac key */
1211         to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1212
1213         sg_count = edesc->src_nents ?: 1;
1214         if (is_sec1 && sg_count > 1)
1215                 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1216                                   areq->assoclen + cryptlen);
1217         else
1218                 sg_count = dma_map_sg(dev, areq->src, sg_count,
1219                                       (areq->src == areq->dst) ?
1220                                       DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1221
1222         /* hmac data */
1223         ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1224                              &desc->ptr[1], sg_count, 0, tbl_off);
1225
1226         if (ret > 1) {
1227                 tbl_off += ret;
1228                 sync_needed = true;
1229         }
1230
1231         /* cipher iv */
1232         to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1233
1234         /* cipher key */
1235         to_talitos_ptr(ckey_ptr, ctx->dma_key  + ctx->authkeylen,
1236                        ctx->enckeylen, is_sec1);
1237
1238         /*
1239          * cipher in
1240          * map and adjust cipher len to aead request cryptlen.
1241          * extent is bytes of HMAC appended to the ciphertext,
1242          * typically 12 for ipsec
1243          */
1244         if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1245                 elen = authsize;
1246
1247         ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1248                                  sg_count, areq->assoclen, tbl_off, elen,
1249                                  false);
1250
1251         if (ret > 1) {
1252                 tbl_off += ret;
1253                 sync_needed = true;
1254         }
1255
1256         /* cipher out */
1257         if (areq->src != areq->dst) {
1258                 sg_count = edesc->dst_nents ? : 1;
1259                 if (!is_sec1 || sg_count == 1)
1260                         dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1261         }
1262
1263         if (is_ipsec_esp && encrypt)
1264                 elen = authsize;
1265         else
1266                 elen = 0;
1267         ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1268                                  sg_count, areq->assoclen, tbl_off, elen,
1269                                  is_ipsec_esp && !encrypt);
1270         tbl_off += ret;
1271
1272         /* ICV data */
1273         edesc->icv_ool = !encrypt;
1274
1275         if (!encrypt && is_ipsec_esp) {
1276                 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1277
1278                 /* Add an entry to the link table for ICV data */
1279                 to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1280                 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
1281
1282                 /* icv data follows link tables */
1283                 to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
1284                 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1285                 sync_needed = true;
1286         } else if (!encrypt) {
1287                 to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
1288                 sync_needed = true;
1289         } else if (!is_ipsec_esp) {
1290                 talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
1291                                sg_count, areq->assoclen + cryptlen, tbl_off);
1292         }
1293
1294         /* iv out */
1295         if (is_ipsec_esp)
1296                 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1297                                        DMA_FROM_DEVICE);
1298
1299         if (sync_needed)
1300                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1301                                            edesc->dma_len,
1302                                            DMA_BIDIRECTIONAL);
1303
1304         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1305         if (ret != -EINPROGRESS) {
1306                 ipsec_esp_unmap(dev, edesc, areq, encrypt);
1307                 kfree(edesc);
1308         }
1309         return ret;
1310 }
1311
1312 /*
1313  * allocate and map the extended descriptor
1314  */
1315 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1316                                                  struct scatterlist *src,
1317                                                  struct scatterlist *dst,
1318                                                  u8 *iv,
1319                                                  unsigned int assoclen,
1320                                                  unsigned int cryptlen,
1321                                                  unsigned int authsize,
1322                                                  unsigned int ivsize,
1323                                                  int icv_stashing,
1324                                                  u32 cryptoflags,
1325                                                  bool encrypt)
1326 {
1327         struct talitos_edesc *edesc;
1328         int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1329         dma_addr_t iv_dma = 0;
1330         gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1331                       GFP_ATOMIC;
1332         struct talitos_private *priv = dev_get_drvdata(dev);
1333         bool is_sec1 = has_ftr_sec1(priv);
1334         int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1335
1336         if (cryptlen + authsize > max_len) {
1337                 dev_err(dev, "length exceeds h/w max limit\n");
1338                 return ERR_PTR(-EINVAL);
1339         }
1340
1341         if (!dst || dst == src) {
1342                 src_len = assoclen + cryptlen + authsize;
1343                 src_nents = sg_nents_for_len(src, src_len);
1344                 if (src_nents < 0) {
1345                         dev_err(dev, "Invalid number of src SG.\n");
1346                         return ERR_PTR(-EINVAL);
1347                 }
1348                 src_nents = (src_nents == 1) ? 0 : src_nents;
1349                 dst_nents = dst ? src_nents : 0;
1350                 dst_len = 0;
1351         } else { /* dst && dst != src */
1352                 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1353                 src_nents = sg_nents_for_len(src, src_len);
1354                 if (src_nents < 0) {
1355                         dev_err(dev, "Invalid number of src SG.\n");
1356                         return ERR_PTR(-EINVAL);
1357                 }
1358                 src_nents = (src_nents == 1) ? 0 : src_nents;
1359                 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1360                 dst_nents = sg_nents_for_len(dst, dst_len);
1361                 if (dst_nents < 0) {
1362                         dev_err(dev, "Invalid number of dst SG.\n");
1363                         return ERR_PTR(-EINVAL);
1364                 }
1365                 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1366         }
1367
1368         /*
1369          * allocate space for base edesc plus the link tables,
1370          * allowing for two separate entries for AD and generated ICV (+ 2),
1371          * and space for two sets of ICVs (stashed and generated)
1372          */
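             /*
              * For example, on SEC2+ with src_nents = 3, dst_nents = 2 and
              * authsize = 16, dma_len works out to
              * (3 + 2 + 2) * sizeof(struct talitos_ptr) + 16, i.e. seven
              * link-table entries plus room for the generated ICV; SEC1
              * instead sizes a bounce buffer from the actual source and
              * destination lengths.
              */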
1373         alloc_len = sizeof(struct talitos_edesc);
1374         if (src_nents || dst_nents || !encrypt) {
1375                 if (is_sec1)
1376                         dma_len = (src_nents ? src_len : 0) +
1377                                   (dst_nents ? dst_len : 0) + authsize;
1378                 else
1379                         dma_len = (src_nents + dst_nents + 2) *
1380                                   sizeof(struct talitos_ptr) + authsize;
1381                 alloc_len += dma_len;
1382         } else {
1383                 dma_len = 0;
1384         }
1385         alloc_len += icv_stashing ? authsize : 0;
1386
1387         /* if it's an ahash, add space for a second desc next to the first one */
1388         if (is_sec1 && !dst)
1389                 alloc_len += sizeof(struct talitos_desc);
1390         alloc_len += ivsize;
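             /*
              * The single allocation below thus holds the edesc header, the
              * optional second descriptor (SEC1 ahash), the dma_len link-table
              * or bounce area, an optional stashed-ICV slot, and an
              * ivsize-byte copy of the IV at the very end, which is what gets
              * mapped as iv_dma.
              */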
1391
1392         edesc = kmalloc(alloc_len, GFP_DMA | flags);
1393         if (!edesc)
1394                 return ERR_PTR(-ENOMEM);
1395         if (ivsize) {
1396                 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1397                 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1398         }
1399         memset(&edesc->desc, 0, sizeof(edesc->desc));
1400
1401         edesc->src_nents = src_nents;
1402         edesc->dst_nents = dst_nents;
1403         edesc->iv_dma = iv_dma;
1404         edesc->dma_len = dma_len;
1405         if (dma_len) {
1406                 void *addr = &edesc->link_tbl[0];
1407
1408                 if (is_sec1 && !dst)
1409                         addr += sizeof(struct talitos_desc);
1410                 edesc->dma_link_tbl = dma_map_single(dev, addr,
1411                                                      edesc->dma_len,
1412                                                      DMA_BIDIRECTIONAL);
1413         }
1414         return edesc;
1415 }
1416
1417 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1418                                               int icv_stashing, bool encrypt)
1419 {
1420         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1421         unsigned int authsize = crypto_aead_authsize(authenc);
1422         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1423         unsigned int ivsize = crypto_aead_ivsize(authenc);
1424         unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1425
1426         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1427                                    iv, areq->assoclen, cryptlen,
1428                                    authsize, ivsize, icv_stashing,
1429                                    areq->base.flags, encrypt);
1430 }
1431
1432 static int aead_encrypt(struct aead_request *req)
1433 {
1434         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1435         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1436         struct talitos_edesc *edesc;
1437
1438         /* allocate extended descriptor */
1439         edesc = aead_edesc_alloc(req, req->iv, 0, true);
1440         if (IS_ERR(edesc))
1441                 return PTR_ERR(edesc);
1442
1443         /* set encrypt */
1444         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1445
1446         return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
1447 }
1448
1449 static int aead_decrypt(struct aead_request *req)
1450 {
1451         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1452         unsigned int authsize = crypto_aead_authsize(authenc);
1453         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1454         struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1455         struct talitos_edesc *edesc;
1456         void *icvdata;
1457
1458         /* allocate extended descriptor */
1459         edesc = aead_edesc_alloc(req, req->iv, 1, false);
1460         if (IS_ERR(edesc))
1461                 return PTR_ERR(edesc);
1462
1463         if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1464             (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1465             ((!edesc->src_nents && !edesc->dst_nents) ||
1466              priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1467
1468                 /* decrypt and check the ICV */
1469                 edesc->desc.hdr = ctx->desc_hdr_template |
1470                                   DESC_HDR_DIR_INBOUND |
1471                                   DESC_HDR_MODE1_MDEU_CICV;
1472
1473                 /* reset integrity check result bits */
1474
1475                 return ipsec_esp(edesc, req, false,
1476                                  ipsec_esp_decrypt_hwauth_done);
1477         }
1478
1479         /* Have to check the ICV with software */
1480         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1481
1482         /* stash incoming ICV for later cmp with ICV generated by the h/w */
1483         icvdata = edesc->buf + edesc->dma_len;
1484
1485         sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1486                            req->assoclen + req->cryptlen - authsize);
1487
1488         return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
1489 }
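     /*
      * Note on the two decrypt paths above: with TALITOS_FTR_HW_AUTH_CHECK and
      * the IPsec ESP descriptor type, the MDEU checks the received ICV itself
      * (CICV mode) and ipsec_esp_decrypt_hwauth_done() only has to inspect the
      * descriptor status; otherwise the received ICV is stashed at
      * edesc->buf + dma_len and ipsec_esp_decrypt_swauth_done() compares it
      * with the ICV computed by the hardware.
      */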
1490
1491 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1492                              const u8 *key, unsigned int keylen)
1493 {
1494         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1495         struct device *dev = ctx->dev;
1496
1497         if (ctx->keylen)
1498                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1499
1500         memcpy(&ctx->key, key, keylen);
1501         ctx->keylen = keylen;
1502
1503         ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1504
1505         return 0;
1506 }
1507
1508 static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
1509                                  const u8 *key, unsigned int keylen)
1510 {
1511         u32 tmp[DES_EXPKEY_WORDS];
1512
1513         if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1514                      CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
1515             !des_ekey(tmp, key)) {
1516                 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1517                 return -EINVAL;
1518         }
1519
1520         return ablkcipher_setkey(cipher, key, keylen);
1521 }
1522
1523 static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
1524                                   const u8 *key, unsigned int keylen)
1525 {
1526         u32 flags;
1527         int err;
1528
1529         flags = crypto_ablkcipher_get_flags(cipher);
1530         err = __des3_verify_key(&flags, key);
1531         if (unlikely(err)) {
1532                 crypto_ablkcipher_set_flags(cipher, flags);
1533                 return err;
1534         }
1535
1536         return ablkcipher_setkey(cipher, key, keylen);
1537 }
1538
1539 static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
1540                                   const u8 *key, unsigned int keylen)
1541 {
1542         if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1543             keylen == AES_KEYSIZE_256)
1544                 return ablkcipher_setkey(cipher, key, keylen);
1545
1546         crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1547
1548         return -EINVAL;
1549 }
1550
1551 static void common_nonsnoop_unmap(struct device *dev,
1552                                   struct talitos_edesc *edesc,
1553                                   struct ablkcipher_request *areq)
1554 {
1555         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1556
1557         talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1558         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1559
1560         if (edesc->dma_len)
1561                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1562                                  DMA_BIDIRECTIONAL);
1563 }
1564
1565 static void ablkcipher_done(struct device *dev,
1566                             struct talitos_desc *desc, void *context,
1567                             int err)
1568 {
1569         struct ablkcipher_request *areq = context;
1570         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1571         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1572         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1573         struct talitos_edesc *edesc;
1574
1575         edesc = container_of(desc, struct talitos_edesc, desc);
1576
1577         common_nonsnoop_unmap(dev, edesc, areq);
1578         memcpy(areq->info, ctx->iv, ivsize);
1579
1580         kfree(edesc);
1581
1582         areq->base.complete(&areq->base, err);
1583 }
1584
1585 static int common_nonsnoop(struct talitos_edesc *edesc,
1586                            struct ablkcipher_request *areq,
1587                            void (*callback) (struct device *dev,
1588                                              struct talitos_desc *desc,
1589                                              void *context, int error))
1590 {
1591         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1592         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1593         struct device *dev = ctx->dev;
1594         struct talitos_desc *desc = &edesc->desc;
1595         unsigned int cryptlen = areq->nbytes;
1596         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1597         int sg_count, ret;
1598         bool sync_needed = false;
1599         struct talitos_private *priv = dev_get_drvdata(dev);
1600         bool is_sec1 = has_ftr_sec1(priv);
1601
1602         /* first DWORD empty */
1603
1604         /* cipher iv */
1605         to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1606
1607         /* cipher key */
1608         to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1609
1610         sg_count = edesc->src_nents ?: 1;
1611         if (is_sec1 && sg_count > 1)
1612                 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1613                                   cryptlen);
1614         else
1615                 sg_count = dma_map_sg(dev, areq->src, sg_count,
1616                                       (areq->src == areq->dst) ?
1617                                       DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1618         /*
1619          * cipher in
1620          */
1621         sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1622                                   &desc->ptr[3], sg_count, 0, 0);
1623         if (sg_count > 1)
1624                 sync_needed = true;
1625
1626         /* cipher out */
1627         if (areq->src != areq->dst) {
1628                 sg_count = edesc->dst_nents ? : 1;
1629                 if (!is_sec1 || sg_count == 1)
1630                         dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1631         }
1632
1633         ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1634                              sg_count, 0, (edesc->src_nents + 1));
1635         if (ret > 1)
1636                 sync_needed = true;
1637
1638         /* iv out */
1639         map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1640                                DMA_FROM_DEVICE);
1641
1642         /* last DWORD empty */
1643
1644         if (sync_needed)
1645                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1646                                            edesc->dma_len, DMA_BIDIRECTIONAL);
1647
1648         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1649         if (ret != -EINPROGRESS) {
1650                 common_nonsnoop_unmap(dev, edesc, areq);
1651                 kfree(edesc);
1652         }
1653         return ret;
1654 }
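     /*
      * In short, common_nonsnoop() builds the non-snooping descriptor as
      * ptr[1] = IV in, ptr[2] = cipher key, ptr[3] = data in, ptr[4] = data out
      * and ptr[5] = IV out, with the first and last dwords left empty, and then
      * submits it with ablkcipher_done() as the completion callback.
      */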
1655
1656 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1657                                                     areq, bool encrypt)
1658 {
1659         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1660         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1661         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1662
1663         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1664                                    areq->info, 0, areq->nbytes, 0, ivsize, 0,
1665                                    areq->base.flags, encrypt);
1666 }
1667
1668 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1669 {
1670         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1671         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1672         struct talitos_edesc *edesc;
1673         unsigned int blocksize =
1674                         crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1675
1676         if (!areq->nbytes)
1677                 return 0;
1678
1679         if (areq->nbytes % blocksize)
1680                 return -EINVAL;
1681
1682         /* allocate extended descriptor */
1683         edesc = ablkcipher_edesc_alloc(areq, true);
1684         if (IS_ERR(edesc))
1685                 return PTR_ERR(edesc);
1686
1687         /* set encrypt */
1688         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1689
1690         return common_nonsnoop(edesc, areq, ablkcipher_done);
1691 }
1692
1693 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1694 {
1695         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1696         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1697         struct talitos_edesc *edesc;
1698         unsigned int blocksize =
1699                         crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1700
1701         if (!areq->nbytes)
1702                 return 0;
1703
1704         if (areq->nbytes % blocksize)
1705                 return -EINVAL;
1706
1707         /* allocate extended descriptor */
1708         edesc = ablkcipher_edesc_alloc(areq, false);
1709         if (IS_ERR(edesc))
1710                 return PTR_ERR(edesc);
1711
1712         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1713
1714         return common_nonsnoop(edesc, areq, ablkcipher_done);
1715 }
1716
1717 static void common_nonsnoop_hash_unmap(struct device *dev,
1718                                        struct talitos_edesc *edesc,
1719                                        struct ahash_request *areq)
1720 {
1721         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1722         struct talitos_private *priv = dev_get_drvdata(dev);
1723         bool is_sec1 = has_ftr_sec1(priv);
1724         struct talitos_desc *desc = &edesc->desc;
1725         struct talitos_desc *desc2 = desc + 1;
1726
1727         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1728         if (desc->next_desc &&
1729             desc->ptr[5].ptr != desc2->ptr[5].ptr)
1730                 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1731
1732         talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1733
1734         /* When using hashctx-in, must unmap it. */
1735         if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1736                 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1737                                          DMA_TO_DEVICE);
1738         else if (desc->next_desc)
1739                 unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1740                                          DMA_TO_DEVICE);
1741
1742         if (is_sec1 && req_ctx->nbuf)
1743                 unmap_single_talitos_ptr(dev, &desc->ptr[3],
1744                                          DMA_TO_DEVICE);
1745
1746         if (edesc->dma_len)
1747                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1748                                  DMA_BIDIRECTIONAL);
1749
1750         if (edesc->desc.next_desc)
1751                 dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1752                                  TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1753 }
1754
1755 static void ahash_done(struct device *dev,
1756                        struct talitos_desc *desc, void *context,
1757                        int err)
1758 {
1759         struct ahash_request *areq = context;
1760         struct talitos_edesc *edesc =
1761                  container_of(desc, struct talitos_edesc, desc);
1762         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1763
1764         if (!req_ctx->last && req_ctx->to_hash_later) {
1765                 /* Position any partial block for next update/final/finup */
1766                 req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1767                 req_ctx->nbuf = req_ctx->to_hash_later;
1768         }
1769         common_nonsnoop_hash_unmap(dev, edesc, areq);
1770
1771         kfree(edesc);
1772
1773         areq->base.complete(&areq->base, err);
1774 }
1775
1776 /*
1777  * SEC1 doesn't like hashing a zero-sized message, so we do the padding
1778  * ourselves and submit a padded block
1779  */
1780 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1781                                struct talitos_edesc *edesc,
1782                                struct talitos_ptr *ptr)
1783 {
1784         static u8 padded_hash[64] = {
1785                 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1786                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1787                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1788                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1789         };
1790
1791         pr_err_once("Bug in SEC1, padding ourself\n");
1792         edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1793         map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1794                                (char *)padded_hash, DMA_TO_DEVICE);
1795 }
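     /*
      * The 64-byte padded_hash block above is exactly the padding of an empty
      * message for the 64-byte-block digests (MD5, SHA-1, SHA-256): a 0x80
      * byte followed by zeros and a zero length field.  Hashing it with
      * DESC_HDR_MODE0_MDEU_PAD cleared therefore yields the digest of the
      * empty message.
      */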
1796
1797 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1798                                 struct ahash_request *areq, unsigned int length,
1799                                 unsigned int offset,
1800                                 void (*callback) (struct device *dev,
1801                                                   struct talitos_desc *desc,
1802                                                   void *context, int error))
1803 {
1804         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1805         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1806         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1807         struct device *dev = ctx->dev;
1808         struct talitos_desc *desc = &edesc->desc;
1809         int ret;
1810         bool sync_needed = false;
1811         struct talitos_private *priv = dev_get_drvdata(dev);
1812         bool is_sec1 = has_ftr_sec1(priv);
1813         int sg_count;
1814
1815         /* first DWORD empty */
1816
1817         /* hash context in */
1818         if (!req_ctx->first || req_ctx->swinit) {
1819                 map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1820                                               req_ctx->hw_context_size,
1821                                               req_ctx->hw_context,
1822                                               DMA_TO_DEVICE);
1823                 req_ctx->swinit = 0;
1824         }
1825         /* Indicate next op is not the first. */
1826         req_ctx->first = 0;
1827
1828         /* HMAC key */
1829         if (ctx->keylen)
1830                 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1831                                is_sec1);
1832
1833         if (is_sec1 && req_ctx->nbuf)
1834                 length -= req_ctx->nbuf;
1835
1836         sg_count = edesc->src_nents ?: 1;
1837         if (is_sec1 && sg_count > 1)
1838                 sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
1839                                    edesc->buf + sizeof(struct talitos_desc),
1840                                    length, req_ctx->nbuf);
1841         else if (length)
1842                 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1843                                       DMA_TO_DEVICE);
1844         /*
1845          * data in
1846          */
1847         if (is_sec1 && req_ctx->nbuf) {
1848                 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1849                                        req_ctx->buf[req_ctx->buf_idx],
1850                                        DMA_TO_DEVICE);
1851         } else {
1852                 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1853                                           &desc->ptr[3], sg_count, offset, 0);
1854                 if (sg_count > 1)
1855                         sync_needed = true;
1856         }
1857
1858         /* fifth DWORD empty */
1859
1860         /* hash/HMAC out -or- hash context out */
1861         if (req_ctx->last)
1862                 map_single_talitos_ptr(dev, &desc->ptr[5],
1863                                        crypto_ahash_digestsize(tfm),
1864                                        areq->result, DMA_FROM_DEVICE);
1865         else
1866                 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1867                                               req_ctx->hw_context_size,
1868                                               req_ctx->hw_context,
1869                                               DMA_FROM_DEVICE);
1870
1871         /* last DWORD empty */
1872
1873         if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1874                 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1875
1876         if (is_sec1 && req_ctx->nbuf && length) {
1877                 struct talitos_desc *desc2 = desc + 1;
1878                 dma_addr_t next_desc;
1879
1880                 memset(desc2, 0, sizeof(*desc2));
1881                 desc2->hdr = desc->hdr;
1882                 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1883                 desc2->hdr1 = desc2->hdr;
1884                 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1885                 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1886                 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1887
1888                 if (desc->ptr[1].ptr)
1889                         copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1890                                          is_sec1);
1891                 else
1892                         map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1893                                                       req_ctx->hw_context_size,
1894                                                       req_ctx->hw_context,
1895                                                       DMA_TO_DEVICE);
1896                 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1897                 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1898                                           &desc2->ptr[3], sg_count, offset, 0);
1899                 if (sg_count > 1)
1900                         sync_needed = true;
1901                 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1902                 if (req_ctx->last)
1903                         map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1904                                                       req_ctx->hw_context_size,
1905                                                       req_ctx->hw_context,
1906                                                       DMA_FROM_DEVICE);
1907
1908                 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1909                                            DMA_BIDIRECTIONAL);
1910                 desc->next_desc = cpu_to_be32(next_desc);
1911         }
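             /*
              * In other words, on SEC1 a request that has both buffered bytes
              * (req_ctx->nbuf) and fresh scatterlist data is split across two
              * chained descriptors: desc hashes the buffered bytes, and desc2,
              * linked via next_desc, continues with the scatterlist data and
              * carries the hash/context-out pointer.
              */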
1912
1913         if (sync_needed)
1914                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1915                                            edesc->dma_len, DMA_BIDIRECTIONAL);
1916
1917         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1918         if (ret != -EINPROGRESS) {
1919                 common_nonsnoop_hash_unmap(dev, edesc, areq);
1920                 kfree(edesc);
1921         }
1922         return ret;
1923 }
1924
1925 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1926                                                unsigned int nbytes)
1927 {
1928         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1929         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1930         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1931         struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1932         bool is_sec1 = has_ftr_sec1(priv);
1933
1934         if (is_sec1)
1935                 nbytes -= req_ctx->nbuf;
1936
1937         return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1938                                    nbytes, 0, 0, 0, areq->base.flags, false);
1939 }
1940
1941 static int ahash_init(struct ahash_request *areq)
1942 {
1943         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1944         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1945         struct device *dev = ctx->dev;
1946         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1947         unsigned int size;
1948         dma_addr_t dma;
1949
1950         /* Initialize the context */
1951         req_ctx->buf_idx = 0;
1952         req_ctx->nbuf = 0;
1953         req_ctx->first = 1; /* first indicates h/w must init its context */
1954         req_ctx->swinit = 0; /* assume h/w init of context */
1955         size =  (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1956                         ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1957                         : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1958         req_ctx->hw_context_size = size;
1959
1960         dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1961                              DMA_TO_DEVICE);
1962         dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1963
1964         return 0;
1965 }
1966
1967 /*
1968  * on h/w without explicit sha224 support, we initialize h/w context
1969  * manually with sha224 constants, and tell it to run sha256.
1970  */
1971 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1972 {
1973         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1974
1975         req_ctx->hw_context[0] = SHA224_H0;
1976         req_ctx->hw_context[1] = SHA224_H1;
1977         req_ctx->hw_context[2] = SHA224_H2;
1978         req_ctx->hw_context[3] = SHA224_H3;
1979         req_ctx->hw_context[4] = SHA224_H4;
1980         req_ctx->hw_context[5] = SHA224_H5;
1981         req_ctx->hw_context[6] = SHA224_H6;
1982         req_ctx->hw_context[7] = SHA224_H7;
1983
1984         /* init 64-bit count */
1985         req_ctx->hw_context[8] = 0;
1986         req_ctx->hw_context[9] = 0;
1987
1988         ahash_init(areq);
1989         req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
1990
1991         return 0;
1992 }
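     /*
      * This works because SHA-224 is SHA-256 with different initial hash
      * values and a truncated output: loading SHA224_H0..H7 by hand and
      * running the MDEU in SHA-256 mode produces a correct SHA-224 state, and
      * the 28-byte digest size truncates the final result.
      */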
1993
1994 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1995 {
1996         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1997         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1998         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1999         struct talitos_edesc *edesc;
2000         unsigned int blocksize =
2001                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2002         unsigned int nbytes_to_hash;
2003         unsigned int to_hash_later;
2004         unsigned int nsg;
2005         int nents;
2006         struct device *dev = ctx->dev;
2007         struct talitos_private *priv = dev_get_drvdata(dev);
2008         bool is_sec1 = has_ftr_sec1(priv);
2009         int offset = 0;
2010         u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
2011
2012         if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
2013                 /* Buffer up to one whole block */
2014                 nents = sg_nents_for_len(areq->src, nbytes);
2015                 if (nents < 0) {
2016                         dev_err(ctx->dev, "Invalid number of src SG.\n");
2017                         return nents;
2018                 }
2019                 sg_copy_to_buffer(areq->src, nents,
2020                                   ctx_buf + req_ctx->nbuf, nbytes);
2021                 req_ctx->nbuf += nbytes;
2022                 return 0;
2023         }
2024
2025         /* More than one block is available, or this is the last request */
2026         nbytes_to_hash = nbytes + req_ctx->nbuf;
2027         to_hash_later = nbytes_to_hash & (blocksize - 1);
2028
2029         if (req_ctx->last)
2030                 to_hash_later = 0;
2031         else if (to_hash_later)
2032                 /* There is a partial block. Hash the full block(s) now */
2033                 nbytes_to_hash -= to_hash_later;
2034         else {
2035                 /* Keep one block buffered */
2036                 nbytes_to_hash -= blocksize;
2037                 to_hash_later = blocksize;
2038         }
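             /*
              * For example, with blocksize = 64, 10 bytes already buffered and
              * a 100-byte update: nbytes_to_hash = 110 and
              * to_hash_later = 110 & 63 = 46, so 64 bytes are hashed now and
              * 46 are carried over; were the total an exact multiple of the
              * block size, the "Keep one block buffered" branch above would
              * hold a full block back for the next operation instead.
              */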
2039
2040         /* Chain in any previously buffered data */
2041         if (!is_sec1 && req_ctx->nbuf) {
2042                 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2043                 sg_init_table(req_ctx->bufsl, nsg);
2044                 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2045                 if (nsg > 1)
2046                         sg_chain(req_ctx->bufsl, 2, areq->src);
2047                 req_ctx->psrc = req_ctx->bufsl;
2048         } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2049                 if (nbytes_to_hash > blocksize)
2050                         offset = blocksize - req_ctx->nbuf;
2051                 else
2052                         offset = nbytes_to_hash - req_ctx->nbuf;
2053                 nents = sg_nents_for_len(areq->src, offset);
2054                 if (nents < 0) {
2055                         dev_err(ctx->dev, "Invalid number of src SG.\n");
2056                         return nents;
2057                 }
2058                 sg_copy_to_buffer(areq->src, nents,
2059                                   ctx_buf + req_ctx->nbuf, offset);
2060                 req_ctx->nbuf += offset;
2061                 req_ctx->psrc = areq->src;
2062         } else
2063                 req_ctx->psrc = areq->src;
2064
2065         if (to_hash_later) {
2066                 nents = sg_nents_for_len(areq->src, nbytes);
2067                 if (nents < 0) {
2068                         dev_err(ctx->dev, "Invalid number of src SG.\n");
2069                         return nents;
2070                 }
2071                 sg_pcopy_to_buffer(areq->src, nents,
2072                                    req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2073                                    to_hash_later,
2074                                    nbytes - to_hash_later);
2075         }
2076         req_ctx->to_hash_later = to_hash_later;
2077
2078         /* Allocate extended descriptor */
2079         edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2080         if (IS_ERR(edesc))
2081                 return PTR_ERR(edesc);
2082
2083         edesc->desc.hdr = ctx->desc_hdr_template;
2084
2085         /* On last one, request SEC to pad; otherwise continue */
2086         if (req_ctx->last)
2087                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2088         else
2089                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2090
2091         /* request SEC to INIT hash. */
2092         if (req_ctx->first && !req_ctx->swinit)
2093                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2094
2095         /* When the tfm context has a keylen, it's an HMAC.
2096          * A first or last (i.e. not middle) descriptor must request HMAC.
2097          */
2098         if (ctx->keylen && (req_ctx->first || req_ctx->last))
2099                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2100
2101         return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
2102                                     ahash_done);
2103 }
2104
2105 static int ahash_update(struct ahash_request *areq)
2106 {
2107         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2108
2109         req_ctx->last = 0;
2110
2111         return ahash_process_req(areq, areq->nbytes);
2112 }
2113
2114 static int ahash_final(struct ahash_request *areq)
2115 {
2116         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2117
2118         req_ctx->last = 1;
2119
2120         return ahash_process_req(areq, 0);
2121 }
2122
2123 static int ahash_finup(struct ahash_request *areq)
2124 {
2125         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2126
2127         req_ctx->last = 1;
2128
2129         return ahash_process_req(areq, areq->nbytes);
2130 }
2131
2132 static int ahash_digest(struct ahash_request *areq)
2133 {
2134         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2135         struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2136
2137         ahash->init(areq);
2138         req_ctx->last = 1;
2139
2140         return ahash_process_req(areq, areq->nbytes);
2141 }
2142
2143 static int ahash_export(struct ahash_request *areq, void *out)
2144 {
2145         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2146         struct talitos_export_state *export = out;
2147         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2148         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2149         struct device *dev = ctx->dev;
2150         dma_addr_t dma;
2151
2152         dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2153                              DMA_FROM_DEVICE);
2154         dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2155
2156         memcpy(export->hw_context, req_ctx->hw_context,
2157                req_ctx->hw_context_size);
2158         memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2159         export->swinit = req_ctx->swinit;
2160         export->first = req_ctx->first;
2161         export->last = req_ctx->last;
2162         export->to_hash_later = req_ctx->to_hash_later;
2163         export->nbuf = req_ctx->nbuf;
2164
2165         return 0;
2166 }
2167
2168 static int ahash_import(struct ahash_request *areq, const void *in)
2169 {
2170         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2171         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2172         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2173         struct device *dev = ctx->dev;
2174         const struct talitos_export_state *export = in;
2175         unsigned int size;
2176         dma_addr_t dma;
2177
2178         memset(req_ctx, 0, sizeof(*req_ctx));
2179         size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2180                         ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2181                         : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2182         req_ctx->hw_context_size = size;
2183         memcpy(req_ctx->hw_context, export->hw_context, size);
2184         memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2185         req_ctx->swinit = export->swinit;
2186         req_ctx->first = export->first;
2187         req_ctx->last = export->last;
2188         req_ctx->to_hash_later = export->to_hash_later;
2189         req_ctx->nbuf = export->nbuf;
2190
2191         dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2192                              DMA_TO_DEVICE);
2193         dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2194
2195         return 0;
2196 }
2197
2198 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2199                    u8 *hash)
2200 {
2201         struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2202
2203         struct scatterlist sg[1];
2204         struct ahash_request *req;
2205         struct crypto_wait wait;
2206         int ret;
2207
2208         crypto_init_wait(&wait);
2209
2210         req = ahash_request_alloc(tfm, GFP_KERNEL);
2211         if (!req)
2212                 return -ENOMEM;
2213
2214         /* Keep tfm keylen == 0 during hash of the long key */
2215         ctx->keylen = 0;
2216         ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2217                                    crypto_req_done, &wait);
2218
2219         sg_init_one(&sg[0], key, keylen);
2220
2221         ahash_request_set_crypt(req, sg, hash, keylen);
2222         ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2223
2224         ahash_request_free(req);
2225
2226         return ret;
2227 }
2228
2229 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2230                         unsigned int keylen)
2231 {
2232         struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2233         struct device *dev = ctx->dev;
2234         unsigned int blocksize =
2235                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2236         unsigned int digestsize = crypto_ahash_digestsize(tfm);
2237         unsigned int keysize = keylen;
2238         u8 hash[SHA512_DIGEST_SIZE];
2239         int ret;
2240
2241         if (keylen <= blocksize)
2242                 memcpy(ctx->key, key, keysize);
2243         else {
2244                 /* Must get the hash of the long key */
2245                 ret = keyhash(tfm, key, keylen, hash);
2246
2247                 if (ret) {
2248                         crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2249                         return -EINVAL;
2250                 }
2251
2252                 keysize = digestsize;
2253                 memcpy(ctx->key, hash, digestsize);
2254         }
2255
2256         if (ctx->keylen)
2257                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2258
2259         ctx->keylen = keysize;
2260         ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2261
2262         return 0;
2263 }
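     /*
      * As in RFC 2104, an HMAC key longer than the digest's block size is
      * first hashed and its digest used as the key, so ctx->key never needs
      * to hold more than one block's worth of key material.
      */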
2264
2265
2266 struct talitos_alg_template {
2267         u32 type;
2268         u32 priority;
2269         union {
2270                 struct crypto_alg crypto;
2271                 struct ahash_alg hash;
2272                 struct aead_alg aead;
2273         } alg;
2274         __be32 desc_hdr_template;
2275 };
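     /*
      * desc_hdr_template preselects the execution units and modes for each
      * algorithm.  The first AEAD entry below, for instance, combines the
      * single-pass IPSEC_ESP descriptor type with the AESU in CBC mode and
      * MDEU-A performing an initialized, padded SHA-1 HMAC.
      */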
2276
2277 static struct talitos_alg_template driver_algs[] = {
2278         /* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2279         {       .type = CRYPTO_ALG_TYPE_AEAD,
2280                 .alg.aead = {
2281                         .base = {
2282                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2283                                 .cra_driver_name = "authenc-hmac-sha1-"
2284                                                    "cbc-aes-talitos",
2285                                 .cra_blocksize = AES_BLOCK_SIZE,
2286                                 .cra_flags = CRYPTO_ALG_ASYNC,
2287                         },
2288                         .ivsize = AES_BLOCK_SIZE,
2289                         .maxauthsize = SHA1_DIGEST_SIZE,
2290                 },
2291                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2292                                      DESC_HDR_SEL0_AESU |
2293                                      DESC_HDR_MODE0_AESU_CBC |
2294                                      DESC_HDR_SEL1_MDEUA |
2295                                      DESC_HDR_MODE1_MDEU_INIT |
2296                                      DESC_HDR_MODE1_MDEU_PAD |
2297                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2298         },
2299         {       .type = CRYPTO_ALG_TYPE_AEAD,
2300                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2301                 .alg.aead = {
2302                         .base = {
2303                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2304                                 .cra_driver_name = "authenc-hmac-sha1-"
2305                                                    "cbc-aes-talitos-hsna",
2306                                 .cra_blocksize = AES_BLOCK_SIZE,
2307                                 .cra_flags = CRYPTO_ALG_ASYNC,
2308                         },
2309                         .ivsize = AES_BLOCK_SIZE,
2310                         .maxauthsize = SHA1_DIGEST_SIZE,
2311                 },
2312                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2313                                      DESC_HDR_SEL0_AESU |
2314                                      DESC_HDR_MODE0_AESU_CBC |
2315                                      DESC_HDR_SEL1_MDEUA |
2316                                      DESC_HDR_MODE1_MDEU_INIT |
2317                                      DESC_HDR_MODE1_MDEU_PAD |
2318                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2319         },
2320         {       .type = CRYPTO_ALG_TYPE_AEAD,
2321                 .alg.aead = {
2322                         .base = {
2323                                 .cra_name = "authenc(hmac(sha1),"
2324                                             "cbc(des3_ede))",
2325                                 .cra_driver_name = "authenc-hmac-sha1-"
2326                                                    "cbc-3des-talitos",
2327                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2328                                 .cra_flags = CRYPTO_ALG_ASYNC,
2329                         },
2330                         .ivsize = DES3_EDE_BLOCK_SIZE,
2331                         .maxauthsize = SHA1_DIGEST_SIZE,
2332                         .setkey = aead_des3_setkey,
2333                 },
2334                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2335                                      DESC_HDR_SEL0_DEU |
2336                                      DESC_HDR_MODE0_DEU_CBC |
2337                                      DESC_HDR_MODE0_DEU_3DES |
2338                                      DESC_HDR_SEL1_MDEUA |
2339                                      DESC_HDR_MODE1_MDEU_INIT |
2340                                      DESC_HDR_MODE1_MDEU_PAD |
2341                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2342         },
2343         {       .type = CRYPTO_ALG_TYPE_AEAD,
2344                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2345                 .alg.aead = {
2346                         .base = {
2347                                 .cra_name = "authenc(hmac(sha1),"
2348                                             "cbc(des3_ede))",
2349                                 .cra_driver_name = "authenc-hmac-sha1-"
2350                                                    "cbc-3des-talitos-hsna",
2351                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2352                                 .cra_flags = CRYPTO_ALG_ASYNC,
2353                         },
2354                         .ivsize = DES3_EDE_BLOCK_SIZE,
2355                         .maxauthsize = SHA1_DIGEST_SIZE,
2356                         .setkey = aead_des3_setkey,
2357                 },
2358                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2359                                      DESC_HDR_SEL0_DEU |
2360                                      DESC_HDR_MODE0_DEU_CBC |
2361                                      DESC_HDR_MODE0_DEU_3DES |
2362                                      DESC_HDR_SEL1_MDEUA |
2363                                      DESC_HDR_MODE1_MDEU_INIT |
2364                                      DESC_HDR_MODE1_MDEU_PAD |
2365                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2366         },
2367         {       .type = CRYPTO_ALG_TYPE_AEAD,
2368                 .alg.aead = {
2369                         .base = {
2370                                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2371                                 .cra_driver_name = "authenc-hmac-sha224-"
2372                                                    "cbc-aes-talitos",
2373                                 .cra_blocksize = AES_BLOCK_SIZE,
2374                                 .cra_flags = CRYPTO_ALG_ASYNC,
2375                         },
2376                         .ivsize = AES_BLOCK_SIZE,
2377                         .maxauthsize = SHA224_DIGEST_SIZE,
2378                 },
2379                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2380                                      DESC_HDR_SEL0_AESU |
2381                                      DESC_HDR_MODE0_AESU_CBC |
2382                                      DESC_HDR_SEL1_MDEUA |
2383                                      DESC_HDR_MODE1_MDEU_INIT |
2384                                      DESC_HDR_MODE1_MDEU_PAD |
2385                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2386         },
2387         {       .type = CRYPTO_ALG_TYPE_AEAD,
2388                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2389                 .alg.aead = {
2390                         .base = {
2391                                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2392                                 .cra_driver_name = "authenc-hmac-sha224-"
2393                                                    "cbc-aes-talitos-hsna",
2394                                 .cra_blocksize = AES_BLOCK_SIZE,
2395                                 .cra_flags = CRYPTO_ALG_ASYNC,
2396                         },
2397                         .ivsize = AES_BLOCK_SIZE,
2398                         .maxauthsize = SHA224_DIGEST_SIZE,
2399                 },
2400                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2401                                      DESC_HDR_SEL0_AESU |
2402                                      DESC_HDR_MODE0_AESU_CBC |
2403                                      DESC_HDR_SEL1_MDEUA |
2404                                      DESC_HDR_MODE1_MDEU_INIT |
2405                                      DESC_HDR_MODE1_MDEU_PAD |
2406                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2407         },
2408         {       .type = CRYPTO_ALG_TYPE_AEAD,
2409                 .alg.aead = {
2410                         .base = {
2411                                 .cra_name = "authenc(hmac(sha224),"
2412                                             "cbc(des3_ede))",
2413                                 .cra_driver_name = "authenc-hmac-sha224-"
2414                                                    "cbc-3des-talitos",
2415                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2416                                 .cra_flags = CRYPTO_ALG_ASYNC,
2417                         },
2418                         .ivsize = DES3_EDE_BLOCK_SIZE,
2419                         .maxauthsize = SHA224_DIGEST_SIZE,
2420                         .setkey = aead_des3_setkey,
2421                 },
2422                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2423                                      DESC_HDR_SEL0_DEU |
2424                                      DESC_HDR_MODE0_DEU_CBC |
2425                                      DESC_HDR_MODE0_DEU_3DES |
2426                                      DESC_HDR_SEL1_MDEUA |
2427                                      DESC_HDR_MODE1_MDEU_INIT |
2428                                      DESC_HDR_MODE1_MDEU_PAD |
2429                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2430         },
2431         {       .type = CRYPTO_ALG_TYPE_AEAD,
2432                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2433                 .alg.aead = {
2434                         .base = {
2435                                 .cra_name = "authenc(hmac(sha224),"
2436                                             "cbc(des3_ede))",
2437                                 .cra_driver_name = "authenc-hmac-sha224-"
2438                                                    "cbc-3des-talitos-hsna",
2439                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2440                                 .cra_flags = CRYPTO_ALG_ASYNC,
2441                         },
2442                         .ivsize = DES3_EDE_BLOCK_SIZE,
2443                         .maxauthsize = SHA224_DIGEST_SIZE,
2444                         .setkey = aead_des3_setkey,
2445                 },
2446                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2447                                      DESC_HDR_SEL0_DEU |
2448                                      DESC_HDR_MODE0_DEU_CBC |
2449                                      DESC_HDR_MODE0_DEU_3DES |
2450                                      DESC_HDR_SEL1_MDEUA |
2451                                      DESC_HDR_MODE1_MDEU_INIT |
2452                                      DESC_HDR_MODE1_MDEU_PAD |
2453                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2454         },
2455         {       .type = CRYPTO_ALG_TYPE_AEAD,
2456                 .alg.aead = {
2457                         .base = {
2458                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2459                                 .cra_driver_name = "authenc-hmac-sha256-"
2460                                                    "cbc-aes-talitos",
2461                                 .cra_blocksize = AES_BLOCK_SIZE,
2462                                 .cra_flags = CRYPTO_ALG_ASYNC,
2463                         },
2464                         .ivsize = AES_BLOCK_SIZE,
2465                         .maxauthsize = SHA256_DIGEST_SIZE,
2466                 },
2467                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2468                                      DESC_HDR_SEL0_AESU |
2469                                      DESC_HDR_MODE0_AESU_CBC |
2470                                      DESC_HDR_SEL1_MDEUA |
2471                                      DESC_HDR_MODE1_MDEU_INIT |
2472                                      DESC_HDR_MODE1_MDEU_PAD |
2473                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2474         },
2475         {       .type = CRYPTO_ALG_TYPE_AEAD,
2476                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2477                 .alg.aead = {
2478                         .base = {
2479                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2480                                 .cra_driver_name = "authenc-hmac-sha256-"
2481                                                    "cbc-aes-talitos-hsna",
2482                                 .cra_blocksize = AES_BLOCK_SIZE,
2483                                 .cra_flags = CRYPTO_ALG_ASYNC,
2484                         },
2485                         .ivsize = AES_BLOCK_SIZE,
2486                         .maxauthsize = SHA256_DIGEST_SIZE,
2487                 },
2488                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2489                                      DESC_HDR_SEL0_AESU |
2490                                      DESC_HDR_MODE0_AESU_CBC |
2491                                      DESC_HDR_SEL1_MDEUA |
2492                                      DESC_HDR_MODE1_MDEU_INIT |
2493                                      DESC_HDR_MODE1_MDEU_PAD |
2494                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2495         },
2496         {       .type = CRYPTO_ALG_TYPE_AEAD,
2497                 .alg.aead = {
2498                         .base = {
2499                                 .cra_name = "authenc(hmac(sha256),"
2500                                             "cbc(des3_ede))",
2501                                 .cra_driver_name = "authenc-hmac-sha256-"
2502                                                    "cbc-3des-talitos",
2503                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2504                                 .cra_flags = CRYPTO_ALG_ASYNC,
2505                         },
2506                         .ivsize = DES3_EDE_BLOCK_SIZE,
2507                         .maxauthsize = SHA256_DIGEST_SIZE,
2508                         .setkey = aead_des3_setkey,
2509                 },
2510                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2511                                      DESC_HDR_SEL0_DEU |
2512                                      DESC_HDR_MODE0_DEU_CBC |
2513                                      DESC_HDR_MODE0_DEU_3DES |
2514                                      DESC_HDR_SEL1_MDEUA |
2515                                      DESC_HDR_MODE1_MDEU_INIT |
2516                                      DESC_HDR_MODE1_MDEU_PAD |
2517                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2518         },
2519         {       .type = CRYPTO_ALG_TYPE_AEAD,
2520                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2521                 .alg.aead = {
2522                         .base = {
2523                                 .cra_name = "authenc(hmac(sha256),"
2524                                             "cbc(des3_ede))",
2525                                 .cra_driver_name = "authenc-hmac-sha256-"
2526                                                    "cbc-3des-talitos-hsna",
2527                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2528                                 .cra_flags = CRYPTO_ALG_ASYNC,
2529                         },
2530                         .ivsize = DES3_EDE_BLOCK_SIZE,
2531                         .maxauthsize = SHA256_DIGEST_SIZE,
2532                         .setkey = aead_des3_setkey,
2533                 },
2534                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2535                                      DESC_HDR_SEL0_DEU |
2536                                      DESC_HDR_MODE0_DEU_CBC |
2537                                      DESC_HDR_MODE0_DEU_3DES |
2538                                      DESC_HDR_SEL1_MDEUA |
2539                                      DESC_HDR_MODE1_MDEU_INIT |
2540                                      DESC_HDR_MODE1_MDEU_PAD |
2541                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2542         },
2543         {       .type = CRYPTO_ALG_TYPE_AEAD,
2544                 .alg.aead = {
2545                         .base = {
2546                                 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2547                                 .cra_driver_name = "authenc-hmac-sha384-"
2548                                                    "cbc-aes-talitos",
2549                                 .cra_blocksize = AES_BLOCK_SIZE,
2550                                 .cra_flags = CRYPTO_ALG_ASYNC,
2551                         },
2552                         .ivsize = AES_BLOCK_SIZE,
2553                         .maxauthsize = SHA384_DIGEST_SIZE,
2554                 },
2555                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2556                                      DESC_HDR_SEL0_AESU |
2557                                      DESC_HDR_MODE0_AESU_CBC |
2558                                      DESC_HDR_SEL1_MDEUB |
2559                                      DESC_HDR_MODE1_MDEU_INIT |
2560                                      DESC_HDR_MODE1_MDEU_PAD |
2561                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2562         },
2563         {       .type = CRYPTO_ALG_TYPE_AEAD,
2564                 .alg.aead = {
2565                         .base = {
2566                                 .cra_name = "authenc(hmac(sha384),"
2567                                             "cbc(des3_ede))",
2568                                 .cra_driver_name = "authenc-hmac-sha384-"
2569                                                    "cbc-3des-talitos",
2570                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2571                                 .cra_flags = CRYPTO_ALG_ASYNC,
2572                         },
2573                         .ivsize = DES3_EDE_BLOCK_SIZE,
2574                         .maxauthsize = SHA384_DIGEST_SIZE,
2575                         .setkey = aead_des3_setkey,
2576                 },
2577                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2578                                      DESC_HDR_SEL0_DEU |
2579                                      DESC_HDR_MODE0_DEU_CBC |
2580                                      DESC_HDR_MODE0_DEU_3DES |
2581                                      DESC_HDR_SEL1_MDEUB |
2582                                      DESC_HDR_MODE1_MDEU_INIT |
2583                                      DESC_HDR_MODE1_MDEU_PAD |
2584                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2585         },
2586         {       .type = CRYPTO_ALG_TYPE_AEAD,
2587                 .alg.aead = {
2588                         .base = {
2589                                 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2590                                 .cra_driver_name = "authenc-hmac-sha512-"
2591                                                    "cbc-aes-talitos",
2592                                 .cra_blocksize = AES_BLOCK_SIZE,
2593                                 .cra_flags = CRYPTO_ALG_ASYNC,
2594                         },
2595                         .ivsize = AES_BLOCK_SIZE,
2596                         .maxauthsize = SHA512_DIGEST_SIZE,
2597                 },
2598                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2599                                      DESC_HDR_SEL0_AESU |
2600                                      DESC_HDR_MODE0_AESU_CBC |
2601                                      DESC_HDR_SEL1_MDEUB |
2602                                      DESC_HDR_MODE1_MDEU_INIT |
2603                                      DESC_HDR_MODE1_MDEU_PAD |
2604                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2605         },
2606         {       .type = CRYPTO_ALG_TYPE_AEAD,
2607                 .alg.aead = {
2608                         .base = {
2609                                 .cra_name = "authenc(hmac(sha512),"
2610                                             "cbc(des3_ede))",
2611                                 .cra_driver_name = "authenc-hmac-sha512-"
2612                                                    "cbc-3des-talitos",
2613                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2614                                 .cra_flags = CRYPTO_ALG_ASYNC,
2615                         },
2616                         .ivsize = DES3_EDE_BLOCK_SIZE,
2617                         .maxauthsize = SHA512_DIGEST_SIZE,
2618                         .setkey = aead_des3_setkey,
2619                 },
2620                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2621                                      DESC_HDR_SEL0_DEU |
2622                                      DESC_HDR_MODE0_DEU_CBC |
2623                                      DESC_HDR_MODE0_DEU_3DES |
2624                                      DESC_HDR_SEL1_MDEUB |
2625                                      DESC_HDR_MODE1_MDEU_INIT |
2626                                      DESC_HDR_MODE1_MDEU_PAD |
2627                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2628         },
2629         {       .type = CRYPTO_ALG_TYPE_AEAD,
2630                 .alg.aead = {
2631                         .base = {
2632                                 .cra_name = "authenc(hmac(md5),cbc(aes))",
2633                                 .cra_driver_name = "authenc-hmac-md5-"
2634                                                    "cbc-aes-talitos",
2635                                 .cra_blocksize = AES_BLOCK_SIZE,
2636                                 .cra_flags = CRYPTO_ALG_ASYNC,
2637                         },
2638                         .ivsize = AES_BLOCK_SIZE,
2639                         .maxauthsize = MD5_DIGEST_SIZE,
2640                 },
2641                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2642                                      DESC_HDR_SEL0_AESU |
2643                                      DESC_HDR_MODE0_AESU_CBC |
2644                                      DESC_HDR_SEL1_MDEUA |
2645                                      DESC_HDR_MODE1_MDEU_INIT |
2646                                      DESC_HDR_MODE1_MDEU_PAD |
2647                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2648         },
2649         {       .type = CRYPTO_ALG_TYPE_AEAD,
2650                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2651                 .alg.aead = {
2652                         .base = {
2653                                 .cra_name = "authenc(hmac(md5),cbc(aes))",
2654                                 .cra_driver_name = "authenc-hmac-md5-"
2655                                                    "cbc-aes-talitos-hsna",
2656                                 .cra_blocksize = AES_BLOCK_SIZE,
2657                                 .cra_flags = CRYPTO_ALG_ASYNC,
2658                         },
2659                         .ivsize = AES_BLOCK_SIZE,
2660                         .maxauthsize = MD5_DIGEST_SIZE,
2661                 },
2662                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2663                                      DESC_HDR_SEL0_AESU |
2664                                      DESC_HDR_MODE0_AESU_CBC |
2665                                      DESC_HDR_SEL1_MDEUA |
2666                                      DESC_HDR_MODE1_MDEU_INIT |
2667                                      DESC_HDR_MODE1_MDEU_PAD |
2668                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2669         },
2670         {       .type = CRYPTO_ALG_TYPE_AEAD,
2671                 .alg.aead = {
2672                         .base = {
2673                                 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2674                                 .cra_driver_name = "authenc-hmac-md5-"
2675                                                    "cbc-3des-talitos",
2676                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2677                                 .cra_flags = CRYPTO_ALG_ASYNC,
2678                         },
2679                         .ivsize = DES3_EDE_BLOCK_SIZE,
2680                         .maxauthsize = MD5_DIGEST_SIZE,
2681                         .setkey = aead_des3_setkey,
2682                 },
2683                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2684                                      DESC_HDR_SEL0_DEU |
2685                                      DESC_HDR_MODE0_DEU_CBC |
2686                                      DESC_HDR_MODE0_DEU_3DES |
2687                                      DESC_HDR_SEL1_MDEUA |
2688                                      DESC_HDR_MODE1_MDEU_INIT |
2689                                      DESC_HDR_MODE1_MDEU_PAD |
2690                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2691         },
2692         {       .type = CRYPTO_ALG_TYPE_AEAD,
2693                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2694                 .alg.aead = {
2695                         .base = {
2696                                 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2697                                 .cra_driver_name = "authenc-hmac-md5-"
2698                                                    "cbc-3des-talitos-hsna",
2699                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2700                                 .cra_flags = CRYPTO_ALG_ASYNC,
2701                         },
2702                         .ivsize = DES3_EDE_BLOCK_SIZE,
2703                         .maxauthsize = MD5_DIGEST_SIZE,
2704                         .setkey = aead_des3_setkey,
2705                 },
2706                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2707                                      DESC_HDR_SEL0_DEU |
2708                                      DESC_HDR_MODE0_DEU_CBC |
2709                                      DESC_HDR_MODE0_DEU_3DES |
2710                                      DESC_HDR_SEL1_MDEUA |
2711                                      DESC_HDR_MODE1_MDEU_INIT |
2712                                      DESC_HDR_MODE1_MDEU_PAD |
2713                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2714         },
2715         /* ABLKCIPHER algorithms. */
2716         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2717                 .alg.crypto = {
2718                         .cra_name = "ecb(aes)",
2719                         .cra_driver_name = "ecb-aes-talitos",
2720                         .cra_blocksize = AES_BLOCK_SIZE,
2721                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2722                                      CRYPTO_ALG_ASYNC,
2723                         .cra_ablkcipher = {
2724                                 .min_keysize = AES_MIN_KEY_SIZE,
2725                                 .max_keysize = AES_MAX_KEY_SIZE,
2726                                 .setkey = ablkcipher_aes_setkey,
2727                         }
2728                 },
2729                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2730                                      DESC_HDR_SEL0_AESU,
2731         },
2732         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2733                 .alg.crypto = {
2734                         .cra_name = "cbc(aes)",
2735                         .cra_driver_name = "cbc-aes-talitos",
2736                         .cra_blocksize = AES_BLOCK_SIZE,
2737                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2738                                      CRYPTO_ALG_ASYNC,
2739                         .cra_ablkcipher = {
2740                                 .min_keysize = AES_MIN_KEY_SIZE,
2741                                 .max_keysize = AES_MAX_KEY_SIZE,
2742                                 .ivsize = AES_BLOCK_SIZE,
2743                                 .setkey = ablkcipher_aes_setkey,
2744                         }
2745                 },
2746                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2747                                      DESC_HDR_SEL0_AESU |
2748                                      DESC_HDR_MODE0_AESU_CBC,
2749         },
2750         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2751                 .alg.crypto = {
2752                         .cra_name = "ctr(aes)",
2753                         .cra_driver_name = "ctr-aes-talitos",
2754                         .cra_blocksize = 1,
2755                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2756                                      CRYPTO_ALG_ASYNC,
2757                         .cra_ablkcipher = {
2758                                 .min_keysize = AES_MIN_KEY_SIZE,
2759                                 .max_keysize = AES_MAX_KEY_SIZE,
2760                                 .ivsize = AES_BLOCK_SIZE,
2761                                 .setkey = ablkcipher_aes_setkey,
2762                         }
2763                 },
2764                 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2765                                      DESC_HDR_SEL0_AESU |
2766                                      DESC_HDR_MODE0_AESU_CTR,
2767         },
2768         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2769                 .alg.crypto = {
2770                         .cra_name = "ecb(des)",
2771                         .cra_driver_name = "ecb-des-talitos",
2772                         .cra_blocksize = DES_BLOCK_SIZE,
2773                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2774                                      CRYPTO_ALG_ASYNC,
2775                         .cra_ablkcipher = {
2776                                 .min_keysize = DES_KEY_SIZE,
2777                                 .max_keysize = DES_KEY_SIZE,
2778                                 .setkey = ablkcipher_des_setkey,
2779                         }
2780                 },
2781                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2782                                      DESC_HDR_SEL0_DEU,
2783         },
2784         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2785                 .alg.crypto = {
2786                         .cra_name = "cbc(des)",
2787                         .cra_driver_name = "cbc-des-talitos",
2788                         .cra_blocksize = DES_BLOCK_SIZE,
2789                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2790                                      CRYPTO_ALG_ASYNC,
2791                         .cra_ablkcipher = {
2792                                 .min_keysize = DES_KEY_SIZE,
2793                                 .max_keysize = DES_KEY_SIZE,
2794                                 .ivsize = DES_BLOCK_SIZE,
2795                                 .setkey = ablkcipher_des_setkey,
2796                         }
2797                 },
2798                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2799                                      DESC_HDR_SEL0_DEU |
2800                                      DESC_HDR_MODE0_DEU_CBC,
2801         },
2802         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2803                 .alg.crypto = {
2804                         .cra_name = "ecb(des3_ede)",
2805                         .cra_driver_name = "ecb-3des-talitos",
2806                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2807                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2808                                      CRYPTO_ALG_ASYNC,
2809                         .cra_ablkcipher = {
2810                                 .min_keysize = DES3_EDE_KEY_SIZE,
2811                                 .max_keysize = DES3_EDE_KEY_SIZE,
2812                                 .setkey = ablkcipher_des3_setkey,
2813                         }
2814                 },
2815                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2816                                      DESC_HDR_SEL0_DEU |
2817                                      DESC_HDR_MODE0_DEU_3DES,
2818         },
2819         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2820                 .alg.crypto = {
2821                         .cra_name = "cbc(des3_ede)",
2822                         .cra_driver_name = "cbc-3des-talitos",
2823                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2824                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2825                                      CRYPTO_ALG_ASYNC,
2826                         .cra_ablkcipher = {
2827                                 .min_keysize = DES3_EDE_KEY_SIZE,
2828                                 .max_keysize = DES3_EDE_KEY_SIZE,
2829                                 .ivsize = DES3_EDE_BLOCK_SIZE,
2830                                 .setkey = ablkcipher_des3_setkey,
2831                         }
2832                 },
2833                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2834                                      DESC_HDR_SEL0_DEU |
2835                                      DESC_HDR_MODE0_DEU_CBC |
2836                                      DESC_HDR_MODE0_DEU_3DES,
2837         },
2838         /* AHASH algorithms. */
2839         {       .type = CRYPTO_ALG_TYPE_AHASH,
2840                 .alg.hash = {
2841                         .halg.digestsize = MD5_DIGEST_SIZE,
2842                         .halg.statesize = sizeof(struct talitos_export_state),
2843                         .halg.base = {
2844                                 .cra_name = "md5",
2845                                 .cra_driver_name = "md5-talitos",
2846                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2847                                 .cra_flags = CRYPTO_ALG_ASYNC,
2848                         }
2849                 },
2850                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2851                                      DESC_HDR_SEL0_MDEUA |
2852                                      DESC_HDR_MODE0_MDEU_MD5,
2853         },
2854         {       .type = CRYPTO_ALG_TYPE_AHASH,
2855                 .alg.hash = {
2856                         .halg.digestsize = SHA1_DIGEST_SIZE,
2857                         .halg.statesize = sizeof(struct talitos_export_state),
2858                         .halg.base = {
2859                                 .cra_name = "sha1",
2860                                 .cra_driver_name = "sha1-talitos",
2861                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2862                                 .cra_flags = CRYPTO_ALG_ASYNC,
2863                         }
2864                 },
2865                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2866                                      DESC_HDR_SEL0_MDEUA |
2867                                      DESC_HDR_MODE0_MDEU_SHA1,
2868         },
2869         {       .type = CRYPTO_ALG_TYPE_AHASH,
2870                 .alg.hash = {
2871                         .halg.digestsize = SHA224_DIGEST_SIZE,
2872                         .halg.statesize = sizeof(struct talitos_export_state),
2873                         .halg.base = {
2874                                 .cra_name = "sha224",
2875                                 .cra_driver_name = "sha224-talitos",
2876                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2877                                 .cra_flags = CRYPTO_ALG_ASYNC,
2878                         }
2879                 },
2880                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2881                                      DESC_HDR_SEL0_MDEUA |
2882                                      DESC_HDR_MODE0_MDEU_SHA224,
2883         },
2884         {       .type = CRYPTO_ALG_TYPE_AHASH,
2885                 .alg.hash = {
2886                         .halg.digestsize = SHA256_DIGEST_SIZE,
2887                         .halg.statesize = sizeof(struct talitos_export_state),
2888                         .halg.base = {
2889                                 .cra_name = "sha256",
2890                                 .cra_driver_name = "sha256-talitos",
2891                                 .cra_blocksize = SHA256_BLOCK_SIZE,
2892                                 .cra_flags = CRYPTO_ALG_ASYNC,
2893                         }
2894                 },
2895                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2896                                      DESC_HDR_SEL0_MDEUA |
2897                                      DESC_HDR_MODE0_MDEU_SHA256,
2898         },
2899         {       .type = CRYPTO_ALG_TYPE_AHASH,
2900                 .alg.hash = {
2901                         .halg.digestsize = SHA384_DIGEST_SIZE,
2902                         .halg.statesize = sizeof(struct talitos_export_state),
2903                         .halg.base = {
2904                                 .cra_name = "sha384",
2905                                 .cra_driver_name = "sha384-talitos",
2906                                 .cra_blocksize = SHA384_BLOCK_SIZE,
2907                                 .cra_flags = CRYPTO_ALG_ASYNC,
2908                         }
2909                 },
2910                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2911                                      DESC_HDR_SEL0_MDEUB |
2912                                      DESC_HDR_MODE0_MDEUB_SHA384,
2913         },
2914         {       .type = CRYPTO_ALG_TYPE_AHASH,
2915                 .alg.hash = {
2916                         .halg.digestsize = SHA512_DIGEST_SIZE,
2917                         .halg.statesize = sizeof(struct talitos_export_state),
2918                         .halg.base = {
2919                                 .cra_name = "sha512",
2920                                 .cra_driver_name = "sha512-talitos",
2921                                 .cra_blocksize = SHA512_BLOCK_SIZE,
2922                                 .cra_flags = CRYPTO_ALG_ASYNC,
2923                         }
2924                 },
2925                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2926                                      DESC_HDR_SEL0_MDEUB |
2927                                      DESC_HDR_MODE0_MDEUB_SHA512,
2928         },
2929         {       .type = CRYPTO_ALG_TYPE_AHASH,
2930                 .alg.hash = {
2931                         .halg.digestsize = MD5_DIGEST_SIZE,
2932                         .halg.statesize = sizeof(struct talitos_export_state),
2933                         .halg.base = {
2934                                 .cra_name = "hmac(md5)",
2935                                 .cra_driver_name = "hmac-md5-talitos",
2936                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2937                                 .cra_flags = CRYPTO_ALG_ASYNC,
2938                         }
2939                 },
2940                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2941                                      DESC_HDR_SEL0_MDEUA |
2942                                      DESC_HDR_MODE0_MDEU_MD5,
2943         },
2944         {       .type = CRYPTO_ALG_TYPE_AHASH,
2945                 .alg.hash = {
2946                         .halg.digestsize = SHA1_DIGEST_SIZE,
2947                         .halg.statesize = sizeof(struct talitos_export_state),
2948                         .halg.base = {
2949                                 .cra_name = "hmac(sha1)",
2950                                 .cra_driver_name = "hmac-sha1-talitos",
2951                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2952                                 .cra_flags = CRYPTO_ALG_ASYNC,
2953                         }
2954                 },
2955                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2956                                      DESC_HDR_SEL0_MDEUA |
2957                                      DESC_HDR_MODE0_MDEU_SHA1,
2958         },
2959         {       .type = CRYPTO_ALG_TYPE_AHASH,
2960                 .alg.hash = {
2961                         .halg.digestsize = SHA224_DIGEST_SIZE,
2962                         .halg.statesize = sizeof(struct talitos_export_state),
2963                         .halg.base = {
2964                                 .cra_name = "hmac(sha224)",
2965                                 .cra_driver_name = "hmac-sha224-talitos",
2966                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2967                                 .cra_flags = CRYPTO_ALG_ASYNC,
2968                         }
2969                 },
2970                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2971                                      DESC_HDR_SEL0_MDEUA |
2972                                      DESC_HDR_MODE0_MDEU_SHA224,
2973         },
2974         {       .type = CRYPTO_ALG_TYPE_AHASH,
2975                 .alg.hash = {
2976                         .halg.digestsize = SHA256_DIGEST_SIZE,
2977                         .halg.statesize = sizeof(struct talitos_export_state),
2978                         .halg.base = {
2979                                 .cra_name = "hmac(sha256)",
2980                                 .cra_driver_name = "hmac-sha256-talitos",
2981                                 .cra_blocksize = SHA256_BLOCK_SIZE,
2982                                 .cra_flags = CRYPTO_ALG_ASYNC,
2983                         }
2984                 },
2985                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2986                                      DESC_HDR_SEL0_MDEUA |
2987                                      DESC_HDR_MODE0_MDEU_SHA256,
2988         },
2989         {       .type = CRYPTO_ALG_TYPE_AHASH,
2990                 .alg.hash = {
2991                         .halg.digestsize = SHA384_DIGEST_SIZE,
2992                         .halg.statesize = sizeof(struct talitos_export_state),
2993                         .halg.base = {
2994                                 .cra_name = "hmac(sha384)",
2995                                 .cra_driver_name = "hmac-sha384-talitos",
2996                                 .cra_blocksize = SHA384_BLOCK_SIZE,
2997                                 .cra_flags = CRYPTO_ALG_ASYNC,
2998                         }
2999                 },
3000                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3001                                      DESC_HDR_SEL0_MDEUB |
3002                                      DESC_HDR_MODE0_MDEUB_SHA384,
3003         },
3004         {       .type = CRYPTO_ALG_TYPE_AHASH,
3005                 .alg.hash = {
3006                         .halg.digestsize = SHA512_DIGEST_SIZE,
3007                         .halg.statesize = sizeof(struct talitos_export_state),
3008                         .halg.base = {
3009                                 .cra_name = "hmac(sha512)",
3010                                 .cra_driver_name = "hmac-sha512-talitos",
3011                                 .cra_blocksize = SHA512_BLOCK_SIZE,
3012                                 .cra_flags = CRYPTO_ALG_ASYNC,
3013                         }
3014                 },
3015                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3016                                      DESC_HDR_SEL0_MDEUB |
3017                                      DESC_HDR_MODE0_MDEUB_SHA512,
3018         }
3019 };
3020
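     /*
      * per-device instance of an algorithm template; each registered instance
      * is linked into priv->alg_list so talitos_remove() can unregister it
      */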
3021 struct talitos_crypto_alg {
3022         struct list_head entry;
3023         struct device *dev;
3024         struct talitos_alg_template algt;
3025 };
3026
3027 static int talitos_init_common(struct talitos_ctx *ctx,
3028                                struct talitos_crypto_alg *talitos_alg)
3029 {
3030         struct talitos_private *priv;
3031
3032         /* update context with ptr to dev */
3033         ctx->dev = talitos_alg->dev;
3034
3035         /* assign SEC channel to tfm in round-robin fashion */
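             /* num_channels is a power of two, so the mask is a cheap modulo */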
3036         priv = dev_get_drvdata(ctx->dev);
3037         ctx->ch = atomic_inc_return(&priv->last_chan) &
3038                   (priv->num_channels - 1);
3039
3040         /* copy descriptor header template value */
3041         ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3042
3043         /* select done notification */
3044         ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3045
3046         return 0;
3047 }
3048
3049 static int talitos_cra_init(struct crypto_tfm *tfm)
3050 {
3051         struct crypto_alg *alg = tfm->__crt_alg;
3052         struct talitos_crypto_alg *talitos_alg;
3053         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3054
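             /*
              * ahash algorithms embed their crypto_alg inside an ahash_alg, so
              * the enclosing talitos_crypto_alg is recovered via that wrapper
              */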
3055         if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3056                 talitos_alg = container_of(__crypto_ahash_alg(alg),
3057                                            struct talitos_crypto_alg,
3058                                            algt.alg.hash);
3059         else
3060                 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3061                                            algt.alg.crypto);
3062
3063         return talitos_init_common(ctx, talitos_alg);
3064 }
3065
3066 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3067 {
3068         struct aead_alg *alg = crypto_aead_alg(tfm);
3069         struct talitos_crypto_alg *talitos_alg;
3070         struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3071
3072         talitos_alg = container_of(alg, struct talitos_crypto_alg,
3073                                    algt.alg.aead);
3074
3075         return talitos_init_common(ctx, talitos_alg);
3076 }
3077
3078 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3079 {
3080         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3081
3082         talitos_cra_init(tfm);
3083
3084         ctx->keylen = 0;
3085         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3086                                  sizeof(struct talitos_ahash_req_ctx));
3087
3088         return 0;
3089 }
3090
3091 static void talitos_cra_exit(struct crypto_tfm *tfm)
3092 {
3093         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3094         struct device *dev = ctx->dev;
3095
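             /* release the DMA mapping of the key, if setkey installed one */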
3096         if (ctx->keylen)
3097                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3098 }
3099
3100 /*
3101  * given the alg's descriptor header template, determine whether descriptor
3102  * type and primary/secondary execution units required match the hw
3103  * capabilities description provided in the device tree node.
3104  */
3105 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3106 {
3107         struct talitos_private *priv = dev_get_drvdata(dev);
3108         int ret;
3109
3110         ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3111               (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3112
3113         if (SECONDARY_EU(desc_hdr_template))
3114                 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3115                               & priv->exec_units);
3116
3117         return ret;
3118 }
3119
3120 static int talitos_remove(struct platform_device *ofdev)
3121 {
3122         struct device *dev = &ofdev->dev;
3123         struct talitos_private *priv = dev_get_drvdata(dev);
3124         struct talitos_crypto_alg *t_alg, *n;
3125         int i;
3126
3127         list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3128                 switch (t_alg->algt.type) {
3129                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
                             crypto_unregister_alg(&t_alg->algt.alg.crypto);
3130                         break;
3131                 case CRYPTO_ALG_TYPE_AEAD:
3132                         crypto_unregister_aead(&t_alg->algt.alg.aead);
                             break;
3133                 case CRYPTO_ALG_TYPE_AHASH:
3134                         crypto_unregister_ahash(&t_alg->algt.alg.hash);
3135                         break;
3136                 }
3137                 list_del(&t_alg->entry);
3138         }
3139
3140         if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3141                 talitos_unregister_rng(dev);
3142
3143         for (i = 0; i < 2; i++)
3144                 if (priv->irq[i]) {
3145                         free_irq(priv->irq[i], dev);
3146                         irq_dispose_mapping(priv->irq[i]);
3147                 }
3148
3149         tasklet_kill(&priv->done_task[0]);
3150         if (priv->irq[1])
3151                 tasklet_kill(&priv->done_task[1]);
3152
3153         return 0;
3154 }
3155
3156 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3157                                                     struct talitos_alg_template
3158                                                            *template)
3159 {
3160         struct talitos_private *priv = dev_get_drvdata(dev);
3161         struct talitos_crypto_alg *t_alg;
3162         struct crypto_alg *alg;
3163
3164         t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3165                              GFP_KERNEL);
3166         if (!t_alg)
3167                 return ERR_PTR(-ENOMEM);
3168
3169         t_alg->algt = *template;
3170
3171         switch (t_alg->algt.type) {
3172         case CRYPTO_ALG_TYPE_ABLKCIPHER:
3173                 alg = &t_alg->algt.alg.crypto;
3174                 alg->cra_init = talitos_cra_init;
3175                 alg->cra_exit = talitos_cra_exit;
3176                 alg->cra_type = &crypto_ablkcipher_type;
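                     /* keep a setkey supplied by the template, else the default */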
3177                 alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
3178                                              ablkcipher_setkey;
3179                 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3180                 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3181                 break;
3182         case CRYPTO_ALG_TYPE_AEAD:
3183                 alg = &t_alg->algt.alg.aead.base;
3184                 alg->cra_exit = talitos_cra_exit;
3185                 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3186                 t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3187                                               aead_setkey;
3188                 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3189                 t_alg->algt.alg.aead.decrypt = aead_decrypt;
3190                 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3191                     !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3192                         devm_kfree(dev, t_alg);
3193                         return ERR_PTR(-ENOTSUPP);
3194                 }
3195                 break;
3196         case CRYPTO_ALG_TYPE_AHASH:
3197                 alg = &t_alg->algt.alg.hash.halg.base;
3198                 alg->cra_init = talitos_cra_init_ahash;
3199                 alg->cra_exit = talitos_cra_exit;
3200                 t_alg->algt.alg.hash.init = ahash_init;
3201                 t_alg->algt.alg.hash.update = ahash_update;
3202                 t_alg->algt.alg.hash.final = ahash_final;
3203                 t_alg->algt.alg.hash.finup = ahash_finup;
3204                 t_alg->algt.alg.hash.digest = ahash_digest;
3205                 if (!strncmp(alg->cra_name, "hmac", 4))
3206                         t_alg->algt.alg.hash.setkey = ahash_setkey;
3207                 t_alg->algt.alg.hash.import = ahash_import;
3208                 t_alg->algt.alg.hash.export = ahash_export;
3209
3210                 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3211                     !strncmp(alg->cra_name, "hmac", 4)) {
3212                         devm_kfree(dev, t_alg);
3213                         return ERR_PTR(-ENOTSUPP);
3214                 }
3215                 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3216                     (!strcmp(alg->cra_name, "sha224") ||
3217                      !strcmp(alg->cra_name, "hmac(sha224)"))) {
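                             /*
                              * no SHA-224 hardware init: seed the initial state
                              * in software and run the MDEU in SHA-256 mode
                              */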
3218                         t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3219                         t_alg->algt.desc_hdr_template =
3220                                         DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3221                                         DESC_HDR_SEL0_MDEUA |
3222                                         DESC_HDR_MODE0_MDEU_SHA256;
3223                 }
3224                 break;
3225         default:
3226                 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3227                 devm_kfree(dev, t_alg);
3228                 return ERR_PTR(-EINVAL);
3229         }
3230
3231         alg->cra_module = THIS_MODULE;
3232         if (t_alg->algt.priority)
3233                 alg->cra_priority = t_alg->algt.priority;
3234         else
3235                 alg->cra_priority = TALITOS_CRA_PRIORITY;
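             /* SEC1 requires 32-bit aligned buffers */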
3236         if (has_ftr_sec1(priv))
3237                 alg->cra_alignmask = 3;
3238         else
3239                 alg->cra_alignmask = 0;
3240         alg->cra_ctxsize = sizeof(struct talitos_ctx);
3241         alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3242
3243         t_alg->dev = dev;
3244
3245         return t_alg;
3246 }
3247
3248 static int talitos_probe_irq(struct platform_device *ofdev)
3249 {
3250         struct device *dev = &ofdev->dev;
3251         struct device_node *np = ofdev->dev.of_node;
3252         struct talitos_private *priv = dev_get_drvdata(dev);
3253         int err;
3254         bool is_sec1 = has_ftr_sec1(priv);
3255
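             /*
              * SEC1 has a single interrupt for all channels; SEC2+ has either
              * one IRQ covering all four channels or a pair split across
              * channels 0/2 and 1/3
              */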
3256         priv->irq[0] = irq_of_parse_and_map(np, 0);
3257         if (!priv->irq[0]) {
3258                 dev_err(dev, "failed to map irq\n");
3259                 return -EINVAL;
3260         }
3261         if (is_sec1) {
3262                 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3263                                   dev_driver_string(dev), dev);
3264                 goto primary_out;
3265         }
3266
3267         priv->irq[1] = irq_of_parse_and_map(np, 1);
3268
3269         /* get the primary irq line */
3270         if (!priv->irq[1]) {
3271                 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3272                                   dev_driver_string(dev), dev);
3273                 goto primary_out;
3274         }
3275
3276         err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3277                           dev_driver_string(dev), dev);
3278         if (err)
3279                 goto primary_out;
3280
3281         /* get the secondary irq line */
3282         err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3283                           dev_driver_string(dev), dev);
3284         if (err) {
3285                 dev_err(dev, "failed to request secondary irq\n");
3286                 irq_dispose_mapping(priv->irq[1]);
3287                 priv->irq[1] = 0;
3288         }
3289
3290         return err;
3291
3292 primary_out:
3293         if (err) {
3294                 dev_err(dev, "failed to request primary irq\n");
3295                 irq_dispose_mapping(priv->irq[0]);
3296                 priv->irq[0] = 0;
3297         }
3298
3299         return err;
3300 }
3301
3302 static int talitos_probe(struct platform_device *ofdev)
3303 {
3304         struct device *dev = &ofdev->dev;
3305         struct device_node *np = ofdev->dev.of_node;
3306         struct talitos_private *priv;
3307         int i, err;
3308         int stride;
3309         struct resource *res;
3310
3311         priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3312         if (!priv)
3313                 return -ENOMEM;
3314
3315         INIT_LIST_HEAD(&priv->alg_list);
3316
3317         dev_set_drvdata(dev, priv);
3318
3319         priv->ofdev = ofdev;
3320
3321         spin_lock_init(&priv->reg_lock);
3322
3323         res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3324         if (!res)
3325                 return -ENXIO;
3326         priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3327         if (!priv->reg) {
3328                 dev_err(dev, "failed to ioremap\n");
3329                 err = -ENOMEM;
3330                 goto err_out;
3331         }
3332
3333         /* get SEC version capabilities from device tree */
3334         of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3335         of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3336         of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3337         of_property_read_u32(np, "fsl,descriptor-types-mask",
3338                              &priv->desc_types);
3339
3340         if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3341             !priv->exec_units || !priv->desc_types) {
3342                 dev_err(dev, "invalid property data in device tree node\n");
3343                 err = -EINVAL;
3344                 goto err_out;
3345         }
3346
3347         if (of_device_is_compatible(np, "fsl,sec3.0"))
3348                 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3349
3350         if (of_device_is_compatible(np, "fsl,sec2.1"))
3351                 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3352                                   TALITOS_FTR_SHA224_HWINIT |
3353                                   TALITOS_FTR_HMAC_OK;
3354
3355         if (of_device_is_compatible(np, "fsl,sec1.0"))
3356                 priv->features |= TALITOS_FTR_SEC1;
3357
3358         if (of_device_is_compatible(np, "fsl,sec1.2")) {
3359                 priv->reg_deu = priv->reg + TALITOS12_DEU;
3360                 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3361                 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3362                 stride = TALITOS1_CH_STRIDE;
3363         } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3364                 priv->reg_deu = priv->reg + TALITOS10_DEU;
3365                 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3366                 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3367                 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3368                 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3369                 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3370                 stride = TALITOS1_CH_STRIDE;
3371         } else {
3372                 priv->reg_deu = priv->reg + TALITOS2_DEU;
3373                 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3374                 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3375                 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3376                 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3377                 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3378                 priv->reg_keu = priv->reg + TALITOS2_KEU;
3379                 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3380                 stride = TALITOS2_CH_STRIDE;
3381         }
3382
3383         err = talitos_probe_irq(ofdev);
3384         if (err)
3385                 goto err_out;
3386
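             /* select the "done" tasklet matching the IRQ wiring and channel count */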
3387         if (has_ftr_sec1(priv)) {
3388                 if (priv->num_channels == 1)
3389                         tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3390                                      (unsigned long)dev);
3391                 else
3392                         tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3393                                      (unsigned long)dev);
3394         } else {
3395                 if (priv->irq[1]) {
3396                         tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3397                                      (unsigned long)dev);
3398                         tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3399                                      (unsigned long)dev);
3400                 } else if (priv->num_channels == 1) {
3401                         tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3402                                      (unsigned long)dev);
3403                 } else {
3404                         tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3405                                      (unsigned long)dev);
3406                 }
3407         }
3408
3409         priv->chan = devm_kcalloc(dev,
3410                                   priv->num_channels,
3411                                   sizeof(struct talitos_channel),
3412                                   GFP_KERNEL);
3413         if (!priv->chan) {
3414                 dev_err(dev, "failed to allocate channel management space\n");
3415                 err = -ENOMEM;
3416                 goto err_out;
3417         }
3418
3419         priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3420
3421         for (i = 0; i < priv->num_channels; i++) {
3422                 priv->chan[i].reg = priv->reg + stride * (i + 1);
3423                 if (!priv->irq[1] || !(i & 1))
3424                         priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3425
3426                 spin_lock_init(&priv->chan[i].head_lock);
3427                 spin_lock_init(&priv->chan[i].tail_lock);
3428
3429                 priv->chan[i].fifo = devm_kcalloc(dev,
3430                                                 priv->fifo_len,
3431                                                 sizeof(struct talitos_request),
3432                                                 GFP_KERNEL);
3433                 if (!priv->chan[i].fifo) {
3434                         dev_err(dev, "failed to allocate request fifo %d\n", i);
3435                         err = -ENOMEM;
3436                         goto err_out;
3437                 }
3438
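                     /* bias the counter so the submit path detects a full fifo
                      * with a single atomic increment
                      */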
3439                 atomic_set(&priv->chan[i].submit_count,
3440                            -(priv->chfifo_len - 1));
3441         }
3442
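             /* descriptor pointers can address up to 36 bits of DMA space */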
3443         dma_set_mask(dev, DMA_BIT_MASK(36));
3444
3445         /* reset and initialize the h/w */
3446         err = init_device(dev);
3447         if (err) {
3448                 dev_err(dev, "failed to initialize device\n");
3449                 goto err_out;
3450         }
3451
3452         /* register the RNG, if available */
3453         if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3454                 err = talitos_register_rng(dev);
3455                 if (err) {
3456                         dev_err(dev, "failed to register hwrng: %d\n", err);
3457                         goto err_out;
3458                 } else
3459                         dev_info(dev, "hwrng registered\n");
3460         }
3461
3462         /* register crypto algorithms the device supports */
3463         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3464                 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3465                         struct talitos_crypto_alg *t_alg;
3466                         struct crypto_alg *alg = NULL;
3467
3468                         t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3469                         if (IS_ERR(t_alg)) {
3470                                 err = PTR_ERR(t_alg);
3471                                 if (err == -ENOTSUPP)
3472                                         continue;
3473                                 goto err_out;
3474                         }
3475
3476                         switch (t_alg->algt.type) {
3477                         case CRYPTO_ALG_TYPE_ABLKCIPHER:
3478                                 err = crypto_register_alg(
3479                                                 &t_alg->algt.alg.crypto);
3480                                 alg = &t_alg->algt.alg.crypto;
3481                                 break;
3482
3483                         case CRYPTO_ALG_TYPE_AEAD:
3484                                 err = crypto_register_aead(
3485                                         &t_alg->algt.alg.aead);
3486                                 alg = &t_alg->algt.alg.aead.base;
3487                                 break;
3488
3489                         case CRYPTO_ALG_TYPE_AHASH:
3490                                 err = crypto_register_ahash(
3491                                                 &t_alg->algt.alg.hash);
3492                                 alg = &t_alg->algt.alg.hash.halg.base;
3493                                 break;
3494                         }
3495                         if (err) {
3496                                 dev_err(dev, "%s alg registration failed\n",
3497                                         alg->cra_driver_name);
3498                                 devm_kfree(dev, t_alg);
3499                         } else
3500                                 list_add_tail(&t_alg->entry, &priv->alg_list);
3501                 }
3502         }
3503         if (!list_empty(&priv->alg_list))
3504                 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3505                          (char *)of_get_property(np, "compatible", NULL));
3506
3507         return 0;
3508
3509 err_out:
3510         talitos_remove(ofdev);
3511
3512         return err;
3513 }
3514
3515 static const struct of_device_id talitos_match[] = {
3516 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3517         {
3518                 .compatible = "fsl,sec1.0",
3519         },
3520 #endif
3521 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3522         {
3523                 .compatible = "fsl,sec2.0",
3524         },
3525 #endif
3526         {},
3527 };
3528 MODULE_DEVICE_TABLE(of, talitos_match);
3529
3530 static struct platform_driver talitos_driver = {
3531         .driver = {
3532                 .name = "talitos",
3533                 .of_match_table = talitos_match,
3534         },
3535         .probe = talitos_probe,
3536         .remove = talitos_remove,
3537 };
3538
3539 module_platform_driver(talitos_driver);
3540
3541 MODULE_LICENSE("GPL");
3542 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3543 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");