crypto: talitos - fix max key size for sha384 and sha512
drivers/crypto/talitos.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * talitos - Freescale Integrated Security Engine (SEC) device driver
4  *
5  * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
6  *
7  * Scatterlist Crypto API glue code copied from files with the following:
8  * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
9  *
10  * Crypto algorithm registration code copied from hifn driver:
11  * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
12  * All rights reserved.
13  */
14
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/mod_devicetable.h>
18 #include <linux/device.h>
19 #include <linux/interrupt.h>
20 #include <linux/crypto.h>
21 #include <linux/hw_random.h>
22 #include <linux/of_address.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_platform.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/io.h>
27 #include <linux/spinlock.h>
28 #include <linux/rtnetlink.h>
29 #include <linux/slab.h>
30
31 #include <crypto/algapi.h>
32 #include <crypto/aes.h>
33 #include <crypto/des.h>
34 #include <crypto/sha.h>
35 #include <crypto/md5.h>
36 #include <crypto/internal/aead.h>
37 #include <crypto/authenc.h>
38 #include <crypto/skcipher.h>
39 #include <crypto/hash.h>
40 #include <crypto/internal/hash.h>
41 #include <crypto/scatterwalk.h>
42
43 #include "talitos.h"
44
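/*
 * write a dma address and length into a h/w descriptor pointer;
 * SEC1 pointers are shorter and have no eptr (upper address bits used
 * for 36-bit addressing) or j_extent field, unlike SEC2+
 */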
45 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
46                            unsigned int len, bool is_sec1)
47 {
48         ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
49         if (is_sec1) {
50                 ptr->len1 = cpu_to_be16(len);
51         } else {
52                 ptr->len = cpu_to_be16(len);
53                 ptr->eptr = upper_32_bits(dma_addr);
54         }
55 }
56
57 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
58                              struct talitos_ptr *src_ptr, bool is_sec1)
59 {
60         dst_ptr->ptr = src_ptr->ptr;
61         if (is_sec1) {
62                 dst_ptr->len1 = src_ptr->len1;
63         } else {
64                 dst_ptr->len = src_ptr->len;
65                 dst_ptr->eptr = src_ptr->eptr;
66         }
67 }
68
69 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
70                                            bool is_sec1)
71 {
72         if (is_sec1)
73                 return be16_to_cpu(ptr->len1);
74         else
75                 return be16_to_cpu(ptr->len);
76 }
77
78 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
79                                    bool is_sec1)
80 {
81         if (!is_sec1)
82                 ptr->j_extent = val;
83 }
84
85 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
86 {
87         if (!is_sec1)
88                 ptr->j_extent |= val;
89 }
90
91 /*
92  * map virtual single (contiguous) pointer to h/w descriptor pointer
93  */
94 static void __map_single_talitos_ptr(struct device *dev,
95                                      struct talitos_ptr *ptr,
96                                      unsigned int len, void *data,
97                                      enum dma_data_direction dir,
98                                      unsigned long attrs)
99 {
100         dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
101         struct talitos_private *priv = dev_get_drvdata(dev);
102         bool is_sec1 = has_ftr_sec1(priv);
103
104         to_talitos_ptr(ptr, dma_addr, len, is_sec1);
105 }
106
107 static void map_single_talitos_ptr(struct device *dev,
108                                    struct talitos_ptr *ptr,
109                                    unsigned int len, void *data,
110                                    enum dma_data_direction dir)
111 {
112         __map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
113 }
114
115 static void map_single_talitos_ptr_nosync(struct device *dev,
116                                           struct talitos_ptr *ptr,
117                                           unsigned int len, void *data,
118                                           enum dma_data_direction dir)
119 {
120         __map_single_talitos_ptr(dev, ptr, len, data, dir,
121                                  DMA_ATTR_SKIP_CPU_SYNC);
122 }
123
124 /*
125  * unmap bus single (contiguous) h/w descriptor pointer
126  */
127 static void unmap_single_talitos_ptr(struct device *dev,
128                                      struct talitos_ptr *ptr,
129                                      enum dma_data_direction dir)
130 {
131         struct talitos_private *priv = dev_get_drvdata(dev);
132         bool is_sec1 = has_ftr_sec1(priv);
133
134         dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
135                          from_talitos_ptr_len(ptr, is_sec1), dir);
136 }
137
138 static int reset_channel(struct device *dev, int ch)
139 {
140         struct talitos_private *priv = dev_get_drvdata(dev);
141         unsigned int timeout = TALITOS_TIMEOUT;
142         bool is_sec1 = has_ftr_sec1(priv);
143
144         if (is_sec1) {
145                 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
146                           TALITOS1_CCCR_LO_RESET);
147
148                 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
149                         TALITOS1_CCCR_LO_RESET) && --timeout)
150                         cpu_relax();
151         } else {
152                 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
153                           TALITOS2_CCCR_RESET);
154
155                 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
156                         TALITOS2_CCCR_RESET) && --timeout)
157                         cpu_relax();
158         }
159
160         if (timeout == 0) {
161                 dev_err(dev, "failed to reset channel %d\n", ch);
162                 return -EIO;
163         }
164
165         /* set 36-bit addressing, done writeback enable and done IRQ enable */
166         setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
167                   TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
168         /* enable chaining descriptors */
169         if (is_sec1)
170                 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
171                           TALITOS_CCCR_LO_NE);
172
173         /* and ICCR writeback, if available */
174         if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
175                 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
176                           TALITOS_CCCR_LO_IWSE);
177
178         return 0;
179 }
180
181 static int reset_device(struct device *dev)
182 {
183         struct talitos_private *priv = dev_get_drvdata(dev);
184         unsigned int timeout = TALITOS_TIMEOUT;
185         bool is_sec1 = has_ftr_sec1(priv);
186         u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
187
188         setbits32(priv->reg + TALITOS_MCR, mcr);
189
190         while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
191                && --timeout)
192                 cpu_relax();
193
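        /* when a second IRQ line is wired up, reassign channels 1 and 3 to it */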
194         if (priv->irq[1]) {
195                 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
196                 setbits32(priv->reg + TALITOS_MCR, mcr);
197         }
198
199         if (timeout == 0) {
200                 dev_err(dev, "failed to reset device\n");
201                 return -EIO;
202         }
203
204         return 0;
205 }
206
207 /*
208  * Reset and initialize the device
209  */
210 static int init_device(struct device *dev)
211 {
212         struct talitos_private *priv = dev_get_drvdata(dev);
213         int ch, err;
214         bool is_sec1 = has_ftr_sec1(priv);
215
216         /*
217          * Master reset
218          * errata documentation: warning: certain SEC interrupts
219          * are not fully cleared by writing the MCR:SWR bit,
220          * set bit twice to completely reset
221          */
222         err = reset_device(dev);
223         if (err)
224                 return err;
225
226         err = reset_device(dev);
227         if (err)
228                 return err;
229
230         /* reset channels */
231         for (ch = 0; ch < priv->num_channels; ch++) {
232                 err = reset_channel(dev, ch);
233                 if (err)
234                         return err;
235         }
236
237         /* enable channel done and error interrupts */
238         if (is_sec1) {
239                 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
240                 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
241                 /* disable parity error check in DEU (erroneous? test vect.) */
242                 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
243         } else {
244                 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
245                 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
246         }
247
248         /* disable integrity check error interrupts (use writeback instead) */
249         if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
250                 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
251                           TALITOS_MDEUICR_LO_ICE);
252
253         return 0;
254 }
255
256 /**
257  * talitos_submit - submits a descriptor to the device for processing
258  * @dev:        the SEC device to be used
259  * @ch:         the SEC device channel to be used
260  * @desc:       the descriptor to be processed by the device
261  * @callback:   whom to call when processing is complete
262  * @context:    a handle for use by caller (optional)
263  *
264  * desc must contain valid dma-mapped (bus physical) address pointers.
265  * callback must check err and feedback in descriptor header
266  * for device processing status.
267  */
268 static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
269                           void (*callback)(struct device *dev,
270                                            struct talitos_desc *desc,
271                                            void *context, int error),
272                           void *context)
273 {
274         struct talitos_private *priv = dev_get_drvdata(dev);
275         struct talitos_request *request;
276         unsigned long flags;
277         int head;
278         bool is_sec1 = has_ftr_sec1(priv);
279
280         spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
281
282         if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
283                 /* h/w fifo is full */
284                 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
285                 return -EAGAIN;
286         }
287
288         head = priv->chan[ch].head;
289         request = &priv->chan[ch].fifo[head];
290
291         /* map descriptor and save caller data */
292         if (is_sec1) {
293                 desc->hdr1 = desc->hdr;
294                 request->dma_desc = dma_map_single(dev, &desc->hdr1,
295                                                    TALITOS_DESC_SIZE,
296                                                    DMA_BIDIRECTIONAL);
297         } else {
298                 request->dma_desc = dma_map_single(dev, desc,
299                                                    TALITOS_DESC_SIZE,
300                                                    DMA_BIDIRECTIONAL);
301         }
302         request->callback = callback;
303         request->context = context;
304
305         /* increment fifo head */
306         priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
307
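        /*
         * publish the slot: flush_channel() treats a non-NULL request->desc
         * as "in flight", so the callback/context/dma_desc stores above must
         * be visible before desc is written
         */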
308         smp_wmb();
309         request->desc = desc;
310
311         /* GO! */
312         wmb();
313         out_be32(priv->chan[ch].reg + TALITOS_FF,
314                  upper_32_bits(request->dma_desc));
315         out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
316                  lower_32_bits(request->dma_desc));
317
318         spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
319
320         return -EINPROGRESS;
321 }
322
323 /*
324  * process what was done, notify callback of error if not
325  */
326 static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
327 {
328         struct talitos_private *priv = dev_get_drvdata(dev);
329         struct talitos_request *request, saved_req;
330         unsigned long flags;
331         int tail, status;
332         bool is_sec1 = has_ftr_sec1(priv);
333
334         spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
335
336         tail = priv->chan[ch].tail;
337         while (priv->chan[ch].fifo[tail].desc) {
338                 __be32 hdr;
339
340                 request = &priv->chan[ch].fifo[tail];
341
342                 /* descriptors with their done bits set don't get the error */
343                 rmb();
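                /*
                 * SEC1 h/w status lands in hdr1 (or in the chained second
                 * descriptor's hdr1 when next_desc is in use); SEC2+ updates
                 * hdr in place
                 */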
344                 if (!is_sec1)
345                         hdr = request->desc->hdr;
346                 else if (request->desc->next_desc)
347                         hdr = (request->desc + 1)->hdr1;
348                 else
349                         hdr = request->desc->hdr1;
350
351                 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
352                         status = 0;
353                 else
354                         if (!error)
355                                 break;
356                         else
357                                 status = error;
358
359                 dma_unmap_single(dev, request->dma_desc,
360                                  TALITOS_DESC_SIZE,
361                                  DMA_BIDIRECTIONAL);
362
363                 /* copy entries so we can call callback outside lock */
364                 saved_req.desc = request->desc;
365                 saved_req.callback = request->callback;
366                 saved_req.context = request->context;
367
368                 /* release request entry in fifo */
369                 smp_wmb();
370                 request->desc = NULL;
371
372                 /* increment fifo tail */
373                 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
374
375                 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
376
377                 atomic_dec(&priv->chan[ch].submit_count);
378
379                 saved_req.callback(dev, saved_req.desc, saved_req.context,
380                                    status);
381                 /* channel may resume processing in single desc error case */
382                 if (error && !reset_ch && status == error)
383                         return;
384                 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
385                 tail = priv->chan[ch].tail;
386         }
387
388         spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
389 }
390
391 /*
392  * process completed requests for channels that have done status
393  */
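/*
 * note: SEC1 interrupt mask bits have inverted polarity relative to SEC2,
 * so done interrupts are unmasked with clrbits32() here but with
 * setbits32() in the SEC2 variants below
 */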
394 #define DEF_TALITOS1_DONE(name, ch_done_mask)                           \
395 static void talitos1_done_##name(unsigned long data)                    \
396 {                                                                       \
397         struct device *dev = (struct device *)data;                     \
398         struct talitos_private *priv = dev_get_drvdata(dev);            \
399         unsigned long flags;                                            \
400                                                                         \
401         if (ch_done_mask & 0x10000000)                                  \
402                 flush_channel(dev, 0, 0, 0);                    \
403         if (ch_done_mask & 0x40000000)                                  \
404                 flush_channel(dev, 1, 0, 0);                    \
405         if (ch_done_mask & 0x00010000)                                  \
406                 flush_channel(dev, 2, 0, 0);                    \
407         if (ch_done_mask & 0x00040000)                                  \
408                 flush_channel(dev, 3, 0, 0);                    \
409                                                                         \
410         /* At this point, all completed channels have been processed */ \
411         /* Unmask done interrupts for channels completed later on. */   \
412         spin_lock_irqsave(&priv->reg_lock, flags);                      \
413         clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);               \
414         clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);    \
415         spin_unlock_irqrestore(&priv->reg_lock, flags);                 \
416 }
417
418 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
419 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
420
421 #define DEF_TALITOS2_DONE(name, ch_done_mask)                           \
422 static void talitos2_done_##name(unsigned long data)                    \
423 {                                                                       \
424         struct device *dev = (struct device *)data;                     \
425         struct talitos_private *priv = dev_get_drvdata(dev);            \
426         unsigned long flags;                                            \
427                                                                         \
428         if (ch_done_mask & 1)                                           \
429                 flush_channel(dev, 0, 0, 0);                            \
430         if (ch_done_mask & (1 << 2))                                    \
431                 flush_channel(dev, 1, 0, 0);                            \
432         if (ch_done_mask & (1 << 4))                                    \
433                 flush_channel(dev, 2, 0, 0);                            \
434         if (ch_done_mask & (1 << 6))                                    \
435                 flush_channel(dev, 3, 0, 0);                            \
436                                                                         \
437         /* At this point, all completed channels have been processed */ \
438         /* Unmask done interrupts for channels completed later on. */   \
439         spin_lock_irqsave(&priv->reg_lock, flags);                      \
440         setbits32(priv->reg + TALITOS_IMR, ch_done_mask);               \
441         setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);    \
442         spin_unlock_irqrestore(&priv->reg_lock, flags);                 \
443 }
444
445 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
446 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
447 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
448 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
449
450 /*
451  * locate current (offending) descriptor
452  */
453 static u32 current_desc_hdr(struct device *dev, int ch)
454 {
455         struct talitos_private *priv = dev_get_drvdata(dev);
456         int tail, iter;
457         dma_addr_t cur_desc;
458
459         cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
460         cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
461
462         if (!cur_desc) {
463                 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
464                 return 0;
465         }
466
467         tail = priv->chan[ch].tail;
468
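        /*
         * walk the s/w fifo from tail until a request's descriptor (or its
         * chained next_desc) bus address matches the channel's current
         * descriptor pointer register
         */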
469         iter = tail;
470         while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
471                priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
472                 iter = (iter + 1) & (priv->fifo_len - 1);
473                 if (iter == tail) {
474                         dev_err(dev, "couldn't locate current descriptor\n");
475                         return 0;
476                 }
477         }
478
479         if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
480                 return (priv->chan[ch].fifo[iter].desc + 1)->hdr;
481
482         return priv->chan[ch].fifo[iter].desc->hdr;
483 }
484
485 /*
486  * user diagnostics; report root cause of error based on execution unit status
487  */
488 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
489 {
490         struct talitos_private *priv = dev_get_drvdata(dev);
491         int i;
492
493         if (!desc_hdr)
494                 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
495
496         switch (desc_hdr & DESC_HDR_SEL0_MASK) {
497         case DESC_HDR_SEL0_AFEU:
498                 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
499                         in_be32(priv->reg_afeu + TALITOS_EUISR),
500                         in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
501                 break;
502         case DESC_HDR_SEL0_DEU:
503                 dev_err(dev, "DEUISR 0x%08x_%08x\n",
504                         in_be32(priv->reg_deu + TALITOS_EUISR),
505                         in_be32(priv->reg_deu + TALITOS_EUISR_LO));
506                 break;
507         case DESC_HDR_SEL0_MDEUA:
508         case DESC_HDR_SEL0_MDEUB:
509                 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
510                         in_be32(priv->reg_mdeu + TALITOS_EUISR),
511                         in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
512                 break;
513         case DESC_HDR_SEL0_RNG:
514                 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
515                         in_be32(priv->reg_rngu + TALITOS_ISR),
516                         in_be32(priv->reg_rngu + TALITOS_ISR_LO));
517                 break;
518         case DESC_HDR_SEL0_PKEU:
519                 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
520                         in_be32(priv->reg_pkeu + TALITOS_EUISR),
521                         in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
522                 break;
523         case DESC_HDR_SEL0_AESU:
524                 dev_err(dev, "AESUISR 0x%08x_%08x\n",
525                         in_be32(priv->reg_aesu + TALITOS_EUISR),
526                         in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
527                 break;
528         case DESC_HDR_SEL0_CRCU:
529                 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
530                         in_be32(priv->reg_crcu + TALITOS_EUISR),
531                         in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
532                 break;
533         case DESC_HDR_SEL0_KEU:
534                 dev_err(dev, "KEUISR 0x%08x_%08x\n",
535                         in_be32(priv->reg_pkeu + TALITOS_EUISR),
536                         in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
537                 break;
538         }
539
540         switch (desc_hdr & DESC_HDR_SEL1_MASK) {
541         case DESC_HDR_SEL1_MDEUA:
542         case DESC_HDR_SEL1_MDEUB:
543                 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
544                         in_be32(priv->reg_mdeu + TALITOS_EUISR),
545                         in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
546                 break;
547         case DESC_HDR_SEL1_CRCU:
548                 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
549                         in_be32(priv->reg_crcu + TALITOS_EUISR),
550                         in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
551                 break;
552         }
553
554         for (i = 0; i < 8; i++)
555                 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
556                         in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
557                         in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
558 }
559
560 /*
561  * recover from error interrupts
562  */
563 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
564 {
565         struct talitos_private *priv = dev_get_drvdata(dev);
566         unsigned int timeout = TALITOS_TIMEOUT;
567         int ch, error, reset_dev = 0;
568         u32 v_lo;
569         bool is_sec1 = has_ftr_sec1(priv);
570         int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
571
572         for (ch = 0; ch < priv->num_channels; ch++) {
573                 /* skip channels without errors */
574                 if (is_sec1) {
575                         /* SEC1 channel error bits: ch 0-3 -> ISR bits 29, 31, 17, 19 */
576                         if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
577                                 continue;
578                 } else {
579                         if (!(isr & (1 << (ch * 2 + 1))))
580                                 continue;
581                 }
582
583                 error = -EINVAL;
584
585                 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
586
587                 if (v_lo & TALITOS_CCPSR_LO_DOF) {
588                         dev_err(dev, "double fetch fifo overflow error\n");
589                         error = -EAGAIN;
590                         reset_ch = 1;
591                 }
592                 if (v_lo & TALITOS_CCPSR_LO_SOF) {
593                         /* h/w dropped descriptor */
594                         dev_err(dev, "single fetch fifo overflow error\n");
595                         error = -EAGAIN;
596                 }
597                 if (v_lo & TALITOS_CCPSR_LO_MDTE)
598                         dev_err(dev, "master data transfer error\n");
599                 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
600                         dev_err(dev, is_sec1 ? "pointer not complete error\n"
601                                              : "s/g data length zero error\n");
602                 if (v_lo & TALITOS_CCPSR_LO_FPZ)
603                         dev_err(dev, is_sec1 ? "parity error\n"
604                                              : "fetch pointer zero error\n");
605                 if (v_lo & TALITOS_CCPSR_LO_IDH)
606                         dev_err(dev, "illegal descriptor header error\n");
607                 if (v_lo & TALITOS_CCPSR_LO_IEU)
608                         dev_err(dev, is_sec1 ? "static assignment error\n"
609                                              : "invalid exec unit error\n");
610                 if (v_lo & TALITOS_CCPSR_LO_EU)
611                         report_eu_error(dev, ch, current_desc_hdr(dev, ch));
612                 if (!is_sec1) {
613                         if (v_lo & TALITOS_CCPSR_LO_GB)
614                                 dev_err(dev, "gather boundary error\n");
615                         if (v_lo & TALITOS_CCPSR_LO_GRL)
616                                 dev_err(dev, "gather return/length error\n");
617                         if (v_lo & TALITOS_CCPSR_LO_SB)
618                                 dev_err(dev, "scatter boundary error\n");
619                         if (v_lo & TALITOS_CCPSR_LO_SRL)
620                                 dev_err(dev, "scatter return/length error\n");
621                 }
622
623                 flush_channel(dev, ch, error, reset_ch);
624
625                 if (reset_ch) {
626                         reset_channel(dev, ch);
627                 } else {
628                         setbits32(priv->chan[ch].reg + TALITOS_CCCR,
629                                   TALITOS2_CCCR_CONT);
630                         setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
631                         while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
632                                TALITOS2_CCCR_CONT) && --timeout)
633                                 cpu_relax();
634                         if (timeout == 0) {
635                                 dev_err(dev, "failed to restart channel %d\n",
636                                         ch);
637                                 reset_dev = 1;
638                         }
639                 }
640         }
641         if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
642             (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
643                 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
644                         dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
645                                 isr, isr_lo);
646                 else
647                         dev_err(dev, "done overflow, internal time out, or "
648                                 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
649
650                 /* purge request queues */
651                 for (ch = 0; ch < priv->num_channels; ch++)
652                         flush_channel(dev, ch, -EIO, 1);
653
654                 /* reset and reinitialize the device */
655                 init_device(dev);
656         }
657 }
658
659 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)          \
660 static irqreturn_t talitos1_interrupt_##name(int irq, void *data)              \
661 {                                                                              \
662         struct device *dev = data;                                             \
663         struct talitos_private *priv = dev_get_drvdata(dev);                   \
664         u32 isr, isr_lo;                                                       \
665         unsigned long flags;                                                   \
666                                                                                \
667         spin_lock_irqsave(&priv->reg_lock, flags);                             \
668         isr = in_be32(priv->reg + TALITOS_ISR);                                \
669         isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
670         /* Acknowledge interrupt */                                            \
671         out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
672         out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
673                                                                                \
674         if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
675                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
676                 talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
677         }                                                                      \
678         else {                                                                 \
679                 if (likely(isr & ch_done_mask)) {                              \
680                         /* mask further done interrupts. */                    \
681                         setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
682                         /* done_task will unmask done interrupts at exit */    \
683                         tasklet_schedule(&priv->done_task[tlet]);              \
684                 }                                                              \
685                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
686         }                                                                      \
687                                                                                \
688         return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
689                                                                 IRQ_NONE;      \
690 }
691
692 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
693
694 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)          \
695 static irqreturn_t talitos2_interrupt_##name(int irq, void *data)              \
696 {                                                                              \
697         struct device *dev = data;                                             \
698         struct talitos_private *priv = dev_get_drvdata(dev);                   \
699         u32 isr, isr_lo;                                                       \
700         unsigned long flags;                                                   \
701                                                                                \
702         spin_lock_irqsave(&priv->reg_lock, flags);                             \
703         isr = in_be32(priv->reg + TALITOS_ISR);                                \
704         isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);                          \
705         /* Acknowledge interrupt */                                            \
706         out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
707         out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);                          \
708                                                                                \
709         if (unlikely(isr & ch_err_mask || isr_lo)) {                           \
710                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
711                 talitos_error(dev, isr & ch_err_mask, isr_lo);                 \
712         }                                                                      \
713         else {                                                                 \
714                 if (likely(isr & ch_done_mask)) {                              \
715                         /* mask further done interrupts. */                    \
716                         clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
717                         /* done_task will unmask done interrupts at exit */    \
718                         tasklet_schedule(&priv->done_task[tlet]);              \
719                 }                                                              \
720                 spin_unlock_irqrestore(&priv->reg_lock, flags);                \
721         }                                                                      \
722                                                                                \
723         return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
724                                                                 IRQ_NONE;      \
725 }
726
727 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
728 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
729                        0)
730 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
731                        1)
732
733 /*
734  * hwrng
735  */
736 static int talitos_rng_data_present(struct hwrng *rng, int wait)
737 {
738         struct device *dev = (struct device *)rng->priv;
739         struct talitos_private *priv = dev_get_drvdata(dev);
740         u32 ofl;
741         int i;
742
743         for (i = 0; i < 20; i++) {
744                 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
745                       TALITOS_RNGUSR_LO_OFL;
746                 if (ofl || !wait)
747                         break;
748                 udelay(10);
749         }
750
751         return !!ofl;
752 }
753
754 static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
755 {
756         struct device *dev = (struct device *)rng->priv;
757         struct talitos_private *priv = dev_get_drvdata(dev);
758
759         /* rng fifo requires 64-bit accesses */
760         *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
761         *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
762
763         return sizeof(u32);
764 }
765
766 static int talitos_rng_init(struct hwrng *rng)
767 {
768         struct device *dev = (struct device *)rng->priv;
769         struct talitos_private *priv = dev_get_drvdata(dev);
770         unsigned int timeout = TALITOS_TIMEOUT;
771
772         setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
773         while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
774                  & TALITOS_RNGUSR_LO_RD)
775                && --timeout)
776                 cpu_relax();
777         if (timeout == 0) {
778                 dev_err(dev, "failed to reset rng hw\n");
779                 return -ENODEV;
780         }
781
782         /* start generating */
783         setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
784
785         return 0;
786 }
787
788 static int talitos_register_rng(struct device *dev)
789 {
790         struct talitos_private *priv = dev_get_drvdata(dev);
791         int err;
792
793         priv->rng.name          = dev_driver_string(dev),
794         priv->rng.init          = talitos_rng_init,
795         priv->rng.data_present  = talitos_rng_data_present,
796         priv->rng.data_read     = talitos_rng_data_read,
797         priv->rng.priv          = (unsigned long)dev;
798
799         err = hwrng_register(&priv->rng);
800         if (!err)
801                 priv->rng_registered = true;
802
803         return err;
804 }
805
806 static void talitos_unregister_rng(struct device *dev)
807 {
808         struct talitos_private *priv = dev_get_drvdata(dev);
809
810         if (!priv->rng_registered)
811                 return;
812
813         hwrng_unregister(&priv->rng);
814         priv->rng_registered = false;
815 }
816
817 /*
818  * crypto alg
819  */
820 #define TALITOS_CRA_PRIORITY            3000
821 /*
822  * Defines a priority for doing AEAD with descriptor type
823  * HMAC_SNOOP_NO_AFEU (HSNA) instead of type IPSEC_ESP
824  */
825 #define TALITOS_CRA_PRIORITY_AEAD_HSNA  (TALITOS_CRA_PRIORITY - 1)
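/*
 * An authenc key is the MAC key concatenated with the cipher key.  HMAC
 * keys for sha384/sha512 can be up to SHA512_BLOCK_SIZE (128) bytes, and
 * only SEC2+ parts implement those hashes, hence the larger bound when
 * CONFIG_CRYPTO_DEV_TALITOS2 is enabled.
 */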
826 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
827 #define TALITOS_MAX_KEY_SIZE            (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
828 #else
829 #define TALITOS_MAX_KEY_SIZE            (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
830 #endif
831 #define TALITOS_MAX_IV_LENGTH           16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
832
833 struct talitos_ctx {
834         struct device *dev;
835         int ch;
836         __be32 desc_hdr_template;
837         u8 key[TALITOS_MAX_KEY_SIZE];
838         u8 iv[TALITOS_MAX_IV_LENGTH];
839         dma_addr_t dma_key;
840         unsigned int keylen;
841         unsigned int enckeylen;
842         unsigned int authkeylen;
843 };
844
845 #define HASH_MAX_BLOCK_SIZE             SHA512_BLOCK_SIZE
846 #define TALITOS_MDEU_MAX_CONTEXT_SIZE   TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
847
848 struct talitos_ahash_req_ctx {
849         u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
850         unsigned int hw_context_size;
851         u8 buf[2][HASH_MAX_BLOCK_SIZE];
852         int buf_idx;
853         unsigned int swinit;
854         unsigned int first;
855         unsigned int last;
856         unsigned int to_hash_later;
857         unsigned int nbuf;
858         struct scatterlist bufsl[2];
859         struct scatterlist *psrc;
860 };
861
862 struct talitos_export_state {
863         u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
864         u8 buf[HASH_MAX_BLOCK_SIZE];
865         unsigned int swinit;
866         unsigned int first;
867         unsigned int last;
868         unsigned int to_hash_later;
869         unsigned int nbuf;
870 };
871
872 static int aead_setkey(struct crypto_aead *authenc,
873                        const u8 *key, unsigned int keylen)
874 {
875         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
876         struct device *dev = ctx->dev;
877         struct crypto_authenc_keys keys;
878
879         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
880                 goto badkey;
881
882         if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
883                 goto badkey;
884
885         if (ctx->keylen)
886                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
887
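        /*
         * the device consumes one contiguous key blob: auth key first,
         * immediately followed by the cipher key (ipsec_esp() points
         * ptr[0] at dma_key and the cipher key pointer at
         * dma_key + authkeylen)
         */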
888         memcpy(ctx->key, keys.authkey, keys.authkeylen);
889         memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
890
891         ctx->keylen = keys.authkeylen + keys.enckeylen;
892         ctx->enckeylen = keys.enckeylen;
893         ctx->authkeylen = keys.authkeylen;
894         ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
895                                       DMA_TO_DEVICE);
896
897         memzero_explicit(&keys, sizeof(keys));
898         return 0;
899
900 badkey:
901         crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
902         memzero_explicit(&keys, sizeof(keys));
903         return -EINVAL;
904 }
905
906 static int aead_des3_setkey(struct crypto_aead *authenc,
907                             const u8 *key, unsigned int keylen)
908 {
909         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
910         struct device *dev = ctx->dev;
911         struct crypto_authenc_keys keys;
912         u32 flags;
913         int err;
914
915         err = crypto_authenc_extractkeys(&keys, key, keylen);
916         if (unlikely(err))
917                 goto badkey;
918
919         err = -EINVAL;
920         if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
921                 goto badkey;
922
923         if (keys.enckeylen != DES3_EDE_KEY_SIZE)
924                 goto badkey;
925
926         flags = crypto_aead_get_flags(authenc);
927         err = __des3_verify_key(&flags, keys.enckey);
928         if (unlikely(err)) {
929                 crypto_aead_set_flags(authenc, flags);
930                 goto out;
931         }
932
933         if (ctx->keylen)
934                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
935
936         memcpy(ctx->key, keys.authkey, keys.authkeylen);
937         memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
938
939         ctx->keylen = keys.authkeylen + keys.enckeylen;
940         ctx->enckeylen = keys.enckeylen;
941         ctx->authkeylen = keys.authkeylen;
942         ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
943                                       DMA_TO_DEVICE);
944
945 out:
946         memzero_explicit(&keys, sizeof(keys));
947         return err;
948
949 badkey:
950         crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
951         goto out;
952 }
953
954 /*
955  * talitos_edesc - s/w-extended descriptor
956  * @src_nents: number of segments in input scatterlist
957  * @dst_nents: number of segments in output scatterlist
958  * @icv_ool: whether ICV is out-of-line
959  * @iv_dma: dma address of iv for checking continuity and link table
960  * @dma_len: length of dma mapped link_tbl space
961  * @dma_link_tbl: bus physical address of link_tbl/buf
962  * @desc: h/w descriptor
963  * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
964  * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
965  *
966  * if decrypting (with authcheck), or either one of src_nents or dst_nents
967  * is greater than 1, an integrity check value is concatenated to the end
968  * of link_tbl data
969  */
970 struct talitos_edesc {
971         int src_nents;
972         int dst_nents;
973         bool icv_ool;
974         dma_addr_t iv_dma;
975         int dma_len;
976         dma_addr_t dma_link_tbl;
977         struct talitos_desc desc;
978         union {
979                 struct talitos_ptr link_tbl[0];
980                 u8 buf[0];
981         };
982 };
983
984 static void talitos_sg_unmap(struct device *dev,
985                              struct talitos_edesc *edesc,
986                              struct scatterlist *src,
987                              struct scatterlist *dst,
988                              unsigned int len, unsigned int offset)
989 {
990         struct talitos_private *priv = dev_get_drvdata(dev);
991         bool is_sec1 = has_ftr_sec1(priv);
992         unsigned int src_nents = edesc->src_nents ? : 1;
993         unsigned int dst_nents = edesc->dst_nents ? : 1;
994
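        /*
         * SEC1 has no scatter/gather support, so a multi-segment destination
         * was processed through the contiguous edesc->buf bounce buffer;
         * sync it and copy the result back into the destination s/g list
         */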
995         if (is_sec1 && dst && dst_nents > 1) {
996                 dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
997                                            len, DMA_FROM_DEVICE);
998                 sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
999                                      offset);
1000         }
1001         if (src != dst) {
1002                 if (src_nents == 1 || !is_sec1)
1003                         dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
1004
1005                 if (dst && (dst_nents == 1 || !is_sec1))
1006                         dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
1007         } else if (src_nents == 1 || !is_sec1) {
1008                 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
1009         }
1010 }
1011
1012 static void ipsec_esp_unmap(struct device *dev,
1013                             struct talitos_edesc *edesc,
1014                             struct aead_request *areq, bool encrypt)
1015 {
1016         struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1017         struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1018         unsigned int ivsize = crypto_aead_ivsize(aead);
1019         unsigned int authsize = crypto_aead_authsize(aead);
1020         unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1021         bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
1022         struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];
1023
1024         if (is_ipsec_esp)
1025                 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
1026                                          DMA_FROM_DEVICE);
1027         unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
1028
1029         talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
1030                          cryptlen + authsize, areq->assoclen);
1031
1032         if (edesc->dma_len)
1033                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1034                                  DMA_BIDIRECTIONAL);
1035
1036         if (!is_ipsec_esp) {
1037                 unsigned int dst_nents = edesc->dst_nents ? : 1;
1038
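                /*
                 * non-IPSEC_ESP descriptors use ptr[6] for the ICV instead of
                 * an iv-out pointer, so recover the output IV by copying the
                 * last cipher block from the destination
                 */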
1039                 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
1040                                    areq->assoclen + cryptlen - ivsize);
1041         }
1042 }
1043
1044 /*
1045  * ipsec_esp descriptor callbacks
1046  */
1047 static void ipsec_esp_encrypt_done(struct device *dev,
1048                                    struct talitos_desc *desc, void *context,
1049                                    int err)
1050 {
1051         struct aead_request *areq = context;
1052         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1053         unsigned int ivsize = crypto_aead_ivsize(authenc);
1054         struct talitos_edesc *edesc;
1055
1056         edesc = container_of(desc, struct talitos_edesc, desc);
1057
1058         ipsec_esp_unmap(dev, edesc, areq, true);
1059
1060         dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
1061
1062         kfree(edesc);
1063
1064         aead_request_complete(areq, err);
1065 }
1066
1067 static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1068                                           struct talitos_desc *desc,
1069                                           void *context, int err)
1070 {
1071         struct aead_request *req = context;
1072         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1073         unsigned int authsize = crypto_aead_authsize(authenc);
1074         struct talitos_edesc *edesc;
1075         char *oicv, *icv;
1076
1077         edesc = container_of(desc, struct talitos_edesc, desc);
1078
1079         ipsec_esp_unmap(dev, edesc, req, false);
1080
1081         if (!err) {
1082                 /* auth check */
1083                 oicv = edesc->buf + edesc->dma_len;
1084                 icv = oicv - authsize;
1085
1086                 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1087         }
1088
1089         kfree(edesc);
1090
1091         aead_request_complete(req, err);
1092 }
1093
1094 static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1095                                           struct talitos_desc *desc,
1096                                           void *context, int err)
1097 {
1098         struct aead_request *req = context;
1099         struct talitos_edesc *edesc;
1100
1101         edesc = container_of(desc, struct talitos_edesc, desc);
1102
1103         ipsec_esp_unmap(dev, edesc, req, false);
1104
1105         /* check ICV auth status */
1106         if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1107                      DESC_HDR_LO_ICCR1_PASS))
1108                 err = -EBADMSG;
1109
1110         kfree(edesc);
1111
1112         aead_request_complete(req, err);
1113 }
1114
1115 /*
1116  * convert scatterlist to SEC h/w link table format; stop at cryptlen
1117  * (= datalen + elen) bytes, never letting extent bytes share an entry
1118  * with payload data
 */
1119 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1120                                  unsigned int offset, int datalen, int elen,
1121                                  struct talitos_ptr *link_tbl_ptr)
1122 {
1123         int n_sg = elen ? sg_count + 1 : sg_count;
1124         int count = 0;
1125         int cryptlen = datalen + elen;
1126
1127         while (cryptlen && sg && n_sg--) {
1128                 unsigned int len = sg_dma_len(sg);
1129
1130                 if (offset >= len) {
1131                         offset -= len;
1132                         goto next;
1133                 }
1134
1135                 len -= offset;
1136
1137                 if (len > cryptlen)
1138                         len = cryptlen;
1139
1140                 if (datalen > 0 && len > datalen) {
1141                         to_talitos_ptr(link_tbl_ptr + count,
1142                                        sg_dma_address(sg) + offset, datalen, 0);
1143                         to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1144                         count++;
1145                         len -= datalen;
1146                         offset += datalen;
1147                 }
1148                 to_talitos_ptr(link_tbl_ptr + count,
1149                                sg_dma_address(sg) + offset, len, 0);
1150                 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1151                 count++;
1152                 cryptlen -= len;
1153                 datalen -= len;
1154                 offset = 0;
1155
1156 next:
1157                 sg = sg_next(sg);
1158         }
1159
1160         /* tag end of link table */
1161         if (count > 0)
1162                 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1163                                        DESC_PTR_LNKTBL_RET, 0);
1164
1165         return count;
1166 }
1167
1168 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
1169                               unsigned int len, struct talitos_edesc *edesc,
1170                               struct talitos_ptr *ptr, int sg_count,
1171                               unsigned int offset, int tbl_off, int elen,
1172                               bool force)
1173 {
1174         struct talitos_private *priv = dev_get_drvdata(dev);
1175         bool is_sec1 = has_ftr_sec1(priv);
1176
1177         if (!src) {
1178                 to_talitos_ptr(ptr, 0, 0, is_sec1);
1179                 return 1;
1180         }
1181         to_talitos_ptr_ext_set(ptr, elen, is_sec1);
1182         if (sg_count == 1 && !force) {
1183                 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
1184                 return sg_count;
1185         }
1186         if (is_sec1) {
1187                 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
1188                 return sg_count;
1189         }
1190         sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
1191                                          &edesc->link_tbl[tbl_off]);
1192         if (sg_count == 1 && !force) {
1193                 /* Only one segment now, so no link tbl needed */
1194                 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1195                 return sg_count;
1196         }
1197         to_talitos_ptr(ptr, edesc->dma_link_tbl +
1198                             tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
1199         to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1200
1201         return sg_count;
1202 }
1203
1204 static int talitos_sg_map(struct device *dev, struct scatterlist *src,
1205                           unsigned int len, struct talitos_edesc *edesc,
1206                           struct talitos_ptr *ptr, int sg_count,
1207                           unsigned int offset, int tbl_off)
1208 {
1209         return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
1210                                   tbl_off, 0, false);
1211 }
1212
1213 /*
1214  * fill in and submit ipsec_esp descriptor
1215  */
1216 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1217                      bool encrypt,
1218                      void (*callback)(struct device *dev,
1219                                       struct talitos_desc *desc,
1220                                       void *context, int error))
1221 {
1222         struct crypto_aead *aead = crypto_aead_reqtfm(areq);
1223         unsigned int authsize = crypto_aead_authsize(aead);
1224         struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1225         struct device *dev = ctx->dev;
1226         struct talitos_desc *desc = &edesc->desc;
1227         unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1228         unsigned int ivsize = crypto_aead_ivsize(aead);
1229         int tbl_off = 0;
1230         int sg_count, ret;
1231         int elen = 0;
1232         bool sync_needed = false;
1233         struct talitos_private *priv = dev_get_drvdata(dev);
1234         bool is_sec1 = has_ftr_sec1(priv);
1235         bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
1236         struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
1237         struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
1238         dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
1239
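        /*
         * descriptor pointer usage: ptr[0] hmac key, ptr[1] hmac/assoc data,
         * ptr[2]/ptr[3] cipher iv and cipher key (order depends on the
         * descriptor type), ptr[4] cipher in, ptr[5] cipher out,
         * ptr[6] ICV or iv out
         */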
1240         /* hmac key */
1241         to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
1242
1243         sg_count = edesc->src_nents ?: 1;
1244         if (is_sec1 && sg_count > 1)
1245                 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1246                                   areq->assoclen + cryptlen);
1247         else
1248                 sg_count = dma_map_sg(dev, areq->src, sg_count,
1249                                       (areq->src == areq->dst) ?
1250                                       DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1251
1252         /* hmac data */
1253         ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1254                              &desc->ptr[1], sg_count, 0, tbl_off);
1255
1256         if (ret > 1) {
1257                 tbl_off += ret;
1258                 sync_needed = true;
1259         }
1260
1261         /* cipher iv */
1262         to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);
1263
1264         /* cipher key */
1265         to_talitos_ptr(ckey_ptr, ctx->dma_key  + ctx->authkeylen,
1266                        ctx->enckeylen, is_sec1);
1267
1268         /*
1269          * cipher in
1270          * map and adjust cipher len to aead request cryptlen.
1271          * extent is bytes of HMAC appended to the ciphertext,
1272          * typically 12 for ipsec
1273          */
1274         if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
1275                 elen = authsize;
1276
1277         ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
1278                                  sg_count, areq->assoclen, tbl_off, elen,
1279                                  false);
1280
1281         if (ret > 1) {
1282                 tbl_off += ret;
1283                 sync_needed = true;
1284         }
1285
1286         /* cipher out */
1287         if (areq->src != areq->dst) {
1288                 sg_count = edesc->dst_nents ? : 1;
1289                 if (!is_sec1 || sg_count == 1)
1290                         dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1291         }
1292
1293         if (is_ipsec_esp && encrypt)
1294                 elen = authsize;
1295         else
1296                 elen = 0;
1297         ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1298                                  sg_count, areq->assoclen, tbl_off, elen,
1299                                  is_ipsec_esp && !encrypt);
1300         tbl_off += ret;
1301
1302         /* ICV data */
1303         edesc->icv_ool = !encrypt;
1304
1305         if (!encrypt && is_ipsec_esp) {
1306                 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1307
1308                 /* Add an entry to the link table for ICV data */
1309                 to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1310                 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
1311
1312                 /* icv data follows link tables */
1313                 to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
1314                 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1315                 sync_needed = true;
1316         } else if (!encrypt) {
1317                 to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
1318                 sync_needed = true;
1319         } else if (!is_ipsec_esp) {
1320                 talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
1321                                sg_count, areq->assoclen + cryptlen, tbl_off);
1322         }
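             /*
              * ICV placement, summarizing the branches above: ESP decrypt
              * chains the ICV area at the end of the link table buffer onto
              * the cipher-out table via an extra RET-terminated entry plus
              * ptr[5]'s extent; other decrypts point ptr[6] at that area for
              * the software check; non-ESP encrypt maps the tail of dst into
              * ptr[6] for the generated ICV (ESP encrypt already covered the
              * ICV through ptr[5]'s elen).
              */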
1323
1324         /* iv out */
1325         if (is_ipsec_esp)
1326                 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1327                                        DMA_FROM_DEVICE);
1328
1329         if (sync_needed)
1330                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1331                                            edesc->dma_len,
1332                                            DMA_BIDIRECTIONAL);
1333
1334         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1335         if (ret != -EINPROGRESS) {
1336                 ipsec_esp_unmap(dev, edesc, areq, encrypt);
1337                 kfree(edesc);
1338         }
1339         return ret;
1340 }
1341
1342 /*
1343  * allocate and map the extended descriptor
1344  */
1345 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1346                                                  struct scatterlist *src,
1347                                                  struct scatterlist *dst,
1348                                                  u8 *iv,
1349                                                  unsigned int assoclen,
1350                                                  unsigned int cryptlen,
1351                                                  unsigned int authsize,
1352                                                  unsigned int ivsize,
1353                                                  int icv_stashing,
1354                                                  u32 cryptoflags,
1355                                                  bool encrypt)
1356 {
1357         struct talitos_edesc *edesc;
1358         int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1359         dma_addr_t iv_dma = 0;
1360         gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1361                       GFP_ATOMIC;
1362         struct talitos_private *priv = dev_get_drvdata(dev);
1363         bool is_sec1 = has_ftr_sec1(priv);
1364         int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1365
1366         if (cryptlen + authsize > max_len) {
1367                 dev_err(dev, "length exceeds h/w max limit\n");
1368                 return ERR_PTR(-EINVAL);
1369         }
1370
1371         if (!dst || dst == src) {
1372                 src_len = assoclen + cryptlen + authsize;
1373                 src_nents = sg_nents_for_len(src, src_len);
1374                 if (src_nents < 0) {
1375                         dev_err(dev, "Invalid number of src SG.\n");
1376                         return ERR_PTR(-EINVAL);
1377                 }
1378                 src_nents = (src_nents == 1) ? 0 : src_nents;
1379                 dst_nents = dst ? src_nents : 0;
1380                 dst_len = 0;
1381         } else { /* dst && dst != src */
1382                 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1383                 src_nents = sg_nents_for_len(src, src_len);
1384                 if (src_nents < 0) {
1385                         dev_err(dev, "Invalid number of src SG.\n");
1386                         return ERR_PTR(-EINVAL);
1387                 }
1388                 src_nents = (src_nents == 1) ? 0 : src_nents;
1389                 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1390                 dst_nents = sg_nents_for_len(dst, dst_len);
1391                 if (dst_nents < 0) {
1392                         dev_err(dev, "Invalid number of dst SG.\n");
1393                         return ERR_PTR(-EINVAL);
1394                 }
1395                 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1396         }
1397
1398         /*
1399          * allocate space for base edesc plus the link tables,
1400          * allowing for two separate entries for AD and generated ICV (+ 2),
1401          * and space for two sets of ICVs (stashed and generated)
1402          */
1403         alloc_len = sizeof(struct talitos_edesc);
1404         if (src_nents || dst_nents || !encrypt) {
1405                 if (is_sec1)
1406                         dma_len = (src_nents ? src_len : 0) +
1407                                   (dst_nents ? dst_len : 0) + authsize;
1408                 else
1409                         dma_len = (src_nents + dst_nents + 2) *
1410                                   sizeof(struct talitos_ptr) + authsize;
1411                 alloc_len += dma_len;
1412         } else {
1413                 dma_len = 0;
1414         }
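             /*
              * Note the split above: on SEC1 dma_len sizes a contiguous
              * bounce buffer for the linearized src/dst data plus the ICV,
              * while other SECs need real link table entries plus the ICV.
              */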
1415         alloc_len += icv_stashing ? authsize : 0;
1416
1417         /* if it's an ahash, add space for a second desc next to the first one */
1418         if (is_sec1 && !dst)
1419                 alloc_len += sizeof(struct talitos_desc);
1420         alloc_len += ivsize;
1421
1422         edesc = kmalloc(alloc_len, GFP_DMA | flags);
1423         if (!edesc)
1424                 return ERR_PTR(-ENOMEM);
1425         if (ivsize) {
1426                 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1427                 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1428         }
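             /*
              * The IV is copied to the tail of the edesc allocation before
              * mapping so a caller-provided IV (possibly on the stack) is
              * never handed to the DMA API directly.
              */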
1429         memset(&edesc->desc, 0, sizeof(edesc->desc));
1430
1431         edesc->src_nents = src_nents;
1432         edesc->dst_nents = dst_nents;
1433         edesc->iv_dma = iv_dma;
1434         edesc->dma_len = dma_len;
1435         if (dma_len) {
1436                 void *addr = &edesc->link_tbl[0];
1437
1438                 if (is_sec1 && !dst)
1439                         addr += sizeof(struct talitos_desc);
1440                 edesc->dma_link_tbl = dma_map_single(dev, addr,
1441                                                      edesc->dma_len,
1442                                                      DMA_BIDIRECTIONAL);
1443         }
1444         return edesc;
1445 }
1446
1447 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1448                                               int icv_stashing, bool encrypt)
1449 {
1450         struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1451         unsigned int authsize = crypto_aead_authsize(authenc);
1452         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1453         unsigned int ivsize = crypto_aead_ivsize(authenc);
1454         unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
1455
1456         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1457                                    iv, areq->assoclen, cryptlen,
1458                                    authsize, ivsize, icv_stashing,
1459                                    areq->base.flags, encrypt);
1460 }
1461
1462 static int aead_encrypt(struct aead_request *req)
1463 {
1464         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1465         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1466         struct talitos_edesc *edesc;
1467
1468         /* allocate extended descriptor */
1469         edesc = aead_edesc_alloc(req, req->iv, 0, true);
1470         if (IS_ERR(edesc))
1471                 return PTR_ERR(edesc);
1472
1473         /* set encrypt */
1474         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1475
1476         return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
1477 }
1478
1479 static int aead_decrypt(struct aead_request *req)
1480 {
1481         struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1482         unsigned int authsize = crypto_aead_authsize(authenc);
1483         struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1484         struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1485         struct talitos_edesc *edesc;
1486         void *icvdata;
1487
1488         /* allocate extended descriptor */
1489         edesc = aead_edesc_alloc(req, req->iv, 1, false);
1490         if (IS_ERR(edesc))
1491                 return PTR_ERR(edesc);
1492
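             /*
              * Use the engine's own ICV check (CICV) only for IPSEC_ESP-type
              * descriptors when the hardware has the auth-check feature and
              * either src and dst are single segments or the SEC includes
              * the extent in the source link table length; otherwise fall
              * back to comparing the ICV in software below.
              */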
1493         if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
1494             (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1495             ((!edesc->src_nents && !edesc->dst_nents) ||
1496              priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1497
1498                 /* decrypt and check the ICV */
1499                 edesc->desc.hdr = ctx->desc_hdr_template |
1500                                   DESC_HDR_DIR_INBOUND |
1501                                   DESC_HDR_MODE1_MDEU_CICV;
1502
1503                 /* reset integrity check result bits */
1504
1505                 return ipsec_esp(edesc, req, false,
1506                                  ipsec_esp_decrypt_hwauth_done);
1507         }
1508
1509         /* Have to check the ICV with software */
1510         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1511
1512         /* stash incoming ICV for later cmp with ICV generated by the h/w */
1513         icvdata = edesc->buf + edesc->dma_len;
1514
1515         sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
1516                            req->assoclen + req->cryptlen - authsize);
1517
1518         return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
1519 }
1520
1521 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1522                              const u8 *key, unsigned int keylen)
1523 {
1524         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1525         struct device *dev = ctx->dev;
1526
1527         if (ctx->keylen)
1528                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
1529
1530         memcpy(&ctx->key, key, keylen);
1531         ctx->keylen = keylen;
1532
1533         ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);
1534
1535         return 0;
1536 }
1537
1538 static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
1539                                  const u8 *key, unsigned int keylen)
1540 {
1541         u32 tmp[DES_EXPKEY_WORDS];
1542
1543         if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1544                      CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
1545             !des_ekey(tmp, key)) {
1546                 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1547                 return -EINVAL;
1548         }
1549
1550         return ablkcipher_setkey(cipher, key, keylen);
1551 }
1552
1553 static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
1554                                   const u8 *key, unsigned int keylen)
1555 {
1556         u32 flags;
1557         int err;
1558
1559         flags = crypto_ablkcipher_get_flags(cipher);
1560         err = __des3_verify_key(&flags, key);
1561         if (unlikely(err)) {
1562                 crypto_ablkcipher_set_flags(cipher, flags);
1563                 return err;
1564         }
1565
1566         return ablkcipher_setkey(cipher, key, keylen);
1567 }
1568
1569 static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
1570                                   const u8 *key, unsigned int keylen)
1571 {
1572         if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
1573             keylen == AES_KEYSIZE_256)
1574                 return ablkcipher_setkey(cipher, key, keylen);
1575
1576         crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1577
1578         return -EINVAL;
1579 }
1580
1581 static void common_nonsnoop_unmap(struct device *dev,
1582                                   struct talitos_edesc *edesc,
1583                                   struct ablkcipher_request *areq)
1584 {
1585         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1586
1587         talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1588         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1589
1590         if (edesc->dma_len)
1591                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1592                                  DMA_BIDIRECTIONAL);
1593 }
1594
1595 static void ablkcipher_done(struct device *dev,
1596                             struct talitos_desc *desc, void *context,
1597                             int err)
1598 {
1599         struct ablkcipher_request *areq = context;
1600         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1601         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1602         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1603         struct talitos_edesc *edesc;
1604
1605         edesc = container_of(desc, struct talitos_edesc, desc);
1606
1607         common_nonsnoop_unmap(dev, edesc, areq);
1608         memcpy(areq->info, ctx->iv, ivsize);
1609
1610         kfree(edesc);
1611
1612         areq->base.complete(&areq->base, err);
1613 }
1614
1615 static int common_nonsnoop(struct talitos_edesc *edesc,
1616                            struct ablkcipher_request *areq,
1617                            void (*callback) (struct device *dev,
1618                                              struct talitos_desc *desc,
1619                                              void *context, int error))
1620 {
1621         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1622         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1623         struct device *dev = ctx->dev;
1624         struct talitos_desc *desc = &edesc->desc;
1625         unsigned int cryptlen = areq->nbytes;
1626         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1627         int sg_count, ret;
1628         bool sync_needed = false;
1629         struct talitos_private *priv = dev_get_drvdata(dev);
1630         bool is_sec1 = has_ftr_sec1(priv);
1631
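             /*
              * Descriptor pointer layout used below: ptr[0] and ptr[6] stay
              * empty, ptr[1] = IV in, ptr[2] = cipher key, ptr[3] = data in,
              * ptr[4] = data out, ptr[5] = IV out.
              */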
1632         /* first DWORD empty */
1633
1634         /* cipher iv */
1635         to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);
1636
1637         /* cipher key */
1638         to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);
1639
1640         sg_count = edesc->src_nents ?: 1;
1641         if (is_sec1 && sg_count > 1)
1642                 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1643                                   cryptlen);
1644         else
1645                 sg_count = dma_map_sg(dev, areq->src, sg_count,
1646                                       (areq->src == areq->dst) ?
1647                                       DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1648         /*
1649          * cipher in
1650          */
1651         sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1652                                   &desc->ptr[3], sg_count, 0, 0);
1653         if (sg_count > 1)
1654                 sync_needed = true;
1655
1656         /* cipher out */
1657         if (areq->src != areq->dst) {
1658                 sg_count = edesc->dst_nents ? : 1;
1659                 if (!is_sec1 || sg_count == 1)
1660                         dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1661         }
1662
1663         ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1664                              sg_count, 0, (edesc->src_nents + 1));
1665         if (ret > 1)
1666                 sync_needed = true;
1667
1668         /* iv out */
1669         map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1670                                DMA_FROM_DEVICE);
1671
1672         /* last DWORD empty */
1673
1674         if (sync_needed)
1675                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1676                                            edesc->dma_len, DMA_BIDIRECTIONAL);
1677
1678         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1679         if (ret != -EINPROGRESS) {
1680                 common_nonsnoop_unmap(dev, edesc, areq);
1681                 kfree(edesc);
1682         }
1683         return ret;
1684 }
1685
1686 static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
1687                                                     areq, bool encrypt)
1688 {
1689         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1690         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1691         unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
1692
1693         return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1694                                    areq->info, 0, areq->nbytes, 0, ivsize, 0,
1695                                    areq->base.flags, encrypt);
1696 }
1697
1698 static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1699 {
1700         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1701         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1702         struct talitos_edesc *edesc;
1703         unsigned int blocksize =
1704                         crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1705
1706         if (!areq->nbytes)
1707                 return 0;
1708
1709         if (areq->nbytes % blocksize)
1710                 return -EINVAL;
1711
1712         /* allocate extended descriptor */
1713         edesc = ablkcipher_edesc_alloc(areq, true);
1714         if (IS_ERR(edesc))
1715                 return PTR_ERR(edesc);
1716
1717         /* set encrypt */
1718         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1719
1720         return common_nonsnoop(edesc, areq, ablkcipher_done);
1721 }
1722
1723 static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1724 {
1725         struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1726         struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1727         struct talitos_edesc *edesc;
1728         unsigned int blocksize =
1729                         crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));
1730
1731         if (!areq->nbytes)
1732                 return 0;
1733
1734         if (areq->nbytes % blocksize)
1735                 return -EINVAL;
1736
1737         /* allocate extended descriptor */
1738         edesc = ablkcipher_edesc_alloc(areq, false);
1739         if (IS_ERR(edesc))
1740                 return PTR_ERR(edesc);
1741
1742         edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1743
1744         return common_nonsnoop(edesc, areq, ablkcipher_done);
1745 }
1746
1747 static void common_nonsnoop_hash_unmap(struct device *dev,
1748                                        struct talitos_edesc *edesc,
1749                                        struct ahash_request *areq)
1750 {
1751         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1752         struct talitos_private *priv = dev_get_drvdata(dev);
1753         bool is_sec1 = has_ftr_sec1(priv);
1754         struct talitos_desc *desc = &edesc->desc;
1755         struct talitos_desc *desc2 = desc + 1;
1756
1757         unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1758         if (desc->next_desc &&
1759             desc->ptr[5].ptr != desc2->ptr[5].ptr)
1760                 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);
1761
1762         talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
1763
1764         /* When using hashctx-in, must unmap it. */
1765         if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
1766                 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1767                                          DMA_TO_DEVICE);
1768         else if (desc->next_desc)
1769                 unmap_single_talitos_ptr(dev, &desc2->ptr[1],
1770                                          DMA_TO_DEVICE);
1771
1772         if (is_sec1 && req_ctx->nbuf)
1773                 unmap_single_talitos_ptr(dev, &desc->ptr[3],
1774                                          DMA_TO_DEVICE);
1775
1776         if (edesc->dma_len)
1777                 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1778                                  DMA_BIDIRECTIONAL);
1779
1780         if (edesc->desc.next_desc)
1781                 dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
1782                                  TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
1783 }
1784
1785 static void ahash_done(struct device *dev,
1786                        struct talitos_desc *desc, void *context,
1787                        int err)
1788 {
1789         struct ahash_request *areq = context;
1790         struct talitos_edesc *edesc =
1791                  container_of(desc, struct talitos_edesc, desc);
1792         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1793
1794         if (!req_ctx->last && req_ctx->to_hash_later) {
1795                 /* Position any partial block for next update/final/finup */
1796                 req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
1797                 req_ctx->nbuf = req_ctx->to_hash_later;
1798         }
1799         common_nonsnoop_hash_unmap(dev, edesc, areq);
1800
1801         kfree(edesc);
1802
1803         areq->base.complete(&areq->base, err);
1804 }
1805
1806 /*
1807  * SEC1 doesn't like hashing a 0-sized message, so we do the padding
1808  * ourselves and submit a padded block
1809  */
1810 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1811                                struct talitos_edesc *edesc,
1812                                struct talitos_ptr *ptr)
1813 {
1814         static u8 padded_hash[64] = {
1815                 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1816                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1817                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1818                 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1819         };
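             /*
              * padded_hash is a ready-made empty message for 64-byte-block
              * digests: the 0x80 terminator followed by zeros, including a
              * zero length field, so MDEU padding is turned off below.
              */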
1820
1821         pr_err_once("Bug in SEC1, padding ourself\n");
1822         edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1823         map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1824                                (char *)padded_hash, DMA_TO_DEVICE);
1825 }
1826
1827 static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1828                                 struct ahash_request *areq, unsigned int length,
1829                                 unsigned int offset,
1830                                 void (*callback) (struct device *dev,
1831                                                   struct talitos_desc *desc,
1832                                                   void *context, int error))
1833 {
1834         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1835         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1836         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1837         struct device *dev = ctx->dev;
1838         struct talitos_desc *desc = &edesc->desc;
1839         int ret;
1840         bool sync_needed = false;
1841         struct talitos_private *priv = dev_get_drvdata(dev);
1842         bool is_sec1 = has_ftr_sec1(priv);
1843         int sg_count;
1844
1845         /* first DWORD empty */
1846
1847         /* hash context in */
1848         if (!req_ctx->first || req_ctx->swinit) {
1849                 map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
1850                                               req_ctx->hw_context_size,
1851                                               req_ctx->hw_context,
1852                                               DMA_TO_DEVICE);
1853                 req_ctx->swinit = 0;
1854         }
1855         /* Indicate next op is not the first. */
1856         req_ctx->first = 0;
1857
1858         /* HMAC key */
1859         if (ctx->keylen)
1860                 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
1861                                is_sec1);
1862
1863         if (is_sec1 && req_ctx->nbuf)
1864                 length -= req_ctx->nbuf;
1865
1866         sg_count = edesc->src_nents ?: 1;
1867         if (is_sec1 && sg_count > 1)
1868                 sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
1869                                    edesc->buf + sizeof(struct talitos_desc),
1870                                    length, req_ctx->nbuf);
1871         else if (length)
1872                 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1873                                       DMA_TO_DEVICE);
1874         /*
1875          * data in
1876          */
1877         if (is_sec1 && req_ctx->nbuf) {
1878                 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
1879                                        req_ctx->buf[req_ctx->buf_idx],
1880                                        DMA_TO_DEVICE);
1881         } else {
1882                 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1883                                           &desc->ptr[3], sg_count, offset, 0);
1884                 if (sg_count > 1)
1885                         sync_needed = true;
1886         }
1887
1888         /* fifth DWORD empty */
1889
1890         /* hash/HMAC out -or- hash context out */
1891         if (req_ctx->last)
1892                 map_single_talitos_ptr(dev, &desc->ptr[5],
1893                                        crypto_ahash_digestsize(tfm),
1894                                        areq->result, DMA_FROM_DEVICE);
1895         else
1896                 map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1897                                               req_ctx->hw_context_size,
1898                                               req_ctx->hw_context,
1899                                               DMA_FROM_DEVICE);
1900
1901         /* last DWORD empty */
1902
1903         if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1904                 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1905
1906         if (is_sec1 && req_ctx->nbuf && length) {
1907                 struct talitos_desc *desc2 = desc + 1;
1908                 dma_addr_t next_desc;
1909
1910                 memset(desc2, 0, sizeof(*desc2));
1911                 desc2->hdr = desc->hdr;
1912                 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
1913                 desc2->hdr1 = desc2->hdr;
1914                 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1915                 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
1916                 desc->hdr &= ~DESC_HDR_DONE_NOTIFY;
1917
1918                 if (desc->ptr[1].ptr)
1919                         copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
1920                                          is_sec1);
1921                 else
1922                         map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
1923                                                       req_ctx->hw_context_size,
1924                                                       req_ctx->hw_context,
1925                                                       DMA_TO_DEVICE);
1926                 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
1927                 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1928                                           &desc2->ptr[3], sg_count, offset, 0);
1929                 if (sg_count > 1)
1930                         sync_needed = true;
1931                 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
1932                 if (req_ctx->last)
1933                         map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
1934                                                       req_ctx->hw_context_size,
1935                                                       req_ctx->hw_context,
1936                                                       DMA_FROM_DEVICE);
1937
1938                 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
1939                                            DMA_BIDIRECTIONAL);
1940                 desc->next_desc = cpu_to_be32(next_desc);
1941         }
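             /*
              * In the SEC1 two-descriptor case above, the first descriptor
              * hashes the previously buffered block (padding and done
              * notification disabled) and the chained second descriptor,
              * reached via next_desc, continues with the scatterlist data
              * without re-initializing the hash context.
              */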
1942
1943         if (sync_needed)
1944                 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1945                                            edesc->dma_len, DMA_BIDIRECTIONAL);
1946
1947         ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1948         if (ret != -EINPROGRESS) {
1949                 common_nonsnoop_hash_unmap(dev, edesc, areq);
1950                 kfree(edesc);
1951         }
1952         return ret;
1953 }
1954
1955 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1956                                                unsigned int nbytes)
1957 {
1958         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1959         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1960         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1961         struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1962         bool is_sec1 = has_ftr_sec1(priv);
1963
1964         if (is_sec1)
1965                 nbytes -= req_ctx->nbuf;
1966
1967         return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1968                                    nbytes, 0, 0, 0, areq->base.flags, false);
1969 }
1970
1971 static int ahash_init(struct ahash_request *areq)
1972 {
1973         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1974         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1975         struct device *dev = ctx->dev;
1976         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1977         unsigned int size;
1978         dma_addr_t dma;
1979
1980         /* Initialize the context */
1981         req_ctx->buf_idx = 0;
1982         req_ctx->nbuf = 0;
1983         req_ctx->first = 1; /* first indicates h/w must init its context */
1984         req_ctx->swinit = 0; /* assume h/w init of context */
1985         size =  (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1986                         ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1987                         : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1988         req_ctx->hw_context_size = size;
1989
1990         dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1991                              DMA_TO_DEVICE);
1992         dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
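             /*
              * This map/unmap pair transfers nothing by itself; it syncs
              * hw_context for the device so that the later
              * map_single_talitos_ptr_nosync() calls, which skip CPU syncs,
              * see consistent data.
              */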
1993
1994         return 0;
1995 }
1996
1997 /*
1998  * on h/w without explicit sha224 support, we initialize h/w context
1999  * manually with sha224 constants, and tell it to run sha256.
2000  */
2001 static int ahash_init_sha224_swinit(struct ahash_request *areq)
2002 {
2003         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2004
2005         req_ctx->hw_context[0] = SHA224_H0;
2006         req_ctx->hw_context[1] = SHA224_H1;
2007         req_ctx->hw_context[2] = SHA224_H2;
2008         req_ctx->hw_context[3] = SHA224_H3;
2009         req_ctx->hw_context[4] = SHA224_H4;
2010         req_ctx->hw_context[5] = SHA224_H5;
2011         req_ctx->hw_context[6] = SHA224_H6;
2012         req_ctx->hw_context[7] = SHA224_H7;
2013
2014         /* init 64-bit count */
2015         req_ctx->hw_context[8] = 0;
2016         req_ctx->hw_context[9] = 0;
2017
2018         ahash_init(areq);
2019         req_ctx->swinit = 1; /* prevent h/w init of context with sha256 values */
2020
2021         return 0;
2022 }
2023
2024 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
2025 {
2026         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2027         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2028         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2029         struct talitos_edesc *edesc;
2030         unsigned int blocksize =
2031                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2032         unsigned int nbytes_to_hash;
2033         unsigned int to_hash_later;
2034         unsigned int nsg;
2035         int nents;
2036         struct device *dev = ctx->dev;
2037         struct talitos_private *priv = dev_get_drvdata(dev);
2038         bool is_sec1 = has_ftr_sec1(priv);
2039         int offset = 0;
2040         u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
2041
2042         if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
2043                 /* Buffer up to one whole block */
2044                 nents = sg_nents_for_len(areq->src, nbytes);
2045                 if (nents < 0) {
2046                         dev_err(ctx->dev, "Invalid number of src SG.\n");
2047                         return nents;
2048                 }
2049                 sg_copy_to_buffer(areq->src, nents,
2050                                   ctx_buf + req_ctx->nbuf, nbytes);
2051                 req_ctx->nbuf += nbytes;
2052                 return 0;
2053         }
2054
2055         /* At least (blocksize + 1) bytes are available to hash */
2056         nbytes_to_hash = nbytes + req_ctx->nbuf;
2057         to_hash_later = nbytes_to_hash & (blocksize - 1);
2058
2059         if (req_ctx->last)
2060                 to_hash_later = 0;
2061         else if (to_hash_later)
2062                 /* There is a partial block. Hash the full block(s) now */
2063                 nbytes_to_hash -= to_hash_later;
2064         else {
2065                 /* Keep one block buffered */
2066                 nbytes_to_hash -= blocksize;
2067                 to_hash_later = blocksize;
2068         }
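             /*
              * Only whole blocks are submitted to the hardware; the trailing
              * partial block (or one full block, when the data is block
              * aligned and this is not the final request) stays buffered for
              * the next update/final/finup.
              */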
2069
2070         /* Chain in any previously buffered data */
2071         if (!is_sec1 && req_ctx->nbuf) {
2072                 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2073                 sg_init_table(req_ctx->bufsl, nsg);
2074                 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2075                 if (nsg > 1)
2076                         sg_chain(req_ctx->bufsl, 2, areq->src);
2077                 req_ctx->psrc = req_ctx->bufsl;
2078         } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2079                 if (nbytes_to_hash > blocksize)
2080                         offset = blocksize - req_ctx->nbuf;
2081                 else
2082                         offset = nbytes_to_hash - req_ctx->nbuf;
2083                 nents = sg_nents_for_len(areq->src, offset);
2084                 if (nents < 0) {
2085                         dev_err(ctx->dev, "Invalid number of src SG.\n");
2086                         return nents;
2087                 }
2088                 sg_copy_to_buffer(areq->src, nents,
2089                                   ctx_buf + req_ctx->nbuf, offset);
2090                 req_ctx->nbuf += offset;
2091                 req_ctx->psrc = areq->src;
2092         } else
2093                 req_ctx->psrc = areq->src;
2094
2095         if (to_hash_later) {
2096                 nents = sg_nents_for_len(areq->src, nbytes);
2097                 if (nents < 0) {
2098                         dev_err(ctx->dev, "Invalid number of src SG.\n");
2099                         return nents;
2100                 }
2101                 sg_pcopy_to_buffer(areq->src, nents,
2102                                    req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2103                                    to_hash_later,
2104                                    nbytes - to_hash_later);
2105         }
2106         req_ctx->to_hash_later = to_hash_later;
2107
2108         /* Allocate extended descriptor */
2109         edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2110         if (IS_ERR(edesc))
2111                 return PTR_ERR(edesc);
2112
2113         edesc->desc.hdr = ctx->desc_hdr_template;
2114
2115         /* On last one, request SEC to pad; otherwise continue */
2116         if (req_ctx->last)
2117                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2118         else
2119                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2120
2121         /* request SEC to INIT hash. */
2122         if (req_ctx->first && !req_ctx->swinit)
2123                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2124
2125         /* When the tfm context has a keylen, it's an HMAC.
2126          * A first or last (i.e. not middle) descriptor must request HMAC.
2127          */
2128         if (ctx->keylen && (req_ctx->first || req_ctx->last))
2129                 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2130
2131         return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
2132                                     ahash_done);
2133 }
2134
2135 static int ahash_update(struct ahash_request *areq)
2136 {
2137         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2138
2139         req_ctx->last = 0;
2140
2141         return ahash_process_req(areq, areq->nbytes);
2142 }
2143
2144 static int ahash_final(struct ahash_request *areq)
2145 {
2146         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2147
2148         req_ctx->last = 1;
2149
2150         return ahash_process_req(areq, 0);
2151 }
2152
2153 static int ahash_finup(struct ahash_request *areq)
2154 {
2155         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2156
2157         req_ctx->last = 1;
2158
2159         return ahash_process_req(areq, areq->nbytes);
2160 }
2161
2162 static int ahash_digest(struct ahash_request *areq)
2163 {
2164         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2165         struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2166
2167         ahash->init(areq);
2168         req_ctx->last = 1;
2169
2170         return ahash_process_req(areq, areq->nbytes);
2171 }
2172
2173 static int ahash_export(struct ahash_request *areq, void *out)
2174 {
2175         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2176         struct talitos_export_state *export = out;
2177         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2178         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2179         struct device *dev = ctx->dev;
2180         dma_addr_t dma;
2181
2182         dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2183                              DMA_FROM_DEVICE);
2184         dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2185
2186         memcpy(export->hw_context, req_ctx->hw_context,
2187                req_ctx->hw_context_size);
2188         memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2189         export->swinit = req_ctx->swinit;
2190         export->first = req_ctx->first;
2191         export->last = req_ctx->last;
2192         export->to_hash_later = req_ctx->to_hash_later;
2193         export->nbuf = req_ctx->nbuf;
2194
2195         return 0;
2196 }
2197
2198 static int ahash_import(struct ahash_request *areq, const void *in)
2199 {
2200         struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2201         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2202         struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2203         struct device *dev = ctx->dev;
2204         const struct talitos_export_state *export = in;
2205         unsigned int size;
2206         dma_addr_t dma;
2207
2208         memset(req_ctx, 0, sizeof(*req_ctx));
2209         size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2210                         ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2211                         : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2212         req_ctx->hw_context_size = size;
2213         memcpy(req_ctx->hw_context, export->hw_context, size);
2214         memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2215         req_ctx->swinit = export->swinit;
2216         req_ctx->first = export->first;
2217         req_ctx->last = export->last;
2218         req_ctx->to_hash_later = export->to_hash_later;
2219         req_ctx->nbuf = export->nbuf;
2220
2221         dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2222                              DMA_TO_DEVICE);
2223         dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2224
2225         return 0;
2226 }
2227
2228 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2229                    u8 *hash)
2230 {
2231         struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2232
2233         struct scatterlist sg[1];
2234         struct ahash_request *req;
2235         struct crypto_wait wait;
2236         int ret;
2237
2238         crypto_init_wait(&wait);
2239
2240         req = ahash_request_alloc(tfm, GFP_KERNEL);
2241         if (!req)
2242                 return -ENOMEM;
2243
2244         /* Keep tfm keylen == 0 during hash of the long key */
2245         ctx->keylen = 0;
2246         ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2247                                    crypto_req_done, &wait);
2248
2249         sg_init_one(&sg[0], key, keylen);
2250
2251         ahash_request_set_crypt(req, sg, hash, keylen);
2252         ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2253
2254         ahash_request_free(req);
2255
2256         return ret;
2257 }
2258
2259 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2260                         unsigned int keylen)
2261 {
2262         struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2263         struct device *dev = ctx->dev;
2264         unsigned int blocksize =
2265                         crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2266         unsigned int digestsize = crypto_ahash_digestsize(tfm);
2267         unsigned int keysize = keylen;
2268         u8 hash[SHA512_DIGEST_SIZE];
2269         int ret;
2270
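             /*
              * As in the HMAC construction (RFC 2104), a key longer than the
              * block size is first hashed down to digestsize bytes; keyhash()
              * runs that digest with ctx->keylen temporarily 0 so the hash
              * itself is not HMAC-keyed.
              */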
2271         if (keylen <= blocksize)
2272                 memcpy(ctx->key, key, keysize);
2273         else {
2274                 /* Must get the hash of the long key */
2275                 ret = keyhash(tfm, key, keylen, hash);
2276
2277                 if (ret) {
2278                         crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2279                         return -EINVAL;
2280                 }
2281
2282                 keysize = digestsize;
2283                 memcpy(ctx->key, hash, digestsize);
2284         }
2285
2286         if (ctx->keylen)
2287                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2288
2289         ctx->keylen = keysize;
2290         ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2291
2292         return 0;
2293 }
2294
2295
2296 struct talitos_alg_template {
2297         u32 type;
2298         u32 priority;
2299         union {
2300                 struct crypto_alg crypto;
2301                 struct ahash_alg hash;
2302                 struct aead_alg aead;
2303         } alg;
2304         __be32 desc_hdr_template;
2305 };
2306
2307 static struct talitos_alg_template driver_algs[] = {
2308         /* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
2309         {       .type = CRYPTO_ALG_TYPE_AEAD,
2310                 .alg.aead = {
2311                         .base = {
2312                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2313                                 .cra_driver_name = "authenc-hmac-sha1-"
2314                                                    "cbc-aes-talitos",
2315                                 .cra_blocksize = AES_BLOCK_SIZE,
2316                                 .cra_flags = CRYPTO_ALG_ASYNC,
2317                         },
2318                         .ivsize = AES_BLOCK_SIZE,
2319                         .maxauthsize = SHA1_DIGEST_SIZE,
2320                 },
2321                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2322                                      DESC_HDR_SEL0_AESU |
2323                                      DESC_HDR_MODE0_AESU_CBC |
2324                                      DESC_HDR_SEL1_MDEUA |
2325                                      DESC_HDR_MODE1_MDEU_INIT |
2326                                      DESC_HDR_MODE1_MDEU_PAD |
2327                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2328         },
2329         {       .type = CRYPTO_ALG_TYPE_AEAD,
2330                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2331                 .alg.aead = {
2332                         .base = {
2333                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2334                                 .cra_driver_name = "authenc-hmac-sha1-"
2335                                                    "cbc-aes-talitos-hsna",
2336                                 .cra_blocksize = AES_BLOCK_SIZE,
2337                                 .cra_flags = CRYPTO_ALG_ASYNC,
2338                         },
2339                         .ivsize = AES_BLOCK_SIZE,
2340                         .maxauthsize = SHA1_DIGEST_SIZE,
2341                 },
2342                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2343                                      DESC_HDR_SEL0_AESU |
2344                                      DESC_HDR_MODE0_AESU_CBC |
2345                                      DESC_HDR_SEL1_MDEUA |
2346                                      DESC_HDR_MODE1_MDEU_INIT |
2347                                      DESC_HDR_MODE1_MDEU_PAD |
2348                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2349         },
2350         {       .type = CRYPTO_ALG_TYPE_AEAD,
2351                 .alg.aead = {
2352                         .base = {
2353                                 .cra_name = "authenc(hmac(sha1),"
2354                                             "cbc(des3_ede))",
2355                                 .cra_driver_name = "authenc-hmac-sha1-"
2356                                                    "cbc-3des-talitos",
2357                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2358                                 .cra_flags = CRYPTO_ALG_ASYNC,
2359                         },
2360                         .ivsize = DES3_EDE_BLOCK_SIZE,
2361                         .maxauthsize = SHA1_DIGEST_SIZE,
2362                         .setkey = aead_des3_setkey,
2363                 },
2364                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2365                                      DESC_HDR_SEL0_DEU |
2366                                      DESC_HDR_MODE0_DEU_CBC |
2367                                      DESC_HDR_MODE0_DEU_3DES |
2368                                      DESC_HDR_SEL1_MDEUA |
2369                                      DESC_HDR_MODE1_MDEU_INIT |
2370                                      DESC_HDR_MODE1_MDEU_PAD |
2371                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2372         },
2373         {       .type = CRYPTO_ALG_TYPE_AEAD,
2374                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2375                 .alg.aead = {
2376                         .base = {
2377                                 .cra_name = "authenc(hmac(sha1),"
2378                                             "cbc(des3_ede))",
2379                                 .cra_driver_name = "authenc-hmac-sha1-"
2380                                                    "cbc-3des-talitos-hsna",
2381                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2382                                 .cra_flags = CRYPTO_ALG_ASYNC,
2383                         },
2384                         .ivsize = DES3_EDE_BLOCK_SIZE,
2385                         .maxauthsize = SHA1_DIGEST_SIZE,
2386                         .setkey = aead_des3_setkey,
2387                 },
2388                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2389                                      DESC_HDR_SEL0_DEU |
2390                                      DESC_HDR_MODE0_DEU_CBC |
2391                                      DESC_HDR_MODE0_DEU_3DES |
2392                                      DESC_HDR_SEL1_MDEUA |
2393                                      DESC_HDR_MODE1_MDEU_INIT |
2394                                      DESC_HDR_MODE1_MDEU_PAD |
2395                                      DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2396         },
2397         {       .type = CRYPTO_ALG_TYPE_AEAD,
2398                 .alg.aead = {
2399                         .base = {
2400                                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2401                                 .cra_driver_name = "authenc-hmac-sha224-"
2402                                                    "cbc-aes-talitos",
2403                                 .cra_blocksize = AES_BLOCK_SIZE,
2404                                 .cra_flags = CRYPTO_ALG_ASYNC,
2405                         },
2406                         .ivsize = AES_BLOCK_SIZE,
2407                         .maxauthsize = SHA224_DIGEST_SIZE,
2408                 },
2409                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2410                                      DESC_HDR_SEL0_AESU |
2411                                      DESC_HDR_MODE0_AESU_CBC |
2412                                      DESC_HDR_SEL1_MDEUA |
2413                                      DESC_HDR_MODE1_MDEU_INIT |
2414                                      DESC_HDR_MODE1_MDEU_PAD |
2415                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2416         },
2417         {       .type = CRYPTO_ALG_TYPE_AEAD,
2418                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2419                 .alg.aead = {
2420                         .base = {
2421                                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2422                                 .cra_driver_name = "authenc-hmac-sha224-"
2423                                                    "cbc-aes-talitos-hsna",
2424                                 .cra_blocksize = AES_BLOCK_SIZE,
2425                                 .cra_flags = CRYPTO_ALG_ASYNC,
2426                         },
2427                         .ivsize = AES_BLOCK_SIZE,
2428                         .maxauthsize = SHA224_DIGEST_SIZE,
2429                 },
2430                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2431                                      DESC_HDR_SEL0_AESU |
2432                                      DESC_HDR_MODE0_AESU_CBC |
2433                                      DESC_HDR_SEL1_MDEUA |
2434                                      DESC_HDR_MODE1_MDEU_INIT |
2435                                      DESC_HDR_MODE1_MDEU_PAD |
2436                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2437         },
2438         {       .type = CRYPTO_ALG_TYPE_AEAD,
2439                 .alg.aead = {
2440                         .base = {
2441                                 .cra_name = "authenc(hmac(sha224),"
2442                                             "cbc(des3_ede))",
2443                                 .cra_driver_name = "authenc-hmac-sha224-"
2444                                                    "cbc-3des-talitos",
2445                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2446                                 .cra_flags = CRYPTO_ALG_ASYNC,
2447                         },
2448                         .ivsize = DES3_EDE_BLOCK_SIZE,
2449                         .maxauthsize = SHA224_DIGEST_SIZE,
2450                         .setkey = aead_des3_setkey,
2451                 },
2452                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2453                                      DESC_HDR_SEL0_DEU |
2454                                      DESC_HDR_MODE0_DEU_CBC |
2455                                      DESC_HDR_MODE0_DEU_3DES |
2456                                      DESC_HDR_SEL1_MDEUA |
2457                                      DESC_HDR_MODE1_MDEU_INIT |
2458                                      DESC_HDR_MODE1_MDEU_PAD |
2459                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2460         },
2461         {       .type = CRYPTO_ALG_TYPE_AEAD,
2462                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2463                 .alg.aead = {
2464                         .base = {
2465                                 .cra_name = "authenc(hmac(sha224),"
2466                                             "cbc(des3_ede))",
2467                                 .cra_driver_name = "authenc-hmac-sha224-"
2468                                                    "cbc-3des-talitos-hsna",
2469                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2470                                 .cra_flags = CRYPTO_ALG_ASYNC,
2471                         },
2472                         .ivsize = DES3_EDE_BLOCK_SIZE,
2473                         .maxauthsize = SHA224_DIGEST_SIZE,
2474                         .setkey = aead_des3_setkey,
2475                 },
2476                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2477                                      DESC_HDR_SEL0_DEU |
2478                                      DESC_HDR_MODE0_DEU_CBC |
2479                                      DESC_HDR_MODE0_DEU_3DES |
2480                                      DESC_HDR_SEL1_MDEUA |
2481                                      DESC_HDR_MODE1_MDEU_INIT |
2482                                      DESC_HDR_MODE1_MDEU_PAD |
2483                                      DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2484         },
2485         {       .type = CRYPTO_ALG_TYPE_AEAD,
2486                 .alg.aead = {
2487                         .base = {
2488                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2489                                 .cra_driver_name = "authenc-hmac-sha256-"
2490                                                    "cbc-aes-talitos",
2491                                 .cra_blocksize = AES_BLOCK_SIZE,
2492                                 .cra_flags = CRYPTO_ALG_ASYNC,
2493                         },
2494                         .ivsize = AES_BLOCK_SIZE,
2495                         .maxauthsize = SHA256_DIGEST_SIZE,
2496                 },
2497                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2498                                      DESC_HDR_SEL0_AESU |
2499                                      DESC_HDR_MODE0_AESU_CBC |
2500                                      DESC_HDR_SEL1_MDEUA |
2501                                      DESC_HDR_MODE1_MDEU_INIT |
2502                                      DESC_HDR_MODE1_MDEU_PAD |
2503                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2504         },
2505         {       .type = CRYPTO_ALG_TYPE_AEAD,
2506                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2507                 .alg.aead = {
2508                         .base = {
2509                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2510                                 .cra_driver_name = "authenc-hmac-sha256-"
2511                                                    "cbc-aes-talitos-hsna",
2512                                 .cra_blocksize = AES_BLOCK_SIZE,
2513                                 .cra_flags = CRYPTO_ALG_ASYNC,
2514                         },
2515                         .ivsize = AES_BLOCK_SIZE,
2516                         .maxauthsize = SHA256_DIGEST_SIZE,
2517                 },
2518                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2519                                      DESC_HDR_SEL0_AESU |
2520                                      DESC_HDR_MODE0_AESU_CBC |
2521                                      DESC_HDR_SEL1_MDEUA |
2522                                      DESC_HDR_MODE1_MDEU_INIT |
2523                                      DESC_HDR_MODE1_MDEU_PAD |
2524                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2525         },
2526         {       .type = CRYPTO_ALG_TYPE_AEAD,
2527                 .alg.aead = {
2528                         .base = {
2529                                 .cra_name = "authenc(hmac(sha256),"
2530                                             "cbc(des3_ede))",
2531                                 .cra_driver_name = "authenc-hmac-sha256-"
2532                                                    "cbc-3des-talitos",
2533                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2534                                 .cra_flags = CRYPTO_ALG_ASYNC,
2535                         },
2536                         .ivsize = DES3_EDE_BLOCK_SIZE,
2537                         .maxauthsize = SHA256_DIGEST_SIZE,
2538                         .setkey = aead_des3_setkey,
2539                 },
2540                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2541                                      DESC_HDR_SEL0_DEU |
2542                                      DESC_HDR_MODE0_DEU_CBC |
2543                                      DESC_HDR_MODE0_DEU_3DES |
2544                                      DESC_HDR_SEL1_MDEUA |
2545                                      DESC_HDR_MODE1_MDEU_INIT |
2546                                      DESC_HDR_MODE1_MDEU_PAD |
2547                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2548         },
2549         {       .type = CRYPTO_ALG_TYPE_AEAD,
2550                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2551                 .alg.aead = {
2552                         .base = {
2553                                 .cra_name = "authenc(hmac(sha256),"
2554                                             "cbc(des3_ede))",
2555                                 .cra_driver_name = "authenc-hmac-sha256-"
2556                                                    "cbc-3des-talitos-hsna",
2557                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2558                                 .cra_flags = CRYPTO_ALG_ASYNC,
2559                         },
2560                         .ivsize = DES3_EDE_BLOCK_SIZE,
2561                         .maxauthsize = SHA256_DIGEST_SIZE,
2562                         .setkey = aead_des3_setkey,
2563                 },
2564                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2565                                      DESC_HDR_SEL0_DEU |
2566                                      DESC_HDR_MODE0_DEU_CBC |
2567                                      DESC_HDR_MODE0_DEU_3DES |
2568                                      DESC_HDR_SEL1_MDEUA |
2569                                      DESC_HDR_MODE1_MDEU_INIT |
2570                                      DESC_HDR_MODE1_MDEU_PAD |
2571                                      DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2572         },
2573         {       .type = CRYPTO_ALG_TYPE_AEAD,
2574                 .alg.aead = {
2575                         .base = {
2576                                 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2577                                 .cra_driver_name = "authenc-hmac-sha384-"
2578                                                    "cbc-aes-talitos",
2579                                 .cra_blocksize = AES_BLOCK_SIZE,
2580                                 .cra_flags = CRYPTO_ALG_ASYNC,
2581                         },
2582                         .ivsize = AES_BLOCK_SIZE,
2583                         .maxauthsize = SHA384_DIGEST_SIZE,
2584                 },
2585                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2586                                      DESC_HDR_SEL0_AESU |
2587                                      DESC_HDR_MODE0_AESU_CBC |
2588                                      DESC_HDR_SEL1_MDEUB |
2589                                      DESC_HDR_MODE1_MDEU_INIT |
2590                                      DESC_HDR_MODE1_MDEU_PAD |
2591                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2592         },
2593         {       .type = CRYPTO_ALG_TYPE_AEAD,
2594                 .alg.aead = {
2595                         .base = {
2596                                 .cra_name = "authenc(hmac(sha384),"
2597                                             "cbc(des3_ede))",
2598                                 .cra_driver_name = "authenc-hmac-sha384-"
2599                                                    "cbc-3des-talitos",
2600                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2601                                 .cra_flags = CRYPTO_ALG_ASYNC,
2602                         },
2603                         .ivsize = DES3_EDE_BLOCK_SIZE,
2604                         .maxauthsize = SHA384_DIGEST_SIZE,
2605                         .setkey = aead_des3_setkey,
2606                 },
2607                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2608                                      DESC_HDR_SEL0_DEU |
2609                                      DESC_HDR_MODE0_DEU_CBC |
2610                                      DESC_HDR_MODE0_DEU_3DES |
2611                                      DESC_HDR_SEL1_MDEUB |
2612                                      DESC_HDR_MODE1_MDEU_INIT |
2613                                      DESC_HDR_MODE1_MDEU_PAD |
2614                                      DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2615         },
2616         {       .type = CRYPTO_ALG_TYPE_AEAD,
2617                 .alg.aead = {
2618                         .base = {
2619                                 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2620                                 .cra_driver_name = "authenc-hmac-sha512-"
2621                                                    "cbc-aes-talitos",
2622                                 .cra_blocksize = AES_BLOCK_SIZE,
2623                                 .cra_flags = CRYPTO_ALG_ASYNC,
2624                         },
2625                         .ivsize = AES_BLOCK_SIZE,
2626                         .maxauthsize = SHA512_DIGEST_SIZE,
2627                 },
2628                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2629                                      DESC_HDR_SEL0_AESU |
2630                                      DESC_HDR_MODE0_AESU_CBC |
2631                                      DESC_HDR_SEL1_MDEUB |
2632                                      DESC_HDR_MODE1_MDEU_INIT |
2633                                      DESC_HDR_MODE1_MDEU_PAD |
2634                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2635         },
2636         {       .type = CRYPTO_ALG_TYPE_AEAD,
2637                 .alg.aead = {
2638                         .base = {
2639                                 .cra_name = "authenc(hmac(sha512),"
2640                                             "cbc(des3_ede))",
2641                                 .cra_driver_name = "authenc-hmac-sha512-"
2642                                                    "cbc-3des-talitos",
2643                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2644                                 .cra_flags = CRYPTO_ALG_ASYNC,
2645                         },
2646                         .ivsize = DES3_EDE_BLOCK_SIZE,
2647                         .maxauthsize = SHA512_DIGEST_SIZE,
2648                         .setkey = aead_des3_setkey,
2649                 },
2650                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2651                                      DESC_HDR_SEL0_DEU |
2652                                      DESC_HDR_MODE0_DEU_CBC |
2653                                      DESC_HDR_MODE0_DEU_3DES |
2654                                      DESC_HDR_SEL1_MDEUB |
2655                                      DESC_HDR_MODE1_MDEU_INIT |
2656                                      DESC_HDR_MODE1_MDEU_PAD |
2657                                      DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2658         },
2659         {       .type = CRYPTO_ALG_TYPE_AEAD,
2660                 .alg.aead = {
2661                         .base = {
2662                                 .cra_name = "authenc(hmac(md5),cbc(aes))",
2663                                 .cra_driver_name = "authenc-hmac-md5-"
2664                                                    "cbc-aes-talitos",
2665                                 .cra_blocksize = AES_BLOCK_SIZE,
2666                                 .cra_flags = CRYPTO_ALG_ASYNC,
2667                         },
2668                         .ivsize = AES_BLOCK_SIZE,
2669                         .maxauthsize = MD5_DIGEST_SIZE,
2670                 },
2671                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2672                                      DESC_HDR_SEL0_AESU |
2673                                      DESC_HDR_MODE0_AESU_CBC |
2674                                      DESC_HDR_SEL1_MDEUA |
2675                                      DESC_HDR_MODE1_MDEU_INIT |
2676                                      DESC_HDR_MODE1_MDEU_PAD |
2677                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2678         },
2679         {       .type = CRYPTO_ALG_TYPE_AEAD,
2680                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2681                 .alg.aead = {
2682                         .base = {
2683                                 .cra_name = "authenc(hmac(md5),cbc(aes))",
2684                                 .cra_driver_name = "authenc-hmac-md5-"
2685                                                    "cbc-aes-talitos-hsna",
2686                                 .cra_blocksize = AES_BLOCK_SIZE,
2687                                 .cra_flags = CRYPTO_ALG_ASYNC,
2688                         },
2689                         .ivsize = AES_BLOCK_SIZE,
2690                         .maxauthsize = MD5_DIGEST_SIZE,
2691                 },
2692                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2693                                      DESC_HDR_SEL0_AESU |
2694                                      DESC_HDR_MODE0_AESU_CBC |
2695                                      DESC_HDR_SEL1_MDEUA |
2696                                      DESC_HDR_MODE1_MDEU_INIT |
2697                                      DESC_HDR_MODE1_MDEU_PAD |
2698                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2699         },
2700         {       .type = CRYPTO_ALG_TYPE_AEAD,
2701                 .alg.aead = {
2702                         .base = {
2703                                 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2704                                 .cra_driver_name = "authenc-hmac-md5-"
2705                                                    "cbc-3des-talitos",
2706                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2707                                 .cra_flags = CRYPTO_ALG_ASYNC,
2708                         },
2709                         .ivsize = DES3_EDE_BLOCK_SIZE,
2710                         .maxauthsize = MD5_DIGEST_SIZE,
2711                         .setkey = aead_des3_setkey,
2712                 },
2713                 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2714                                      DESC_HDR_SEL0_DEU |
2715                                      DESC_HDR_MODE0_DEU_CBC |
2716                                      DESC_HDR_MODE0_DEU_3DES |
2717                                      DESC_HDR_SEL1_MDEUA |
2718                                      DESC_HDR_MODE1_MDEU_INIT |
2719                                      DESC_HDR_MODE1_MDEU_PAD |
2720                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2721         },
2722         {       .type = CRYPTO_ALG_TYPE_AEAD,
2723                 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2724                 .alg.aead = {
2725                         .base = {
2726                                 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2727                                 .cra_driver_name = "authenc-hmac-md5-"
2728                                                    "cbc-3des-talitos-hsna",
2729                                 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2730                                 .cra_flags = CRYPTO_ALG_ASYNC,
2731                         },
2732                         .ivsize = DES3_EDE_BLOCK_SIZE,
2733                         .maxauthsize = MD5_DIGEST_SIZE,
2734                         .setkey = aead_des3_setkey,
2735                 },
2736                 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2737                                      DESC_HDR_SEL0_DEU |
2738                                      DESC_HDR_MODE0_DEU_CBC |
2739                                      DESC_HDR_MODE0_DEU_3DES |
2740                                      DESC_HDR_SEL1_MDEUA |
2741                                      DESC_HDR_MODE1_MDEU_INIT |
2742                                      DESC_HDR_MODE1_MDEU_PAD |
2743                                      DESC_HDR_MODE1_MDEU_MD5_HMAC,
2744         },
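             /*
              * Editorial note: the "-hsna" entries above mirror the IPSEC_ESP
              * templates using the HMAC_SNOOP_NO_AFEU descriptor type at a
              * lower priority, presumably so they are only chosen on parts
              * whose capability masks do not advertise the IPSEC_ESP
              * descriptor type.
              */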
2745         /* ABLKCIPHER algorithms. */
2746         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2747                 .alg.crypto = {
2748                         .cra_name = "ecb(aes)",
2749                         .cra_driver_name = "ecb-aes-talitos",
2750                         .cra_blocksize = AES_BLOCK_SIZE,
2751                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2752                                      CRYPTO_ALG_ASYNC,
2753                         .cra_ablkcipher = {
2754                                 .min_keysize = AES_MIN_KEY_SIZE,
2755                                 .max_keysize = AES_MAX_KEY_SIZE,
2756                                 .setkey = ablkcipher_aes_setkey,
2757                         }
2758                 },
2759                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2760                                      DESC_HDR_SEL0_AESU,
2761         },
2762         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2763                 .alg.crypto = {
2764                         .cra_name = "cbc(aes)",
2765                         .cra_driver_name = "cbc-aes-talitos",
2766                         .cra_blocksize = AES_BLOCK_SIZE,
2767                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2768                                      CRYPTO_ALG_ASYNC,
2769                         .cra_ablkcipher = {
2770                                 .min_keysize = AES_MIN_KEY_SIZE,
2771                                 .max_keysize = AES_MAX_KEY_SIZE,
2772                                 .ivsize = AES_BLOCK_SIZE,
2773                                 .setkey = ablkcipher_aes_setkey,
2774                         }
2775                 },
2776                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2777                                      DESC_HDR_SEL0_AESU |
2778                                      DESC_HDR_MODE0_AESU_CBC,
2779         },
2780         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2781                 .alg.crypto = {
2782                         .cra_name = "ctr(aes)",
2783                         .cra_driver_name = "ctr-aes-talitos",
2784                         .cra_blocksize = 1,
2785                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2786                                      CRYPTO_ALG_ASYNC,
2787                         .cra_ablkcipher = {
2788                                 .min_keysize = AES_MIN_KEY_SIZE,
2789                                 .max_keysize = AES_MAX_KEY_SIZE,
2790                                 .ivsize = AES_BLOCK_SIZE,
2791                                 .setkey = ablkcipher_aes_setkey,
2792                         }
2793                 },
2794                 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2795                                      DESC_HDR_SEL0_AESU |
2796                                      DESC_HDR_MODE0_AESU_CTR,
2797         },
2798         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2799                 .alg.crypto = {
2800                         .cra_name = "ecb(des)",
2801                         .cra_driver_name = "ecb-des-talitos",
2802                         .cra_blocksize = DES_BLOCK_SIZE,
2803                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2804                                      CRYPTO_ALG_ASYNC,
2805                         .cra_ablkcipher = {
2806                                 .min_keysize = DES_KEY_SIZE,
2807                                 .max_keysize = DES_KEY_SIZE,
2808                                 .setkey = ablkcipher_des_setkey,
2809                         }
2810                 },
2811                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2812                                      DESC_HDR_SEL0_DEU,
2813         },
2814         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2815                 .alg.crypto = {
2816                         .cra_name = "cbc(des)",
2817                         .cra_driver_name = "cbc-des-talitos",
2818                         .cra_blocksize = DES_BLOCK_SIZE,
2819                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2820                                      CRYPTO_ALG_ASYNC,
2821                         .cra_ablkcipher = {
2822                                 .min_keysize = DES_KEY_SIZE,
2823                                 .max_keysize = DES_KEY_SIZE,
2824                                 .ivsize = DES_BLOCK_SIZE,
2825                                 .setkey = ablkcipher_des_setkey,
2826                         }
2827                 },
2828                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2829                                      DESC_HDR_SEL0_DEU |
2830                                      DESC_HDR_MODE0_DEU_CBC,
2831         },
2832         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2833                 .alg.crypto = {
2834                         .cra_name = "ecb(des3_ede)",
2835                         .cra_driver_name = "ecb-3des-talitos",
2836                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2837                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2838                                      CRYPTO_ALG_ASYNC,
2839                         .cra_ablkcipher = {
2840                                 .min_keysize = DES3_EDE_KEY_SIZE,
2841                                 .max_keysize = DES3_EDE_KEY_SIZE,
2842                                 .setkey = ablkcipher_des3_setkey,
2843                         }
2844                 },
2845                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2846                                      DESC_HDR_SEL0_DEU |
2847                                      DESC_HDR_MODE0_DEU_3DES,
2848         },
2849         {       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2850                 .alg.crypto = {
2851                         .cra_name = "cbc(des3_ede)",
2852                         .cra_driver_name = "cbc-3des-talitos",
2853                         .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2854                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2855                                      CRYPTO_ALG_ASYNC,
2856                         .cra_ablkcipher = {
2857                                 .min_keysize = DES3_EDE_KEY_SIZE,
2858                                 .max_keysize = DES3_EDE_KEY_SIZE,
2859                                 .ivsize = DES3_EDE_BLOCK_SIZE,
2860                                 .setkey = ablkcipher_des3_setkey,
2861                         }
2862                 },
2863                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2864                                      DESC_HDR_SEL0_DEU |
2865                                      DESC_HDR_MODE0_DEU_CBC |
2866                                      DESC_HDR_MODE0_DEU_3DES,
2867         },
2868         /* AHASH algorithms. */
2869         {       .type = CRYPTO_ALG_TYPE_AHASH,
2870                 .alg.hash = {
2871                         .halg.digestsize = MD5_DIGEST_SIZE,
2872                         .halg.statesize = sizeof(struct talitos_export_state),
2873                         .halg.base = {
2874                                 .cra_name = "md5",
2875                                 .cra_driver_name = "md5-talitos",
2876                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2877                                 .cra_flags = CRYPTO_ALG_ASYNC,
2878                         }
2879                 },
2880                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2881                                      DESC_HDR_SEL0_MDEUA |
2882                                      DESC_HDR_MODE0_MDEU_MD5,
2883         },
2884         {       .type = CRYPTO_ALG_TYPE_AHASH,
2885                 .alg.hash = {
2886                         .halg.digestsize = SHA1_DIGEST_SIZE,
2887                         .halg.statesize = sizeof(struct talitos_export_state),
2888                         .halg.base = {
2889                                 .cra_name = "sha1",
2890                                 .cra_driver_name = "sha1-talitos",
2891                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2892                                 .cra_flags = CRYPTO_ALG_ASYNC,
2893                         }
2894                 },
2895                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2896                                      DESC_HDR_SEL0_MDEUA |
2897                                      DESC_HDR_MODE0_MDEU_SHA1,
2898         },
2899         {       .type = CRYPTO_ALG_TYPE_AHASH,
2900                 .alg.hash = {
2901                         .halg.digestsize = SHA224_DIGEST_SIZE,
2902                         .halg.statesize = sizeof(struct talitos_export_state),
2903                         .halg.base = {
2904                                 .cra_name = "sha224",
2905                                 .cra_driver_name = "sha224-talitos",
2906                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2907                                 .cra_flags = CRYPTO_ALG_ASYNC,
2908                         }
2909                 },
2910                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2911                                      DESC_HDR_SEL0_MDEUA |
2912                                      DESC_HDR_MODE0_MDEU_SHA224,
2913         },
2914         {       .type = CRYPTO_ALG_TYPE_AHASH,
2915                 .alg.hash = {
2916                         .halg.digestsize = SHA256_DIGEST_SIZE,
2917                         .halg.statesize = sizeof(struct talitos_export_state),
2918                         .halg.base = {
2919                                 .cra_name = "sha256",
2920                                 .cra_driver_name = "sha256-talitos",
2921                                 .cra_blocksize = SHA256_BLOCK_SIZE,
2922                                 .cra_flags = CRYPTO_ALG_ASYNC,
2923                         }
2924                 },
2925                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2926                                      DESC_HDR_SEL0_MDEUA |
2927                                      DESC_HDR_MODE0_MDEU_SHA256,
2928         },
2929         {       .type = CRYPTO_ALG_TYPE_AHASH,
2930                 .alg.hash = {
2931                         .halg.digestsize = SHA384_DIGEST_SIZE,
2932                         .halg.statesize = sizeof(struct talitos_export_state),
2933                         .halg.base = {
2934                                 .cra_name = "sha384",
2935                                 .cra_driver_name = "sha384-talitos",
2936                                 .cra_blocksize = SHA384_BLOCK_SIZE,
2937                                 .cra_flags = CRYPTO_ALG_ASYNC,
2938                         }
2939                 },
2940                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2941                                      DESC_HDR_SEL0_MDEUB |
2942                                      DESC_HDR_MODE0_MDEUB_SHA384,
2943         },
2944         {       .type = CRYPTO_ALG_TYPE_AHASH,
2945                 .alg.hash = {
2946                         .halg.digestsize = SHA512_DIGEST_SIZE,
2947                         .halg.statesize = sizeof(struct talitos_export_state),
2948                         .halg.base = {
2949                                 .cra_name = "sha512",
2950                                 .cra_driver_name = "sha512-talitos",
2951                                 .cra_blocksize = SHA512_BLOCK_SIZE,
2952                                 .cra_flags = CRYPTO_ALG_ASYNC,
2953                         }
2954                 },
2955                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2956                                      DESC_HDR_SEL0_MDEUB |
2957                                      DESC_HDR_MODE0_MDEUB_SHA512,
2958         },
2959         {       .type = CRYPTO_ALG_TYPE_AHASH,
2960                 .alg.hash = {
2961                         .halg.digestsize = MD5_DIGEST_SIZE,
2962                         .halg.statesize = sizeof(struct talitos_export_state),
2963                         .halg.base = {
2964                                 .cra_name = "hmac(md5)",
2965                                 .cra_driver_name = "hmac-md5-talitos",
2966                                 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2967                                 .cra_flags = CRYPTO_ALG_ASYNC,
2968                         }
2969                 },
2970                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2971                                      DESC_HDR_SEL0_MDEUA |
2972                                      DESC_HDR_MODE0_MDEU_MD5,
2973         },
2974         {       .type = CRYPTO_ALG_TYPE_AHASH,
2975                 .alg.hash = {
2976                         .halg.digestsize = SHA1_DIGEST_SIZE,
2977                         .halg.statesize = sizeof(struct talitos_export_state),
2978                         .halg.base = {
2979                                 .cra_name = "hmac(sha1)",
2980                                 .cra_driver_name = "hmac-sha1-talitos",
2981                                 .cra_blocksize = SHA1_BLOCK_SIZE,
2982                                 .cra_flags = CRYPTO_ALG_ASYNC,
2983                         }
2984                 },
2985                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2986                                      DESC_HDR_SEL0_MDEUA |
2987                                      DESC_HDR_MODE0_MDEU_SHA1,
2988         },
2989         {       .type = CRYPTO_ALG_TYPE_AHASH,
2990                 .alg.hash = {
2991                         .halg.digestsize = SHA224_DIGEST_SIZE,
2992                         .halg.statesize = sizeof(struct talitos_export_state),
2993                         .halg.base = {
2994                                 .cra_name = "hmac(sha224)",
2995                                 .cra_driver_name = "hmac-sha224-talitos",
2996                                 .cra_blocksize = SHA224_BLOCK_SIZE,
2997                                 .cra_flags = CRYPTO_ALG_ASYNC,
2998                         }
2999                 },
3000                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3001                                      DESC_HDR_SEL0_MDEUA |
3002                                      DESC_HDR_MODE0_MDEU_SHA224,
3003         },
3004         {       .type = CRYPTO_ALG_TYPE_AHASH,
3005                 .alg.hash = {
3006                         .halg.digestsize = SHA256_DIGEST_SIZE,
3007                         .halg.statesize = sizeof(struct talitos_export_state),
3008                         .halg.base = {
3009                                 .cra_name = "hmac(sha256)",
3010                                 .cra_driver_name = "hmac-sha256-talitos",
3011                                 .cra_blocksize = SHA256_BLOCK_SIZE,
3012                                 .cra_flags = CRYPTO_ALG_ASYNC,
3013                         }
3014                 },
3015                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3016                                      DESC_HDR_SEL0_MDEUA |
3017                                      DESC_HDR_MODE0_MDEU_SHA256,
3018         },
3019         {       .type = CRYPTO_ALG_TYPE_AHASH,
3020                 .alg.hash = {
3021                         .halg.digestsize = SHA384_DIGEST_SIZE,
3022                         .halg.statesize = sizeof(struct talitos_export_state),
3023                         .halg.base = {
3024                                 .cra_name = "hmac(sha384)",
3025                                 .cra_driver_name = "hmac-sha384-talitos",
3026                                 .cra_blocksize = SHA384_BLOCK_SIZE,
3027                                 .cra_flags = CRYPTO_ALG_ASYNC,
3028                         }
3029                 },
3030                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3031                                      DESC_HDR_SEL0_MDEUB |
3032                                      DESC_HDR_MODE0_MDEUB_SHA384,
3033         },
3034         {       .type = CRYPTO_ALG_TYPE_AHASH,
3035                 .alg.hash = {
3036                         .halg.digestsize = SHA512_DIGEST_SIZE,
3037                         .halg.statesize = sizeof(struct talitos_export_state),
3038                         .halg.base = {
3039                                 .cra_name = "hmac(sha512)",
3040                                 .cra_driver_name = "hmac-sha512-talitos",
3041                                 .cra_blocksize = SHA512_BLOCK_SIZE,
3042                                 .cra_flags = CRYPTO_ALG_ASYNC,
3043                         }
3044                 },
3045                 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3046                                      DESC_HDR_SEL0_MDEUB |
3047                                      DESC_HDR_MODE0_MDEUB_SHA512,
3048         }
3049 };
3050
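     /*
      * Per-registration wrapper: talitos_alg_alloc() hands out one of these
      * for every algorithm actually registered, holding a private copy of
      * the template plus the owning device, linked into priv->alg_list so
      * that talitos_remove() can unregister it.
      */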
3051 struct talitos_crypto_alg {
3052         struct list_head entry;
3053         struct device *dev;
3054         struct talitos_alg_template algt;
3055 };
3056
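     /*
      * Transform setup shared by all algorithm types: record the owning
      * device, assign a SEC channel round-robin (num_channels is a power of
      * two, so the mask acts as a modulo), and cache the descriptor header
      * template with the done-notification bit set.
      */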
3057 static int talitos_init_common(struct talitos_ctx *ctx,
3058                                struct talitos_crypto_alg *talitos_alg)
3059 {
3060         struct talitos_private *priv;
3061
3062         /* update context with ptr to dev */
3063         ctx->dev = talitos_alg->dev;
3064
3065         /* assign SEC channel to tfm in round-robin fashion */
3066         priv = dev_get_drvdata(ctx->dev);
3067         ctx->ch = atomic_inc_return(&priv->last_chan) &
3068                   (priv->num_channels - 1);
3069
3070         /* copy descriptor header template value */
3071         ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
3072
3073         /* select done notification */
3074         ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
3075
3076         return 0;
3077 }
3078
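     /*
      * cra_init for the non-AEAD algorithms: recover the talitos_crypto_alg
      * wrapper from the generic crypto_alg (through the ahash union member
      * when the tfm is a hash) and run the common setup above.
      */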
3079 static int talitos_cra_init(struct crypto_tfm *tfm)
3080 {
3081         struct crypto_alg *alg = tfm->__crt_alg;
3082         struct talitos_crypto_alg *talitos_alg;
3083         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3084
3085         if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3086                 talitos_alg = container_of(__crypto_ahash_alg(alg),
3087                                            struct talitos_crypto_alg,
3088                                            algt.alg.hash);
3089         else
3090                 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3091                                            algt.alg.crypto);
3092
3093         return talitos_init_common(ctx, talitos_alg);
3094 }
3095
3096 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3097 {
3098         struct aead_alg *alg = crypto_aead_alg(tfm);
3099         struct talitos_crypto_alg *talitos_alg;
3100         struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3101
3102         talitos_alg = container_of(alg, struct talitos_crypto_alg,
3103                                    algt.alg.aead);
3104
3105         return talitos_init_common(ctx, talitos_alg);
3106 }
3107
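     /*
      * ahash flavour of the init hook: on top of the common setup, start
      * with no key loaded and reserve room for the per-request hash state.
      */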
3108 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3109 {
3110         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3111
3112         talitos_cra_init(tfm);
3113
3114         ctx->keylen = 0;
3115         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3116                                  sizeof(struct talitos_ahash_req_ctx));
3117
3118         return 0;
3119 }
3120
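     /*
      * Release the DMA mapping of the key, if one was installed by the
      * setkey paths.
      */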
3121 static void talitos_cra_exit(struct crypto_tfm *tfm)
3122 {
3123         struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3124         struct device *dev = ctx->dev;
3125
3126         if (ctx->keylen)
3127                 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3128 }
3129
3130 /*
3131  * given the alg's descriptor header template, determine whether descriptor
3132  * type and primary/secondary execution units required match the hw
3133  * capabilities description provided in the device tree node.
3134  */
3135 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3136 {
3137         struct talitos_private *priv = dev_get_drvdata(dev);
3138         int ret;
3139
3140         ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3141               (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3142
3143         if (SECONDARY_EU(desc_hdr_template))
3144                 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3145                               & priv->exec_units);
3146
3147         return ret;
3148 }
3149
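     /*
      * Undo talitos_probe(): unregister every algorithm left on alg_list
      * and the hwrng (if it was advertised), release both interrupt lines,
      * and stop the completion tasklets.
      */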
3150 static int talitos_remove(struct platform_device *ofdev)
3151 {
3152         struct device *dev = &ofdev->dev;
3153         struct talitos_private *priv = dev_get_drvdata(dev);
3154         struct talitos_crypto_alg *t_alg, *n;
3155         int i;
3156
3157         list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3158                 switch (t_alg->algt.type) {
3159                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
                             crypto_unregister_alg(&t_alg->algt.alg.crypto);
3160                         break;
3161                 case CRYPTO_ALG_TYPE_AEAD:
3162                         crypto_unregister_aead(&t_alg->algt.alg.aead);
                             break;
3163                 case CRYPTO_ALG_TYPE_AHASH:
3164                         crypto_unregister_ahash(&t_alg->algt.alg.hash);
3165                         break;
3166                 }
3167                 list_del(&t_alg->entry);
3168         }
3169
3170         if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3171                 talitos_unregister_rng(dev);
3172
3173         for (i = 0; i < 2; i++)
3174                 if (priv->irq[i]) {
3175                         free_irq(priv->irq[i], dev);
3176                         irq_dispose_mapping(priv->irq[i]);
3177                 }
3178
3179         tasklet_kill(&priv->done_task[0]);
3180         if (priv->irq[1])
3181                 tasklet_kill(&priv->done_task[1]);
3182
3183         return 0;
3184 }
3185
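     /*
      * Instantiate one algorithm from its template: copy the template, wire
      * up the type-specific callbacks, reject combinations the hardware
      * cannot handle (HMAC without TALITOS_FTR_HMAC_OK, SHA-224 AEADs
      * without TALITOS_FTR_SHA224_HWINIT), and fall back to a software-init
      * SHA-224 built on the SHA-256 mode where needed.  SEC1 parts get a
      * 4-byte alignmask.
      */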
3186 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3187                                                     struct talitos_alg_template
3188                                                            *template)
3189 {
3190         struct talitos_private *priv = dev_get_drvdata(dev);
3191         struct talitos_crypto_alg *t_alg;
3192         struct crypto_alg *alg;
3193
3194         t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3195                              GFP_KERNEL);
3196         if (!t_alg)
3197                 return ERR_PTR(-ENOMEM);
3198
3199         t_alg->algt = *template;
3200
3201         switch (t_alg->algt.type) {
3202         case CRYPTO_ALG_TYPE_ABLKCIPHER:
3203                 alg = &t_alg->algt.alg.crypto;
3204                 alg->cra_init = talitos_cra_init;
3205                 alg->cra_exit = talitos_cra_exit;
3206                 alg->cra_type = &crypto_ablkcipher_type;
3207                 alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
3208                                              ablkcipher_setkey;
3209                 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3210                 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3211                 break;
3212         case CRYPTO_ALG_TYPE_AEAD:
3213                 alg = &t_alg->algt.alg.aead.base;
3214                 alg->cra_exit = talitos_cra_exit;
3215                 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3216                 t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
3217                                               aead_setkey;
3218                 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3219                 t_alg->algt.alg.aead.decrypt = aead_decrypt;
3220                 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3221                     !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3222                         devm_kfree(dev, t_alg);
3223                         return ERR_PTR(-ENOTSUPP);
3224                 }
3225                 break;
3226         case CRYPTO_ALG_TYPE_AHASH:
3227                 alg = &t_alg->algt.alg.hash.halg.base;
3228                 alg->cra_init = talitos_cra_init_ahash;
3229                 alg->cra_exit = talitos_cra_exit;
3230                 t_alg->algt.alg.hash.init = ahash_init;
3231                 t_alg->algt.alg.hash.update = ahash_update;
3232                 t_alg->algt.alg.hash.final = ahash_final;
3233                 t_alg->algt.alg.hash.finup = ahash_finup;
3234                 t_alg->algt.alg.hash.digest = ahash_digest;
3235                 if (!strncmp(alg->cra_name, "hmac", 4))
3236                         t_alg->algt.alg.hash.setkey = ahash_setkey;
3237                 t_alg->algt.alg.hash.import = ahash_import;
3238                 t_alg->algt.alg.hash.export = ahash_export;
3239
3240                 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3241                     !strncmp(alg->cra_name, "hmac", 4)) {
3242                         devm_kfree(dev, t_alg);
3243                         return ERR_PTR(-ENOTSUPP);
3244                 }
3245                 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3246                     (!strcmp(alg->cra_name, "sha224") ||
3247                      !strcmp(alg->cra_name, "hmac(sha224)"))) {
3248                         t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3249                         t_alg->algt.desc_hdr_template =
3250                                         DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3251                                         DESC_HDR_SEL0_MDEUA |
3252                                         DESC_HDR_MODE0_MDEU_SHA256;
3253                 }
3254                 break;
3255         default:
3256                 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3257                 devm_kfree(dev, t_alg);
3258                 return ERR_PTR(-EINVAL);
3259         }
3260
3261         alg->cra_module = THIS_MODULE;
3262         if (t_alg->algt.priority)
3263                 alg->cra_priority = t_alg->algt.priority;
3264         else
3265                 alg->cra_priority = TALITOS_CRA_PRIORITY;
3266         if (has_ftr_sec1(priv))
3267                 alg->cra_alignmask = 3;
3268         else
3269                 alg->cra_alignmask = 0;
3270         alg->cra_ctxsize = sizeof(struct talitos_ctx);
3271         alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3272
3273         t_alg->dev = dev;
3274
3275         return t_alg;
3276 }
3277
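     /*
      * Map and request the interrupt lines.  SEC1 uses a single combined
      * interrupt; SEC2+ can either share one line for all four channels or
      * split channels 0/2 and 1/3 across two lines when a second interrupt
      * is described in the device tree.
      */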
3278 static int talitos_probe_irq(struct platform_device *ofdev)
3279 {
3280         struct device *dev = &ofdev->dev;
3281         struct device_node *np = ofdev->dev.of_node;
3282         struct talitos_private *priv = dev_get_drvdata(dev);
3283         int err;
3284         bool is_sec1 = has_ftr_sec1(priv);
3285
3286         priv->irq[0] = irq_of_parse_and_map(np, 0);
3287         if (!priv->irq[0]) {
3288                 dev_err(dev, "failed to map irq\n");
3289                 return -EINVAL;
3290         }
3291         if (is_sec1) {
3292                 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3293                                   dev_driver_string(dev), dev);
3294                 goto primary_out;
3295         }
3296
3297         priv->irq[1] = irq_of_parse_and_map(np, 1);
3298
3299         /* no second irq line: serve all four channels from the primary irq */
3300         if (!priv->irq[1]) {
3301                 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3302                                   dev_driver_string(dev), dev);
3303                 goto primary_out;
3304         }
3305
3306         err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3307                           dev_driver_string(dev), dev);
3308         if (err)
3309                 goto primary_out;
3310
3311         /* get the secondary irq line */
3312         err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3313                           dev_driver_string(dev), dev);
3314         if (err) {
3315                 dev_err(dev, "failed to request secondary irq\n");
3316                 irq_dispose_mapping(priv->irq[1]);
3317                 priv->irq[1] = 0;
3318         }
3319
3320         return err;
3321
3322 primary_out:
3323         if (err) {
3324                 dev_err(dev, "failed to request primary irq\n");
3325                 irq_dispose_mapping(priv->irq[0]);
3326                 priv->irq[0] = 0;
3327         }
3328
3329         return err;
3330 }
3331
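     /*
      * Probe sequence: map the controller registers, read the capability
      * masks from the device tree, set feature flags from the compatible
      * string, hook up interrupts and completion tasklets, allocate the
      * per-channel request fifos, reset the hardware, then register the
      * hwrng and every algorithm the capability masks say is supported.
      */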
3332 static int talitos_probe(struct platform_device *ofdev)
3333 {
3334         struct device *dev = &ofdev->dev;
3335         struct device_node *np = ofdev->dev.of_node;
3336         struct talitos_private *priv;
3337         int i, err;
3338         int stride;
3339         struct resource *res;
3340
3341         priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3342         if (!priv)
3343                 return -ENOMEM;
3344
3345         INIT_LIST_HEAD(&priv->alg_list);
3346
3347         dev_set_drvdata(dev, priv);
3348
3349         priv->ofdev = ofdev;
3350
3351         spin_lock_init(&priv->reg_lock);
3352
3353         res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3354         if (!res)
3355                 return -ENXIO;
3356         priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3357         if (!priv->reg) {
3358                 dev_err(dev, "failed to ioremap\n");
3359                 err = -ENOMEM;
3360                 goto err_out;
3361         }
3362
3363         /* get SEC version capabilities from device tree */
3364         of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3365         of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3366         of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3367         of_property_read_u32(np, "fsl,descriptor-types-mask",
3368                              &priv->desc_types);
3369
3370         if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3371             !priv->exec_units || !priv->desc_types) {
3372                 dev_err(dev, "invalid property data in device tree node\n");
3373                 err = -EINVAL;
3374                 goto err_out;
3375         }
3376
3377         if (of_device_is_compatible(np, "fsl,sec3.0"))
3378                 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3379
3380         if (of_device_is_compatible(np, "fsl,sec2.1"))
3381                 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3382                                   TALITOS_FTR_SHA224_HWINIT |
3383                                   TALITOS_FTR_HMAC_OK;
3384
3385         if (of_device_is_compatible(np, "fsl,sec1.0"))
3386                 priv->features |= TALITOS_FTR_SEC1;
3387
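             /*
              * The SEC1.x and SEC2+ register maps place the execution-unit
              * registers at different offsets and use different channel
              * strides; pick the layout matching the compatible string.
              */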
3388         if (of_device_is_compatible(np, "fsl,sec1.2")) {
3389                 priv->reg_deu = priv->reg + TALITOS12_DEU;
3390                 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3391                 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3392                 stride = TALITOS1_CH_STRIDE;
3393         } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3394                 priv->reg_deu = priv->reg + TALITOS10_DEU;
3395                 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3396                 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3397                 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3398                 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3399                 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3400                 stride = TALITOS1_CH_STRIDE;
3401         } else {
3402                 priv->reg_deu = priv->reg + TALITOS2_DEU;
3403                 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3404                 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3405                 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3406                 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3407                 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3408                 priv->reg_keu = priv->reg + TALITOS2_KEU;
3409                 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3410                 stride = TALITOS2_CH_STRIDE;
3411         }
3412
3413         err = talitos_probe_irq(ofdev);
3414         if (err)
3415                 goto err_out;
3416
3417         if (of_device_is_compatible(np, "fsl,sec1.0")) {
3418                 if (priv->num_channels == 1)
3419                         tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3420                                      (unsigned long)dev);
3421                 else
3422                         tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3423                                      (unsigned long)dev);
3424         } else {
3425                 if (priv->irq[1]) {
3426                         tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3427                                      (unsigned long)dev);
3428                         tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3429                                      (unsigned long)dev);
3430                 } else if (priv->num_channels == 1) {
3431                         tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3432                                      (unsigned long)dev);
3433                 } else {
3434                         tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3435                                      (unsigned long)dev);
3436                 }
3437         }
3438
3439         priv->chan = devm_kcalloc(dev,
3440                                   priv->num_channels,
3441                                   sizeof(struct talitos_channel),
3442                                   GFP_KERNEL);
3443         if (!priv->chan) {
3444                 dev_err(dev, "failed to allocate channel management space\n");
3445                 err = -ENOMEM;
3446                 goto err_out;
3447         }
3448
3449         priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3450
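             /* map each channel's register window and allocate its request fifo */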
3451         for (i = 0; i < priv->num_channels; i++) {
3452                 priv->chan[i].reg = priv->reg + stride * (i + 1);
3453                 if (!priv->irq[1] || !(i & 1))
3454                         priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3455
3456                 spin_lock_init(&priv->chan[i].head_lock);
3457                 spin_lock_init(&priv->chan[i].tail_lock);
3458
3459                 priv->chan[i].fifo = devm_kcalloc(dev,
3460                                                 priv->fifo_len,
3461                                                 sizeof(struct talitos_request),
3462                                                 GFP_KERNEL);
3463                 if (!priv->chan[i].fifo) {
3464                         dev_err(dev, "failed to allocate request fifo %d\n", i);
3465                         err = -ENOMEM;
3466                         goto err_out;
3467                 }
3468
3469                 atomic_set(&priv->chan[i].submit_count,
3470                            -(priv->chfifo_len - 1));
3471         }
3472
3473         err = dma_set_mask(dev, DMA_BIT_MASK(36));
             if (err)
                     goto err_out;
3474
3475         /* reset and initialize the h/w */
3476         err = init_device(dev);
3477         if (err) {
3478                 dev_err(dev, "failed to initialize device\n");
3479                 goto err_out;
3480         }
3481
3482         /* register the RNG, if available */
3483         if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3484                 err = talitos_register_rng(dev);
3485                 if (err) {
3486                         dev_err(dev, "failed to register hwrng: %d\n", err);
3487                         goto err_out;
3488                 } else
3489                         dev_info(dev, "hwrng\n");
3490         }
3491
3492         /* register crypto algorithms the device supports */
3493         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3494                 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3495                         struct talitos_crypto_alg *t_alg;
3496                         struct crypto_alg *alg = NULL;
3497
3498                         t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3499                         if (IS_ERR(t_alg)) {
3500                                 err = PTR_ERR(t_alg);
3501                                 if (err == -ENOTSUPP)
3502                                         continue;
3503                                 goto err_out;
3504                         }
3505
3506                         switch (t_alg->algt.type) {
3507                         case CRYPTO_ALG_TYPE_ABLKCIPHER:
3508                                 err = crypto_register_alg(
3509                                                 &t_alg->algt.alg.crypto);
3510                                 alg = &t_alg->algt.alg.crypto;
3511                                 break;
3512
3513                         case CRYPTO_ALG_TYPE_AEAD:
3514                                 err = crypto_register_aead(
3515                                         &t_alg->algt.alg.aead);
3516                                 alg = &t_alg->algt.alg.aead.base;
3517                                 break;
3518
3519                         case CRYPTO_ALG_TYPE_AHASH:
3520                                 err = crypto_register_ahash(
3521                                                 &t_alg->algt.alg.hash);
3522                                 alg = &t_alg->algt.alg.hash.halg.base;
3523                                 break;
3524                         }
3525                         if (err) {
3526                                 dev_err(dev, "%s alg registration failed\n",
3527                                         alg->cra_driver_name);
3528                                 devm_kfree(dev, t_alg);
3529                         } else
3530                                 list_add_tail(&t_alg->entry, &priv->alg_list);
3531                 }
3532         }
3533         if (!list_empty(&priv->alg_list))
3534                 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3535                          (char *)of_get_property(np, "compatible", NULL));
3536
3537         return 0;
3538
3539 err_out:
3540         talitos_remove(ofdev);
3541
3542         return err;
3543 }
3544
3545 static const struct of_device_id talitos_match[] = {
3546 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3547         {
3548                 .compatible = "fsl,sec1.0",
3549         },
3550 #endif
3551 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3552         {
3553                 .compatible = "fsl,sec2.0",
3554         },
3555 #endif
3556         {},
3557 };
3558 MODULE_DEVICE_TABLE(of, talitos_match);
3559
3560 static struct platform_driver talitos_driver = {
3561         .driver = {
3562                 .name = "talitos",
3563                 .of_match_table = talitos_match,
3564         },
3565         .probe = talitos_probe,
3566         .remove = talitos_remove,
3567 };
3568
3569 module_platform_driver(talitos_driver);
3570
3571 MODULE_LICENSE("GPL");
3572 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3573 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");