crypto: talitos - implement cra_priority
drivers/crypto/talitos.c
1/*
2 * talitos - Freescale Integrated Security Engine (SEC) device driver
3 *
4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
5 *
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/mod_devicetable.h>
31#include <linux/device.h>
32#include <linux/interrupt.h>
33#include <linux/crypto.h>
34#include <linux/hw_random.h>
35#include <linux/of_address.h>
36#include <linux/of_irq.h>
37#include <linux/of_platform.h>
38#include <linux/dma-mapping.h>
39#include <linux/io.h>
40#include <linux/spinlock.h>
41#include <linux/rtnetlink.h>
5a0e3ad6 42#include <linux/slab.h>
43
44#include <crypto/algapi.h>
45#include <crypto/aes.h>
3952f17e 46#include <crypto/des.h>
9c4a7965 47#include <crypto/sha.h>
497f2e6b 48#include <crypto/md5.h>
e98014ab 49#include <crypto/internal/aead.h>
9c4a7965 50#include <crypto/authenc.h>
4de9d0b5 51#include <crypto/skcipher.h>
52#include <crypto/hash.h>
53#include <crypto/internal/hash.h>
4de9d0b5 54#include <crypto/scatterwalk.h>
55
56#include "talitos.h"
57
58static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
59 bool is_sec1)
81eb024c 60{
edc6bd69 61 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
62 if (!is_sec1)
63 ptr->eptr = upper_32_bits(dma_addr);
64}
65
66static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
67 struct talitos_ptr *src_ptr, bool is_sec1)
68{
69 dst_ptr->ptr = src_ptr->ptr;
70 if (!is_sec1)
71 dst_ptr->eptr = src_ptr->eptr;
72}
73
42e8b0d7 74static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
922f9dc8 75 bool is_sec1)
538caf83 76{
77 if (is_sec1) {
78 ptr->res = 0;
79 ptr->len1 = cpu_to_be16(len);
80 } else {
81 ptr->len = cpu_to_be16(len);
82 }
83}
84
85static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
86 bool is_sec1)
538caf83 87{
88 if (is_sec1)
89 return be16_to_cpu(ptr->len1);
90 else
91 return be16_to_cpu(ptr->len);
92}
93
94static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
95 bool is_sec1)
185eb79f 96{
922f9dc8 97 if (!is_sec1)
98 ptr->j_extent = val;
99}
100
101static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
102{
103 if (!is_sec1)
104 ptr->j_extent |= val;
105}
106
107/*
108 * map virtual single (contiguous) pointer to h/w descriptor pointer
109 */
110static void map_single_talitos_ptr(struct device *dev,
edc6bd69 111 struct talitos_ptr *ptr,
42e8b0d7 112 unsigned int len, void *data,
113 enum dma_data_direction dir)
114{
81eb024c 115 dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
116 struct talitos_private *priv = dev_get_drvdata(dev);
117 bool is_sec1 = has_ftr_sec1(priv);
81eb024c 118
119 to_talitos_ptr_len(ptr, len, is_sec1);
120 to_talitos_ptr(ptr, dma_addr, is_sec1);
b096b544 121 to_talitos_ptr_ext_set(ptr, 0, is_sec1);
122}
123
124/*
125 * unmap bus single (contiguous) h/w descriptor pointer
126 */
127static void unmap_single_talitos_ptr(struct device *dev,
edc6bd69 128 struct talitos_ptr *ptr,
129 enum dma_data_direction dir)
130{
131 struct talitos_private *priv = dev_get_drvdata(dev);
132 bool is_sec1 = has_ftr_sec1(priv);
133
edc6bd69 134 dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
922f9dc8 135 from_talitos_ptr_len(ptr, is_sec1), dir);
136}
137
138static int reset_channel(struct device *dev, int ch)
139{
140 struct talitos_private *priv = dev_get_drvdata(dev);
141 unsigned int timeout = TALITOS_TIMEOUT;
dd3c0987 142 bool is_sec1 = has_ftr_sec1(priv);
9c4a7965 143
144 if (is_sec1) {
145 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
146 TALITOS1_CCCR_LO_RESET);
9c4a7965 147
148 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
149 TALITOS1_CCCR_LO_RESET) && --timeout)
150 cpu_relax();
151 } else {
152 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
153 TALITOS2_CCCR_RESET);
154
155 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
156 TALITOS2_CCCR_RESET) && --timeout)
157 cpu_relax();
158 }
159
160 if (timeout == 0) {
161 dev_err(dev, "failed to reset channel %d\n", ch);
162 return -EIO;
163 }
164
81eb024c 165 /* set 36-bit addressing, done writeback enable and done IRQ enable */
ad42d5fc 166 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
81eb024c 167 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
9c4a7965 168
169 /* and ICCR writeback, if available */
170 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
ad42d5fc 171 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
172 TALITOS_CCCR_LO_IWSE);
173
174 return 0;
175}
176
177static int reset_device(struct device *dev)
178{
179 struct talitos_private *priv = dev_get_drvdata(dev);
180 unsigned int timeout = TALITOS_TIMEOUT;
181 bool is_sec1 = has_ftr_sec1(priv);
182 u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;
9c4a7965 183
c3e337f8 184 setbits32(priv->reg + TALITOS_MCR, mcr);
9c4a7965 185
dd3c0987 186 while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
187 && --timeout)
188 cpu_relax();
189
2cdba3cf 190 if (priv->irq[1]) {
191 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
192 setbits32(priv->reg + TALITOS_MCR, mcr);
193 }
194
195 if (timeout == 0) {
196 dev_err(dev, "failed to reset device\n");
197 return -EIO;
198 }
199
200 return 0;
201}
202
203/*
204 * Reset and initialize the device
205 */
206static int init_device(struct device *dev)
207{
208 struct talitos_private *priv = dev_get_drvdata(dev);
209 int ch, err;
dd3c0987 210 bool is_sec1 = has_ftr_sec1(priv);
211
212 /*
213 * Master reset
214 * errata documentation: warning: certain SEC interrupts
215 * are not fully cleared by writing the MCR:SWR bit,
216 * set bit twice to completely reset
217 */
218 err = reset_device(dev);
219 if (err)
220 return err;
221
222 err = reset_device(dev);
223 if (err)
224 return err;
225
226 /* reset channels */
227 for (ch = 0; ch < priv->num_channels; ch++) {
228 err = reset_channel(dev, ch);
229 if (err)
230 return err;
231 }
232
233 /* enable channel done and error interrupts */
234 if (is_sec1) {
235 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
236 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
237 /* disable parity error check in DEU (erroneous? test vect.) */
238 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
239 } else {
240 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
241 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
242 }
9c4a7965 243
244 /* disable integrity check error interrupts (use writeback instead) */
245 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
5fa7fa14 246 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
247 TALITOS_MDEUICR_LO_ICE);
248
249 return 0;
250}
251
252/**
253 * talitos_submit - submits a descriptor to the device for processing
254 * @dev: the SEC device to be used
255 * @ch: the SEC device channel to be used
256 * @desc: the descriptor to be processed by the device
257 * @callback: whom to call when processing is complete
258 * @context: a handle for use by caller (optional)
259 *
260 * desc must contain valid dma-mapped (bus physical) address pointers.
261 * callback must check err and feedback in descriptor header
262 * for device processing status.
263 */
264int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
265 void (*callback)(struct device *dev,
266 struct talitos_desc *desc,
267 void *context, int error),
268 void *context)
269{
270 struct talitos_private *priv = dev_get_drvdata(dev);
271 struct talitos_request *request;
5228f0f7 272 unsigned long flags;
9c4a7965 273 int head;
7d607c6a 274 bool is_sec1 = has_ftr_sec1(priv);
9c4a7965 275
4b992628 276 spin_lock_irqsave(&priv->chan[ch].head_lock, flags);
9c4a7965 277
4b992628 278 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
ec6644d6 279 /* h/w fifo is full */
4b992628 280 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
281 return -EAGAIN;
282 }
283
284 head = priv->chan[ch].head;
285 request = &priv->chan[ch].fifo[head];
ec6644d6 286
9c4a7965 287 /* map descriptor and save caller data */
288 if (is_sec1) {
289 desc->hdr1 = desc->hdr;
290 desc->next_desc = 0;
291 request->dma_desc = dma_map_single(dev, &desc->hdr1,
292 TALITOS_DESC_SIZE,
293 DMA_BIDIRECTIONAL);
294 } else {
295 request->dma_desc = dma_map_single(dev, desc,
296 TALITOS_DESC_SIZE,
297 DMA_BIDIRECTIONAL);
298 }
299 request->callback = callback;
300 request->context = context;
301
302 /* increment fifo head */
4b992628 303 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);
304
305 smp_wmb();
306 request->desc = desc;
307
308 /* GO! */
309 wmb();
310 out_be32(priv->chan[ch].reg + TALITOS_FF,
311 upper_32_bits(request->dma_desc));
312 out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
a752447a 313 lower_32_bits(request->dma_desc));
9c4a7965 314
4b992628 315 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
316
317 return -EINPROGRESS;
318}
865d5061 319EXPORT_SYMBOL(talitos_submit);
320
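/*
 * Illustrative sketch of how a caller inside this driver uses
 * talitos_submit(): the callback name my_request_done() is hypothetical;
 * the real callers are ipsec_esp(), common_nonsnoop() and
 * common_nonsnoop_hash() further down in this file.
 */
#if 0	/* example only, not built */
static void my_request_done(struct device *dev, struct talitos_desc *desc,
			    void *context, int err)
{
	/* recover the request from 'context', unmap DMA, complete it */
}

static int my_submit(struct device *dev, int ch, struct talitos_desc *desc,
		     void *req)
{
	int ret;

	/* every pointer in 'desc' must already be dma-mapped here */
	ret = talitos_submit(dev, ch, desc, my_request_done, req);
	if (ret != -EINPROGRESS) {
		/* -EAGAIN means the channel fifo is full; undo mappings */
		return ret;
	}
	return 0;
}
#endif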
321/*
322 * process what was done, notify callback of error if not
323 */
324static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
325{
326 struct talitos_private *priv = dev_get_drvdata(dev);
327 struct talitos_request *request, saved_req;
328 unsigned long flags;
329 int tail, status;
7d607c6a 330 bool is_sec1 = has_ftr_sec1(priv);
9c4a7965 331
4b992628 332 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
9c4a7965 333
334 tail = priv->chan[ch].tail;
335 while (priv->chan[ch].fifo[tail].desc) {
336 __be32 hdr;
337
4b992628 338 request = &priv->chan[ch].fifo[tail];
339
340 /* descriptors with their done bits set don't get the error */
341 rmb();
342 hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;
343
344 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
9c4a7965 345 status = 0;
ca38a814 346 else
347 if (!error)
348 break;
349 else
350 status = error;
351
352 dma_unmap_single(dev, request->dma_desc,
7d607c6a 353 TALITOS_DESC_SIZE,
e938e465 354 DMA_BIDIRECTIONAL);
355
356 /* copy entries so we can call callback outside lock */
357 saved_req.desc = request->desc;
358 saved_req.callback = request->callback;
359 saved_req.context = request->context;
360
361 /* release request entry in fifo */
362 smp_wmb();
363 request->desc = NULL;
364
365 /* increment fifo tail */
4b992628 366 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);
9c4a7965 367
4b992628 368 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
ec6644d6 369
4b992628 370 atomic_dec(&priv->chan[ch].submit_count);
ec6644d6 371
372 saved_req.callback(dev, saved_req.desc, saved_req.context,
373 status);
374 /* channel may resume processing in single desc error case */
375 if (error && !reset_ch && status == error)
376 return;
377 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
378 tail = priv->chan[ch].tail;
379 }
380
4b992628 381 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
382}
383
384/*
385 * process completed requests for channels that have done status
386 */
387#define DEF_TALITOS1_DONE(name, ch_done_mask) \
388static void talitos1_done_##name(unsigned long data) \
389{ \
390 struct device *dev = (struct device *)data; \
391 struct talitos_private *priv = dev_get_drvdata(dev); \
392 unsigned long flags; \
393 \
394 if (ch_done_mask & 0x10000000) \
395 flush_channel(dev, 0, 0, 0); \
396 if (priv->num_channels == 1) \
397 goto out; \
398 if (ch_done_mask & 0x40000000) \
399 flush_channel(dev, 1, 0, 0); \
400 if (ch_done_mask & 0x00010000) \
401 flush_channel(dev, 2, 0, 0); \
402 if (ch_done_mask & 0x00040000) \
403 flush_channel(dev, 3, 0, 0); \
404 \
405out: \
406 /* At this point, all completed channels have been processed */ \
407 /* Unmask done interrupts for channels completed later on. */ \
408 spin_lock_irqsave(&priv->reg_lock, flags); \
409 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
410 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
411 spin_unlock_irqrestore(&priv->reg_lock, flags); \
412}
413
414DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
415
416#define DEF_TALITOS2_DONE(name, ch_done_mask) \
417static void talitos2_done_##name(unsigned long data) \
418{ \
419 struct device *dev = (struct device *)data; \
420 struct talitos_private *priv = dev_get_drvdata(dev); \
511d63cb 421 unsigned long flags; \
422 \
423 if (ch_done_mask & 1) \
424 flush_channel(dev, 0, 0, 0); \
425 if (priv->num_channels == 1) \
426 goto out; \
427 if (ch_done_mask & (1 << 2)) \
428 flush_channel(dev, 1, 0, 0); \
429 if (ch_done_mask & (1 << 4)) \
430 flush_channel(dev, 2, 0, 0); \
431 if (ch_done_mask & (1 << 6)) \
432 flush_channel(dev, 3, 0, 0); \
433 \
434out: \
435 /* At this point, all completed channels have been processed */ \
436 /* Unmask done interrupts for channels completed later on. */ \
511d63cb 437 spin_lock_irqsave(&priv->reg_lock, flags); \
c3e337f8 438 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
dd3c0987 439 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
511d63cb 440 spin_unlock_irqrestore(&priv->reg_lock, flags); \
9c4a7965 441}
442
443DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
444DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
445DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
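/*
 * Each DEF_TALITOS*_DONE() expansion above produces a tasklet body such as
 * talitos1_done_4ch() or talitos2_done_ch0_2().  A hedged sketch of how the
 * probe path (outside this excerpt) is assumed to attach one of them:
 */
#if 0	/* example only, not built */
	tasklet_init(&priv->done_task[0], talitos2_done_4ch,
		     (unsigned long)dev);
#endif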
446
447/*
448 * locate current (offending) descriptor
449 */
3e721aeb 450static u32 current_desc_hdr(struct device *dev, int ch)
451{
452 struct talitos_private *priv = dev_get_drvdata(dev);
b62ffd8c 453 int tail, iter;
454 dma_addr_t cur_desc;
455
456 cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
457 cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
9c4a7965 458
459 if (!cur_desc) {
460 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
461 return 0;
462 }
463
464 tail = priv->chan[ch].tail;
465
466 iter = tail;
467 while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
468 iter = (iter + 1) & (priv->fifo_len - 1);
469 if (iter == tail) {
9c4a7965 470 dev_err(dev, "couldn't locate current descriptor\n");
3e721aeb 471 return 0;
472 }
473 }
474
b62ffd8c 475 return priv->chan[ch].fifo[iter].desc->hdr;
476}
477
478/*
479 * user diagnostics; report root cause of error based on execution unit status
480 */
3e721aeb 481static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
482{
483 struct talitos_private *priv = dev_get_drvdata(dev);
484 int i;
485
3e721aeb 486 if (!desc_hdr)
ad42d5fc 487 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
488
489 switch (desc_hdr & DESC_HDR_SEL0_MASK) {
490 case DESC_HDR_SEL0_AFEU:
491 dev_err(dev, "AFEUISR 0x%08x_%08x\n",
492 in_be32(priv->reg_afeu + TALITOS_EUISR),
493 in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
494 break;
495 case DESC_HDR_SEL0_DEU:
496 dev_err(dev, "DEUISR 0x%08x_%08x\n",
497 in_be32(priv->reg_deu + TALITOS_EUISR),
498 in_be32(priv->reg_deu + TALITOS_EUISR_LO));
499 break;
500 case DESC_HDR_SEL0_MDEUA:
501 case DESC_HDR_SEL0_MDEUB:
502 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
503 in_be32(priv->reg_mdeu + TALITOS_EUISR),
504 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
505 break;
506 case DESC_HDR_SEL0_RNG:
507 dev_err(dev, "RNGUISR 0x%08x_%08x\n",
508 in_be32(priv->reg_rngu + TALITOS_ISR),
509 in_be32(priv->reg_rngu + TALITOS_ISR_LO));
510 break;
511 case DESC_HDR_SEL0_PKEU:
512 dev_err(dev, "PKEUISR 0x%08x_%08x\n",
513 in_be32(priv->reg_pkeu + TALITOS_EUISR),
514 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
515 break;
516 case DESC_HDR_SEL0_AESU:
517 dev_err(dev, "AESUISR 0x%08x_%08x\n",
518 in_be32(priv->reg_aesu + TALITOS_EUISR),
519 in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
520 break;
521 case DESC_HDR_SEL0_CRCU:
522 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
523 in_be32(priv->reg_crcu + TALITOS_EUISR),
524 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
525 break;
526 case DESC_HDR_SEL0_KEU:
527 dev_err(dev, "KEUISR 0x%08x_%08x\n",
528 in_be32(priv->reg_pkeu + TALITOS_EUISR),
529 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
530 break;
531 }
532
3e721aeb 533 switch (desc_hdr & DESC_HDR_SEL1_MASK) {
534 case DESC_HDR_SEL1_MDEUA:
535 case DESC_HDR_SEL1_MDEUB:
536 dev_err(dev, "MDEUISR 0x%08x_%08x\n",
537 in_be32(priv->reg_mdeu + TALITOS_EUISR),
538 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
539 break;
540 case DESC_HDR_SEL1_CRCU:
541 dev_err(dev, "CRCUISR 0x%08x_%08x\n",
542 in_be32(priv->reg_crcu + TALITOS_EUISR),
543 in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
544 break;
545 }
546
547 for (i = 0; i < 8; i++)
548 dev_err(dev, "DESCBUF 0x%08x_%08x\n",
549 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
550 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
551}
552
553/*
554 * recover from error interrupts
555 */
5e718a09 556static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
9c4a7965 557{
558 struct talitos_private *priv = dev_get_drvdata(dev);
559 unsigned int timeout = TALITOS_TIMEOUT;
dd3c0987 560 int ch, error, reset_dev = 0;
42e8b0d7 561 u32 v_lo;
562 bool is_sec1 = has_ftr_sec1(priv);
563 int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */
564
565 for (ch = 0; ch < priv->num_channels; ch++) {
566 /* skip channels without errors */
567 if (is_sec1) {
568 /* bits 29, 31, 17, 19 */
569 if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
570 continue;
571 } else {
572 if (!(isr & (1 << (ch * 2 + 1))))
573 continue;
574 }
575
576 error = -EINVAL;
577
ad42d5fc 578 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
579
580 if (v_lo & TALITOS_CCPSR_LO_DOF) {
581 dev_err(dev, "double fetch fifo overflow error\n");
582 error = -EAGAIN;
583 reset_ch = 1;
584 }
585 if (v_lo & TALITOS_CCPSR_LO_SOF) {
586 /* h/w dropped descriptor */
587 dev_err(dev, "single fetch fifo overflow error\n");
588 error = -EAGAIN;
589 }
590 if (v_lo & TALITOS_CCPSR_LO_MDTE)
591 dev_err(dev, "master data transfer error\n");
592 if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
593 dev_err(dev, is_sec1 ? "pointeur not complete error\n"
594 : "s/g data length zero error\n");
9c4a7965 595 if (v_lo & TALITOS_CCPSR_LO_FPZ)
596 dev_err(dev, is_sec1 ? "parity error\n"
597 : "fetch pointer zero error\n");
598 if (v_lo & TALITOS_CCPSR_LO_IDH)
599 dev_err(dev, "illegal descriptor header error\n");
600 if (v_lo & TALITOS_CCPSR_LO_IEU)
601 dev_err(dev, is_sec1 ? "static assignment error\n"
602 : "invalid exec unit error\n");
9c4a7965 603 if (v_lo & TALITOS_CCPSR_LO_EU)
3e721aeb 604 report_eu_error(dev, ch, current_desc_hdr(dev, ch));
605 if (!is_sec1) {
606 if (v_lo & TALITOS_CCPSR_LO_GB)
607 dev_err(dev, "gather boundary error\n");
608 if (v_lo & TALITOS_CCPSR_LO_GRL)
609 dev_err(dev, "gather return/length error\n");
610 if (v_lo & TALITOS_CCPSR_LO_SB)
611 dev_err(dev, "scatter boundary error\n");
612 if (v_lo & TALITOS_CCPSR_LO_SRL)
613 dev_err(dev, "scatter return/length error\n");
614 }
615
616 flush_channel(dev, ch, error, reset_ch);
617
618 if (reset_ch) {
619 reset_channel(dev, ch);
620 } else {
ad42d5fc 621 setbits32(priv->chan[ch].reg + TALITOS_CCCR,
dd3c0987 622 TALITOS2_CCCR_CONT);
623 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
624 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
dd3c0987 625 TALITOS2_CCCR_CONT) && --timeout)
626 cpu_relax();
627 if (timeout == 0) {
628 dev_err(dev, "failed to restart channel %d\n",
629 ch);
630 reset_dev = 1;
631 }
632 }
633 }
634 if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
635 (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
636 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
637 dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
638 isr, isr_lo);
639 else
640 dev_err(dev, "done overflow, internal time out, or "
641 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);
642
643 /* purge request queues */
644 for (ch = 0; ch < priv->num_channels; ch++)
645 flush_channel(dev, ch, -EIO, 1);
646
647 /* reset and reinitialize the device */
648 init_device(dev);
649 }
650}
651
652#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
653static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
654{ \
655 struct device *dev = data; \
656 struct talitos_private *priv = dev_get_drvdata(dev); \
657 u32 isr, isr_lo; \
658 unsigned long flags; \
659 \
660 spin_lock_irqsave(&priv->reg_lock, flags); \
661 isr = in_be32(priv->reg + TALITOS_ISR); \
662 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
663 /* Acknowledge interrupt */ \
664 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
665 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
666 \
667 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
668 spin_unlock_irqrestore(&priv->reg_lock, flags); \
669 talitos_error(dev, isr & ch_err_mask, isr_lo); \
670 } \
671 else { \
672 if (likely(isr & ch_done_mask)) { \
673 /* mask further done interrupts. */ \
674 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
675 /* done_task will unmask done interrupts at exit */ \
676 tasklet_schedule(&priv->done_task[tlet]); \
677 } \
678 spin_unlock_irqrestore(&priv->reg_lock, flags); \
679 } \
680 \
681 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
682 IRQ_NONE; \
683}
684
685DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
686
687#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
688static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
689{ \
690 struct device *dev = data; \
691 struct talitos_private *priv = dev_get_drvdata(dev); \
692 u32 isr, isr_lo; \
511d63cb 693 unsigned long flags; \
c3e337f8 694 \
511d63cb 695 spin_lock_irqsave(&priv->reg_lock, flags); \
696 isr = in_be32(priv->reg + TALITOS_ISR); \
697 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
698 /* Acknowledge interrupt */ \
699 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
700 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
701 \
702 if (unlikely(isr & ch_err_mask || isr_lo)) { \
703 spin_unlock_irqrestore(&priv->reg_lock, flags); \
704 talitos_error(dev, isr & ch_err_mask, isr_lo); \
705 } \
706 else { \
707 if (likely(isr & ch_done_mask)) { \
708 /* mask further done interrupts. */ \
709 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
710 /* done_task will unmask done interrupts at exit */ \
711 tasklet_schedule(&priv->done_task[tlet]); \
712 } \
713 spin_unlock_irqrestore(&priv->reg_lock, flags); \
714 } \
715 \
716 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
717 IRQ_NONE; \
9c4a7965 718}
719
720DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
721DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
722 0)
723DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
724 1)
725
726/*
727 * hwrng
728 */
729static int talitos_rng_data_present(struct hwrng *rng, int wait)
730{
731 struct device *dev = (struct device *)rng->priv;
732 struct talitos_private *priv = dev_get_drvdata(dev);
733 u32 ofl;
734 int i;
735
736 for (i = 0; i < 20; i++) {
5fa7fa14 737 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
738 TALITOS_RNGUSR_LO_OFL;
739 if (ofl || !wait)
740 break;
741 udelay(10);
742 }
743
744 return !!ofl;
745}
746
747static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
748{
749 struct device *dev = (struct device *)rng->priv;
750 struct talitos_private *priv = dev_get_drvdata(dev);
751
752 /* rng fifo requires 64-bit accesses */
753 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
754 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);
755
756 return sizeof(u32);
757}
758
759static int talitos_rng_init(struct hwrng *rng)
760{
761 struct device *dev = (struct device *)rng->priv;
762 struct talitos_private *priv = dev_get_drvdata(dev);
763 unsigned int timeout = TALITOS_TIMEOUT;
764
765 setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
766 while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
767 & TALITOS_RNGUSR_LO_RD)
768 && --timeout)
769 cpu_relax();
770 if (timeout == 0) {
771 dev_err(dev, "failed to reset rng hw\n");
772 return -ENODEV;
773 }
774
775 /* start generating */
5fa7fa14 776 setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);
777
778 return 0;
779}
780
781static int talitos_register_rng(struct device *dev)
782{
783 struct talitos_private *priv = dev_get_drvdata(dev);
35a3bb3d 784 int err;
785
786 priv->rng.name = dev_driver_string(dev),
787 priv->rng.init = talitos_rng_init,
788 priv->rng.data_present = talitos_rng_data_present,
789 priv->rng.data_read = talitos_rng_data_read,
790 priv->rng.priv = (unsigned long)dev;
791
792 err = hwrng_register(&priv->rng);
793 if (!err)
794 priv->rng_registered = true;
795
796 return err;
797}
798
799static void talitos_unregister_rng(struct device *dev)
800{
801 struct talitos_private *priv = dev_get_drvdata(dev);
802
803 if (!priv->rng_registered)
804 return;
805
9c4a7965 806 hwrng_unregister(&priv->rng);
35a3bb3d 807 priv->rng_registered = false;
808}
809
810/*
811 * crypto alg
812 */
813#define TALITOS_CRA_PRIORITY 3000
814#define TALITOS_MAX_KEY_SIZE 96
815#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
816
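/*
 * TALITOS_CRA_PRIORITY is what this patch plugs into the cra_priority field
 * of every algorithm the driver registers, so the crypto core prefers the
 * SEC hardware over lower-priority software implementations.  A sketch of
 * the registration-time assignment (the registration helper itself lives in
 * a part of the driver not shown in this excerpt):
 */
#if 0	/* example only, not built */
	alg->cra_priority = TALITOS_CRA_PRIORITY;
#endif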
817struct talitos_ctx {
818 struct device *dev;
5228f0f7 819 int ch;
820 __be32 desc_hdr_template;
821 u8 key[TALITOS_MAX_KEY_SIZE];
70bcaca7 822 u8 iv[TALITOS_MAX_IV_LENGTH];
823 unsigned int keylen;
824 unsigned int enckeylen;
825 unsigned int authkeylen;
826};
827
828#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
829#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
830
831struct talitos_ahash_req_ctx {
60f208d7 832 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
833 unsigned int hw_context_size;
834 u8 buf[HASH_MAX_BLOCK_SIZE];
835 u8 bufnext[HASH_MAX_BLOCK_SIZE];
60f208d7 836 unsigned int swinit;
837 unsigned int first;
838 unsigned int last;
839 unsigned int to_hash_later;
42e8b0d7 840 unsigned int nbuf;
841 struct scatterlist bufsl[2];
842 struct scatterlist *psrc;
843};
844
845struct talitos_export_state {
846 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
847 u8 buf[HASH_MAX_BLOCK_SIZE];
848 unsigned int swinit;
849 unsigned int first;
850 unsigned int last;
851 unsigned int to_hash_later;
852 unsigned int nbuf;
853};
854
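/*
 * talitos_export_state mirrors the fields of talitos_ahash_req_ctx so a
 * partial hash can be saved and restored through the generic ahash
 * export/import interface.  A hedged sketch of a caller's view (generic
 * kernel crypto API, outside this driver):
 */
#if 0	/* example only, not built */
	char state[sizeof(struct talitos_export_state)];

	crypto_ahash_export(req, state);	/* snapshot the partial hash */
	/* ... */
	crypto_ahash_import(req, state);	/* resume it later */
#endif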
855static int aead_setkey(struct crypto_aead *authenc,
856 const u8 *key, unsigned int keylen)
857{
858 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
c306a98d 859 struct crypto_authenc_keys keys;
9c4a7965 860
c306a98d 861 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
862 goto badkey;
863
c306a98d 864 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
865 goto badkey;
866
867 memcpy(ctx->key, keys.authkey, keys.authkeylen);
868 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);
9c4a7965 869
870 ctx->keylen = keys.authkeylen + keys.enckeylen;
871 ctx->enckeylen = keys.enckeylen;
872 ctx->authkeylen = keys.authkeylen;
873
874 return 0;
875
876badkey:
877 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
878 return -EINVAL;
879}
880
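/*
 * Resulting layout in the transform context after aead_setkey(), as consumed
 * by the descriptor setup in ipsec_esp() below:
 *
 *   ctx->key: [ authentication (HMAC) key | encryption key ]
 *              <------ authkeylen ------>  <-- enckeylen -->
 *
 * ptr[0] of the descriptor is pointed at the HMAC key, and the cipher key is
 * taken from ctx->key + authkeylen.
 */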
881/*
882 * talitos_edesc - s/w-extended descriptor
883 * @src_nents: number of segments in input scatterlist
884 * @dst_nents: number of segments in output scatterlist
885 * @icv_ool: whether ICV is out-of-line
886 * @iv_dma: dma address of iv for checking continuity and link table
887 * @dma_len: length of dma mapped link_tbl space
888 * @dma_link_tbl: bus physical address of link_tbl/buf
889 * @desc: h/w descriptor
890 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
891 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
892 *
893 * if decrypting (with authcheck), or either one of src_nents or dst_nents
894 * is greater than 1, an integrity check value is concatenated to the end
895 * of link_tbl data
896 */
56af8cd4 897struct talitos_edesc {
898 int src_nents;
899 int dst_nents;
aeb4c132 900 bool icv_ool;
79fd31d3 901 dma_addr_t iv_dma;
902 int dma_len;
903 dma_addr_t dma_link_tbl;
904 struct talitos_desc desc;
905 union {
906 struct talitos_ptr link_tbl[0];
907 u8 buf[0];
908 };
909};
910
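/*
 * Sketch of how the storage that trails struct talitos_edesc is sized,
 * mirroring the dma_len arithmetic in talitos_edesc_alloc() below (the
 * helper name here is illustrative only):
 */
#if 0	/* example only, not built */
static int edesc_dma_len(int src_nents, int dst_nents, int src_len,
			 int dst_len, unsigned int authsize, bool is_sec1)
{
	if (!src_nents && !dst_nents)
		return 0;

	if (is_sec1)		/* SEC1: flat bounce buffer(s), no link table */
		return (src_nents ? src_len : 0) + (dst_nents ? dst_len : 0);

	/* SEC2+: one link table entry per segment, +2 for AD/ICV, two ICVs */
	return (src_nents + dst_nents + 2) * sizeof(struct talitos_ptr) +
	       authsize * 2;
}
#endif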
911static void talitos_sg_unmap(struct device *dev,
912 struct talitos_edesc *edesc,
913 struct scatterlist *src,
914 struct scatterlist *dst,
915 unsigned int len, unsigned int offset)
4de9d0b5 916{
917 struct talitos_private *priv = dev_get_drvdata(dev);
918 bool is_sec1 = has_ftr_sec1(priv);
919 unsigned int src_nents = edesc->src_nents ? : 1;
920 unsigned int dst_nents = edesc->dst_nents ? : 1;
921
922 if (is_sec1 && dst && dst_nents > 1) {
923 dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
924 len, DMA_FROM_DEVICE);
925 sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
926 offset);
927 }
4de9d0b5 928 if (src != dst) {
929 if (src_nents == 1 || !is_sec1)
930 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
4de9d0b5 931
6a1e8d14 932 if (dst && (dst_nents == 1 || !is_sec1))
b8a011d4 933 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
6a1e8d14 934 } else if (src_nents == 1 || !is_sec1) {
b8a011d4 935 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
936 }
937}
938
9c4a7965 939static void ipsec_esp_unmap(struct device *dev,
56af8cd4 940 struct talitos_edesc *edesc,
941 struct aead_request *areq)
942{
943 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
944 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
945 unsigned int ivsize = crypto_aead_ivsize(aead);
946
947 if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)
948 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
949 DMA_FROM_DEVICE);
950 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
951 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
952 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);
953
954 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
955 areq->assoclen);
956
957 if (edesc->dma_len)
958 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
959 DMA_BIDIRECTIONAL);
960
961 if (!(edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
962 unsigned int dst_nents = edesc->dst_nents ? : 1;
963
964 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
965 areq->assoclen + areq->cryptlen - ivsize);
966 }
967}
968
969/*
970 * ipsec_esp descriptor callbacks
971 */
972static void ipsec_esp_encrypt_done(struct device *dev,
973 struct talitos_desc *desc, void *context,
974 int err)
975{
976 struct talitos_private *priv = dev_get_drvdata(dev);
977 bool is_sec1 = has_ftr_sec1(priv);
9c4a7965 978 struct aead_request *areq = context;
9c4a7965 979 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
aeb4c132 980 unsigned int authsize = crypto_aead_authsize(authenc);
19bbbc63 981 struct talitos_edesc *edesc;
982 struct scatterlist *sg;
983 void *icvdata;
984
985 edesc = container_of(desc, struct talitos_edesc, desc);
986
987 ipsec_esp_unmap(dev, edesc, areq);
988
989 /* copy the generated ICV to dst */
aeb4c132 990 if (edesc->icv_ool) {
991 if (is_sec1)
992 icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
993 else
994 icvdata = &edesc->link_tbl[edesc->src_nents +
995 edesc->dst_nents + 2];
9c4a7965 996 sg = sg_last(areq->dst, edesc->dst_nents);
997 memcpy((char *)sg_virt(sg) + sg->length - authsize,
998 icvdata, authsize);
999 }
1000
1001 kfree(edesc);
1002
1003 aead_request_complete(areq, err);
1004}
1005
fe5720e2 1006static void ipsec_esp_decrypt_swauth_done(struct device *dev,
1007 struct talitos_desc *desc,
1008 void *context, int err)
1009{
1010 struct aead_request *req = context;
9c4a7965 1011 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
aeb4c132 1012 unsigned int authsize = crypto_aead_authsize(authenc);
19bbbc63 1013 struct talitos_edesc *edesc;
9c4a7965 1014 struct scatterlist *sg;
aeb4c132 1015 char *oicv, *icv;
1016 struct talitos_private *priv = dev_get_drvdata(dev);
1017 bool is_sec1 = has_ftr_sec1(priv);
9c4a7965 1018
1019 edesc = container_of(desc, struct talitos_edesc, desc);
1020
1021 ipsec_esp_unmap(dev, edesc, req);
1022
1023 if (!err) {
1024 /* auth check */
9c4a7965 1025 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
1026 icv = (char *)sg_virt(sg) + sg->length - authsize;
1027
1028 if (edesc->dma_len) {
1029 if (is_sec1)
1030 oicv = (char *)&edesc->dma_link_tbl +
1031 req->assoclen + req->cryptlen;
1032 else
1033 oicv = (char *)
1034 &edesc->link_tbl[edesc->src_nents +
1035 edesc->dst_nents + 2];
1036 if (edesc->icv_ool)
1037 icv = oicv + authsize;
1038 } else
1039 oicv = (char *)&edesc->link_tbl[0];
1040
79960943 1041 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
1042 }
1043
1044 kfree(edesc);
1045
1046 aead_request_complete(req, err);
1047}
1048
fe5720e2 1049static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
1050 struct talitos_desc *desc,
1051 void *context, int err)
1052{
1053 struct aead_request *req = context;
1054 struct talitos_edesc *edesc;
1055
1056 edesc = container_of(desc, struct talitos_edesc, desc);
1057
1058 ipsec_esp_unmap(dev, edesc, req);
1059
1060 /* check ICV auth status */
1061 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
1062 DESC_HDR_LO_ICCR1_PASS))
1063 err = -EBADMSG;
1064
1065 kfree(edesc);
1066
1067 aead_request_complete(req, err);
1068}
1069
1070/*
1071 * convert scatterlist to SEC h/w link table format
1072 * stop at cryptlen bytes
1073 */
1074static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
1075 unsigned int offset, int cryptlen,
1076 struct talitos_ptr *link_tbl_ptr)
9c4a7965 1077{
70bcaca7 1078 int n_sg = sg_count;
aeb4c132 1079 int count = 0;
70bcaca7 1080
1081 while (cryptlen && sg && n_sg--) {
1082 unsigned int len = sg_dma_len(sg);
9c4a7965 1083
1084 if (offset >= len) {
1085 offset -= len;
1086 goto next;
1087 }
1088
1089 len -= offset;
1090
1091 if (len > cryptlen)
1092 len = cryptlen;
1093
1094 to_talitos_ptr(link_tbl_ptr + count,
1095 sg_dma_address(sg) + offset, 0);
1096 to_talitos_ptr_len(link_tbl_ptr + count, len, 0);
1097 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
1098 count++;
1099 cryptlen -= len;
1100 offset = 0;
1101
1102next:
1103 sg = sg_next(sg);
70bcaca7 1104 }
1105
1106 /* tag end of link table */
aeb4c132 1107 if (count > 0)
1108 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
1109 DESC_PTR_LNKTBL_RETURN, 0);
70bcaca7 1110
1111 return count;
1112}
1113
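/*
 * Worked example: with a dma-mapped source scatterlist of three segments of
 * 1500, 1500 and 100 bytes, cryptlen = 3100 and offset = 0,
 * sg_to_link_tbl_offset() above emits three entries and tags the last one:
 *
 *   link_tbl[0]: len = 1500, ptr -> segment 0
 *   link_tbl[1]: len = 1500, ptr -> segment 1
 *   link_tbl[2]: len =  100, ptr -> segment 2, ext = DESC_PTR_LNKTBL_RETURN
 *
 * The RETURN extent tells the SEC where the gather/scatter table ends.
 */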
1114int talitos_sg_map(struct device *dev, struct scatterlist *src,
1115 unsigned int len, struct talitos_edesc *edesc,
1116 struct talitos_ptr *ptr,
1117 int sg_count, unsigned int offset, int tbl_off)
246a87cd 1118{
1119 struct talitos_private *priv = dev_get_drvdata(dev);
1120 bool is_sec1 = has_ftr_sec1(priv);
1121
1122 to_talitos_ptr_len(ptr, len, is_sec1);
6a1e8d14 1123 to_talitos_ptr_ext_set(ptr, 0, is_sec1);
246a87cd 1124
1125 if (sg_count == 1) {
1126 to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
1127 return sg_count;
246a87cd 1128 }
246a87cd 1129 if (is_sec1) {
1130 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
1131 return sg_count;
246a87cd 1132 }
1133 sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len,
1134 &edesc->link_tbl[tbl_off]);
1135 if (sg_count == 1) {
1136 /* Only one segment now, so no link tbl needed*/
1137 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
1138 return sg_count;
1139 }
1140 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1141 tbl_off * sizeof(struct talitos_ptr), is_sec1);
1142 to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
1143
1144 return sg_count;
1145}
1146
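/*
 * talitos_sg_map() above resolves to one of three cases: a single segment is
 * pointed to directly; on SEC1 the pre-filled bounce buffer behind the edesc
 * is used; otherwise a link table is built at link_tbl[tbl_off] and the
 * descriptor pointer gets the DESC_PTR_LNKTBL_JUMP extent so the SEC follows
 * the table.
 */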
1147/*
1148 * fill in and submit ipsec_esp descriptor
1149 */
56af8cd4 1150static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1151 void (*callback)(struct device *dev,
1152 struct talitos_desc *desc,
1153 void *context, int error))
1154{
1155 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
aeb4c132 1156 unsigned int authsize = crypto_aead_authsize(aead);
1157 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
1158 struct device *dev = ctx->dev;
1159 struct talitos_desc *desc = &edesc->desc;
1160 unsigned int cryptlen = areq->cryptlen;
e41256f1 1161 unsigned int ivsize = crypto_aead_ivsize(aead);
aeb4c132 1162 int tbl_off = 0;
fa86a267 1163 int sg_count, ret;
fe5720e2 1164 int sg_link_tbl_len;
1165 bool sync_needed = false;
1166 struct talitos_private *priv = dev_get_drvdata(dev);
1167 bool is_sec1 = has_ftr_sec1(priv);
1168
1169 /* hmac key */
1170 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
a2b35aa8 1171 DMA_TO_DEVICE);
79fd31d3 1172
1173 sg_count = edesc->src_nents ?: 1;
1174 if (is_sec1 && sg_count > 1)
1175 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1176 areq->assoclen + cryptlen);
1177 else
1178 sg_count = dma_map_sg(dev, areq->src, sg_count,
1179 (areq->src == areq->dst) ?
1180 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
79fd31d3 1181
1182 /* hmac data */
1183 ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
1184 &desc->ptr[1], sg_count, 0, tbl_off);
340ff60a 1185
549bd8bc 1186 if (ret > 1) {
340ff60a 1187 tbl_off += ret;
549bd8bc 1188 sync_needed = true;
1189 }
1190
9c4a7965 1191 /* cipher iv */
1192 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
1193 to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, is_sec1);
1194 to_talitos_ptr_len(&desc->ptr[2], ivsize, is_sec1);
1195 to_talitos_ptr_ext_set(&desc->ptr[2], 0, is_sec1);
1196 } else {
1197 to_talitos_ptr(&desc->ptr[3], edesc->iv_dma, is_sec1);
1198 to_talitos_ptr_len(&desc->ptr[3], ivsize, is_sec1);
1199 to_talitos_ptr_ext_set(&desc->ptr[3], 0, is_sec1);
1200 }
1201
1202 /* cipher key */
1203 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
1204 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
1205 (char *)&ctx->key + ctx->authkeylen,
1206 DMA_TO_DEVICE);
1207 else
1208 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->enckeylen,
1209 (char *)&ctx->key + ctx->authkeylen,
1210 DMA_TO_DEVICE);
1211
1212 /*
1213 * cipher in
1214 * map and adjust cipher len to aead request cryptlen.
1215 * extent is bytes of HMAC appended to the ciphertext,
1216 * typically 12 for ipsec
1217 */
1218 to_talitos_ptr_len(&desc->ptr[4], cryptlen, is_sec1);
1219 to_talitos_ptr_ext_set(&desc->ptr[4], 0, is_sec1);
9c4a7965 1220
aeb4c132 1221 sg_link_tbl_len = cryptlen;
aeb4c132 1222
1223 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
1224 to_talitos_ptr_ext_set(&desc->ptr[4], authsize, is_sec1);
1225
1226 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
1227 sg_link_tbl_len += authsize;
340ff60a 1228 }
9c4a7965 1229
1230 sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1231 &desc->ptr[4], sg_count, areq->assoclen,
1232 tbl_off);
1233
1234 if (sg_count > 1) {
1235 tbl_off += sg_count;
1236 sync_needed = true;
1237 }
9c4a7965 1238
1239 /* cipher out */
1240 if (areq->src != areq->dst) {
1241 sg_count = edesc->dst_nents ? : 1;
1242 if (!is_sec1 || sg_count == 1)
1243 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1244 }
9c4a7965 1245
1246 sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc,
1247 &desc->ptr[5], sg_count, areq->assoclen,
1248 tbl_off);
aeb4c132 1249
1250 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
1251 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
aeb4c132 1252
549bd8bc 1253 if (sg_count > 1) {
aeb4c132 1254 edesc->icv_ool = true;
1255 sync_needed = true;
1256
1257 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
1258 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
1259 int offset = (edesc->src_nents + edesc->dst_nents + 2) *
1260 sizeof(struct talitos_ptr) + authsize;
1261
1262 /* Add an entry to the link table for ICV data */
1263 tbl_ptr += sg_count - 1;
1264 to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1);
1265 tbl_ptr++;
1266 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
1267 is_sec1);
1268 to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);
1269
1270 /* icv data follows link tables */
1271 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
1272 is_sec1);
1273 }
340ff60a 1274 } else {
1275 edesc->icv_ool = false;
1276 }
1277
1278 /* ICV data */
1279 if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
1280 to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
1281 to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl +
1282 areq->assoclen + cryptlen, is_sec1);
340ff60a 1283 }
1284
1285 /* iv out */
1286 if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
1287 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1288 DMA_FROM_DEVICE);
1289
1290 if (sync_needed)
1291 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1292 edesc->dma_len,
1293 DMA_BIDIRECTIONAL);
9c4a7965 1294
5228f0f7 1295 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1296 if (ret != -EINPROGRESS) {
1297 ipsec_esp_unmap(dev, edesc, areq);
1298 kfree(edesc);
1299 }
1300 return ret;
1301}
1302
9c4a7965 1303/*
56af8cd4 1304 * allocate and map the extended descriptor
9c4a7965 1305 */
1306static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1307 struct scatterlist *src,
1308 struct scatterlist *dst,
1309 u8 *iv,
1310 unsigned int assoclen,
1311 unsigned int cryptlen,
1312 unsigned int authsize,
79fd31d3 1313 unsigned int ivsize,
4de9d0b5 1314 int icv_stashing,
1315 u32 cryptoflags,
1316 bool encrypt)
9c4a7965 1317{
56af8cd4 1318 struct talitos_edesc *edesc;
6a1e8d14 1319 int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
79fd31d3 1320 dma_addr_t iv_dma = 0;
4de9d0b5 1321 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
586725f8 1322 GFP_ATOMIC;
1323 struct talitos_private *priv = dev_get_drvdata(dev);
1324 bool is_sec1 = has_ftr_sec1(priv);
1325 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
8e409fe1 1326 void *err;
9c4a7965 1327
6f65f6ac 1328 if (cryptlen + authsize > max_len) {
4de9d0b5 1329 dev_err(dev, "length exceeds h/w max limit\n");
1330 return ERR_PTR(-EINVAL);
1331 }
1332
935e99a3 1333 if (ivsize)
1334 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1335
62293a37 1336 if (!dst || dst == src) {
1337 src_len = assoclen + cryptlen + authsize;
1338 src_nents = sg_nents_for_len(src, src_len);
1339 if (src_nents < 0) {
1340 dev_err(dev, "Invalid number of src SG.\n");
1341 err = ERR_PTR(-EINVAL);
1342 goto error_sg;
1343 }
1344 src_nents = (src_nents == 1) ? 0 : src_nents;
1345 dst_nents = dst ? src_nents : 0;
6a1e8d14 1346 dst_len = 0;
62293a37 1347 } else { /* dst && dst != src*/
1348 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1349 src_nents = sg_nents_for_len(src, src_len);
1350 if (src_nents < 0) {
1351 dev_err(dev, "Invalid number of src SG.\n");
1352 err = ERR_PTR(-EINVAL);
1353 goto error_sg;
1354 }
62293a37 1355 src_nents = (src_nents == 1) ? 0 : src_nents;
1356 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1357 dst_nents = sg_nents_for_len(dst, dst_len);
1358 if (dst_nents < 0) {
1359 dev_err(dev, "Invalid number of dst SG.\n");
1360 err = ERR_PTR(-EINVAL);
1361 goto error_sg;
1362 }
62293a37 1363 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1364 }
1365
1366 /*
1367 * allocate space for base edesc plus the link tables,
1368 * allowing for two separate entries for AD and generated ICV (+ 2),
1369 * and space for two sets of ICVs (stashed and generated)
9c4a7965 1370 */
56af8cd4 1371 alloc_len = sizeof(struct talitos_edesc);
aeb4c132 1372 if (src_nents || dst_nents) {
6f65f6ac 1373 if (is_sec1)
1374 dma_len = (src_nents ? src_len : 0) +
1375 (dst_nents ? dst_len : 0);
6f65f6ac 1376 else
1377 dma_len = (src_nents + dst_nents + 2) *
1378 sizeof(struct talitos_ptr) + authsize * 2;
1379 alloc_len += dma_len;
1380 } else {
1381 dma_len = 0;
4de9d0b5 1382 alloc_len += icv_stashing ? authsize : 0;
9c4a7965
KP
1383 }
1384
586725f8 1385 edesc = kmalloc(alloc_len, GFP_DMA | flags);
9c4a7965 1386 if (!edesc) {
4de9d0b5 1387 dev_err(dev, "could not allocate edescriptor\n");
1388 err = ERR_PTR(-ENOMEM);
1389 goto error_sg;
1390 }
1391
1392 edesc->src_nents = src_nents;
1393 edesc->dst_nents = dst_nents;
79fd31d3 1394 edesc->iv_dma = iv_dma;
9c4a7965 1395 edesc->dma_len = dma_len;
1396 if (dma_len)
1397 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1398 edesc->dma_len,
1399 DMA_BIDIRECTIONAL);
1400
1401 return edesc;
1402error_sg:
1403 if (iv_dma)
1404 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
1405 return err;
1406}
1407
79fd31d3 1408static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
62293a37 1409 int icv_stashing, bool encrypt)
1410{
1411 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
aeb4c132 1412 unsigned int authsize = crypto_aead_authsize(authenc);
4de9d0b5 1413 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
79fd31d3 1414 unsigned int ivsize = crypto_aead_ivsize(authenc);
4de9d0b5 1415
aeb4c132 1416 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
79fd31d3 1417 iv, areq->assoclen, areq->cryptlen,
aeb4c132 1418 authsize, ivsize, icv_stashing,
62293a37 1419 areq->base.flags, encrypt);
1420}
1421
56af8cd4 1422static int aead_encrypt(struct aead_request *req)
1423{
1424 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1425 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
56af8cd4 1426 struct talitos_edesc *edesc;
1427
1428 /* allocate extended descriptor */
62293a37 1429 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1430 if (IS_ERR(edesc))
1431 return PTR_ERR(edesc);
1432
1433 /* set encrypt */
70bcaca7 1434 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
9c4a7965 1435
aeb4c132 1436 return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1437}
1438
56af8cd4 1439static int aead_decrypt(struct aead_request *req)
1440{
1441 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
aeb4c132 1442 unsigned int authsize = crypto_aead_authsize(authenc);
9c4a7965 1443 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
fe5720e2 1444 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
56af8cd4 1445 struct talitos_edesc *edesc;
1446 struct scatterlist *sg;
1447 void *icvdata;
1448
1449 req->cryptlen -= authsize;
1450
1451 /* allocate extended descriptor */
62293a37 1452 edesc = aead_edesc_alloc(req, req->iv, 1, false);
1453 if (IS_ERR(edesc))
1454 return PTR_ERR(edesc);
1455
fe5720e2 1456 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1457 ((!edesc->src_nents && !edesc->dst_nents) ||
1458 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
9c4a7965 1459
fe5720e2 1460 /* decrypt and check the ICV */
1461 edesc->desc.hdr = ctx->desc_hdr_template |
1462 DESC_HDR_DIR_INBOUND |
fe5720e2 1463 DESC_HDR_MODE1_MDEU_CICV;
9c4a7965 1464
1465 /* reset integrity check result bits */
1466 edesc->desc.hdr_lo = 0;
9c4a7965 1467
aeb4c132 1468 return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
e938e465 1469 }
fe5720e2 1470
1471 /* Have to check the ICV with software */
1472 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
fe5720e2 1473
1474 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1475 if (edesc->dma_len)
1476 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1477 edesc->dst_nents + 2];
1478 else
1479 icvdata = &edesc->link_tbl[0];
fe5720e2 1480
e938e465 1481 sg = sg_last(req->src, edesc->src_nents ? : 1);
fe5720e2 1482
aeb4c132 1483 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
9c4a7965 1484
aeb4c132 1485 return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
1486}
1487
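/*
 * Sketch of how these entry points are reached through the generic AEAD API
 * (caller side, outside this driver).  "authenc(hmac(sha1),cbc(aes))" is one
 * of the templates this driver provides; the completion callback and its
 * context are hypothetical and error handling is omitted.
 */
#if 0	/* example only, not built */
	struct crypto_aead *tfm;
	struct aead_request *req;

	tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
	crypto_aead_setkey(tfm, key, keylen);		/* -> aead_setkey()  */
	crypto_aead_setauthsize(tfm, 12);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	aead_request_set_callback(req, 0, my_complete, my_ctx);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, src, dst, cryptlen, iv);

	crypto_aead_encrypt(req);			/* -> aead_encrypt() */
#endif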
1488static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1489 const u8 *key, unsigned int keylen)
1490{
1491 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1492
1493 memcpy(&ctx->key, key, keylen);
1494 ctx->keylen = keylen;
1495
1496 return 0;
1497}
1498
1499static void common_nonsnoop_unmap(struct device *dev,
1500 struct talitos_edesc *edesc,
1501 struct ablkcipher_request *areq)
1502{
1503 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
032d197e 1504
6a1e8d14 1505 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
1506 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1507 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1508
1509 if (edesc->dma_len)
1510 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1511 DMA_BIDIRECTIONAL);
1512}
1513
1514static void ablkcipher_done(struct device *dev,
1515 struct talitos_desc *desc, void *context,
1516 int err)
1517{
1518 struct ablkcipher_request *areq = context;
1519 struct talitos_edesc *edesc;
1520
1521 edesc = container_of(desc, struct talitos_edesc, desc);
1522
1523 common_nonsnoop_unmap(dev, edesc, areq);
1524
1525 kfree(edesc);
1526
1527 areq->base.complete(&areq->base, err);
1528}
1529
1530static int common_nonsnoop(struct talitos_edesc *edesc,
1531 struct ablkcipher_request *areq,
1532 void (*callback) (struct device *dev,
1533 struct talitos_desc *desc,
1534 void *context, int error))
1535{
1536 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1537 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1538 struct device *dev = ctx->dev;
1539 struct talitos_desc *desc = &edesc->desc;
1540 unsigned int cryptlen = areq->nbytes;
79fd31d3 1541 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
4de9d0b5 1542 int sg_count, ret;
6a1e8d14 1543 bool sync_needed = false;
1544 struct talitos_private *priv = dev_get_drvdata(dev);
1545 bool is_sec1 = has_ftr_sec1(priv);
4de9d0b5
LN
1546
1547 /* first DWORD empty */
2529bc37 1548 desc->ptr[0] = zero_entry;
4de9d0b5
LN
1549
1550 /* cipher iv */
1551 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
1552 to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
b096b544 1553 to_talitos_ptr_ext_set(&desc->ptr[1], 0, is_sec1);
1554
1555 /* cipher key */
1556 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
a2b35aa8 1557 (char *)&ctx->key, DMA_TO_DEVICE);
4de9d0b5 1558
1559 sg_count = edesc->src_nents ?: 1;
1560 if (is_sec1 && sg_count > 1)
1561 sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
1562 cryptlen);
1563 else
1564 sg_count = dma_map_sg(dev, areq->src, sg_count,
1565 (areq->src == areq->dst) ?
1566 DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
1567 /*
1568 * cipher in
1569 */
1570 sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1571 &desc->ptr[3], sg_count, 0, 0);
1572 if (sg_count > 1)
1573 sync_needed = true;
1574
1575 /* cipher out */
1576 if (areq->src != areq->dst) {
1577 sg_count = edesc->dst_nents ? : 1;
1578 if (!is_sec1 || sg_count == 1)
1579 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1580 }
1581
1582 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
1583 sg_count, 0, (edesc->src_nents + 1));
1584 if (ret > 1)
1585 sync_needed = true;
1586
1587 /* iv out */
a2b35aa8 1588 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
1589 DMA_FROM_DEVICE);
1590
1591 /* last DWORD empty */
2529bc37 1592 desc->ptr[6] = zero_entry;
4de9d0b5 1593
1594 if (sync_needed)
1595 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1596 edesc->dma_len, DMA_BIDIRECTIONAL);
1597
5228f0f7 1598 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
1599 if (ret != -EINPROGRESS) {
1600 common_nonsnoop_unmap(dev, edesc, areq);
1601 kfree(edesc);
1602 }
1603 return ret;
1604}
1605
e938e465 1606static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
62293a37 1607 areq, bool encrypt)
1608{
1609 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1610 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
79fd31d3 1611 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
4de9d0b5 1612
aeb4c132 1613 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
79fd31d3 1614 areq->info, 0, areq->nbytes, 0, ivsize, 0,
62293a37 1615 areq->base.flags, encrypt);
1616}
1617
1618static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1619{
1620 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1621 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1622 struct talitos_edesc *edesc;
1623
1624 /* allocate extended descriptor */
62293a37 1625 edesc = ablkcipher_edesc_alloc(areq, true);
4de9d0b5
LN
1626 if (IS_ERR(edesc))
1627 return PTR_ERR(edesc);
1628
1629 /* set encrypt */
1630 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1631
febec542 1632 return common_nonsnoop(edesc, areq, ablkcipher_done);
1633}
1634
1635static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1636{
1637 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1638 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1639 struct talitos_edesc *edesc;
1640
1641 /* allocate extended descriptor */
62293a37 1642 edesc = ablkcipher_edesc_alloc(areq, false);
4de9d0b5
LN
1643 if (IS_ERR(edesc))
1644 return PTR_ERR(edesc);
1645
1646 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1647
febec542 1648 return common_nonsnoop(edesc, areq, ablkcipher_done);
4de9d0b5
LN
1649}
1650
497f2e6b
LN
1651static void common_nonsnoop_hash_unmap(struct device *dev,
1652 struct talitos_edesc *edesc,
1653 struct ahash_request *areq)
1654{
1655 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
922f9dc8
LC
1656 struct talitos_private *priv = dev_get_drvdata(dev);
1657 bool is_sec1 = has_ftr_sec1(priv);
497f2e6b
LN
1658
1659 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1660
6a1e8d14 1661 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);
032d197e 1662
497f2e6b 1663 /* When using hashctx-in, must unmap it. */
922f9dc8 1664 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
497f2e6b
LN
1665 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1666 DMA_TO_DEVICE);
1667
922f9dc8 1668 if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
497f2e6b
LN
1669 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1670 DMA_TO_DEVICE);
1671
497f2e6b
LN
1672 if (edesc->dma_len)
1673 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1674 DMA_BIDIRECTIONAL);
1675
1676}
1677
1678static void ahash_done(struct device *dev,
1679 struct talitos_desc *desc, void *context,
1680 int err)
1681{
1682 struct ahash_request *areq = context;
1683 struct talitos_edesc *edesc =
1684 container_of(desc, struct talitos_edesc, desc);
1685 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1686
1687 if (!req_ctx->last && req_ctx->to_hash_later) {
1688 /* Position any partial block for next update/final/finup */
1689 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
5e833bc4 1690 req_ctx->nbuf = req_ctx->to_hash_later;
497f2e6b
LN
1691 }
1692 common_nonsnoop_hash_unmap(dev, edesc, areq);
1693
1694 kfree(edesc);
1695
1696 areq->base.complete(&areq->base, err);
1697}
1698
2d02905e
LC
1699/*
 1700 * SEC1 doesn't like hashing a 0-sized message, so we do the padding
 1701 * ourselves and submit a padded block
1702 */
 1703 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
1704 struct talitos_edesc *edesc,
1705 struct talitos_ptr *ptr)
1706{
1707 static u8 padded_hash[64] = {
1708 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1709 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1710 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1711 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1712 };
1713
 1714 pr_err_once("Bug in SEC1, padding ourselves\n");
1715 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
1716 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
1717 (char *)padded_hash, DMA_TO_DEVICE);
1718}
1719
497f2e6b
LN
1720static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1721 struct ahash_request *areq, unsigned int length,
1722 void (*callback) (struct device *dev,
1723 struct talitos_desc *desc,
1724 void *context, int error))
1725{
1726 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1727 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1728 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1729 struct device *dev = ctx->dev;
1730 struct talitos_desc *desc = &edesc->desc;
032d197e 1731 int ret;
6a1e8d14 1732 bool sync_needed = false;
922f9dc8
LC
1733 struct talitos_private *priv = dev_get_drvdata(dev);
1734 bool is_sec1 = has_ftr_sec1(priv);
6a1e8d14 1735 int sg_count;
497f2e6b
LN
1736
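/*
 * Descriptor pointer usage for the hash path, filled in below:
 * ptr[0] unused, ptr[1] hash context in, ptr[2] HMAC key, ptr[3] data in,
 * ptr[4] unused, ptr[5] digest or hash context out, ptr[6] unused.
 */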
1737 /* first DWORD empty */
1738 desc->ptr[0] = zero_entry;
1739
60f208d7
KP
1740 /* hash context in */
1741 if (!req_ctx->first || req_ctx->swinit) {
497f2e6b
LN
1742 map_single_talitos_ptr(dev, &desc->ptr[1],
1743 req_ctx->hw_context_size,
a2b35aa8 1744 (char *)req_ctx->hw_context,
497f2e6b 1745 DMA_TO_DEVICE);
60f208d7 1746 req_ctx->swinit = 0;
497f2e6b
LN
1747 } else {
1748 desc->ptr[1] = zero_entry;
1749 /* Indicate next op is not the first. */
1750 req_ctx->first = 0;
1751 }
1752
1753 /* HMAC key */
1754 if (ctx->keylen)
1755 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
a2b35aa8 1756 (char *)&ctx->key, DMA_TO_DEVICE);
497f2e6b
LN
1757 else
1758 desc->ptr[2] = zero_entry;
1759
6a1e8d14
LC
1760 sg_count = edesc->src_nents ?: 1;
1761 if (is_sec1 && sg_count > 1)
1762 sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length);
1763 else
1764 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1765 DMA_TO_DEVICE);
497f2e6b
LN
1766 /*
1767 * data in
1768 */
6a1e8d14
LC
1769 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
1770 &desc->ptr[3], sg_count, 0, 0);
1771 if (sg_count > 1)
1772 sync_needed = true;
497f2e6b
LN
1773
1774 /* fifth DWORD empty */
1775 desc->ptr[4] = zero_entry;
1776
1777 /* hash/HMAC out -or- hash context out */
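/*
 * For the final pass the digest goes straight to areq->result; for an
 * intermediate pass the updated hardware context is written back so it
 * can be reloaded through ptr[1] by the next descriptor.
 */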
1778 if (req_ctx->last)
1779 map_single_talitos_ptr(dev, &desc->ptr[5],
1780 crypto_ahash_digestsize(tfm),
a2b35aa8 1781 areq->result, DMA_FROM_DEVICE);
497f2e6b
LN
1782 else
1783 map_single_talitos_ptr(dev, &desc->ptr[5],
1784 req_ctx->hw_context_size,
a2b35aa8 1785 req_ctx->hw_context, DMA_FROM_DEVICE);
497f2e6b
LN
1786
1787 /* last DWORD empty */
1788 desc->ptr[6] = zero_entry;
1789
2d02905e
LC
1790 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
1791 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);
1792
6a1e8d14
LC
1793 if (sync_needed)
1794 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1795 edesc->dma_len, DMA_BIDIRECTIONAL);
1796
5228f0f7 1797 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
497f2e6b
LN
1798 if (ret != -EINPROGRESS) {
1799 common_nonsnoop_hash_unmap(dev, edesc, areq);
1800 kfree(edesc);
1801 }
1802 return ret;
1803}
1804
1805static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1806 unsigned int nbytes)
1807{
1808 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1809 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1810 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1811
aeb4c132 1812 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
62293a37 1813 nbytes, 0, 0, 0, areq->base.flags, false);
497f2e6b
LN
1814}
1815
1816static int ahash_init(struct ahash_request *areq)
1817{
1818 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1819 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1820
1821 /* Initialize the context */
5e833bc4 1822 req_ctx->nbuf = 0;
60f208d7
KP
1823 req_ctx->first = 1; /* first indicates h/w must init its context */
1824 req_ctx->swinit = 0; /* assume h/w init of context */
497f2e6b
LN
1825 req_ctx->hw_context_size =
1826 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1827 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1828 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1829
1830 return 0;
1831}
1832
60f208d7
KP
1833/*
1834 * on h/w without explicit sha224 support, we initialize h/w context
1835 * manually with sha224 constants, and tell it to run sha256.
1836 */
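/*
 * SHA-224 uses the same compression function as SHA-256; only the initial
 * hash values and the truncated output differ.  Seeding the context with
 * the SHA-224 initial values and running the engine in SHA-256 mode
 * therefore yields SHA-224.
 */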
1837static int ahash_init_sha224_swinit(struct ahash_request *areq)
1838{
1839 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1840
1841 ahash_init(areq);
 1842 req_ctx->swinit = 1; /* prevent h/w from initializing context with sha256 values */
1843
a752447a
KP
1844 req_ctx->hw_context[0] = SHA224_H0;
1845 req_ctx->hw_context[1] = SHA224_H1;
1846 req_ctx->hw_context[2] = SHA224_H2;
1847 req_ctx->hw_context[3] = SHA224_H3;
1848 req_ctx->hw_context[4] = SHA224_H4;
1849 req_ctx->hw_context[5] = SHA224_H5;
1850 req_ctx->hw_context[6] = SHA224_H6;
1851 req_ctx->hw_context[7] = SHA224_H7;
60f208d7
KP
1852
1853 /* init 64-bit count */
1854 req_ctx->hw_context[8] = 0;
1855 req_ctx->hw_context[9] = 0;
1856
1857 return 0;
1858}
1859
497f2e6b
LN
1860static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1861{
1862 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1863 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1864 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1865 struct talitos_edesc *edesc;
1866 unsigned int blocksize =
1867 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1868 unsigned int nbytes_to_hash;
1869 unsigned int to_hash_later;
5e833bc4 1870 unsigned int nsg;
8e409fe1 1871 int nents;
497f2e6b 1872
5e833bc4
LN
1873 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1874 /* Buffer up to one whole block */
8e409fe1
LC
1875 nents = sg_nents_for_len(areq->src, nbytes);
1876 if (nents < 0) {
1877 dev_err(ctx->dev, "Invalid number of src SG.\n");
1878 return nents;
1879 }
1880 sg_copy_to_buffer(areq->src, nents,
5e833bc4
LN
1881 req_ctx->buf + req_ctx->nbuf, nbytes);
1882 req_ctx->nbuf += nbytes;
497f2e6b
LN
1883 return 0;
1884 }
1885
5e833bc4
LN
 1886 /* Final request, or at least (blocksize + 1) bytes are available to hash */
1887 nbytes_to_hash = nbytes + req_ctx->nbuf;
1888 to_hash_later = nbytes_to_hash & (blocksize - 1);
1889
1890 if (req_ctx->last)
1891 to_hash_later = 0;
1892 else if (to_hash_later)
1893 /* There is a partial block. Hash the full block(s) now */
1894 nbytes_to_hash -= to_hash_later;
1895 else {
1896 /* Keep one block buffered */
1897 nbytes_to_hash -= blocksize;
1898 to_hash_later = blocksize;
1899 }
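/*
 * Example (non-final update, blocksize 64): with nbuf = 10 buffered bytes
 * and nbytes = 100 new bytes, nbytes_to_hash = 110 and to_hash_later =
 * 110 % 64 = 46, so 64 bytes are hashed now and 46 are carried over.  If
 * the total were an exact multiple of the block size, a full block would
 * be kept back instead so that a later final pass always has data to pad.
 */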
1900
1901 /* Chain in any previously buffered data */
1902 if (req_ctx->nbuf) {
1903 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1904 sg_init_table(req_ctx->bufsl, nsg);
1905 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1906 if (nsg > 1)
c56f6d12 1907 sg_chain(req_ctx->bufsl, 2, areq->src);
497f2e6b 1908 req_ctx->psrc = req_ctx->bufsl;
5e833bc4 1909 } else
497f2e6b 1910 req_ctx->psrc = areq->src;
5e833bc4
LN
1911
1912 if (to_hash_later) {
8e409fe1
LC
1913 nents = sg_nents_for_len(areq->src, nbytes);
1914 if (nents < 0) {
1915 dev_err(ctx->dev, "Invalid number of src SG.\n");
1916 return nents;
1917 }
d0525723 1918 sg_pcopy_to_buffer(areq->src, nents,
5e833bc4
LN
1919 req_ctx->bufnext,
1920 to_hash_later,
1921 nbytes - to_hash_later);
497f2e6b 1922 }
5e833bc4 1923 req_ctx->to_hash_later = to_hash_later;
497f2e6b 1924
5e833bc4 1925 /* Allocate extended descriptor */
497f2e6b
LN
1926 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1927 if (IS_ERR(edesc))
1928 return PTR_ERR(edesc);
1929
1930 edesc->desc.hdr = ctx->desc_hdr_template;
1931
1932 /* On last one, request SEC to pad; otherwise continue */
1933 if (req_ctx->last)
1934 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1935 else
1936 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1937
60f208d7
KP
1938 /* request SEC to INIT hash. */
1939 if (req_ctx->first && !req_ctx->swinit)
497f2e6b
LN
1940 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1941
1942 /* When the tfm context has a keylen, it's an HMAC.
 1943 * A first or last (i.e. not middle) descriptor must request HMAC.
1944 */
1945 if (ctx->keylen && (req_ctx->first || req_ctx->last))
1946 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1947
1948 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1949 ahash_done);
1950}
1951
1952static int ahash_update(struct ahash_request *areq)
1953{
1954 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1955
1956 req_ctx->last = 0;
1957
1958 return ahash_process_req(areq, areq->nbytes);
1959}
1960
1961static int ahash_final(struct ahash_request *areq)
1962{
1963 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1964
1965 req_ctx->last = 1;
1966
1967 return ahash_process_req(areq, 0);
1968}
1969
1970static int ahash_finup(struct ahash_request *areq)
1971{
1972 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1973
1974 req_ctx->last = 1;
1975
1976 return ahash_process_req(areq, areq->nbytes);
1977}
1978
1979static int ahash_digest(struct ahash_request *areq)
1980{
1981 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
60f208d7 1982 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
497f2e6b 1983
60f208d7 1984 ahash->init(areq);
497f2e6b
LN
1985 req_ctx->last = 1;
1986
1987 return ahash_process_req(areq, areq->nbytes);
1988}
1989
3639ca84
HG
1990static int ahash_export(struct ahash_request *areq, void *out)
1991{
1992 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1993 struct talitos_export_state *export = out;
1994
1995 memcpy(export->hw_context, req_ctx->hw_context,
1996 req_ctx->hw_context_size);
1997 memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
1998 export->swinit = req_ctx->swinit;
1999 export->first = req_ctx->first;
2000 export->last = req_ctx->last;
2001 export->to_hash_later = req_ctx->to_hash_later;
2002 export->nbuf = req_ctx->nbuf;
2003
2004 return 0;
2005}
2006
2007static int ahash_import(struct ahash_request *areq, const void *in)
2008{
2009 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2010 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2011 const struct talitos_export_state *export = in;
2012
2013 memset(req_ctx, 0, sizeof(*req_ctx));
2014 req_ctx->hw_context_size =
2015 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2016 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2017 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2018 memcpy(req_ctx->hw_context, export->hw_context,
2019 req_ctx->hw_context_size);
2020 memcpy(req_ctx->buf, export->buf, export->nbuf);
2021 req_ctx->swinit = export->swinit;
2022 req_ctx->first = export->first;
2023 req_ctx->last = export->last;
2024 req_ctx->to_hash_later = export->to_hash_later;
2025 req_ctx->nbuf = export->nbuf;
2026
2027 return 0;
2028}
2029
79b3a418
LN
2030struct keyhash_result {
2031 struct completion completion;
2032 int err;
2033};
2034
2035static void keyhash_complete(struct crypto_async_request *req, int err)
2036{
2037 struct keyhash_result *res = req->data;
2038
2039 if (err == -EINPROGRESS)
2040 return;
2041
2042 res->err = err;
2043 complete(&res->completion);
2044}
2045
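/*
 * keyhash() performs one synchronous digest over a raw key by driving the
 * asynchronous ahash interface and waiting on a completion; it is used by
 * ahash_setkey() to shrink HMAC keys longer than the block size.
 */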
2046static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2047 u8 *hash)
2048{
2049 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2050
2051 struct scatterlist sg[1];
2052 struct ahash_request *req;
2053 struct keyhash_result hresult;
2054 int ret;
2055
2056 init_completion(&hresult.completion);
2057
2058 req = ahash_request_alloc(tfm, GFP_KERNEL);
2059 if (!req)
2060 return -ENOMEM;
2061
2062 /* Keep tfm keylen == 0 during hash of the long key */
2063 ctx->keylen = 0;
2064 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2065 keyhash_complete, &hresult);
2066
2067 sg_init_one(&sg[0], key, keylen);
2068
2069 ahash_request_set_crypt(req, sg, hash, keylen);
2070 ret = crypto_ahash_digest(req);
2071 switch (ret) {
2072 case 0:
2073 break;
2074 case -EINPROGRESS:
2075 case -EBUSY:
2076 ret = wait_for_completion_interruptible(
2077 &hresult.completion);
2078 if (!ret)
2079 ret = hresult.err;
2080 break;
2081 default:
2082 break;
2083 }
2084 ahash_request_free(req);
2085
2086 return ret;
2087}
2088
2089static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2090 unsigned int keylen)
2091{
2092 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2093 unsigned int blocksize =
2094 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2095 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2096 unsigned int keysize = keylen;
2097 u8 hash[SHA512_DIGEST_SIZE];
2098 int ret;
2099
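/*
 * As required for HMAC (RFC 2104), a key no longer than the block size is
 * used as-is, while a longer key is first replaced by its digest.
 */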
2100 if (keylen <= blocksize)
2101 memcpy(ctx->key, key, keysize);
2102 else {
2103 /* Must get the hash of the long key */
2104 ret = keyhash(tfm, key, keylen, hash);
2105
2106 if (ret) {
2107 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2108 return -EINVAL;
2109 }
2110
2111 keysize = digestsize;
2112 memcpy(ctx->key, hash, digestsize);
2113 }
2114
2115 ctx->keylen = keysize;
2116
2117 return 0;
2118}
2119
2120
9c4a7965 2121struct talitos_alg_template {
d5e4aaef 2122 u32 type;
b0057763 2123 u32 priority;
d5e4aaef
LN
2124 union {
2125 struct crypto_alg crypto;
acbf7c62 2126 struct ahash_alg hash;
aeb4c132 2127 struct aead_alg aead;
d5e4aaef 2128 } alg;
9c4a7965
KP
2129 __be32 desc_hdr_template;
2130};
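/*
 * A template may set .priority to override the default; when it is left
 * at 0, talitos_alg_alloc() falls back to TALITOS_CRA_PRIORITY.
 */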
2131
2132static struct talitos_alg_template driver_algs[] = {
991155ba 2133 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
d5e4aaef 2134 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2135 .alg.aead = {
2136 .base = {
2137 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2138 .cra_driver_name = "authenc-hmac-sha1-"
2139 "cbc-aes-talitos",
2140 .cra_blocksize = AES_BLOCK_SIZE,
2141 .cra_flags = CRYPTO_ALG_ASYNC,
2142 },
2143 .ivsize = AES_BLOCK_SIZE,
2144 .maxauthsize = SHA1_DIGEST_SIZE,
56af8cd4 2145 },
9c4a7965
KP
2146 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2147 DESC_HDR_SEL0_AESU |
2148 DESC_HDR_MODE0_AESU_CBC |
2149 DESC_HDR_SEL1_MDEUA |
2150 DESC_HDR_MODE1_MDEU_INIT |
2151 DESC_HDR_MODE1_MDEU_PAD |
2152 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
70bcaca7 2153 },
d5e4aaef 2154 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2155 .alg.aead = {
2156 .base = {
2157 .cra_name = "authenc(hmac(sha1),"
2158 "cbc(des3_ede))",
2159 .cra_driver_name = "authenc-hmac-sha1-"
2160 "cbc-3des-talitos",
2161 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2162 .cra_flags = CRYPTO_ALG_ASYNC,
2163 },
2164 .ivsize = DES3_EDE_BLOCK_SIZE,
2165 .maxauthsize = SHA1_DIGEST_SIZE,
56af8cd4 2166 },
70bcaca7
LN
2167 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2168 DESC_HDR_SEL0_DEU |
2169 DESC_HDR_MODE0_DEU_CBC |
2170 DESC_HDR_MODE0_DEU_3DES |
2171 DESC_HDR_SEL1_MDEUA |
2172 DESC_HDR_MODE1_MDEU_INIT |
2173 DESC_HDR_MODE1_MDEU_PAD |
2174 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
3952f17e 2175 },
357fb605 2176 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2177 .alg.aead = {
2178 .base = {
2179 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2180 .cra_driver_name = "authenc-hmac-sha224-"
2181 "cbc-aes-talitos",
2182 .cra_blocksize = AES_BLOCK_SIZE,
2183 .cra_flags = CRYPTO_ALG_ASYNC,
2184 },
2185 .ivsize = AES_BLOCK_SIZE,
2186 .maxauthsize = SHA224_DIGEST_SIZE,
357fb605
HG
2187 },
2188 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2189 DESC_HDR_SEL0_AESU |
2190 DESC_HDR_MODE0_AESU_CBC |
2191 DESC_HDR_SEL1_MDEUA |
2192 DESC_HDR_MODE1_MDEU_INIT |
2193 DESC_HDR_MODE1_MDEU_PAD |
2194 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2195 },
2196 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2197 .alg.aead = {
2198 .base = {
2199 .cra_name = "authenc(hmac(sha224),"
2200 "cbc(des3_ede))",
2201 .cra_driver_name = "authenc-hmac-sha224-"
2202 "cbc-3des-talitos",
2203 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2204 .cra_flags = CRYPTO_ALG_ASYNC,
2205 },
2206 .ivsize = DES3_EDE_BLOCK_SIZE,
2207 .maxauthsize = SHA224_DIGEST_SIZE,
357fb605
HG
2208 },
2209 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2210 DESC_HDR_SEL0_DEU |
2211 DESC_HDR_MODE0_DEU_CBC |
2212 DESC_HDR_MODE0_DEU_3DES |
2213 DESC_HDR_SEL1_MDEUA |
2214 DESC_HDR_MODE1_MDEU_INIT |
2215 DESC_HDR_MODE1_MDEU_PAD |
2216 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2217 },
d5e4aaef 2218 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2219 .alg.aead = {
2220 .base = {
2221 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2222 .cra_driver_name = "authenc-hmac-sha256-"
2223 "cbc-aes-talitos",
2224 .cra_blocksize = AES_BLOCK_SIZE,
2225 .cra_flags = CRYPTO_ALG_ASYNC,
2226 },
2227 .ivsize = AES_BLOCK_SIZE,
2228 .maxauthsize = SHA256_DIGEST_SIZE,
56af8cd4 2229 },
3952f17e
LN
2230 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2231 DESC_HDR_SEL0_AESU |
2232 DESC_HDR_MODE0_AESU_CBC |
2233 DESC_HDR_SEL1_MDEUA |
2234 DESC_HDR_MODE1_MDEU_INIT |
2235 DESC_HDR_MODE1_MDEU_PAD |
2236 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2237 },
d5e4aaef 2238 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2239 .alg.aead = {
2240 .base = {
2241 .cra_name = "authenc(hmac(sha256),"
2242 "cbc(des3_ede))",
2243 .cra_driver_name = "authenc-hmac-sha256-"
2244 "cbc-3des-talitos",
2245 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2246 .cra_flags = CRYPTO_ALG_ASYNC,
2247 },
2248 .ivsize = DES3_EDE_BLOCK_SIZE,
2249 .maxauthsize = SHA256_DIGEST_SIZE,
56af8cd4 2250 },
3952f17e
LN
2251 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2252 DESC_HDR_SEL0_DEU |
2253 DESC_HDR_MODE0_DEU_CBC |
2254 DESC_HDR_MODE0_DEU_3DES |
2255 DESC_HDR_SEL1_MDEUA |
2256 DESC_HDR_MODE1_MDEU_INIT |
2257 DESC_HDR_MODE1_MDEU_PAD |
2258 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2259 },
d5e4aaef 2260 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2261 .alg.aead = {
2262 .base = {
2263 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2264 .cra_driver_name = "authenc-hmac-sha384-"
2265 "cbc-aes-talitos",
2266 .cra_blocksize = AES_BLOCK_SIZE,
2267 .cra_flags = CRYPTO_ALG_ASYNC,
2268 },
2269 .ivsize = AES_BLOCK_SIZE,
2270 .maxauthsize = SHA384_DIGEST_SIZE,
357fb605
HG
2271 },
2272 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2273 DESC_HDR_SEL0_AESU |
2274 DESC_HDR_MODE0_AESU_CBC |
2275 DESC_HDR_SEL1_MDEUB |
2276 DESC_HDR_MODE1_MDEU_INIT |
2277 DESC_HDR_MODE1_MDEU_PAD |
2278 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2279 },
2280 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2281 .alg.aead = {
2282 .base = {
2283 .cra_name = "authenc(hmac(sha384),"
2284 "cbc(des3_ede))",
2285 .cra_driver_name = "authenc-hmac-sha384-"
2286 "cbc-3des-talitos",
2287 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2288 .cra_flags = CRYPTO_ALG_ASYNC,
2289 },
2290 .ivsize = DES3_EDE_BLOCK_SIZE,
2291 .maxauthsize = SHA384_DIGEST_SIZE,
357fb605
HG
2292 },
2293 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2294 DESC_HDR_SEL0_DEU |
2295 DESC_HDR_MODE0_DEU_CBC |
2296 DESC_HDR_MODE0_DEU_3DES |
2297 DESC_HDR_SEL1_MDEUB |
2298 DESC_HDR_MODE1_MDEU_INIT |
2299 DESC_HDR_MODE1_MDEU_PAD |
2300 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2301 },
2302 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2303 .alg.aead = {
2304 .base = {
2305 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2306 .cra_driver_name = "authenc-hmac-sha512-"
2307 "cbc-aes-talitos",
2308 .cra_blocksize = AES_BLOCK_SIZE,
2309 .cra_flags = CRYPTO_ALG_ASYNC,
2310 },
2311 .ivsize = AES_BLOCK_SIZE,
2312 .maxauthsize = SHA512_DIGEST_SIZE,
357fb605
HG
2313 },
2314 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2315 DESC_HDR_SEL0_AESU |
2316 DESC_HDR_MODE0_AESU_CBC |
2317 DESC_HDR_SEL1_MDEUB |
2318 DESC_HDR_MODE1_MDEU_INIT |
2319 DESC_HDR_MODE1_MDEU_PAD |
2320 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2321 },
2322 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2323 .alg.aead = {
2324 .base = {
2325 .cra_name = "authenc(hmac(sha512),"
2326 "cbc(des3_ede))",
2327 .cra_driver_name = "authenc-hmac-sha512-"
2328 "cbc-3des-talitos",
2329 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2330 .cra_flags = CRYPTO_ALG_ASYNC,
2331 },
2332 .ivsize = DES3_EDE_BLOCK_SIZE,
2333 .maxauthsize = SHA512_DIGEST_SIZE,
357fb605
HG
2334 },
2335 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2336 DESC_HDR_SEL0_DEU |
2337 DESC_HDR_MODE0_DEU_CBC |
2338 DESC_HDR_MODE0_DEU_3DES |
2339 DESC_HDR_SEL1_MDEUB |
2340 DESC_HDR_MODE1_MDEU_INIT |
2341 DESC_HDR_MODE1_MDEU_PAD |
2342 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2343 },
2344 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2345 .alg.aead = {
2346 .base = {
2347 .cra_name = "authenc(hmac(md5),cbc(aes))",
2348 .cra_driver_name = "authenc-hmac-md5-"
2349 "cbc-aes-talitos",
2350 .cra_blocksize = AES_BLOCK_SIZE,
2351 .cra_flags = CRYPTO_ALG_ASYNC,
2352 },
2353 .ivsize = AES_BLOCK_SIZE,
2354 .maxauthsize = MD5_DIGEST_SIZE,
56af8cd4 2355 },
3952f17e
LN
2356 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2357 DESC_HDR_SEL0_AESU |
2358 DESC_HDR_MODE0_AESU_CBC |
2359 DESC_HDR_SEL1_MDEUA |
2360 DESC_HDR_MODE1_MDEU_INIT |
2361 DESC_HDR_MODE1_MDEU_PAD |
2362 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2363 },
d5e4aaef 2364 { .type = CRYPTO_ALG_TYPE_AEAD,
aeb4c132
HX
2365 .alg.aead = {
2366 .base = {
2367 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2368 .cra_driver_name = "authenc-hmac-md5-"
2369 "cbc-3des-talitos",
2370 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2371 .cra_flags = CRYPTO_ALG_ASYNC,
2372 },
2373 .ivsize = DES3_EDE_BLOCK_SIZE,
2374 .maxauthsize = MD5_DIGEST_SIZE,
56af8cd4 2375 },
3952f17e
LN
2376 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2377 DESC_HDR_SEL0_DEU |
2378 DESC_HDR_MODE0_DEU_CBC |
2379 DESC_HDR_MODE0_DEU_3DES |
2380 DESC_HDR_SEL1_MDEUA |
2381 DESC_HDR_MODE1_MDEU_INIT |
2382 DESC_HDR_MODE1_MDEU_PAD |
2383 DESC_HDR_MODE1_MDEU_MD5_HMAC,
4de9d0b5
LN
2384 },
2385 /* ABLKCIPHER algorithms. */
5e75ae1b
LC
2386 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2387 .alg.crypto = {
2388 .cra_name = "ecb(aes)",
2389 .cra_driver_name = "ecb-aes-talitos",
2390 .cra_blocksize = AES_BLOCK_SIZE,
2391 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2392 CRYPTO_ALG_ASYNC,
2393 .cra_ablkcipher = {
2394 .min_keysize = AES_MIN_KEY_SIZE,
2395 .max_keysize = AES_MAX_KEY_SIZE,
2396 .ivsize = AES_BLOCK_SIZE,
2397 }
2398 },
2399 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2400 DESC_HDR_SEL0_AESU,
2401 },
d5e4aaef
LN
2402 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2403 .alg.crypto = {
4de9d0b5
LN
2404 .cra_name = "cbc(aes)",
2405 .cra_driver_name = "cbc-aes-talitos",
2406 .cra_blocksize = AES_BLOCK_SIZE,
2407 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2408 CRYPTO_ALG_ASYNC,
4de9d0b5 2409 .cra_ablkcipher = {
4de9d0b5
LN
2410 .min_keysize = AES_MIN_KEY_SIZE,
2411 .max_keysize = AES_MAX_KEY_SIZE,
2412 .ivsize = AES_BLOCK_SIZE,
2413 }
2414 },
2415 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2416 DESC_HDR_SEL0_AESU |
2417 DESC_HDR_MODE0_AESU_CBC,
2418 },
5e75ae1b
LC
2419 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2420 .alg.crypto = {
2421 .cra_name = "ctr(aes)",
2422 .cra_driver_name = "ctr-aes-talitos",
2423 .cra_blocksize = AES_BLOCK_SIZE,
2424 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2425 CRYPTO_ALG_ASYNC,
2426 .cra_ablkcipher = {
2427 .min_keysize = AES_MIN_KEY_SIZE,
2428 .max_keysize = AES_MAX_KEY_SIZE,
2429 .ivsize = AES_BLOCK_SIZE,
2430 }
2431 },
2432 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2433 DESC_HDR_SEL0_AESU |
2434 DESC_HDR_MODE0_AESU_CTR,
2435 },
2436 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2437 .alg.crypto = {
2438 .cra_name = "ecb(des)",
2439 .cra_driver_name = "ecb-des-talitos",
2440 .cra_blocksize = DES_BLOCK_SIZE,
2441 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2442 CRYPTO_ALG_ASYNC,
2443 .cra_ablkcipher = {
2444 .min_keysize = DES_KEY_SIZE,
2445 .max_keysize = DES_KEY_SIZE,
2446 .ivsize = DES_BLOCK_SIZE,
2447 }
2448 },
2449 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2450 DESC_HDR_SEL0_DEU,
2451 },
2452 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2453 .alg.crypto = {
2454 .cra_name = "cbc(des)",
2455 .cra_driver_name = "cbc-des-talitos",
2456 .cra_blocksize = DES_BLOCK_SIZE,
2457 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2458 CRYPTO_ALG_ASYNC,
2459 .cra_ablkcipher = {
2460 .min_keysize = DES_KEY_SIZE,
2461 .max_keysize = DES_KEY_SIZE,
2462 .ivsize = DES_BLOCK_SIZE,
2463 }
2464 },
2465 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2466 DESC_HDR_SEL0_DEU |
2467 DESC_HDR_MODE0_DEU_CBC,
2468 },
2469 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2470 .alg.crypto = {
2471 .cra_name = "ecb(des3_ede)",
2472 .cra_driver_name = "ecb-3des-talitos",
2473 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2474 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2475 CRYPTO_ALG_ASYNC,
2476 .cra_ablkcipher = {
2477 .min_keysize = DES3_EDE_KEY_SIZE,
2478 .max_keysize = DES3_EDE_KEY_SIZE,
2479 .ivsize = DES3_EDE_BLOCK_SIZE,
2480 }
2481 },
2482 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2483 DESC_HDR_SEL0_DEU |
2484 DESC_HDR_MODE0_DEU_3DES,
2485 },
d5e4aaef
LN
2486 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2487 .alg.crypto = {
4de9d0b5
LN
2488 .cra_name = "cbc(des3_ede)",
2489 .cra_driver_name = "cbc-3des-talitos",
2490 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2491 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2492 CRYPTO_ALG_ASYNC,
4de9d0b5 2493 .cra_ablkcipher = {
4de9d0b5
LN
2494 .min_keysize = DES3_EDE_KEY_SIZE,
2495 .max_keysize = DES3_EDE_KEY_SIZE,
2496 .ivsize = DES3_EDE_BLOCK_SIZE,
2497 }
2498 },
2499 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2500 DESC_HDR_SEL0_DEU |
2501 DESC_HDR_MODE0_DEU_CBC |
2502 DESC_HDR_MODE0_DEU_3DES,
497f2e6b
LN
2503 },
2504 /* AHASH algorithms. */
2505 { .type = CRYPTO_ALG_TYPE_AHASH,
2506 .alg.hash = {
497f2e6b 2507 .halg.digestsize = MD5_DIGEST_SIZE,
3639ca84 2508 .halg.statesize = sizeof(struct talitos_export_state),
497f2e6b
LN
2509 .halg.base = {
2510 .cra_name = "md5",
2511 .cra_driver_name = "md5-talitos",
b3988618 2512 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
497f2e6b
LN
2513 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2514 CRYPTO_ALG_ASYNC,
497f2e6b
LN
2515 }
2516 },
2517 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2518 DESC_HDR_SEL0_MDEUA |
2519 DESC_HDR_MODE0_MDEU_MD5,
2520 },
2521 { .type = CRYPTO_ALG_TYPE_AHASH,
2522 .alg.hash = {
497f2e6b 2523 .halg.digestsize = SHA1_DIGEST_SIZE,
3639ca84 2524 .halg.statesize = sizeof(struct talitos_export_state),
497f2e6b
LN
2525 .halg.base = {
2526 .cra_name = "sha1",
2527 .cra_driver_name = "sha1-talitos",
2528 .cra_blocksize = SHA1_BLOCK_SIZE,
2529 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2530 CRYPTO_ALG_ASYNC,
497f2e6b
LN
2531 }
2532 },
2533 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2534 DESC_HDR_SEL0_MDEUA |
2535 DESC_HDR_MODE0_MDEU_SHA1,
2536 },
60f208d7
KP
2537 { .type = CRYPTO_ALG_TYPE_AHASH,
2538 .alg.hash = {
60f208d7 2539 .halg.digestsize = SHA224_DIGEST_SIZE,
3639ca84 2540 .halg.statesize = sizeof(struct talitos_export_state),
60f208d7
KP
2541 .halg.base = {
2542 .cra_name = "sha224",
2543 .cra_driver_name = "sha224-talitos",
2544 .cra_blocksize = SHA224_BLOCK_SIZE,
2545 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2546 CRYPTO_ALG_ASYNC,
60f208d7
KP
2547 }
2548 },
2549 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2550 DESC_HDR_SEL0_MDEUA |
2551 DESC_HDR_MODE0_MDEU_SHA224,
2552 },
497f2e6b
LN
2553 { .type = CRYPTO_ALG_TYPE_AHASH,
2554 .alg.hash = {
497f2e6b 2555 .halg.digestsize = SHA256_DIGEST_SIZE,
3639ca84 2556 .halg.statesize = sizeof(struct talitos_export_state),
497f2e6b
LN
2557 .halg.base = {
2558 .cra_name = "sha256",
2559 .cra_driver_name = "sha256-talitos",
2560 .cra_blocksize = SHA256_BLOCK_SIZE,
2561 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2562 CRYPTO_ALG_ASYNC,
497f2e6b
LN
2563 }
2564 },
2565 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2566 DESC_HDR_SEL0_MDEUA |
2567 DESC_HDR_MODE0_MDEU_SHA256,
2568 },
2569 { .type = CRYPTO_ALG_TYPE_AHASH,
2570 .alg.hash = {
497f2e6b 2571 .halg.digestsize = SHA384_DIGEST_SIZE,
3639ca84 2572 .halg.statesize = sizeof(struct talitos_export_state),
497f2e6b
LN
2573 .halg.base = {
2574 .cra_name = "sha384",
2575 .cra_driver_name = "sha384-talitos",
2576 .cra_blocksize = SHA384_BLOCK_SIZE,
2577 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2578 CRYPTO_ALG_ASYNC,
497f2e6b
LN
2579 }
2580 },
2581 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2582 DESC_HDR_SEL0_MDEUB |
2583 DESC_HDR_MODE0_MDEUB_SHA384,
2584 },
2585 { .type = CRYPTO_ALG_TYPE_AHASH,
2586 .alg.hash = {
497f2e6b 2587 .halg.digestsize = SHA512_DIGEST_SIZE,
3639ca84 2588 .halg.statesize = sizeof(struct talitos_export_state),
497f2e6b
LN
2589 .halg.base = {
2590 .cra_name = "sha512",
2591 .cra_driver_name = "sha512-talitos",
2592 .cra_blocksize = SHA512_BLOCK_SIZE,
2593 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2594 CRYPTO_ALG_ASYNC,
497f2e6b
LN
2595 }
2596 },
2597 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2598 DESC_HDR_SEL0_MDEUB |
2599 DESC_HDR_MODE0_MDEUB_SHA512,
2600 },
79b3a418
LN
2601 { .type = CRYPTO_ALG_TYPE_AHASH,
2602 .alg.hash = {
79b3a418 2603 .halg.digestsize = MD5_DIGEST_SIZE,
3639ca84 2604 .halg.statesize = sizeof(struct talitos_export_state),
79b3a418
LN
2605 .halg.base = {
2606 .cra_name = "hmac(md5)",
2607 .cra_driver_name = "hmac-md5-talitos",
b3988618 2608 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
79b3a418
LN
2609 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2610 CRYPTO_ALG_ASYNC,
79b3a418
LN
2611 }
2612 },
2613 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2614 DESC_HDR_SEL0_MDEUA |
2615 DESC_HDR_MODE0_MDEU_MD5,
2616 },
2617 { .type = CRYPTO_ALG_TYPE_AHASH,
2618 .alg.hash = {
79b3a418 2619 .halg.digestsize = SHA1_DIGEST_SIZE,
3639ca84 2620 .halg.statesize = sizeof(struct talitos_export_state),
79b3a418
LN
2621 .halg.base = {
2622 .cra_name = "hmac(sha1)",
2623 .cra_driver_name = "hmac-sha1-talitos",
2624 .cra_blocksize = SHA1_BLOCK_SIZE,
2625 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2626 CRYPTO_ALG_ASYNC,
79b3a418
LN
2627 }
2628 },
2629 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2630 DESC_HDR_SEL0_MDEUA |
2631 DESC_HDR_MODE0_MDEU_SHA1,
2632 },
2633 { .type = CRYPTO_ALG_TYPE_AHASH,
2634 .alg.hash = {
79b3a418 2635 .halg.digestsize = SHA224_DIGEST_SIZE,
3639ca84 2636 .halg.statesize = sizeof(struct talitos_export_state),
79b3a418
LN
2637 .halg.base = {
2638 .cra_name = "hmac(sha224)",
2639 .cra_driver_name = "hmac-sha224-talitos",
2640 .cra_blocksize = SHA224_BLOCK_SIZE,
2641 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2642 CRYPTO_ALG_ASYNC,
79b3a418
LN
2643 }
2644 },
2645 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2646 DESC_HDR_SEL0_MDEUA |
2647 DESC_HDR_MODE0_MDEU_SHA224,
2648 },
2649 { .type = CRYPTO_ALG_TYPE_AHASH,
2650 .alg.hash = {
79b3a418 2651 .halg.digestsize = SHA256_DIGEST_SIZE,
3639ca84 2652 .halg.statesize = sizeof(struct talitos_export_state),
79b3a418
LN
2653 .halg.base = {
2654 .cra_name = "hmac(sha256)",
2655 .cra_driver_name = "hmac-sha256-talitos",
2656 .cra_blocksize = SHA256_BLOCK_SIZE,
2657 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2658 CRYPTO_ALG_ASYNC,
79b3a418
LN
2659 }
2660 },
2661 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2662 DESC_HDR_SEL0_MDEUA |
2663 DESC_HDR_MODE0_MDEU_SHA256,
2664 },
2665 { .type = CRYPTO_ALG_TYPE_AHASH,
2666 .alg.hash = {
79b3a418 2667 .halg.digestsize = SHA384_DIGEST_SIZE,
3639ca84 2668 .halg.statesize = sizeof(struct talitos_export_state),
79b3a418
LN
2669 .halg.base = {
2670 .cra_name = "hmac(sha384)",
2671 .cra_driver_name = "hmac-sha384-talitos",
2672 .cra_blocksize = SHA384_BLOCK_SIZE,
2673 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2674 CRYPTO_ALG_ASYNC,
79b3a418
LN
2675 }
2676 },
2677 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2678 DESC_HDR_SEL0_MDEUB |
2679 DESC_HDR_MODE0_MDEUB_SHA384,
2680 },
2681 { .type = CRYPTO_ALG_TYPE_AHASH,
2682 .alg.hash = {
79b3a418 2683 .halg.digestsize = SHA512_DIGEST_SIZE,
3639ca84 2684 .halg.statesize = sizeof(struct talitos_export_state),
79b3a418
LN
2685 .halg.base = {
2686 .cra_name = "hmac(sha512)",
2687 .cra_driver_name = "hmac-sha512-talitos",
2688 .cra_blocksize = SHA512_BLOCK_SIZE,
2689 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2690 CRYPTO_ALG_ASYNC,
79b3a418
LN
2691 }
2692 },
2693 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2694 DESC_HDR_SEL0_MDEUB |
2695 DESC_HDR_MODE0_MDEUB_SHA512,
2696 }
9c4a7965
KP
2697};
2698
2699struct talitos_crypto_alg {
2700 struct list_head entry;
2701 struct device *dev;
acbf7c62 2702 struct talitos_alg_template algt;
9c4a7965
KP
2703};
2704
89d124cb
JE
2705static int talitos_init_common(struct talitos_ctx *ctx,
2706 struct talitos_crypto_alg *talitos_alg)
9c4a7965 2707{
5228f0f7 2708 struct talitos_private *priv;
9c4a7965
KP
2709
2710 /* update context with ptr to dev */
2711 ctx->dev = talitos_alg->dev;
19bbbc63 2712
5228f0f7
KP
2713 /* assign SEC channel to tfm in round-robin fashion */
2714 priv = dev_get_drvdata(ctx->dev);
2715 ctx->ch = atomic_inc_return(&priv->last_chan) &
2716 (priv->num_channels - 1);
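/*
 * The mask is equivalent to "% num_channels" because talitos_probe()
 * verifies that num_channels is a power of two.
 */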
2717
9c4a7965 2718 /* copy descriptor header template value */
acbf7c62 2719 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
9c4a7965 2720
602dba5a
KP
2721 /* select done notification */
2722 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2723
497f2e6b
LN
2724 return 0;
2725}
2726
89d124cb
JE
2727static int talitos_cra_init(struct crypto_tfm *tfm)
2728{
2729 struct crypto_alg *alg = tfm->__crt_alg;
2730 struct talitos_crypto_alg *talitos_alg;
2731 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2732
2733 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2734 talitos_alg = container_of(__crypto_ahash_alg(alg),
2735 struct talitos_crypto_alg,
2736 algt.alg.hash);
2737 else
2738 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2739 algt.alg.crypto);
2740
2741 return talitos_init_common(ctx, talitos_alg);
2742}
2743
aeb4c132 2744static int talitos_cra_init_aead(struct crypto_aead *tfm)
497f2e6b 2745{
89d124cb
JE
2746 struct aead_alg *alg = crypto_aead_alg(tfm);
2747 struct talitos_crypto_alg *talitos_alg;
2748 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
2749
2750 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2751 algt.alg.aead);
2752
2753 return talitos_init_common(ctx, talitos_alg);
9c4a7965
KP
2754}
2755
497f2e6b
LN
2756static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2757{
2758 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2759
2760 talitos_cra_init(tfm);
2761
2762 ctx->keylen = 0;
2763 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2764 sizeof(struct talitos_ahash_req_ctx));
2765
2766 return 0;
2767}
2768
9c4a7965
KP
2769/*
2770 * given the alg's descriptor header template, determine whether descriptor
2771 * type and primary/secondary execution units required match the hw
2772 * capabilities description provided in the device tree node.
2773 */
2774static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2775{
2776 struct talitos_private *priv = dev_get_drvdata(dev);
2777 int ret;
2778
2779 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2780 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2781
2782 if (SECONDARY_EU(desc_hdr_template))
2783 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2784 & priv->exec_units);
2785
2786 return ret;
2787}
2788
2dc11581 2789static int talitos_remove(struct platform_device *ofdev)
9c4a7965
KP
2790{
2791 struct device *dev = &ofdev->dev;
2792 struct talitos_private *priv = dev_get_drvdata(dev);
2793 struct talitos_crypto_alg *t_alg, *n;
2794 int i;
2795
2796 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
acbf7c62
LN
2797 switch (t_alg->algt.type) {
2798 case CRYPTO_ALG_TYPE_ABLKCIPHER:
crypto_unregister_alg(&t_alg->algt.alg.crypto);
acbf7c62 2799 break;
aeb4c132
HX
2800 case CRYPTO_ALG_TYPE_AEAD:
 2801 crypto_unregister_aead(&t_alg->algt.alg.aead);
 break;
acbf7c62
LN
2802 case CRYPTO_ALG_TYPE_AHASH:
2803 crypto_unregister_ahash(&t_alg->algt.alg.hash);
2804 break;
2805 }
9c4a7965
KP
2806 list_del(&t_alg->entry);
2807 kfree(t_alg);
2808 }
2809
2810 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2811 talitos_unregister_rng(dev);
2812
35a3bb3d 2813 for (i = 0; priv->chan && i < priv->num_channels; i++)
0b798247 2814 kfree(priv->chan[i].fifo);
9c4a7965 2815
4b992628 2816 kfree(priv->chan);
9c4a7965 2817
c3e337f8 2818 for (i = 0; i < 2; i++)
2cdba3cf 2819 if (priv->irq[i]) {
c3e337f8
KP
2820 free_irq(priv->irq[i], dev);
2821 irq_dispose_mapping(priv->irq[i]);
2822 }
9c4a7965 2823
c3e337f8 2824 tasklet_kill(&priv->done_task[0]);
2cdba3cf 2825 if (priv->irq[1])
c3e337f8 2826 tasklet_kill(&priv->done_task[1]);
9c4a7965
KP
2827
2828 iounmap(priv->reg);
2829
9c4a7965
KP
2830 kfree(priv);
2831
2832 return 0;
2833}
2834
2835static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2836 struct talitos_alg_template
2837 *template)
2838{
60f208d7 2839 struct talitos_private *priv = dev_get_drvdata(dev);
9c4a7965
KP
2840 struct talitos_crypto_alg *t_alg;
2841 struct crypto_alg *alg;
2842
2843 t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2844 if (!t_alg)
2845 return ERR_PTR(-ENOMEM);
2846
acbf7c62
LN
2847 t_alg->algt = *template;
2848
2849 switch (t_alg->algt.type) {
2850 case CRYPTO_ALG_TYPE_ABLKCIPHER:
497f2e6b
LN
2851 alg = &t_alg->algt.alg.crypto;
2852 alg->cra_init = talitos_cra_init;
d4cd3283 2853 alg->cra_type = &crypto_ablkcipher_type;
b286e003
KP
2854 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
2855 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
2856 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
2857 alg->cra_ablkcipher.geniv = "eseqiv";
497f2e6b 2858 break;
acbf7c62 2859 case CRYPTO_ALG_TYPE_AEAD:
aeb4c132 2860 alg = &t_alg->algt.alg.aead.base;
aeb4c132
HX
2861 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
2862 t_alg->algt.alg.aead.setkey = aead_setkey;
2863 t_alg->algt.alg.aead.encrypt = aead_encrypt;
2864 t_alg->algt.alg.aead.decrypt = aead_decrypt;
acbf7c62
LN
2865 break;
2866 case CRYPTO_ALG_TYPE_AHASH:
2867 alg = &t_alg->algt.alg.hash.halg.base;
497f2e6b 2868 alg->cra_init = talitos_cra_init_ahash;
d4cd3283 2869 alg->cra_type = &crypto_ahash_type;
b286e003
KP
2870 t_alg->algt.alg.hash.init = ahash_init;
2871 t_alg->algt.alg.hash.update = ahash_update;
2872 t_alg->algt.alg.hash.final = ahash_final;
2873 t_alg->algt.alg.hash.finup = ahash_finup;
2874 t_alg->algt.alg.hash.digest = ahash_digest;
2875 t_alg->algt.alg.hash.setkey = ahash_setkey;
3639ca84
HG
2876 t_alg->algt.alg.hash.import = ahash_import;
2877 t_alg->algt.alg.hash.export = ahash_export;
b286e003 2878
79b3a418 2879 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
0b2730d8
KP
2880 !strncmp(alg->cra_name, "hmac", 4)) {
2881 kfree(t_alg);
79b3a418 2882 return ERR_PTR(-ENOTSUPP);
0b2730d8 2883 }
60f208d7 2884 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
79b3a418
LN
2885 (!strcmp(alg->cra_name, "sha224") ||
2886 !strcmp(alg->cra_name, "hmac(sha224)"))) {
60f208d7
KP
2887 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2888 t_alg->algt.desc_hdr_template =
2889 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2890 DESC_HDR_SEL0_MDEUA |
2891 DESC_HDR_MODE0_MDEU_SHA256;
2892 }
497f2e6b 2893 break;
1d11911a
KP
2894 default:
2895 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
5fa7dadc 2896 kfree(t_alg);
1d11911a 2897 return ERR_PTR(-EINVAL);
acbf7c62 2898 }
9c4a7965 2899
9c4a7965 2900 alg->cra_module = THIS_MODULE;
b0057763
LC
2901 if (t_alg->algt.priority)
2902 alg->cra_priority = t_alg->algt.priority;
2903 else
2904 alg->cra_priority = TALITOS_CRA_PRIORITY;
9c4a7965 2905 alg->cra_alignmask = 0;
9c4a7965 2906 alg->cra_ctxsize = sizeof(struct talitos_ctx);
d912bb76 2907 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
9c4a7965 2908
9c4a7965
KP
2909 t_alg->dev = dev;
2910
2911 return t_alg;
2912}
2913
c3e337f8
KP
2914static int talitos_probe_irq(struct platform_device *ofdev)
2915{
2916 struct device *dev = &ofdev->dev;
2917 struct device_node *np = ofdev->dev.of_node;
2918 struct talitos_private *priv = dev_get_drvdata(dev);
2919 int err;
dd3c0987 2920 bool is_sec1 = has_ftr_sec1(priv);
c3e337f8
KP
2921
2922 priv->irq[0] = irq_of_parse_and_map(np, 0);
2cdba3cf 2923 if (!priv->irq[0]) {
c3e337f8
KP
2924 dev_err(dev, "failed to map irq\n");
2925 return -EINVAL;
2926 }
dd3c0987
LC
2927 if (is_sec1) {
2928 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
2929 dev_driver_string(dev), dev);
2930 goto primary_out;
2931 }
c3e337f8
KP
2932
2933 priv->irq[1] = irq_of_parse_and_map(np, 1);
2934
2935 /* get the primary irq line */
2cdba3cf 2936 if (!priv->irq[1]) {
dd3c0987 2937 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
c3e337f8
KP
2938 dev_driver_string(dev), dev);
2939 goto primary_out;
2940 }
2941
dd3c0987 2942 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
c3e337f8
KP
2943 dev_driver_string(dev), dev);
2944 if (err)
2945 goto primary_out;
2946
2947 /* get the secondary irq line */
dd3c0987 2948 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
c3e337f8
KP
2949 dev_driver_string(dev), dev);
2950 if (err) {
2951 dev_err(dev, "failed to request secondary irq\n");
2952 irq_dispose_mapping(priv->irq[1]);
2cdba3cf 2953 priv->irq[1] = 0;
c3e337f8
KP
2954 }
2955
2956 return err;
2957
2958primary_out:
2959 if (err) {
2960 dev_err(dev, "failed to request primary irq\n");
2961 irq_dispose_mapping(priv->irq[0]);
2cdba3cf 2962 priv->irq[0] = 0;
c3e337f8
KP
2963 }
2964
2965 return err;
2966}
2967
1c48a5c9 2968static int talitos_probe(struct platform_device *ofdev)
9c4a7965
KP
2969{
2970 struct device *dev = &ofdev->dev;
61c7a080 2971 struct device_node *np = ofdev->dev.of_node;
9c4a7965
KP
2972 struct talitos_private *priv;
2973 const unsigned int *prop;
2974 int i, err;
5fa7fa14 2975 int stride;
9c4a7965
KP
2976
2977 priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2978 if (!priv)
2979 return -ENOMEM;
2980
f3de9cb1
KH
2981 INIT_LIST_HEAD(&priv->alg_list);
2982
9c4a7965
KP
2983 dev_set_drvdata(dev, priv);
2984
2985 priv->ofdev = ofdev;
2986
511d63cb
HG
2987 spin_lock_init(&priv->reg_lock);
2988
9c4a7965
KP
2989 priv->reg = of_iomap(np, 0);
2990 if (!priv->reg) {
2991 dev_err(dev, "failed to of_iomap\n");
2992 err = -ENOMEM;
2993 goto err_out;
2994 }
2995
2996 /* get SEC version capabilities from device tree */
2997 prop = of_get_property(np, "fsl,num-channels", NULL);
2998 if (prop)
2999 priv->num_channels = *prop;
3000
3001 prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
3002 if (prop)
3003 priv->chfifo_len = *prop;
3004
3005 prop = of_get_property(np, "fsl,exec-units-mask", NULL);
3006 if (prop)
3007 priv->exec_units = *prop;
3008
3009 prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
3010 if (prop)
3011 priv->desc_types = *prop;
3012
3013 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3014 !priv->exec_units || !priv->desc_types) {
3015 dev_err(dev, "invalid property data in device tree node\n");
3016 err = -EINVAL;
3017 goto err_out;
3018 }
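/*
 * A hypothetical device tree node providing the properties read above
 * might look like this (node name and values are illustrative only):
 *
 *   crypto@30000 {
 *           compatible = "fsl,sec2.0";
 *           fsl,num-channels = <4>;
 *           fsl,channel-fifo-len = <24>;
 *           fsl,exec-units-mask = <0xfe>;
 *           fsl,descriptor-types-mask = <0x12b0ebf>;
 *   };
 */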
3019
f3c85bc1
LN
3020 if (of_device_is_compatible(np, "fsl,sec3.0"))
3021 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3022
fe5720e2 3023 if (of_device_is_compatible(np, "fsl,sec2.1"))
60f208d7 3024 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
79b3a418
LN
3025 TALITOS_FTR_SHA224_HWINIT |
3026 TALITOS_FTR_HMAC_OK;
fe5720e2 3027
21590888
LC
3028 if (of_device_is_compatible(np, "fsl,sec1.0"))
3029 priv->features |= TALITOS_FTR_SEC1;
3030
5fa7fa14
LC
3031 if (of_device_is_compatible(np, "fsl,sec1.2")) {
3032 priv->reg_deu = priv->reg + TALITOS12_DEU;
3033 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3034 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3035 stride = TALITOS1_CH_STRIDE;
3036 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3037 priv->reg_deu = priv->reg + TALITOS10_DEU;
3038 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3039 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3040 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3041 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3042 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3043 stride = TALITOS1_CH_STRIDE;
3044 } else {
3045 priv->reg_deu = priv->reg + TALITOS2_DEU;
3046 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3047 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3048 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3049 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3050 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3051 priv->reg_keu = priv->reg + TALITOS2_KEU;
3052 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3053 stride = TALITOS2_CH_STRIDE;
3054 }
3055
dd3c0987
LC
3056 err = talitos_probe_irq(ofdev);
3057 if (err)
3058 goto err_out;
3059
3060 if (of_device_is_compatible(np, "fsl,sec1.0")) {
3061 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3062 (unsigned long)dev);
3063 } else {
3064 if (!priv->irq[1]) {
3065 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3066 (unsigned long)dev);
3067 } else {
3068 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3069 (unsigned long)dev);
3070 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3071 (unsigned long)dev);
3072 }
3073 }
3074
4b992628
KP
3075 priv->chan = kzalloc(sizeof(struct talitos_channel) *
3076 priv->num_channels, GFP_KERNEL);
3077 if (!priv->chan) {
3078 dev_err(dev, "failed to allocate channel management space\n");
9c4a7965
KP
3079 err = -ENOMEM;
3080 goto err_out;
3081 }
3082
f641dddd
MH
3083 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3084
c3e337f8 3085 for (i = 0; i < priv->num_channels; i++) {
5fa7fa14 3086 priv->chan[i].reg = priv->reg + stride * (i + 1);
2cdba3cf 3087 if (!priv->irq[1] || !(i & 1))
c3e337f8 3088 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
ad42d5fc 3089
4b992628
KP
3090 spin_lock_init(&priv->chan[i].head_lock);
3091 spin_lock_init(&priv->chan[i].tail_lock);
9c4a7965 3092
4b992628
KP
3093 priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
3094 priv->fifo_len, GFP_KERNEL);
3095 if (!priv->chan[i].fifo) {
9c4a7965
KP
3096 dev_err(dev, "failed to allocate request fifo %d\n", i);
3097 err = -ENOMEM;
3098 goto err_out;
3099 }
9c4a7965 3100
4b992628
KP
3101 atomic_set(&priv->chan[i].submit_count,
3102 -(priv->chfifo_len - 1));
f641dddd 3103 }
9c4a7965 3104
81eb024c
KP
3105 dma_set_mask(dev, DMA_BIT_MASK(36));
3106
9c4a7965
KP
3107 /* reset and initialize the h/w */
3108 err = init_device(dev);
3109 if (err) {
3110 dev_err(dev, "failed to initialize device\n");
3111 goto err_out;
3112 }
3113
3114 /* register the RNG, if available */
3115 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3116 err = talitos_register_rng(dev);
3117 if (err) {
3118 dev_err(dev, "failed to register hwrng: %d\n", err);
3119 goto err_out;
3120 } else
3121 dev_info(dev, "hwrng\n");
3122 }
3123
3124 /* register crypto algorithms the device supports */
9c4a7965
KP
3125 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3126 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3127 struct talitos_crypto_alg *t_alg;
aeb4c132 3128 struct crypto_alg *alg = NULL;
9c4a7965
KP
3129
3130 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3131 if (IS_ERR(t_alg)) {
3132 err = PTR_ERR(t_alg);
0b2730d8 3133 if (err == -ENOTSUPP)
79b3a418 3134 continue;
9c4a7965
KP
3135 goto err_out;
3136 }
3137
acbf7c62
LN
3138 switch (t_alg->algt.type) {
3139 case CRYPTO_ALG_TYPE_ABLKCIPHER:
acbf7c62
LN
3140 err = crypto_register_alg(
3141 &t_alg->algt.alg.crypto);
aeb4c132 3142 alg = &t_alg->algt.alg.crypto;
acbf7c62 3143 break;
aeb4c132
HX
3144
3145 case CRYPTO_ALG_TYPE_AEAD:
3146 err = crypto_register_aead(
3147 &t_alg->algt.alg.aead);
3148 alg = &t_alg->algt.alg.aead.base;
3149 break;
3150
acbf7c62
LN
3151 case CRYPTO_ALG_TYPE_AHASH:
3152 err = crypto_register_ahash(
3153 &t_alg->algt.alg.hash);
aeb4c132 3154 alg = &t_alg->algt.alg.hash.halg.base;
acbf7c62
LN
3155 break;
3156 }
9c4a7965
KP
3157 if (err) {
3158 dev_err(dev, "%s alg registration failed\n",
aeb4c132 3159 alg->cra_driver_name);
9c4a7965 3160 kfree(t_alg);
991155ba 3161 } else
9c4a7965 3162 list_add_tail(&t_alg->entry, &priv->alg_list);
9c4a7965
KP
3163 }
3164 }
5b859b6e
KP
3165 if (!list_empty(&priv->alg_list))
3166 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3167 (char *)of_get_property(np, "compatible", NULL));
9c4a7965
KP
3168
3169 return 0;
3170
3171err_out:
3172 talitos_remove(ofdev);
9c4a7965
KP
3173
3174 return err;
3175}
3176
6c3f975a 3177static const struct of_device_id talitos_match[] = {
0635b7db
LC
3178#ifdef CONFIG_CRYPTO_DEV_TALITOS1
3179 {
3180 .compatible = "fsl,sec1.0",
3181 },
3182#endif
3183#ifdef CONFIG_CRYPTO_DEV_TALITOS2
9c4a7965
KP
3184 {
3185 .compatible = "fsl,sec2.0",
3186 },
0635b7db 3187#endif
9c4a7965
KP
3188 {},
3189};
3190MODULE_DEVICE_TABLE(of, talitos_match);
3191
1c48a5c9 3192static struct platform_driver talitos_driver = {
4018294b
GL
3193 .driver = {
3194 .name = "talitos",
4018294b
GL
3195 .of_match_table = talitos_match,
3196 },
9c4a7965 3197 .probe = talitos_probe,
596f1034 3198 .remove = talitos_remove,
9c4a7965
KP
3199};
3200
741e8c2d 3201module_platform_driver(talitos_driver);
9c4a7965
KP
3202
3203MODULE_LICENSE("GPL");
3204MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3205MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");