crypto: talitos - fill in talitos descriptor iaw SEC1 or SEC2+
[linux-2.6-block.git] / drivers / crypto / talitos.c
CommitLineData
9c4a7965
KP
1/*
2 * talitos - Freescale Integrated Security Engine (SEC) device driver
3 *
5228f0f7 4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
9c4a7965
KP
5 *
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
8 *
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/mod_devicetable.h>
31#include <linux/device.h>
32#include <linux/interrupt.h>
33#include <linux/crypto.h>
34#include <linux/hw_random.h>
5af50730
RH
35#include <linux/of_address.h>
36#include <linux/of_irq.h>
9c4a7965
KP
37#include <linux/of_platform.h>
38#include <linux/dma-mapping.h>
39#include <linux/io.h>
40#include <linux/spinlock.h>
41#include <linux/rtnetlink.h>
5a0e3ad6 42#include <linux/slab.h>
9c4a7965
KP
43
44#include <crypto/algapi.h>
45#include <crypto/aes.h>
3952f17e 46#include <crypto/des.h>
9c4a7965 47#include <crypto/sha.h>
497f2e6b 48#include <crypto/md5.h>
9c4a7965
KP
49#include <crypto/aead.h>
50#include <crypto/authenc.h>
4de9d0b5 51#include <crypto/skcipher.h>
acbf7c62
LN
52#include <crypto/hash.h>
53#include <crypto/internal/hash.h>
4de9d0b5 54#include <crypto/scatterwalk.h>
9c4a7965
KP
55
56#include "talitos.h"
57
922f9dc8
LC
/*
 * Write a DMA (bus) address into a h/w descriptor pointer field.
 * SEC1 has no extended-pointer word, so the upper 32 bits are only
 * stored for SEC2+ parts (is_sec1 == false).
 */
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}
65
922f9dc8
LC
66static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned short len,
67 bool is_sec1)
538caf83 68{
922f9dc8
LC
69 if (is_sec1) {
70 ptr->res = 0;
71 ptr->len1 = cpu_to_be16(len);
72 } else {
73 ptr->len = cpu_to_be16(len);
74 }
538caf83
LC
75}
76
922f9dc8
LC
77static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
78 bool is_sec1)
538caf83 79{
922f9dc8
LC
80 if (is_sec1)
81 return be16_to_cpu(ptr->len1);
82 else
83 return be16_to_cpu(ptr->len);
538caf83
LC
84}
85
/*
 * Clear the jump/extent byte of a h/w descriptor pointer.
 * SEC1 descriptors have no j_extent field, so this is a no-op there.
 */
static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = 0;
}
91
9c4a7965
KP
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 *
 * DMA-maps @data for @dir and fills @ptr with the resulting bus address,
 * length, and (on SEC2+) a cleared extent byte.  The caller owns the
 * mapping and must release it via unmap_single_talitos_ptr().
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned short len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);
}
108
/*
 * unmap bus single (contiguous) h/w descriptor pointer
 *
 * Reverses map_single_talitos_ptr() using the address/length stored in
 * the descriptor pointer itself.
 * NOTE(review): only the low 32 bits (ptr->ptr) are passed to
 * dma_unmap_single(); eptr is not folded back in — confirm this is
 * sufficient on 36-bit addressing configurations.
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}
122
/*
 * Reset one SEC channel and re-enable its standard operating mode.
 * Busy-waits (up to TALITOS_TIMEOUT iterations) for the self-clearing
 * reset bit, then programs 36-bit addressing, done writeback and done
 * IRQ, plus ICCR writeback where the h/w supports authentication check.
 * Returns 0 on success, -EIO if the reset bit never clears.
 */
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);

	while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
150
/*
 * Issue a software reset of the whole SEC block and wait for it to
 * self-clear.  On dual-IRQ parts (irq[1] present) the channel/IRQ
 * assignment (RCA1/RCA3) is reprogrammed afterwards, since SWR clears it.
 * Returns 0 on success, -EIO if the reset bit never clears.
 */
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	u32 mcr = TALITOS_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
175
/*
 * Reset and initialize the device
 *
 * Performs the documented double master reset (errata workaround), resets
 * every channel, then unmasks channel done/error interrupts.  When the
 * h/w can check ICVs itself, the MDEU integrity-check-error interrupt is
 * masked in favour of status writeback.  Returns 0 or the first failing
 * sub-step's error code.
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_MDEUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
216
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @ch: the SEC device channel to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 *
 * Returns -EINPROGRESS when the descriptor was queued to the h/w fifo,
 * or -EAGAIN when the channel fifo is full.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	/* submit_count doubles as the fifo-full gate: 0 means no slots */
	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
					   DMA_BIDIRECTIONAL);
	request->callback = callback;
	request->context = context;

	/* increment fifo head (fifo_len is a power of two) */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	/* publish the slot contents before flush_channel() can see desc */
	smp_wmb();
	request->desc = desc;

	/* GO! — ensure the descriptor is visible before kicking the h/w */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
9c4a7965
KP
275
/*
 * process what was done, notify callback of error if not
 *
 * Walks the channel fifo from tail, completing every descriptor whose
 * DONE bit is set (status 0) and, when @error is set, failing the rest.
 * The tail lock is dropped around each callback so completions may
 * submit new requests; tail is re-read after reacquiring the lock.
 * @reset_ch distinguishes "purge everything" from the single-descriptor
 * error case, where processing may resume after the offender.
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 sizeof(struct talitos_desc),
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
333
/*
 * process completed requests for channels that have done status
 *
 * Tasklet body generator: each instantiation flushes the channels whose
 * done bits appear in ch_done_mask (bits 0/2/4/6 map to channels 0-3),
 * then re-enables the done interrupts that the IRQ handler masked.
 * Single-channel devices short-circuit after channel 0.
 */
#define DEF_TALITOS_DONE(name, ch_done_mask)				\
static void talitos_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}
DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
9c4a7965
KP
366
/*
 * locate current (offending) descriptor
 *
 * Reads the channel's current descriptor pointer register and searches
 * the s/w fifo (starting at tail) for the request with that bus address.
 * Returns the matching descriptor's header word, or 0 when CDPR is NULL
 * or no fifo entry matches.
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			/* wrapped all the way around without a match */
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}
397
/*
 * user diagnostics; report root cause of error based on execution unit status
 *
 * Decodes the primary (SEL0) and secondary (SEL1) execution-unit selects
 * from @desc_hdr and dumps the matching EU's interrupt status registers,
 * followed by the channel's full descriptor buffer.  A zero @desc_hdr
 * falls back to the header saved in the channel's descriptor buffer.
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AFEUISR),
			in_be32(priv->reg + TALITOS_AFEUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DEUISR),
			in_be32(priv->reg + TALITOS_DEUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_RNGUISR),
			in_be32(priv->reg + TALITOS_RNGUISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_PKEUISR),
			in_be32(priv->reg + TALITOS_PKEUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AESUISR),
			in_be32(priv->reg + TALITOS_AESUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_KEUISR),
			in_be32(priv->reg + TALITOS_KEUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	}

	/* dump all eight 64-bit words of the channel descriptor buffer */
	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
472
/*
 * recover from error interrupts
 *
 * For every channel flagged in @isr (odd bits are the error bits),
 * decodes the channel pointer status register, logs the causes, flushes
 * pending requests with an error, and either resets the channel (fifo
 * overflow) or nudges it to continue.  Unexpected ISR bits, a failed
 * channel restart, or any isr_lo bit escalate to a full device reset.
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0, reset_ch = 0;
	u32 v, v_lo;

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (!(isr & (1 << (ch * 2 + 1))))
			continue;

		error = -EINVAL;

		v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, "invalid execution unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (v_lo & TALITOS_CCPSR_LO_GB)
			dev_err(dev, "gather boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_GRL)
			dev_err(dev, "gather return/length error\n");
		if (v_lo & TALITOS_CCPSR_LO_SB)
			dev_err(dev, "scatter boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_SRL)
			dev_err(dev, "scatter return/length error\n");

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			/* ask the channel to continue past the bad desc */
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
		dev_err(dev, "done overflow, internal time out, or rngu error: "
			"ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
554
c3e337f8
KP
/*
 * IRQ handler generator: reads and acknowledges the interrupt status,
 * dispatches errors synchronously to talitos_error(), and defers done
 * processing to the tasklet selected by @tlet after masking further done
 * interrupts (the tasklet unmasks them when it finishes).
 */
#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}
DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)
9c4a7965
KP
590
591/*
592 * hwrng
593 */
594static int talitos_rng_data_present(struct hwrng *rng, int wait)
595{
596 struct device *dev = (struct device *)rng->priv;
597 struct talitos_private *priv = dev_get_drvdata(dev);
598 u32 ofl;
599 int i;
600
601 for (i = 0; i < 20; i++) {
602 ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
603 TALITOS_RNGUSR_LO_OFL;
604 if (ofl || !wait)
605 break;
606 udelay(10);
607 }
608
609 return !!ofl;
610}
611
/*
 * hwrng .data_read hook: pull one 32-bit word of entropy.
 * The fifo must be read as a full 64-bit access, so both halves are
 * read; only the second (LO) word is kept and returned to the caller.
 */
static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);

	return sizeof(u32);
}
623
/*
 * hwrng .init hook: software-reset the RNGU and wait for it to signal
 * reset-done, then kick off continuous generation.  Returns 0 on
 * success or -ENODEV if the unit never comes out of reset.
 */
static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);

	return 0;
}
644
645static int talitos_register_rng(struct device *dev)
646{
647 struct talitos_private *priv = dev_get_drvdata(dev);
648
649 priv->rng.name = dev_driver_string(dev),
650 priv->rng.init = talitos_rng_init,
651 priv->rng.data_present = talitos_rng_data_present,
652 priv->rng.data_read = talitos_rng_data_read,
653 priv->rng.priv = (unsigned long)dev;
654
655 return hwrng_register(&priv->rng);
656}
657
/* Tear down the hwrng registration made by talitos_register_rng(). */
static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}
664
665/*
666 * crypto alg
667 */
668#define TALITOS_CRA_PRIORITY 3000
357fb605 669#define TALITOS_MAX_KEY_SIZE 96
3952f17e 670#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
70bcaca7 671
9c4a7965
KP
/* per-transform (tfm) context for crypto algorithms backed by this driver */
struct talitos_ctx {
	struct device *dev;		/* owning SEC device */
	int ch;				/* channel assigned to this tfm */
	__be32 desc_hdr_template;	/* precomputed descriptor header */
	u8 key[TALITOS_MAX_KEY_SIZE];	/* auth key followed by enc key */
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;		/* total bytes used in key[] */
	unsigned int enckeylen;		/* cipher key length */
	unsigned int authkeylen;	/* hmac key length */
	unsigned int authsize;		/* ICV length for AEAD */
};
683
497f2e6b
LN
684#define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
685#define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
686
/* per-request state for ahash operations */
struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;	/* bytes of hw_context in use */
	u8 buf[HASH_MAX_BLOCK_SIZE];	/* partial-block carry buffer */
	u8 bufnext[HASH_MAX_BLOCK_SIZE]; /* staging for next partial block */
	unsigned int swinit;		/* context initialized by s/w, not h/w */
	unsigned int first;		/* first hash pass of this request */
	unsigned int last;		/* final pass (produce digest) */
	unsigned int to_hash_later;	/* bytes deferred to a later pass */
	u64 nbuf;			/* bytes currently held in buf[] */
	struct scatterlist bufsl[2];	/* sg wrapper around buf + src */
	struct scatterlist *psrc;	/* effective source scatterlist */
};
700
56af8cd4
LN
/* AEAD .setauthsize hook: record the requested ICV length. */
static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	return 0;
}
710
56af8cd4
LN
/*
 * AEAD .setkey hook: split the rtattr-encoded authenc key blob into its
 * authentication and encryption halves and store them back to back in
 * ctx->key (auth key first), recording the individual lengths.
 * Returns -EINVAL (with BAD_KEY_LEN flagged) on malformed or oversized
 * key material.
 */
static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
736
/*
 * talitos_edesc - s/w-extended descriptor
 * @assoc_nents: number of segments in associated data scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @assoc_chained: whether assoc is chained or not
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 *
 * Allocated per request; link_tbl is a trailing variable-length array
 * sized according to dma_len.
 */
struct talitos_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	bool assoc_chained;
	bool src_chained;
	bool dst_chained;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	struct talitos_ptr link_tbl[0];
};
768
4de9d0b5
LN
769static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
770 unsigned int nents, enum dma_data_direction dir,
2a1cfe46 771 bool chained)
4de9d0b5
LN
772{
773 if (unlikely(chained))
774 while (sg) {
775 dma_map_sg(dev, sg, 1, dir);
5be4d4c9 776 sg = sg_next(sg);
4de9d0b5
LN
777 }
778 else
779 dma_map_sg(dev, sg, nents, dir);
780 return nents;
781}
782
783static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
784 enum dma_data_direction dir)
785{
786 while (sg) {
787 dma_unmap_sg(dev, sg, 1, dir);
5be4d4c9 788 sg = sg_next(sg);
4de9d0b5
LN
789 }
790}
791
792static void talitos_sg_unmap(struct device *dev,
793 struct talitos_edesc *edesc,
794 struct scatterlist *src,
795 struct scatterlist *dst)
796{
797 unsigned int src_nents = edesc->src_nents ? : 1;
798 unsigned int dst_nents = edesc->dst_nents ? : 1;
799
800 if (src != dst) {
2a1cfe46 801 if (edesc->src_chained)
4de9d0b5
LN
802 talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
803 else
804 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
805
497f2e6b 806 if (dst) {
2a1cfe46 807 if (edesc->dst_chained)
497f2e6b
LN
808 talitos_unmap_sg_chain(dev, dst,
809 DMA_FROM_DEVICE);
810 else
811 dma_unmap_sg(dev, dst, dst_nents,
812 DMA_FROM_DEVICE);
813 }
4de9d0b5 814 } else
2a1cfe46 815 if (edesc->src_chained)
4de9d0b5
LN
816 talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
817 else
818 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
819}
820
/*
 * Release all DMA mappings made for an ipsec_esp AEAD request: the four
 * single-mapped descriptor pointers (key, IV/assoc, cipher key, ICV),
 * the associated-data scatterlist, the src/dst data, and the link table.
 */
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	if (edesc->assoc_chained)
		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
	else if (areq->assoclen)
		/* assoc_nents counts also for IV in non-contiguous cases */
		dma_unmap_sg(dev, areq->assoc,
			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
			     DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
844
/*
 * ipsec_esp descriptor callbacks
 */
/*
 * Encrypt-completion callback: unmap the request, copy the h/w-generated
 * ICV from its staging slot at the end of the link table into the tail
 * of the destination scatterlist (only needed when dst was scattered),
 * then complete the AEAD request.
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dst_nents) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}
877
fe5720e2 878static void ipsec_esp_decrypt_swauth_done(struct device *dev,
e938e465
KP
879 struct talitos_desc *desc,
880 void *context, int err)
9c4a7965
KP
881{
882 struct aead_request *req = context;
9c4a7965
KP
883 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
884 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
19bbbc63 885 struct talitos_edesc *edesc;
9c4a7965
KP
886 struct scatterlist *sg;
887 void *icvdata;
888
19bbbc63
KP
889 edesc = container_of(desc, struct talitos_edesc, desc);
890
9c4a7965
KP
891 ipsec_esp_unmap(dev, edesc, req);
892
893 if (!err) {
894 /* auth check */
895 if (edesc->dma_len)
896 icvdata = &edesc->link_tbl[edesc->src_nents +
79fd31d3
HG
897 edesc->dst_nents + 2 +
898 edesc->assoc_nents];
9c4a7965
KP
899 else
900 icvdata = &edesc->link_tbl[0];
901
902 sg = sg_last(req->dst, edesc->dst_nents ? : 1);
903 err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
904 ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
905 }
906
907 kfree(edesc);
908
909 aead_request_complete(req, err);
910}
911
/*
 * Decrypt-completion callback for the hardware-authentication path:
 * the SEC checked the ICV itself and wrote the verdict into the
 * descriptor's low header word; translate a failed check to -EBADMSG.
 */
static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}
932
9c4a7965
KP
933/*
934 * convert scatterlist to SEC h/w link table format
935 * stop at cryptlen bytes
936 */
70bcaca7 937static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
9c4a7965
KP
938 int cryptlen, struct talitos_ptr *link_tbl_ptr)
939{
70bcaca7
LN
940 int n_sg = sg_count;
941
942 while (n_sg--) {
922f9dc8 943 to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg), 0);
9c4a7965
KP
944 link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
945 link_tbl_ptr->j_extent = 0;
946 link_tbl_ptr++;
947 cryptlen -= sg_dma_len(sg);
5be4d4c9 948 sg = sg_next(sg);
9c4a7965
KP
949 }
950
70bcaca7 951 /* adjust (decrease) last one (or two) entry's len to cryptlen */
9c4a7965 952 link_tbl_ptr--;
c0e741d4 953 while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
70bcaca7
LN
954 /* Empty this entry, and move to previous one */
955 cryptlen += be16_to_cpu(link_tbl_ptr->len);
956 link_tbl_ptr->len = 0;
957 sg_count--;
958 link_tbl_ptr--;
959 }
7291a932 960 be16_add_cpu(&link_tbl_ptr->len, cryptlen);
9c4a7965
KP
961
962 /* tag end of link table */
963 link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
70bcaca7
LN
964
965 return sg_count;
9c4a7965
KP
966}
967
968/*
969 * fill in and submit ipsec_esp descriptor
970 */
56af8cd4 971static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
79fd31d3
HG
972 u64 seq, void (*callback) (struct device *dev,
973 struct talitos_desc *desc,
974 void *context, int error))
9c4a7965
KP
975{
976 struct crypto_aead *aead = crypto_aead_reqtfm(areq);
977 struct talitos_ctx *ctx = crypto_aead_ctx(aead);
978 struct device *dev = ctx->dev;
979 struct talitos_desc *desc = &edesc->desc;
980 unsigned int cryptlen = areq->cryptlen;
981 unsigned int authsize = ctx->authsize;
e41256f1 982 unsigned int ivsize = crypto_aead_ivsize(aead);
fa86a267 983 int sg_count, ret;
fe5720e2 984 int sg_link_tbl_len;
9c4a7965
KP
985
986 /* hmac key */
987 map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
a2b35aa8 988 DMA_TO_DEVICE);
79fd31d3 989
9c4a7965 990 /* hmac data */
79fd31d3
HG
991 desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
992 if (edesc->assoc_nents) {
993 int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
994 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
995
996 to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
922f9dc8 997 sizeof(struct talitos_ptr), 0);
79fd31d3
HG
998 desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
999
1000 /* assoc_nents - 1 entries for assoc, 1 for IV */
1001 sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
1002 areq->assoclen, tbl_ptr);
1003
1004 /* add IV to link table */
1005 tbl_ptr += sg_count - 1;
1006 tbl_ptr->j_extent = 0;
1007 tbl_ptr++;
922f9dc8 1008 to_talitos_ptr(tbl_ptr, edesc->iv_dma, 0);
79fd31d3
HG
1009 tbl_ptr->len = cpu_to_be16(ivsize);
1010 tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1011
1012 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1013 edesc->dma_len, DMA_BIDIRECTIONAL);
1014 } else {
935e99a3
HG
1015 if (areq->assoclen)
1016 to_talitos_ptr(&desc->ptr[1],
922f9dc8 1017 sg_dma_address(areq->assoc), 0);
935e99a3 1018 else
922f9dc8 1019 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, 0);
79fd31d3
HG
1020 desc->ptr[1].j_extent = 0;
1021 }
1022
9c4a7965 1023 /* cipher iv */
922f9dc8 1024 to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
79fd31d3
HG
1025 desc->ptr[2].len = cpu_to_be16(ivsize);
1026 desc->ptr[2].j_extent = 0;
1027 /* Sync needed for the aead_givencrypt case */
1028 dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
9c4a7965
KP
1029
1030 /* cipher key */
1031 map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
a2b35aa8 1032 (char *)&ctx->key + ctx->authkeylen,
9c4a7965
KP
1033 DMA_TO_DEVICE);
1034
1035 /*
1036 * cipher in
1037 * map and adjust cipher len to aead request cryptlen.
1038 * extent is bytes of HMAC postpended to ciphertext,
1039 * typically 12 for ipsec
1040 */
1041 desc->ptr[4].len = cpu_to_be16(cryptlen);
1042 desc->ptr[4].j_extent = authsize;
1043
e938e465
KP
1044 sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
1045 (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
1046 : DMA_TO_DEVICE,
2a1cfe46 1047 edesc->src_chained);
9c4a7965
KP
1048
1049 if (sg_count == 1) {
922f9dc8 1050 to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
9c4a7965 1051 } else {
fe5720e2
KP
1052 sg_link_tbl_len = cryptlen;
1053
962a9c99 1054 if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
fe5720e2 1055 sg_link_tbl_len = cryptlen + authsize;
e938e465 1056
fe5720e2 1057 sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
70bcaca7
LN
1058 &edesc->link_tbl[0]);
1059 if (sg_count > 1) {
1060 desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
922f9dc8 1061 to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl, 0);
e938e465
KP
1062 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1063 edesc->dma_len,
1064 DMA_BIDIRECTIONAL);
70bcaca7
LN
1065 } else {
1066 /* Only one segment now, so no link tbl needed */
81eb024c 1067 to_talitos_ptr(&desc->ptr[4],
922f9dc8 1068 sg_dma_address(areq->src), 0);
70bcaca7 1069 }
9c4a7965
KP
1070 }
1071
1072 /* cipher out */
1073 desc->ptr[5].len = cpu_to_be16(cryptlen);
1074 desc->ptr[5].j_extent = authsize;
1075
e938e465 1076 if (areq->src != areq->dst)
4de9d0b5
LN
1077 sg_count = talitos_map_sg(dev, areq->dst,
1078 edesc->dst_nents ? : 1,
2a1cfe46 1079 DMA_FROM_DEVICE, edesc->dst_chained);
9c4a7965
KP
1080
1081 if (sg_count == 1) {
922f9dc8 1082 to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
9c4a7965 1083 } else {
79fd31d3
HG
1084 int tbl_off = edesc->src_nents + 1;
1085 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
9c4a7965 1086
81eb024c 1087 to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
922f9dc8 1088 tbl_off * sizeof(struct talitos_ptr), 0);
fe5720e2 1089 sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
79fd31d3 1090 tbl_ptr);
fe5720e2 1091
f3c85bc1 1092 /* Add an entry to the link table for ICV data */
79fd31d3
HG
1093 tbl_ptr += sg_count - 1;
1094 tbl_ptr->j_extent = 0;
1095 tbl_ptr++;
1096 tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
1097 tbl_ptr->len = cpu_to_be16(authsize);
9c4a7965
KP
1098
1099 /* icv data follows link tables */
79fd31d3
HG
1100 to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
1101 (tbl_off + edesc->dst_nents + 1 +
1102 edesc->assoc_nents) *
922f9dc8 1103 sizeof(struct talitos_ptr), 0);
9c4a7965
KP
1104 desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
1105 dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
1106 edesc->dma_len, DMA_BIDIRECTIONAL);
1107 }
1108
1109 /* iv out */
a2b35aa8 1110 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
9c4a7965
KP
1111 DMA_FROM_DEVICE);
1112
5228f0f7 1113 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
fa86a267
KP
1114 if (ret != -EINPROGRESS) {
1115 ipsec_esp_unmap(dev, edesc, areq);
1116 kfree(edesc);
1117 }
1118 return ret;
9c4a7965
KP
1119}
1120
9c4a7965
KP
1121/*
1122 * derive number of elements in scatterlist
1123 */
2a1cfe46 1124static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
9c4a7965
KP
1125{
1126 struct scatterlist *sg = sg_list;
1127 int sg_nents = 0;
1128
2a1cfe46 1129 *chained = false;
4de9d0b5 1130 while (nbytes > 0) {
9c4a7965
KP
1131 sg_nents++;
1132 nbytes -= sg->length;
4de9d0b5 1133 if (!sg_is_last(sg) && (sg + 1)->length == 0)
2a1cfe46 1134 *chained = true;
5be4d4c9 1135 sg = sg_next(sg);
9c4a7965
KP
1136 }
1137
1138 return sg_nents;
1139}
1140
1141/*
56af8cd4 1142 * allocate and map the extended descriptor
9c4a7965 1143 */
4de9d0b5 1144static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
79fd31d3 1145 struct scatterlist *assoc,
4de9d0b5
LN
1146 struct scatterlist *src,
1147 struct scatterlist *dst,
79fd31d3
HG
1148 u8 *iv,
1149 unsigned int assoclen,
4de9d0b5
LN
1150 unsigned int cryptlen,
1151 unsigned int authsize,
79fd31d3 1152 unsigned int ivsize,
4de9d0b5 1153 int icv_stashing,
62293a37
HG
1154 u32 cryptoflags,
1155 bool encrypt)
9c4a7965 1156{
56af8cd4 1157 struct talitos_edesc *edesc;
79fd31d3
HG
1158 int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
1159 bool assoc_chained = false, src_chained = false, dst_chained = false;
1160 dma_addr_t iv_dma = 0;
4de9d0b5 1161 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
586725f8 1162 GFP_ATOMIC;
9c4a7965 1163
4de9d0b5
LN
1164 if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
1165 dev_err(dev, "length exceeds h/w max limit\n");
9c4a7965
KP
1166 return ERR_PTR(-EINVAL);
1167 }
1168
935e99a3 1169 if (ivsize)
79fd31d3
HG
1170 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1171
935e99a3 1172 if (assoclen) {
79fd31d3
HG
1173 /*
1174 * Currently it is assumed that iv is provided whenever assoc
1175 * is.
1176 */
1177 BUG_ON(!iv);
1178
1179 assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
1180 talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
1181 assoc_chained);
1182 assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;
1183
1184 if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
1185 assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
1186 }
1187
62293a37
HG
1188 if (!dst || dst == src) {
1189 src_nents = sg_count(src, cryptlen + authsize, &src_chained);
1190 src_nents = (src_nents == 1) ? 0 : src_nents;
1191 dst_nents = dst ? src_nents : 0;
1192 } else { /* dst && dst != src*/
1193 src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
1194 &src_chained);
1195 src_nents = (src_nents == 1) ? 0 : src_nents;
1196 dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
1197 &dst_chained);
1198 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
9c4a7965
KP
1199 }
1200
1201 /*
1202 * allocate space for base edesc plus the link tables,
f3c85bc1 1203 * allowing for two separate entries for ICV and generated ICV (+ 2),
9c4a7965
KP
1204 * and the ICV data itself
1205 */
56af8cd4 1206 alloc_len = sizeof(struct talitos_edesc);
79fd31d3
HG
1207 if (assoc_nents || src_nents || dst_nents) {
1208 dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
1209 sizeof(struct talitos_ptr) + authsize;
9c4a7965
KP
1210 alloc_len += dma_len;
1211 } else {
1212 dma_len = 0;
4de9d0b5 1213 alloc_len += icv_stashing ? authsize : 0;
9c4a7965
KP
1214 }
1215
586725f8 1216 edesc = kmalloc(alloc_len, GFP_DMA | flags);
9c4a7965 1217 if (!edesc) {
935e99a3
HG
1218 if (assoc_chained)
1219 talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
1220 else if (assoclen)
1221 dma_unmap_sg(dev, assoc,
1222 assoc_nents ? assoc_nents - 1 : 1,
1223 DMA_TO_DEVICE);
1224
79fd31d3
HG
1225 if (iv_dma)
1226 dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
935e99a3 1227
4de9d0b5 1228 dev_err(dev, "could not allocate edescriptor\n");
9c4a7965
KP
1229 return ERR_PTR(-ENOMEM);
1230 }
1231
79fd31d3 1232 edesc->assoc_nents = assoc_nents;
9c4a7965
KP
1233 edesc->src_nents = src_nents;
1234 edesc->dst_nents = dst_nents;
79fd31d3 1235 edesc->assoc_chained = assoc_chained;
2a1cfe46
HG
1236 edesc->src_chained = src_chained;
1237 edesc->dst_chained = dst_chained;
79fd31d3 1238 edesc->iv_dma = iv_dma;
9c4a7965 1239 edesc->dma_len = dma_len;
497f2e6b
LN
1240 if (dma_len)
1241 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1242 edesc->dma_len,
1243 DMA_BIDIRECTIONAL);
9c4a7965
KP
1244
1245 return edesc;
1246}
1247
79fd31d3 1248static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
62293a37 1249 int icv_stashing, bool encrypt)
4de9d0b5
LN
1250{
1251 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1252 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
79fd31d3 1253 unsigned int ivsize = crypto_aead_ivsize(authenc);
4de9d0b5 1254
79fd31d3
HG
1255 return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
1256 iv, areq->assoclen, areq->cryptlen,
1257 ctx->authsize, ivsize, icv_stashing,
62293a37 1258 areq->base.flags, encrypt);
4de9d0b5
LN
1259}
1260
56af8cd4 1261static int aead_encrypt(struct aead_request *req)
9c4a7965
KP
1262{
1263 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1264 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
56af8cd4 1265 struct talitos_edesc *edesc;
9c4a7965
KP
1266
1267 /* allocate extended descriptor */
62293a37 1268 edesc = aead_edesc_alloc(req, req->iv, 0, true);
9c4a7965
KP
1269 if (IS_ERR(edesc))
1270 return PTR_ERR(edesc);
1271
1272 /* set encrypt */
70bcaca7 1273 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
9c4a7965 1274
79fd31d3 1275 return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
9c4a7965
KP
1276}
1277
56af8cd4 1278static int aead_decrypt(struct aead_request *req)
9c4a7965
KP
1279{
1280 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1281 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1282 unsigned int authsize = ctx->authsize;
fe5720e2 1283 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
56af8cd4 1284 struct talitos_edesc *edesc;
9c4a7965
KP
1285 struct scatterlist *sg;
1286 void *icvdata;
1287
1288 req->cryptlen -= authsize;
1289
1290 /* allocate extended descriptor */
62293a37 1291 edesc = aead_edesc_alloc(req, req->iv, 1, false);
9c4a7965
KP
1292 if (IS_ERR(edesc))
1293 return PTR_ERR(edesc);
1294
fe5720e2 1295 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
e938e465
KP
1296 ((!edesc->src_nents && !edesc->dst_nents) ||
1297 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
9c4a7965 1298
fe5720e2 1299 /* decrypt and check the ICV */
e938e465
KP
1300 edesc->desc.hdr = ctx->desc_hdr_template |
1301 DESC_HDR_DIR_INBOUND |
fe5720e2 1302 DESC_HDR_MODE1_MDEU_CICV;
9c4a7965 1303
fe5720e2
KP
1304 /* reset integrity check result bits */
1305 edesc->desc.hdr_lo = 0;
9c4a7965 1306
79fd31d3 1307 return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
e938e465 1308 }
fe5720e2 1309
e938e465
KP
1310 /* Have to check the ICV with software */
1311 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
fe5720e2 1312
e938e465
KP
1313 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1314 if (edesc->dma_len)
1315 icvdata = &edesc->link_tbl[edesc->src_nents +
79fd31d3
HG
1316 edesc->dst_nents + 2 +
1317 edesc->assoc_nents];
e938e465
KP
1318 else
1319 icvdata = &edesc->link_tbl[0];
fe5720e2 1320
e938e465 1321 sg = sg_last(req->src, edesc->src_nents ? : 1);
fe5720e2 1322
e938e465
KP
1323 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize,
1324 ctx->authsize);
fe5720e2 1325
79fd31d3 1326 return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done);
9c4a7965
KP
1327}
1328
56af8cd4 1329static int aead_givencrypt(struct aead_givcrypt_request *req)
9c4a7965
KP
1330{
1331 struct aead_request *areq = &req->areq;
1332 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1333 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
56af8cd4 1334 struct talitos_edesc *edesc;
9c4a7965
KP
1335
1336 /* allocate extended descriptor */
62293a37 1337 edesc = aead_edesc_alloc(areq, req->giv, 0, true);
9c4a7965
KP
1338 if (IS_ERR(edesc))
1339 return PTR_ERR(edesc);
1340
1341 /* set encrypt */
70bcaca7 1342 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
9c4a7965
KP
1343
1344 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc));
ba95487d
KP
1345 /* avoid consecutive packets going out with same IV */
1346 *(__be64 *)req->giv ^= cpu_to_be64(req->seq);
9c4a7965 1347
79fd31d3 1348 return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done);
9c4a7965
KP
1349}
1350
4de9d0b5
LN
1351static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1352 const u8 *key, unsigned int keylen)
1353{
1354 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
4de9d0b5
LN
1355
1356 memcpy(&ctx->key, key, keylen);
1357 ctx->keylen = keylen;
1358
1359 return 0;
4de9d0b5
LN
1360}
1361
/*
 * Unmap the src/dst scatterlists referenced by a descriptor pointer pair.
 * Thin wrapper kept so SEC1/SEC2+ variants can diverge later; @len is
 * currently unused.
 */
static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	talitos_sg_unmap(dev, edesc, src, dst);
}
1368
4de9d0b5
LN
1369static void common_nonsnoop_unmap(struct device *dev,
1370 struct talitos_edesc *edesc,
1371 struct ablkcipher_request *areq)
1372{
1373 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
032d197e
LC
1374
1375 unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
4de9d0b5
LN
1376 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
1377 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);
1378
4de9d0b5
LN
1379 if (edesc->dma_len)
1380 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1381 DMA_BIDIRECTIONAL);
1382}
1383
1384static void ablkcipher_done(struct device *dev,
1385 struct talitos_desc *desc, void *context,
1386 int err)
1387{
1388 struct ablkcipher_request *areq = context;
19bbbc63
KP
1389 struct talitos_edesc *edesc;
1390
1391 edesc = container_of(desc, struct talitos_edesc, desc);
4de9d0b5
LN
1392
1393 common_nonsnoop_unmap(dev, edesc, areq);
1394
1395 kfree(edesc);
1396
1397 areq->base.complete(&areq->base, err);
1398}
1399
032d197e
LC
1400int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
1401 unsigned int len, struct talitos_edesc *edesc,
1402 enum dma_data_direction dir, struct talitos_ptr *ptr)
1403{
1404 int sg_count;
922f9dc8
LC
1405 struct talitos_private *priv = dev_get_drvdata(dev);
1406 bool is_sec1 = has_ftr_sec1(priv);
032d197e 1407
922f9dc8
LC
1408 to_talitos_ptr_len(ptr, len, is_sec1);
1409 to_talitos_ptr_extent_clear(ptr, is_sec1);
032d197e
LC
1410
1411 sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir,
1412 edesc->src_chained);
1413
1414 if (sg_count == 1) {
922f9dc8 1415 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
032d197e
LC
1416 } else {
1417 sg_count = sg_to_link_tbl(src, sg_count, len,
1418 &edesc->link_tbl[0]);
1419 if (sg_count > 1) {
922f9dc8 1420 to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
032d197e
LC
1421 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1422 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1423 edesc->dma_len,
1424 DMA_BIDIRECTIONAL);
1425 } else {
1426 /* Only one segment now, so no link tbl needed */
922f9dc8 1427 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
032d197e
LC
1428 }
1429 }
1430 return sg_count;
1431}
1432
1433void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
1434 unsigned int len, struct talitos_edesc *edesc,
1435 enum dma_data_direction dir,
1436 struct talitos_ptr *ptr, int sg_count)
1437{
922f9dc8
LC
1438 struct talitos_private *priv = dev_get_drvdata(dev);
1439 bool is_sec1 = has_ftr_sec1(priv);
1440
1441 to_talitos_ptr_len(ptr, len, is_sec1);
1442 to_talitos_ptr_extent_clear(ptr, is_sec1);
032d197e
LC
1443
1444 if (dir != DMA_NONE)
1445 sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1,
1446 dir, edesc->dst_chained);
1447
1448 if (sg_count == 1) {
922f9dc8 1449 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
032d197e
LC
1450 } else {
1451 struct talitos_ptr *link_tbl_ptr =
1452 &edesc->link_tbl[edesc->src_nents + 1];
1453
1454 to_talitos_ptr(ptr, edesc->dma_link_tbl +
1455 (edesc->src_nents + 1) *
922f9dc8 1456 sizeof(struct talitos_ptr), 0);
032d197e
LC
1457 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
1458 sg_count = sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
1459 dma_sync_single_for_device(dev, edesc->dma_link_tbl,
1460 edesc->dma_len, DMA_BIDIRECTIONAL);
1461 }
1462}
1463
4de9d0b5
LN
1464static int common_nonsnoop(struct talitos_edesc *edesc,
1465 struct ablkcipher_request *areq,
4de9d0b5
LN
1466 void (*callback) (struct device *dev,
1467 struct talitos_desc *desc,
1468 void *context, int error))
1469{
1470 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1471 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1472 struct device *dev = ctx->dev;
1473 struct talitos_desc *desc = &edesc->desc;
1474 unsigned int cryptlen = areq->nbytes;
79fd31d3 1475 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
4de9d0b5 1476 int sg_count, ret;
922f9dc8
LC
1477 struct talitos_private *priv = dev_get_drvdata(dev);
1478 bool is_sec1 = has_ftr_sec1(priv);
4de9d0b5
LN
1479
1480 /* first DWORD empty */
2529bc37 1481 desc->ptr[0] = zero_entry;
4de9d0b5
LN
1482
1483 /* cipher iv */
922f9dc8
LC
1484 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
1485 to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
1486 to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);
4de9d0b5
LN
1487
1488 /* cipher key */
1489 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
a2b35aa8 1490 (char *)&ctx->key, DMA_TO_DEVICE);
4de9d0b5
LN
1491
1492 /*
1493 * cipher in
1494 */
032d197e
LC
1495 sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
1496 (areq->src == areq->dst) ?
1497 DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
1498 &desc->ptr[3]);
4de9d0b5
LN
1499
1500 /* cipher out */
032d197e
LC
1501 map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
1502 (areq->src == areq->dst) ? DMA_NONE
1503 : DMA_FROM_DEVICE,
1504 &desc->ptr[4], sg_count);
4de9d0b5
LN
1505
1506 /* iv out */
a2b35aa8 1507 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
4de9d0b5
LN
1508 DMA_FROM_DEVICE);
1509
1510 /* last DWORD empty */
2529bc37 1511 desc->ptr[6] = zero_entry;
4de9d0b5 1512
5228f0f7 1513 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
4de9d0b5
LN
1514 if (ret != -EINPROGRESS) {
1515 common_nonsnoop_unmap(dev, edesc, areq);
1516 kfree(edesc);
1517 }
1518 return ret;
1519}
1520
e938e465 1521static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
62293a37 1522 areq, bool encrypt)
4de9d0b5
LN
1523{
1524 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1525 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
79fd31d3 1526 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
4de9d0b5 1527
79fd31d3
HG
1528 return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
1529 areq->info, 0, areq->nbytes, 0, ivsize, 0,
62293a37 1530 areq->base.flags, encrypt);
4de9d0b5
LN
1531}
1532
1533static int ablkcipher_encrypt(struct ablkcipher_request *areq)
1534{
1535 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1536 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1537 struct talitos_edesc *edesc;
1538
1539 /* allocate extended descriptor */
62293a37 1540 edesc = ablkcipher_edesc_alloc(areq, true);
4de9d0b5
LN
1541 if (IS_ERR(edesc))
1542 return PTR_ERR(edesc);
1543
1544 /* set encrypt */
1545 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1546
febec542 1547 return common_nonsnoop(edesc, areq, ablkcipher_done);
4de9d0b5
LN
1548}
1549
1550static int ablkcipher_decrypt(struct ablkcipher_request *areq)
1551{
1552 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
1553 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1554 struct talitos_edesc *edesc;
1555
1556 /* allocate extended descriptor */
62293a37 1557 edesc = ablkcipher_edesc_alloc(areq, false);
4de9d0b5
LN
1558 if (IS_ERR(edesc))
1559 return PTR_ERR(edesc);
1560
1561 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1562
febec542 1563 return common_nonsnoop(edesc, areq, ablkcipher_done);
4de9d0b5
LN
1564}
1565
497f2e6b
LN
1566static void common_nonsnoop_hash_unmap(struct device *dev,
1567 struct talitos_edesc *edesc,
1568 struct ahash_request *areq)
1569{
1570 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
922f9dc8
LC
1571 struct talitos_private *priv = dev_get_drvdata(dev);
1572 bool is_sec1 = has_ftr_sec1(priv);
497f2e6b
LN
1573
1574 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
1575
032d197e
LC
1576 unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);
1577
497f2e6b 1578 /* When using hashctx-in, must unmap it. */
922f9dc8 1579 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
497f2e6b
LN
1580 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
1581 DMA_TO_DEVICE);
1582
922f9dc8 1583 if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
497f2e6b
LN
1584 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
1585 DMA_TO_DEVICE);
1586
497f2e6b
LN
1587 if (edesc->dma_len)
1588 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
1589 DMA_BIDIRECTIONAL);
1590
1591}
1592
1593static void ahash_done(struct device *dev,
1594 struct talitos_desc *desc, void *context,
1595 int err)
1596{
1597 struct ahash_request *areq = context;
1598 struct talitos_edesc *edesc =
1599 container_of(desc, struct talitos_edesc, desc);
1600 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1601
1602 if (!req_ctx->last && req_ctx->to_hash_later) {
1603 /* Position any partial block for next update/final/finup */
1604 memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
5e833bc4 1605 req_ctx->nbuf = req_ctx->to_hash_later;
497f2e6b
LN
1606 }
1607 common_nonsnoop_hash_unmap(dev, edesc, areq);
1608
1609 kfree(edesc);
1610
1611 areq->base.complete(&areq->base, err);
1612}
1613
1614static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1615 struct ahash_request *areq, unsigned int length,
1616 void (*callback) (struct device *dev,
1617 struct talitos_desc *desc,
1618 void *context, int error))
1619{
1620 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1621 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1622 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1623 struct device *dev = ctx->dev;
1624 struct talitos_desc *desc = &edesc->desc;
032d197e 1625 int ret;
922f9dc8
LC
1626 struct talitos_private *priv = dev_get_drvdata(dev);
1627 bool is_sec1 = has_ftr_sec1(priv);
497f2e6b
LN
1628
1629 /* first DWORD empty */
1630 desc->ptr[0] = zero_entry;
1631
60f208d7
KP
1632 /* hash context in */
1633 if (!req_ctx->first || req_ctx->swinit) {
497f2e6b
LN
1634 map_single_talitos_ptr(dev, &desc->ptr[1],
1635 req_ctx->hw_context_size,
a2b35aa8 1636 (char *)req_ctx->hw_context,
497f2e6b 1637 DMA_TO_DEVICE);
60f208d7 1638 req_ctx->swinit = 0;
497f2e6b
LN
1639 } else {
1640 desc->ptr[1] = zero_entry;
1641 /* Indicate next op is not the first. */
1642 req_ctx->first = 0;
1643 }
1644
1645 /* HMAC key */
1646 if (ctx->keylen)
1647 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
a2b35aa8 1648 (char *)&ctx->key, DMA_TO_DEVICE);
497f2e6b
LN
1649 else
1650 desc->ptr[2] = zero_entry;
1651
1652 /*
1653 * data in
1654 */
032d197e
LC
1655 map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
1656 DMA_TO_DEVICE, &desc->ptr[3]);
497f2e6b
LN
1657
1658 /* fifth DWORD empty */
1659 desc->ptr[4] = zero_entry;
1660
1661 /* hash/HMAC out -or- hash context out */
1662 if (req_ctx->last)
1663 map_single_talitos_ptr(dev, &desc->ptr[5],
1664 crypto_ahash_digestsize(tfm),
a2b35aa8 1665 areq->result, DMA_FROM_DEVICE);
497f2e6b
LN
1666 else
1667 map_single_talitos_ptr(dev, &desc->ptr[5],
1668 req_ctx->hw_context_size,
a2b35aa8 1669 req_ctx->hw_context, DMA_FROM_DEVICE);
497f2e6b
LN
1670
1671 /* last DWORD empty */
1672 desc->ptr[6] = zero_entry;
1673
5228f0f7 1674 ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
497f2e6b
LN
1675 if (ret != -EINPROGRESS) {
1676 common_nonsnoop_hash_unmap(dev, edesc, areq);
1677 kfree(edesc);
1678 }
1679 return ret;
1680}
1681
1682static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1683 unsigned int nbytes)
1684{
1685 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1686 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1687 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1688
79fd31d3 1689 return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0,
62293a37 1690 nbytes, 0, 0, 0, areq->base.flags, false);
497f2e6b
LN
1691}
1692
1693static int ahash_init(struct ahash_request *areq)
1694{
1695 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1696 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1697
1698 /* Initialize the context */
5e833bc4 1699 req_ctx->nbuf = 0;
60f208d7
KP
1700 req_ctx->first = 1; /* first indicates h/w must init its context */
1701 req_ctx->swinit = 0; /* assume h/w init of context */
497f2e6b
LN
1702 req_ctx->hw_context_size =
1703 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1704 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1705 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1706
1707 return 0;
1708}
1709
60f208d7
KP
1710/*
1711 * on h/w without explicit sha224 support, we initialize h/w context
1712 * manually with sha224 constants, and tell it to run sha256.
1713 */
1714static int ahash_init_sha224_swinit(struct ahash_request *areq)
1715{
1716 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1717
1718 ahash_init(areq);
1719 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/
1720
a752447a
KP
1721 req_ctx->hw_context[0] = SHA224_H0;
1722 req_ctx->hw_context[1] = SHA224_H1;
1723 req_ctx->hw_context[2] = SHA224_H2;
1724 req_ctx->hw_context[3] = SHA224_H3;
1725 req_ctx->hw_context[4] = SHA224_H4;
1726 req_ctx->hw_context[5] = SHA224_H5;
1727 req_ctx->hw_context[6] = SHA224_H6;
1728 req_ctx->hw_context[7] = SHA224_H7;
60f208d7
KP
1729
1730 /* init 64-bit count */
1731 req_ctx->hw_context[8] = 0;
1732 req_ctx->hw_context[9] = 0;
1733
1734 return 0;
1735}
1736
497f2e6b
LN
1737static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1738{
1739 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1740 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1741 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1742 struct talitos_edesc *edesc;
1743 unsigned int blocksize =
1744 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1745 unsigned int nbytes_to_hash;
1746 unsigned int to_hash_later;
5e833bc4 1747 unsigned int nsg;
2a1cfe46 1748 bool chained;
497f2e6b 1749
5e833bc4
LN
1750 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1751 /* Buffer up to one whole block */
497f2e6b
LN
1752 sg_copy_to_buffer(areq->src,
1753 sg_count(areq->src, nbytes, &chained),
5e833bc4
LN
1754 req_ctx->buf + req_ctx->nbuf, nbytes);
1755 req_ctx->nbuf += nbytes;
497f2e6b
LN
1756 return 0;
1757 }
1758
5e833bc4
LN
1759 /* At least (blocksize + 1) bytes are available to hash */
1760 nbytes_to_hash = nbytes + req_ctx->nbuf;
1761 to_hash_later = nbytes_to_hash & (blocksize - 1);
1762
1763 if (req_ctx->last)
1764 to_hash_later = 0;
1765 else if (to_hash_later)
1766 /* There is a partial block. Hash the full block(s) now */
1767 nbytes_to_hash -= to_hash_later;
1768 else {
1769 /* Keep one block buffered */
1770 nbytes_to_hash -= blocksize;
1771 to_hash_later = blocksize;
1772 }
1773
1774 /* Chain in any previously buffered data */
1775 if (req_ctx->nbuf) {
1776 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1777 sg_init_table(req_ctx->bufsl, nsg);
1778 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1779 if (nsg > 1)
1780 scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
497f2e6b 1781 req_ctx->psrc = req_ctx->bufsl;
5e833bc4 1782 } else
497f2e6b 1783 req_ctx->psrc = areq->src;
5e833bc4
LN
1784
1785 if (to_hash_later) {
1786 int nents = sg_count(areq->src, nbytes, &chained);
d0525723 1787 sg_pcopy_to_buffer(areq->src, nents,
5e833bc4
LN
1788 req_ctx->bufnext,
1789 to_hash_later,
1790 nbytes - to_hash_later);
497f2e6b 1791 }
5e833bc4 1792 req_ctx->to_hash_later = to_hash_later;
497f2e6b 1793
5e833bc4 1794 /* Allocate extended descriptor */
497f2e6b
LN
1795 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1796 if (IS_ERR(edesc))
1797 return PTR_ERR(edesc);
1798
1799 edesc->desc.hdr = ctx->desc_hdr_template;
1800
1801 /* On last one, request SEC to pad; otherwise continue */
1802 if (req_ctx->last)
1803 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1804 else
1805 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1806
60f208d7
KP
1807 /* request SEC to INIT hash. */
1808 if (req_ctx->first && !req_ctx->swinit)
497f2e6b
LN
1809 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1810
1811 /* When the tfm context has a keylen, it's an HMAC.
1812 * A first or last (ie. not middle) descriptor must request HMAC.
1813 */
1814 if (ctx->keylen && (req_ctx->first || req_ctx->last))
1815 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1816
1817 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1818 ahash_done);
1819}
1820
1821static int ahash_update(struct ahash_request *areq)
1822{
1823 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1824
1825 req_ctx->last = 0;
1826
1827 return ahash_process_req(areq, areq->nbytes);
1828}
1829
1830static int ahash_final(struct ahash_request *areq)
1831{
1832 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1833
1834 req_ctx->last = 1;
1835
1836 return ahash_process_req(areq, 0);
1837}
1838
1839static int ahash_finup(struct ahash_request *areq)
1840{
1841 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1842
1843 req_ctx->last = 1;
1844
1845 return ahash_process_req(areq, areq->nbytes);
1846}
1847
1848static int ahash_digest(struct ahash_request *areq)
1849{
1850 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
60f208d7 1851 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
497f2e6b 1852
60f208d7 1853 ahash->init(areq);
497f2e6b
LN
1854 req_ctx->last = 1;
1855
1856 return ahash_process_req(areq, areq->nbytes);
1857}
1858
79b3a418
LN
/* Completion bookkeeping for the synchronous key digest in keyhash() */
struct keyhash_result {
	struct completion completion;	/* signalled by keyhash_complete() */
	int err;			/* final status of the hash request */
};
1863
1864static void keyhash_complete(struct crypto_async_request *req, int err)
1865{
1866 struct keyhash_result *res = req->data;
1867
1868 if (err == -EINPROGRESS)
1869 return;
1870
1871 res->err = err;
1872 complete(&res->completion);
1873}
1874
1875static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
1876 u8 *hash)
1877{
1878 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1879
1880 struct scatterlist sg[1];
1881 struct ahash_request *req;
1882 struct keyhash_result hresult;
1883 int ret;
1884
1885 init_completion(&hresult.completion);
1886
1887 req = ahash_request_alloc(tfm, GFP_KERNEL);
1888 if (!req)
1889 return -ENOMEM;
1890
1891 /* Keep tfm keylen == 0 during hash of the long key */
1892 ctx->keylen = 0;
1893 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
1894 keyhash_complete, &hresult);
1895
1896 sg_init_one(&sg[0], key, keylen);
1897
1898 ahash_request_set_crypt(req, sg, hash, keylen);
1899 ret = crypto_ahash_digest(req);
1900 switch (ret) {
1901 case 0:
1902 break;
1903 case -EINPROGRESS:
1904 case -EBUSY:
1905 ret = wait_for_completion_interruptible(
1906 &hresult.completion);
1907 if (!ret)
1908 ret = hresult.err;
1909 break;
1910 default:
1911 break;
1912 }
1913 ahash_request_free(req);
1914
1915 return ret;
1916}
1917
1918static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1919 unsigned int keylen)
1920{
1921 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
1922 unsigned int blocksize =
1923 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1924 unsigned int digestsize = crypto_ahash_digestsize(tfm);
1925 unsigned int keysize = keylen;
1926 u8 hash[SHA512_DIGEST_SIZE];
1927 int ret;
1928
1929 if (keylen <= blocksize)
1930 memcpy(ctx->key, key, keysize);
1931 else {
1932 /* Must get the hash of the long key */
1933 ret = keyhash(tfm, key, keylen, hash);
1934
1935 if (ret) {
1936 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
1937 return -EINVAL;
1938 }
1939
1940 keysize = digestsize;
1941 memcpy(ctx->key, hash, digestsize);
1942 }
1943
1944 ctx->keylen = keysize;
1945
1946 return 0;
1947}
1948
1949
/*
 * Template describing one algorithm the driver can offer: the crypto API
 * type, the matching crypto_alg/ahash_alg definition (union keyed by type),
 * and the SEC descriptor header template used to program the hardware.
 */
struct talitos_alg_template {
	u32 type;			/* CRYPTO_ALG_TYPE_{AEAD,ABLKCIPHER,AHASH} */
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
	} alg;
	__be32 desc_hdr_template;	/* big-endian, as consumed by the SEC */
};
1958
/*
 * Master table of algorithms this driver may register.  At probe time each
 * entry's desc_hdr_template is checked against the SEC's advertised
 * descriptor types and execution units (hw_supports()) before registration.
 * SHA-384/512 variants select the MDEU-B execution unit; the rest use MDEU-A.
 */
static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
		.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha224),cbc(aes))",
		.cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
		.cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
		.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha384),cbc(aes))",
		.cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUB |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
		.cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUB |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUB |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
		.cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUB |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(md5),cbc(aes))",
		.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{ .type = CRYPTO_ALG_TYPE_AEAD,
	  .alg.crypto = {
		.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
		.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
		.cra_aead = {
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES |
			       DESC_HDR_SEL1_MDEUA |
			       DESC_HDR_MODE1_MDEU_INIT |
			       DESC_HDR_MODE1_MDEU_PAD |
			       DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "cbc-aes-talitos",
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_AESU |
			       DESC_HDR_MODE0_AESU_CBC,
	},
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
	  .alg.crypto = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "cbc-3des-talitos",
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			     CRYPTO_ALG_ASYNC,
		.cra_ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_DEU |
			       DESC_HDR_MODE0_DEU_CBC |
			       DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = MD5_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "md5",
			.cra_driver_name = "md5-talitos",
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_MD5,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha1",
			.cra_driver_name = "sha1-talitos",
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA1,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA224_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha224",
			.cra_driver_name = "sha224-talitos",
			.cra_blocksize = SHA224_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA224,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha256",
			.cra_driver_name = "sha256-talitos",
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA256,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA384_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha384",
			.cra_driver_name = "sha384-talitos",
			.cra_blocksize = SHA384_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUB |
			       DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA512_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha512",
			.cra_driver_name = "sha512-talitos",
			.cra_blocksize = SHA512_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUB |
			       DESC_HDR_MODE0_MDEUB_SHA512,
	},
	/* HMAC variants share the plain-hash descriptor templates; the
	 * MDEU HMAC mode bits are set at request time based on ctx->keylen. */
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = MD5_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "hmac-md5-talitos",
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_MD5,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "hmac-sha1-talitos",
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA1,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA224_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "hmac(sha224)",
			.cra_driver_name = "hmac-sha224-talitos",
			.cra_blocksize = SHA224_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA224,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "hmac-sha256-talitos",
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUA |
			       DESC_HDR_MODE0_MDEU_SHA256,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA384_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "hmac(sha384)",
			.cra_driver_name = "hmac-sha384-talitos",
			.cra_blocksize = SHA384_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUB |
			       DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{ .type = CRYPTO_ALG_TYPE_AHASH,
	  .alg.hash = {
		.halg.digestsize = SHA512_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "hmac(sha512)",
			.cra_driver_name = "hmac-sha512-talitos",
			.cra_blocksize = SHA512_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AHASH |
				     CRYPTO_ALG_ASYNC,
		}
	  },
	  .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
			       DESC_HDR_SEL0_MDEUB |
			       DESC_HDR_MODE0_MDEUB_SHA512,
	}
};
2413
/*
 * A registered algorithm instance: links the template (with its live
 * crypto_alg/ahash_alg) to the owning SEC device, chained on
 * talitos_private.alg_list for unregistration at remove time.
 */
struct talitos_crypto_alg {
	struct list_head entry;		/* node in priv->alg_list */
	struct device *dev;		/* SEC device servicing this alg */
	struct talitos_alg_template algt;
};
2419
2420static int talitos_cra_init(struct crypto_tfm *tfm)
2421{
2422 struct crypto_alg *alg = tfm->__crt_alg;
19bbbc63 2423 struct talitos_crypto_alg *talitos_alg;
9c4a7965 2424 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
5228f0f7 2425 struct talitos_private *priv;
9c4a7965 2426
497f2e6b
LN
2427 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2428 talitos_alg = container_of(__crypto_ahash_alg(alg),
2429 struct talitos_crypto_alg,
2430 algt.alg.hash);
2431 else
2432 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2433 algt.alg.crypto);
19bbbc63 2434
9c4a7965
KP
2435 /* update context with ptr to dev */
2436 ctx->dev = talitos_alg->dev;
19bbbc63 2437
5228f0f7
KP
2438 /* assign SEC channel to tfm in round-robin fashion */
2439 priv = dev_get_drvdata(ctx->dev);
2440 ctx->ch = atomic_inc_return(&priv->last_chan) &
2441 (priv->num_channels - 1);
2442
9c4a7965 2443 /* copy descriptor header template value */
acbf7c62 2444 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
9c4a7965 2445
602dba5a
KP
2446 /* select done notification */
2447 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2448
497f2e6b
LN
2449 return 0;
2450}
2451
2452static int talitos_cra_init_aead(struct crypto_tfm *tfm)
2453{
2454 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2455
2456 talitos_cra_init(tfm);
9c4a7965
KP
2457
2458 /* random first IV */
70bcaca7 2459 get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);
9c4a7965
KP
2460
2461 return 0;
2462}
2463
497f2e6b
LN
2464static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2465{
2466 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2467
2468 talitos_cra_init(tfm);
2469
2470 ctx->keylen = 0;
2471 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2472 sizeof(struct talitos_ahash_req_ctx));
2473
2474 return 0;
2475}
2476
9c4a7965
KP
2477/*
2478 * given the alg's descriptor header template, determine whether descriptor
2479 * type and primary/secondary execution units required match the hw
2480 * capabilities description provided in the device tree node.
2481 */
2482static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2483{
2484 struct talitos_private *priv = dev_get_drvdata(dev);
2485 int ret;
2486
2487 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2488 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2489
2490 if (SECONDARY_EU(desc_hdr_template))
2491 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2492 & priv->exec_units);
2493
2494 return ret;
2495}
2496
/*
 * Tear down the device: unregister all algorithms and the RNG, free the
 * per-channel request fifos, release IRQs, kill tasklets, unmap registers.
 * Also used as the error-unwind path from talitos_probe(), so every step
 * must tolerate partially-initialized state.
 */
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	/* unregister in the crypto API before freeing anything it uses */
	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	/* at most two IRQ lines (primary + secondary); 0 means unmapped */
	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	/* IRQs are gone, so the tasklets cannot be rescheduled now */
	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	kfree(priv);

	return 0;
}
2542
2543static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2544 struct talitos_alg_template
2545 *template)
2546{
60f208d7 2547 struct talitos_private *priv = dev_get_drvdata(dev);
9c4a7965
KP
2548 struct talitos_crypto_alg *t_alg;
2549 struct crypto_alg *alg;
2550
2551 t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2552 if (!t_alg)
2553 return ERR_PTR(-ENOMEM);
2554
acbf7c62
LN
2555 t_alg->algt = *template;
2556
2557 switch (t_alg->algt.type) {
2558 case CRYPTO_ALG_TYPE_ABLKCIPHER:
497f2e6b
LN
2559 alg = &t_alg->algt.alg.crypto;
2560 alg->cra_init = talitos_cra_init;
d4cd3283 2561 alg->cra_type = &crypto_ablkcipher_type;
b286e003
KP
2562 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
2563 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
2564 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
2565 alg->cra_ablkcipher.geniv = "eseqiv";
497f2e6b 2566 break;
acbf7c62
LN
2567 case CRYPTO_ALG_TYPE_AEAD:
2568 alg = &t_alg->algt.alg.crypto;
497f2e6b 2569 alg->cra_init = talitos_cra_init_aead;
d4cd3283 2570 alg->cra_type = &crypto_aead_type;
b286e003
KP
2571 alg->cra_aead.setkey = aead_setkey;
2572 alg->cra_aead.setauthsize = aead_setauthsize;
2573 alg->cra_aead.encrypt = aead_encrypt;
2574 alg->cra_aead.decrypt = aead_decrypt;
2575 alg->cra_aead.givencrypt = aead_givencrypt;
2576 alg->cra_aead.geniv = "<built-in>";
acbf7c62
LN
2577 break;
2578 case CRYPTO_ALG_TYPE_AHASH:
2579 alg = &t_alg->algt.alg.hash.halg.base;
497f2e6b 2580 alg->cra_init = talitos_cra_init_ahash;
d4cd3283 2581 alg->cra_type = &crypto_ahash_type;
b286e003
KP
2582 t_alg->algt.alg.hash.init = ahash_init;
2583 t_alg->algt.alg.hash.update = ahash_update;
2584 t_alg->algt.alg.hash.final = ahash_final;
2585 t_alg->algt.alg.hash.finup = ahash_finup;
2586 t_alg->algt.alg.hash.digest = ahash_digest;
2587 t_alg->algt.alg.hash.setkey = ahash_setkey;
2588
79b3a418 2589 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
0b2730d8
KP
2590 !strncmp(alg->cra_name, "hmac", 4)) {
2591 kfree(t_alg);
79b3a418 2592 return ERR_PTR(-ENOTSUPP);
0b2730d8 2593 }
60f208d7 2594 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
79b3a418
LN
2595 (!strcmp(alg->cra_name, "sha224") ||
2596 !strcmp(alg->cra_name, "hmac(sha224)"))) {
60f208d7
KP
2597 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2598 t_alg->algt.desc_hdr_template =
2599 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2600 DESC_HDR_SEL0_MDEUA |
2601 DESC_HDR_MODE0_MDEU_SHA256;
2602 }
497f2e6b 2603 break;
1d11911a
KP
2604 default:
2605 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
2606 return ERR_PTR(-EINVAL);
acbf7c62 2607 }
9c4a7965 2608
9c4a7965 2609 alg->cra_module = THIS_MODULE;
9c4a7965 2610 alg->cra_priority = TALITOS_CRA_PRIORITY;
9c4a7965 2611 alg->cra_alignmask = 0;
9c4a7965 2612 alg->cra_ctxsize = sizeof(struct talitos_ctx);
d912bb76 2613 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
9c4a7965 2614
9c4a7965
KP
2615 t_alg->dev = dev;
2616
2617 return t_alg;
2618}
2619
c3e337f8
KP
/*
 * Map and request the SEC's interrupt line(s) from the device tree.
 * Single-IRQ parts get one handler covering all four channels; dual-IRQ
 * parts split channels 0/2 and 1/3 across the two lines.  On failure the
 * corresponding priv->irq[] slot is disposed and zeroed so that
 * talitos_remove() can unwind safely.
 */
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}

	/* second line is optional; 0 here selects the single-IRQ layout */
	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	/* reached with err == 0 in the single-IRQ success case */
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}
2667
/*
 * Probe a SEC instance: read capabilities from the device tree, map
 * registers and IRQs, allocate per-channel request fifos, initialize the
 * hardware, then register the RNG and every algorithm the part supports.
 * All error paths funnel through talitos_remove() for unwind.
 */
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	/* tasklet layout mirrors the IRQ layout chosen by talitos_probe_irq() */
	if (!priv->irq[1]) {
		tasklet_init(&priv->done_task[0], talitos_done_4ch,
			     (unsigned long)dev);
	} else {
		tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
			     (unsigned long)dev);
		tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
			     (unsigned long)dev);
	}

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	/* num_channels must be a power of two for the round-robin mask */
	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
		/* single-IRQ parts and even channels use the base offset */
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		/* counter starts negative; reaching 0 means the fifo is full */
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	/* SEC addresses descriptors with 36-bit physical addresses */
	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			char *name = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				/* -ENOTSUPP: alg not available on this part */
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				name = t_alg->algt.alg.crypto.cra_driver_name;
				break;
			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				name =
				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}
2840
/* Device-tree match table; sec2.0 is the base compatible all parts carry */
static const struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
2848
/* Platform driver glue: bound via the OF match table above */
static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};
2857
/* expands to module init/exit that register/unregister talitos_driver */
module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");