// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 * Copyright 2018 NXP
 *
 * There is no Shared Descriptor for PKC, so the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "compat.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "error.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"

#define DESC_RSA_PUB_LEN	(2 * CAAM_CMD_SZ + sizeof(struct rsa_pub_pdb))
#define DESC_RSA_PRIV_F1_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f1_pdb))
#define DESC_RSA_PRIV_F2_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f2_pdb))
#define DESC_RSA_PRIV_F3_LEN	(2 * CAAM_CMD_SZ + \
				 sizeof(struct rsa_priv_f3_pdb))

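/*
 * Undo the DMA mappings set up for a request by rsa_edesc_alloc(): the
 * source and destination scatterlists and, if one was built, the sec4
 * S/G (link) table.
 */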
static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc,
			 struct akcipher_request *req)
{
	dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE);
	dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->sec4_sg_bytes)
		dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes,
				 DMA_TO_DEVICE);
}

static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc,
			  struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;

	dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
}

static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
			      struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
}

/* RSA Job Completion handler */
static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_pub_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f1_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f1_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f2_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f2_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

static void rsa_priv_f3_done(struct device *dev, u32 *desc, u32 err,
			     void *context)
{
	struct akcipher_request *req = context;
	struct rsa_edesc *edesc;

	if (err)
		caam_jr_strstatus(dev, err);

	edesc = container_of(desc, struct rsa_edesc, hw_desc[0]);

	rsa_priv_f3_unmap(dev, edesc, req);
	rsa_io_unmap(dev, edesc, req);
	kfree(edesc);

	akcipher_request_complete(req, err);
}

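/*
 * Count the leading zero bytes of the request source, walking the
 * scatterlist with an sg mapping iterator. The caller uses the count to
 * skip the zeros, so that only the positive-integer part of the input is
 * handed to the accelerator.
 */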
static int caam_rsa_count_leading_zeros(struct scatterlist *sgl,
					unsigned int nbytes,
					unsigned int flags)
{
	struct sg_mapping_iter miter;
	int lzeros, ents;
	unsigned int len;
	unsigned int tbytes = nbytes;
	const u8 *buff;

	ents = sg_nents_for_len(sgl, nbytes);
	if (ents < 0)
		return ents;

	sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags);

	lzeros = 0;
	len = 0;
	while (nbytes > 0) {
		while (len && !*buff) {
			lzeros++;
			len--;
			buff++;
		}

		if (len && *buff)
			break;

		sg_miter_next(&miter);
		buff = miter.addr;
		len = miter.length;

		nbytes -= lzeros;
		lzeros = 0;
	}

	miter.consumed = lzeros;
	sg_miter_stop(&miter);
	nbytes -= lzeros;

	return tbytes - nbytes;
}

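/*
 * Allocate the extended descriptor for a request: strip leading zeros from
 * the source, DMA-map the source and destination scatterlists, and, when
 * either side has more than one segment, build and map a sec4 S/G table
 * placed right after the hw descriptor within the same allocation.
 */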
static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
					 size_t desclen)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = ctx->dev;
	struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req);
	struct rsa_edesc *edesc;
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC;
	int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0;
	int sgc;
	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;
	int src_nents, dst_nents;
	int lzeros;

	lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len, sg_flags);
	if (lzeros < 0)
		return ERR_PTR(lzeros);

	req->src_len -= lzeros;
	req->src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros);

	src_nents = sg_nents_for_len(req->src, req->src_len);
	dst_nents = sg_nents_for_len(req->dst, req->dst_len);

	if (src_nents > 1)
		sec4_sg_len = src_nents;
	if (dst_nents > 1)
		sec4_sg_len += dst_nents;

	sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);

	/* allocate space for base edesc, hw desc commands and link tables */
	edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
			GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);

	sgc = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map source\n");
		goto src_fail;
	}

	sgc = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
	if (unlikely(!sgc)) {
		dev_err(dev, "unable to map destination\n");
		goto dst_fail;
	}

	edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen;

	sec4_sg_index = 0;
	if (src_nents > 1) {
		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
		sec4_sg_index += src_nents;
	}
	if (dst_nents > 1)
		sg_to_sec4_sg_last(req->dst, dst_nents,
				   edesc->sec4_sg + sec4_sg_index, 0);

	/* Save nents for later use in Job Descriptor */
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;

	if (!sec4_sg_bytes)
		return edesc;

	edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg,
					    sec4_sg_bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, edesc->sec4_sg_dma)) {
		dev_err(dev, "unable to map S/G table\n");
		goto sec4_sg_fail;
	}

	edesc->sec4_sg_bytes = sec4_sg_bytes;

	return edesc;

sec4_sg_fail:
	dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE);
dst_fail:
	dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
src_fail:
	kfree(edesc);
	return ERR_PTR(-ENOMEM);
}

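/*
 * Fill the RSA encrypt Protocol Data Block: map modulus n and public
 * exponent e, then point f at the input (request source) and g at the
 * output (request destination), going through the sec4 S/G table for
 * multi-segment scatterlists.
 */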
static int set_rsa_pub_pdb(struct akcipher_request *req,
			   struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_pub_pdb *pdb = &edesc->pdb.pub;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map RSA modulus memory\n");
		return -ENOMEM;
	}

	pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->e_dma)) {
		dev_err(dev, "Unable to map RSA public exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->f_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->g_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz;
	pdb->f_len = req->src_len;

	return 0;
}

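/*
 * Fill the Protocol Data Block for decryption with private key form 1
 * (n, d); here g is the input and f the output, mirroring the encrypt PDB.
 */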
static int set_rsa_priv_f1_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1;
	int sec4_sg_index = 0;

	pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->n_dma)) {
		dev_err(dev, "Unable to map modulus memory\n");
		return -ENOMEM;
	}

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;

	return 0;
}

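/*
 * Fill the Protocol Data Block for decryption with private key form 2
 * (d, p, q); besides the key members, the accelerator is given two scratch
 * buffers, tmp1 and tmp2, sized like p and q and mapped bidirectionally.
 */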
static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->d_dma)) {
		dev_err(dev, "Unable to map RSA private exponent memory\n");
		return -ENOMEM;
	}

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		goto unmap_d;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_q;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
unmap_d:
	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

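/*
 * Fill the Protocol Data Block for decryption with private key form 3
 * (CRT members p, q, dp, dq, qinv, plus the tmp1/tmp2 scratch buffers).
 * Only n's size is carried in the PDB; the private exponent d is not used
 * in this form.
 */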
static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
			       struct rsa_edesc *edesc)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *dev = ctx->dev;
	struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3;
	int sec4_sg_index = 0;
	size_t p_sz = key->p_sz;
	size_t q_sz = key->q_sz;

	pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->p_dma)) {
		dev_err(dev, "Unable to map RSA prime factor p memory\n");
		return -ENOMEM;
	}

	pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->q_dma)) {
		dev_err(dev, "Unable to map RSA prime factor q memory\n");
		goto unmap_p;
	}

	pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dp_dma)) {
		dev_err(dev, "Unable to map RSA exponent dp memory\n");
		goto unmap_q;
	}

	pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->dq_dma)) {
		dev_err(dev, "Unable to map RSA exponent dq memory\n");
		goto unmap_dp;
	}

	pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pdb->c_dma)) {
		dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n");
		goto unmap_dq;
	}

	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
		dev_err(dev, "Unable to map RSA tmp1 memory\n");
		goto unmap_qinv;
	}

	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
		dev_err(dev, "Unable to map RSA tmp2 memory\n");
		goto unmap_tmp1;
	}

	if (edesc->src_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_G;
		pdb->g_dma = edesc->sec4_sg_dma;
		sec4_sg_index += edesc->src_nents;
	} else {
		pdb->g_dma = sg_dma_address(req->src);
	}

	if (edesc->dst_nents > 1) {
		pdb->sgf |= RSA_PRIV_PDB_SGF_F;
		pdb->f_dma = edesc->sec4_sg_dma +
			     sec4_sg_index * sizeof(struct sec4_sg_entry);
	} else {
		pdb->f_dma = sg_dma_address(req->dst);
	}

	pdb->sgf |= key->n_sz;
	pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz;

	return 0;

unmap_tmp1:
	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
unmap_qinv:
	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
unmap_dq:
	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
unmap_dp:
	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
unmap_q:
	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
unmap_p:
	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);

	return -ENOMEM;
}

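/*
 * RSA encryption: validate the public key and output buffer, build the
 * extended descriptor and PDB, then enqueue the job on the ring;
 * completion is signalled asynchronously through rsa_pub_done().
 */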
static int caam_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	if (unlikely(!key->n || !key->e))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(jrdev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Encrypt Protocol Data Block */
	ret = set_rsa_pub_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_pub_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_pub_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f1(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */
	ret = set_rsa_priv_f1_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f1_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f1_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f2(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */
	ret = set_rsa_priv_f2_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f2_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f2_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

static int caam_rsa_dec_priv_f3(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *jrdev = ctx->dev;
	struct rsa_edesc *edesc;
	int ret;

	/* Allocate extended descriptor */
	edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */
	ret = set_rsa_priv_f3_pdb(req, edesc);
	if (ret)
		goto init_fail;

	/* Initialize Job Descriptor */
	init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3);

	ret = caam_jr_enqueue(jrdev, edesc->hw_desc, rsa_priv_f3_done, req);
	if (!ret)
		return -EINPROGRESS;

	rsa_priv_f3_unmap(jrdev, edesc, req);

init_fail:
	rsa_io_unmap(jrdev, edesc, req);
	kfree(edesc);
	return ret;
}

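/*
 * RSA decryption: dispatch on the private key form recorded at setkey
 * time: CRT form 3 if all CRT members were usable, form 2 if p and q
 * were, plain (n, d) form 1 otherwise.
 */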
static int caam_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;
	int ret;

	if (unlikely(!key->n || !key->d))
		return -EINVAL;

	if (req->dst_len < key->n_sz) {
		req->dst_len = key->n_sz;
		dev_err(ctx->dev, "Output buffer length less than parameter n\n");
		return -EOVERFLOW;
	}

	if (key->priv_form == FORM3)
		ret = caam_rsa_dec_priv_f3(req);
	else if (key->priv_form == FORM2)
		ret = caam_rsa_dec_priv_f2(req);
	else
		ret = caam_rsa_dec_priv_f1(req);

	return ret;
}

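/*
 * Free all key members: private material is released with kzfree() so it
 * is zeroized first, while the public n and e only need kfree().
 */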
static void caam_rsa_free_key(struct caam_rsa_key *key)
{
	kzfree(key->d);
	kzfree(key->p);
	kzfree(key->q);
	kzfree(key->dp);
	kzfree(key->dq);
	kzfree(key->qinv);
	kzfree(key->tmp1);
	kzfree(key->tmp2);
	kfree(key->e);
	kfree(key->n);
	memset(key, 0, sizeof(*key));
}

static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes)
{
	while (!**ptr && *nbytes) {
		(*ptr)++;
		(*nbytes)--;
	}
}

/**
 * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members.
 * dP, dQ and qInv could decode to less than the corresponding p, q length,
 * as the BER encoding requires that the minimum number of bytes be used to
 * encode the integer. The decoded dP, dQ, qInv values therefore have to be
 * zero-padded to the appropriate length.
 *
 * @ptr   : pointer to {dP, dQ, qInv} CRT member
 * @nbytes: length in bytes of {dP, dQ, qInv} CRT member
 * @dstlen: length in bytes of corresponding p or q prime factor
 */
static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
{
	u8 *dst;

	caam_rsa_drop_leading_zeros(&ptr, &nbytes);
	if (!nbytes)
		return NULL;

	dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
	if (!dst)
		return NULL;

	memcpy(dst + (dstlen - nbytes), ptr, nbytes);

	return dst;
}

/**
 * caam_read_raw_data - Read a raw byte stream as a positive integer.
 * The function skips the buffer's leading zeros, copies the remaining data
 * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
 * the address of the new buffer.
 *
 * @buf   : The data to read
 * @nbytes: The amount of data to read
 */
static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
{
	caam_rsa_drop_leading_zeros(&buf, nbytes);
	if (!*nbytes)
		return NULL;

	return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
}

static int caam_rsa_check_key_length(unsigned int len)
{
	if (len > 4096)
		return -EINVAL;
	return 0;
}

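/*
 * Parse a BER-encoded public key and keep DMA-able copies of its members:
 * e is copied as is, while n is stored with leading zeros stripped, since
 * n's stripped length also serves as the output length of the operation.
 */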
static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
				unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_pub_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	return 0;
err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

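/*
 * Opportunistically upgrade the private key representation: once p, q and
 * the tmp1/tmp2 scratch buffers are in place the key becomes form 2, and
 * once dp, dq and qinv are read as well it becomes CRT form 3. The form
 * reached here selects the decrypt descriptor used by caam_rsa_dec().
 */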
static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
				       struct rsa_key *raw_key)
{
	struct caam_rsa_key *rsa_key = &ctx->key;
	size_t p_sz = raw_key->p_sz;
	size_t q_sz = raw_key->q_sz;

	rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
	if (!rsa_key->p)
		return;
	rsa_key->p_sz = p_sz;

	rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz);
	if (!rsa_key->q)
		goto free_p;
	rsa_key->q_sz = q_sz;

	rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp1)
		goto free_q;

	rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->tmp2)
		goto free_tmp1;

	rsa_key->priv_form = FORM2;

	rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz);
	if (!rsa_key->dp)
		goto free_tmp2;

	rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz);
	if (!rsa_key->dq)
		goto free_dp;

	rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz,
					  q_sz);
	if (!rsa_key->qinv)
		goto free_dq;

	rsa_key->priv_form = FORM3;

	return;

free_dq:
	kzfree(rsa_key->dq);
free_dp:
	kzfree(rsa_key->dp);
free_tmp2:
	kzfree(rsa_key->tmp2);
free_tmp1:
	kzfree(rsa_key->tmp1);
free_q:
	kzfree(rsa_key->q);
free_p:
	kzfree(rsa_key->p);
}

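/*
 * Parse a BER-encoded private key: copy d, e and the zero-stripped n into
 * DMA-able memory, then try to extend the key to form 2/3 via
 * caam_rsa_set_priv_key_form().
 */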
static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
				 unsigned int keylen)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct rsa_key raw_key = {NULL};
	struct caam_rsa_key *rsa_key = &ctx->key;
	int ret;

	/* Free the old RSA key if any */
	caam_rsa_free_key(rsa_key);

	ret = rsa_parse_priv_key(&raw_key, key, keylen);
	if (ret)
		return ret;

	/* Copy key in DMA zone */
	rsa_key->d = kzalloc(raw_key.d_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->d)
		goto err;

	rsa_key->e = kzalloc(raw_key.e_sz, GFP_DMA | GFP_KERNEL);
	if (!rsa_key->e)
		goto err;

	/*
	 * Skip leading zeros and copy the positive integer to a buffer
	 * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
	 * expects a positive integer for the RSA modulus and uses its length as
	 * decryption output length.
	 */
	rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz);
	if (!rsa_key->n)
		goto err;

	if (caam_rsa_check_key_length(raw_key.n_sz << 3)) {
		caam_rsa_free_key(rsa_key);
		return -EINVAL;
	}

	rsa_key->d_sz = raw_key.d_sz;
	rsa_key->e_sz = raw_key.e_sz;
	rsa_key->n_sz = raw_key.n_sz;

	memcpy(rsa_key->d, raw_key.d, raw_key.d_sz);
	memcpy(rsa_key->e, raw_key.e, raw_key.e_sz);

	caam_rsa_set_priv_key_form(ctx, &raw_key);

	return 0;

err:
	caam_rsa_free_key(rsa_key);
	return -ENOMEM;
}

static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key.n_sz;
}

/* Per session pkc's driver context creation function */
static int caam_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	ctx->dev = caam_jr_alloc();

	if (IS_ERR(ctx->dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(ctx->dev);
	}

	return 0;
}

/* Per session pkc's driver context cleanup function */
static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct caam_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct caam_rsa_key *key = &ctx->key;

	caam_rsa_free_key(key);
	caam_jr_free(ctx->dev);
}

static struct akcipher_alg caam_rsa = {
	.encrypt = caam_rsa_enc,
	.decrypt = caam_rsa_dec,
	.sign = caam_rsa_dec,
	.verify = caam_rsa_enc,
	.set_pub_key = caam_rsa_set_pub_key,
	.set_priv_key = caam_rsa_set_priv_key,
	.max_size = caam_rsa_max_size,
	.init = caam_rsa_init_tfm,
	.exit = caam_rsa_exit_tfm,
	.reqsize = sizeof(struct caam_rsa_req_ctx),
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "rsa-caam",
		.cra_priority = 3000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct caam_rsa_ctx),
	},
};

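/*
 * For reference, a minimal sketch of how a kernel-side caller might reach
 * this transform through the generic akcipher API (callback setup and
 * error handling elided; the "src"/"dst" scatterlists, lengths and the
 * BER-encoded "ber_key" buffer are assumed to be prepared by the caller):
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *	struct akcipher_request *akreq;
 *
 *	crypto_akcipher_set_pub_key(tfm, ber_key, ber_keylen);
 *	akreq = akcipher_request_alloc(tfm, GFP_KERNEL);
 *	akcipher_request_set_crypt(akreq, src, dst, src_len, dst_len);
 *	crypto_akcipher_encrypt(akreq); // may return -EINPROGRESS
 *	akcipher_request_free(akreq);
 *	crypto_free_akcipher(tfm);
 */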
/* Public Key Cryptography module initialization handler */
static int __init caam_pkc_init(void)
{
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	struct caam_drv_private *priv;
	u32 pk_inst;
	int err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	/* Determine public key hardware accelerator presence. */
	if (priv->era < 10)
		pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
			   CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
	else
		pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;

	/* Do not register algorithms if PKHA is not present. */
	if (!pk_inst)
		return -ENODEV;

	err = crypto_register_akcipher(&caam_rsa);
	if (err)
		dev_warn(ctrldev, "%s alg registration failed\n",
			 caam_rsa.base.cra_driver_name);
	else
		dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n");

	return err;
}

static void __exit caam_pkc_exit(void)
{
	crypto_unregister_akcipher(&caam_rsa);
}

module_init(caam_pkc_init);
module_exit(caam_pkc_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL CAAM support for PKC functions of crypto API");
MODULE_AUTHOR("Freescale Semiconductor");