// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * Copyright (C) 2013-2015 Corentin LABBE <clabbe.montjoie@gmail.com>
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode.
 * It also adds support for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi/README
 */
#include "sun4i-ss.h"
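/*
 * Usage sketch (illustrative, not part of this driver): a kernel consumer
 * reaches these algorithms through the generic skcipher API, e.g. with
 * crypto_alloc_skcipher("cbc(aes)", 0, 0); this implementation is then
 * preferred over the software one by its higher cra_priority.
 */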
static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo;	/* offset for in and out */
	unsigned long flags;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&ss->slock, flags);
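	/* Write the cipher key into the SS key registers, one 32-bit word at a time */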
	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);
	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}

	ileft = areq->cryptlen / 4;
	oleft = areq->cryptlen / 4;
	oi = 0;
	oo = 0;
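	/*
	 * PIO loop: feed the RX FIFO with input words while it has space,
	 * then drain whatever the TX FIFO holds into the destination SG.
	 */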
	do {
		todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
		if (todo) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
		if (todo) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (oleft);
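	/*
	 * For CBC, the IV registers now hold the last ciphertext block;
	 * copy it back so a chained request can continue from it.
	 */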
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}
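/*
 * Handle a request entirely with the software fallback tfm; used when the
 * request length is not a multiple of the cipher block size.
 */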
static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, op->fallback_tfm);
	int err;

	skcipher_request_set_sync_tfm(subreq, op->fallback_tfm);
	skcipher_request_set_callback(subreq, areq->base.flags, NULL,
				      NULL);
	skcipher_request_set_crypt(subreq, areq->src, areq->dst,
				   areq->cryptlen, areq->iv);
	if (ctx->mode & SS_DECRYPTION)
		err = crypto_skcipher_decrypt(subreq);
	else
		err = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);

	return err;
}
/* Generic function that supports SGs whose sizes are not a multiple of 4 */
static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	struct sun4i_ss_alg_template *algt;
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 v;
	u32 spaces;
	int err = 0;
	unsigned int i;
	unsigned int ileft = areq->cryptlen;
	unsigned int oleft = areq->cryptlen;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo;	/* offset for in and out */
	unsigned int ob = 0;	/* offset in buf */
	unsigned int obo = 0;	/* offset in bufo */
	unsigned int obl = 0;	/* length of data in bufo */
	unsigned long flags;
	bool need_fallback = false;

	if (!areq->cryptlen)
		return 0;

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}
	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
	if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize)
		need_fallback = true;
	/*
	 * if we have only SGs with sizes that are a multiple of 4,
	 * we can use the SS optimized function
	 */
	while (in_sg && no_chunk == 1) {
		if (in_sg->length % 4)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if (out_sg->length % 4)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1 && !need_fallback)
		return sun4i_ss_opti_poll(areq);

	if (need_fallback)
		return sun4i_ss_cipher_poll_fallback(areq);
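	/* Neither shortcut applies: drive the hardware through the byte-accurate path below */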
	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = *(u32 *)(areq->iv + i * 4);
			writel(v, ss->base + SS_IV0 + i * 4);
		}
	}
	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}

	ileft = areq->cryptlen;
	oleft = areq->cryptlen;
	oi = 0;
	oo = 0;
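	/*
	 * The loop below copes with SG entries whose lengths are not a
	 * multiple of 4 by staging bytes through small stack buffers, so the
	 * FIFOs are always accessed in whole 32-bit words.
	 */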
	while (oleft) {
		if (ileft) {
			char buf[4 * SS_RX_MAX]; /* buffer to linearize the src SG */

			/*
			 * todo is the number of consecutive 4-byte words that
			 * we can read from the current SG
			 */
			todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
			if (todo && !ob) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes, so linearize
				 * them in buf. todo is in bytes. After the
				 * copy, if we have a multiple of 4 bytes we
				 * must be able to write all of buf in one
				 * pass, which is why we min() with rx_cnt.
				 */
				todo = min3(rx_cnt * 4 - ob, ileft,
					    mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (!(ob % 4)) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}
		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
			mode,
			oi, mi.length, ileft, areq->cryptlen, rx_cnt,
			oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);

		if (!tx_cnt)
			continue;
		/* todo is in 4-byte words */
		todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
		if (todo) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			char bufo[4 * SS_TX_MAX]; /* buffer to linearize the dst SG */

			/*
			 * Read obl bytes into bufo; we read the maximum
			 * available in order to empty the device.
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy?
				 * No more than the remaining SG size and
				 * no more than the remaining buffer;
				 * no need to test against oleft.
				 */
				todo = min(mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
	}
	if (areq->iv) {
		for (i = 0; i < 4 && i < ivsize / 4; i++) {
			v = readl(ss->base + SS_IV0 + i * 4);
			*(u32 *)(areq->iv + i * 4) = v;
		}
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}
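/*
 * The entry points below only encode the operation (algorithm, chaining mode,
 * direction and key size) into rctx->mode and delegate the actual work to
 * sun4i_ss_cipher_poll().
 */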
/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}
/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}
/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}
/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}
/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}
/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}
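/* Set up the per-request context size and allocate the software fallback tfm */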
int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct sun4i_ss_alg_template *algt;
	const char *name = crypto_tfm_alg_name(tfm);

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template,
			    alg.crypto.base);
	op->ss = algt->ss;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct sun4i_cipher_req_ctx));

	op->fallback_tfm = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(op->fallback_tfm)) {
		dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
			name, PTR_ERR(op->fallback_tfm));
		return PTR_ERR(op->fallback_tfm);
	}

	return 0;
}
void sun4i_ss_cipher_exit(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(op->fallback_tfm);
}
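/*
 * Each setkey helper below stores the key for the hardware path and also
 * mirrors it (and the request flags) into the fallback tfm, so both paths
 * stay interchangeable for a given transform.
 */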
/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}
/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	u32 flags;
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (unlikely(keylen != DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	flags = crypto_skcipher_get_flags(tfm);

	ret = des_ekey(tmp, key);
	if (unlikely(!ret) && (flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
		dev_dbg(ss->dev, "Weak key %u\n", keylen);
		return -EINVAL;
	}

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}
/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm);
	int err;

	err = des3_verify_key(tfm, key);
	if (unlikely(err))
		return err;

	op->keylen = keylen;
	memcpy(op->key, key, keylen);

	crypto_sync_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_sync_skcipher_setkey(op->fallback_tfm, key, keylen);
}