// SPDX-License-Identifier: GPL-2.0-only
/*
 *   Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api.
 *
 *    Copyright (C) 2014-2017  Axis Communications AB
 */
#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/crypto.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
#include <crypto/xts.h>
34 /* Max length of a line in all cache levels for Artpec SoCs. */
35 #define ARTPEC_CACHE_LINE_MAX 32
37 #define PDMA_OUT_CFG 0x0000
38 #define PDMA_OUT_BUF_CFG 0x0004
39 #define PDMA_OUT_CMD 0x0008
40 #define PDMA_OUT_DESCRQ_PUSH 0x0010
41 #define PDMA_OUT_DESCRQ_STAT 0x0014
43 #define A6_PDMA_IN_CFG 0x0028
44 #define A6_PDMA_IN_BUF_CFG 0x002c
45 #define A6_PDMA_IN_CMD 0x0030
46 #define A6_PDMA_IN_STATQ_PUSH 0x0038
47 #define A6_PDMA_IN_DESCRQ_PUSH 0x0044
48 #define A6_PDMA_IN_DESCRQ_STAT 0x0048
49 #define A6_PDMA_INTR_MASK 0x0068
50 #define A6_PDMA_ACK_INTR 0x006c
51 #define A6_PDMA_MASKED_INTR 0x0074
53 #define A7_PDMA_IN_CFG 0x002c
54 #define A7_PDMA_IN_BUF_CFG 0x0030
55 #define A7_PDMA_IN_CMD 0x0034
56 #define A7_PDMA_IN_STATQ_PUSH 0x003c
57 #define A7_PDMA_IN_DESCRQ_PUSH 0x0048
58 #define A7_PDMA_IN_DESCRQ_STAT 0x004C
59 #define A7_PDMA_INTR_MASK 0x006c
60 #define A7_PDMA_ACK_INTR 0x0070
61 #define A7_PDMA_MASKED_INTR 0x0078
63 #define PDMA_OUT_CFG_EN BIT(0)
65 #define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
66 #define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)
68 #define PDMA_OUT_CMD_START BIT(0)
69 #define A6_PDMA_OUT_CMD_STOP BIT(3)
70 #define A7_PDMA_OUT_CMD_STOP BIT(2)
72 #define PDMA_OUT_DESCRQ_PUSH_LEN GENMASK(5, 0)
73 #define PDMA_OUT_DESCRQ_PUSH_ADDR GENMASK(31, 6)
75 #define PDMA_OUT_DESCRQ_STAT_LEVEL GENMASK(3, 0)
76 #define PDMA_OUT_DESCRQ_STAT_SIZE GENMASK(7, 4)
78 #define PDMA_IN_CFG_EN BIT(0)
80 #define PDMA_IN_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0)
81 #define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5)
82 #define PDMA_IN_BUF_CFG_STAT_BUF_SIZE GENMASK(14, 10)
84 #define PDMA_IN_CMD_START BIT(0)
85 #define A6_PDMA_IN_CMD_FLUSH_STAT BIT(2)
86 #define A6_PDMA_IN_CMD_STOP BIT(3)
87 #define A7_PDMA_IN_CMD_FLUSH_STAT BIT(1)
88 #define A7_PDMA_IN_CMD_STOP BIT(2)
90 #define PDMA_IN_STATQ_PUSH_LEN GENMASK(5, 0)
91 #define PDMA_IN_STATQ_PUSH_ADDR GENMASK(31, 6)
93 #define PDMA_IN_DESCRQ_PUSH_LEN GENMASK(5, 0)
94 #define PDMA_IN_DESCRQ_PUSH_ADDR GENMASK(31, 6)
96 #define PDMA_IN_DESCRQ_STAT_LEVEL GENMASK(3, 0)
97 #define PDMA_IN_DESCRQ_STAT_SIZE GENMASK(7, 4)
99 #define A6_PDMA_INTR_MASK_IN_DATA BIT(2)
100 #define A6_PDMA_INTR_MASK_IN_EOP BIT(3)
101 #define A6_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(4)
103 #define A7_PDMA_INTR_MASK_IN_DATA BIT(3)
104 #define A7_PDMA_INTR_MASK_IN_EOP BIT(4)
105 #define A7_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(5)
107 #define A6_CRY_MD_OPER GENMASK(19, 16)
109 #define A6_CRY_MD_HASH_SEL_CTX GENMASK(21, 20)
110 #define A6_CRY_MD_HASH_HMAC_FIN BIT(23)
112 #define A6_CRY_MD_CIPHER_LEN GENMASK(21, 20)
113 #define A6_CRY_MD_CIPHER_DECR BIT(22)
114 #define A6_CRY_MD_CIPHER_TWEAK BIT(23)
115 #define A6_CRY_MD_CIPHER_DSEQ BIT(24)
117 #define A7_CRY_MD_OPER GENMASK(11, 8)
119 #define A7_CRY_MD_HASH_SEL_CTX GENMASK(13, 12)
120 #define A7_CRY_MD_HASH_HMAC_FIN BIT(15)
122 #define A7_CRY_MD_CIPHER_LEN GENMASK(13, 12)
123 #define A7_CRY_MD_CIPHER_DECR BIT(14)
124 #define A7_CRY_MD_CIPHER_TWEAK BIT(15)
125 #define A7_CRY_MD_CIPHER_DSEQ BIT(16)
/* DMA metadata constants */
#define regk_crypto_aes_cbc	0x00000002
#define regk_crypto_aes_ctr	0x00000003
#define regk_crypto_aes_ecb	0x00000001
#define regk_crypto_aes_gcm	0x00000004
#define regk_crypto_aes_xts	0x00000005
#define regk_crypto_cache	0x00000002
#define a6_regk_crypto_dlkey	0x0000000a
#define a7_regk_crypto_dlkey	0x0000000e
#define regk_crypto_ext		0x00000001
#define regk_crypto_hmac_sha1	0x00000007
#define regk_crypto_hmac_sha256	0x00000009
#define regk_crypto_init	0x00000000
#define regk_crypto_key_128	0x00000000
#define regk_crypto_key_192	0x00000001
#define regk_crypto_key_256	0x00000002
#define regk_crypto_null	0x00000000
#define regk_crypto_sha1	0x00000006
#define regk_crypto_sha256	0x00000008
/* DMA descriptor structures */
struct pdma_descr_ctrl {
	unsigned char short_descr : 1;
	unsigned char pad1 : 1;
	unsigned char eop : 1;
	unsigned char intr : 1;
	unsigned char short_len : 3;
	unsigned char pad2 : 1;
} __packed;

struct pdma_data_descr {
	unsigned int len : 24;
	unsigned int buf : 32;
} __packed;

struct pdma_short_descr {
	unsigned char data[7];
} __packed;

struct pdma_descr {
	struct pdma_descr_ctrl ctrl;
	union {
		struct pdma_data_descr data;
		struct pdma_short_descr shrt;
	} __packed;
};

struct pdma_stat_descr {
	unsigned char pad1 : 1;
	unsigned char pad2 : 1;
	unsigned char eop : 1;
	unsigned char pad3 : 5;
	unsigned int len : 24;
};

/* Each descriptor array can hold max 64 entries */
#define PDMA_DESCR_COUNT	64

#define MODULE_NAME	"Artpec-6 CA"

/* Hash modes (including HMAC variants) */
#define ARTPEC6_CRYPTO_HASH_SHA1	1
#define ARTPEC6_CRYPTO_HASH_SHA256	2

/* Crypto modes */
#define ARTPEC6_CRYPTO_CIPHER_AES_ECB	1
#define ARTPEC6_CRYPTO_CIPHER_AES_CBC	2
#define ARTPEC6_CRYPTO_CIPHER_AES_CTR	3
#define ARTPEC6_CRYPTO_CIPHER_AES_XTS	5

/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
 * It operates on a descriptor array with up to 64 descriptor entries.
 * The arrays must be 64 byte aligned in memory.
 *
 * The ciphering unit has no registers and is completely controlled by
 * a 4-byte metadata that is inserted at the beginning of each dma packet.
 *
 * A dma packet is a sequence of descriptors terminated by setting the .eop
 * field in the final descriptor of the packet.
 *
 * Multiple packets are used for providing context data, key data and
 * the plain/ciphertext.
 *
 *   PDMA Descriptors (Array)
 *  +------+------+------+~~+-------+------+----
 *  |  0   |  1   |  2   |~~| 11 EOP|  12  |  ....
 *  +--+---+--+---+----+-+~~+-------+----+-+----
 *     |      |        |       |         |
 *     |      |        |       |         |
 *   __|__  +-------++-------++-------+ +----+
 *  | MD  | |Payload||Payload||Payload| | MD |
 *  +-----+ +-------++-------++-------+ +----+
 */
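
/* For illustration (not part of the upstream flow): a minimal OUT packet as
 * built by the helpers below would be a 4-byte metadata descriptor followed
 * by payload descriptors, with eop set on the last one:
 *
 *	artpec6_crypto_setup_out_descr(common, &md, sizeof(md), false, false);
 *	artpec6_crypto_setup_out_descr(common, payload, len, true, false);
 */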

struct artpec6_crypto_bounce_buffer {
	struct list_head list;
	size_t length;
	struct scatterlist *sg;
	size_t offset;
	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
	 * holds up to ARTPEC_CACHE_LINE_MAX bytes data.
	 */
	void *buf;
};

struct artpec6_crypto_dma_map {
	dma_addr_t dma_addr;
	size_t size;
	enum dma_data_direction dir;
};

struct artpec6_crypto_dma_descriptors {
	struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64);
	struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64);
	u32 stat[PDMA_DESCR_COUNT] __aligned(64);
	struct list_head bounce_buffers;
	/* Enough maps for all out/in buffers, and all three descr. arrays */
	struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2];
	dma_addr_t out_dma_addr;
	dma_addr_t in_dma_addr;
	dma_addr_t stat_dma_addr;
	size_t out_cnt;
	size_t in_cnt;
	size_t map_count;
};

enum artpec6_crypto_variant {
	ARTPEC6_CRYPTO,
	ARTPEC7_CRYPTO,
};

struct artpec6_crypto {
	void __iomem *base;
	spinlock_t queue_lock;
	struct list_head queue; /* waiting for pdma fifo space */
	struct list_head pending; /* submitted to pdma fifo */
	struct tasklet_struct task;
	struct kmem_cache *dma_cache;
	int pending_count;
	struct timer_list timer;
	enum artpec6_crypto_variant variant;
	void *pad_buffer; /* cache-aligned block padding buffer */
	void *zero_buffer;
};

enum artpec6_crypto_hash_flags {
	HASH_FLAG_INIT_CTX = 2,
	HASH_FLAG_UPDATE = 4,
	HASH_FLAG_FINALIZE = 8,
	HASH_FLAG_HMAC = 16,
	HASH_FLAG_UPDATE_KEY = 32,
};

struct artpec6_crypto_req_common {
	struct list_head list;
	struct list_head complete_in_progress;
	struct artpec6_crypto_dma_descriptors *dma;
	struct crypto_async_request *req;
	void (*complete)(struct crypto_async_request *req);
	gfp_t gfp_flags;
};

struct artpec6_hash_request_context {
	char partial_buffer[SHA256_BLOCK_SIZE];
	char partial_buffer_out[SHA256_BLOCK_SIZE];
	char key_buffer[SHA256_BLOCK_SIZE];
	char pad_buffer[SHA256_BLOCK_SIZE + 32];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	u32 key_md;
	u32 hash_md;
	enum artpec6_crypto_hash_flags hash_flags;
	struct artpec6_crypto_req_common common;
};

struct artpec6_hash_export_state {
	char partial_buffer[SHA256_BLOCK_SIZE];
	unsigned char digeststate[SHA256_DIGEST_SIZE];
	size_t partial_bytes;
	u64 digcnt;
	int oper;
	unsigned int hash_flags;
};

struct artpec6_hashalg_context {
	char hmac_key[SHA256_BLOCK_SIZE];
	size_t hmac_key_length;
	struct crypto_shash *child_hash;
};

struct artpec6_crypto_request_context {
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
};

struct artpec6_cryptotfm_context {
	unsigned char aes_key[2*AES_MAX_KEY_SIZE];
	size_t key_length;
	u32 key_md;
	int crypto_type;
	struct crypto_sync_skcipher *fallback;
};

struct artpec6_crypto_aead_hw_ctx {
	__be64 aad_length_bits;
	__be64 text_length_bits;
	__u8 J0[AES_BLOCK_SIZE];
} __packed;

struct artpec6_crypto_aead_req_ctx {
	struct artpec6_crypto_aead_hw_ctx hw_ctx;
	u32 cipher_md;
	bool decrypt;
	struct artpec6_crypto_req_common common;
	__u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned;
};

/* The crypto framework makes it hard to avoid this global. */
static struct device *artpec6_crypto_dev;

#ifdef CONFIG_FAULT_INJECTION
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read);
static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full);
#endif

enum {
	ARTPEC6_CRYPTO_PREPARE_HASH_NO_START,
	ARTPEC6_CRYPTO_PREPARE_HASH_START,
};

static int artpec6_crypto_prepare_aead(struct aead_request *areq);
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq);
static int artpec6_crypto_prepare_hash(struct ahash_request *areq);

static void
artpec6_crypto_complete_crypto(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req);
static void
artpec6_crypto_complete_aead(struct crypto_async_request *req);
static void
artpec6_crypto_complete_hash(struct crypto_async_request *req);

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common);

static void
artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common);

struct artpec6_crypto_walk {
	struct scatterlist *sg;
	size_t offset;
};

static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk,
				     struct scatterlist *sg)
{
	awalk->sg = sg;
	awalk->offset = 0;
}

static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk,
					  size_t nbytes)
{
	while (nbytes && awalk->sg) {
		size_t piece;

		WARN_ON(awalk->offset > awalk->sg->length);

		piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset);
		nbytes -= piece;
		awalk->offset += piece;
		if (awalk->offset == awalk->sg->length) {
			awalk->sg = sg_next(awalk->sg);
			awalk->offset = 0;
		}
	}

	return nbytes;
}

static size_t
artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk)
{
	WARN_ON(awalk->sg->length == awalk->offset);

	return awalk->sg->length - awalk->offset;
}

static dma_addr_t
artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk)
{
	return sg_phys(awalk->sg) + awalk->offset;
}
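
/* For illustration: consuming an sg list chunk by chunk with the walk
 * helpers above, a sketch of the pattern used by the setup_sg_descrs_*
 * functions further down:
 *
 *	struct artpec6_crypto_walk w;
 *
 *	artpec6_crypto_walk_init(&w, sg);
 *	while (w.sg && count) {
 *		size_t chunk = min(count, artpec6_crypto_walk_chunklen(&w));
 *		dma_addr_t addr = artpec6_crypto_walk_chunk_phys(&w);
 *
 *		// ... queue a descriptor for [addr, addr + chunk) ...
 *
 *		artpec6_crypto_walk_advance(&w, chunk);
 *		count -= chunk;
 *	}
 */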

static void
artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n",
			 b, b->length, b->offset, b->buf);
		sg_pcopy_from_buffer(b->sg,
				     1,
				     b->buf,
				     b->length,
				     b->offset);

		list_del(&b->list);
		kfree(b);
	}
}

static inline bool artpec6_crypto_busy(void)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int fifo_count = ac->pending_count;

	return fifo_count > 6;
}

static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	int ret = -EBUSY;

	spin_lock_bh(&ac->queue_lock);

	if (!artpec6_crypto_busy()) {
		list_add_tail(&req->list, &ac->pending);
		artpec6_crypto_start_dma(req);
		ret = -EINPROGRESS;
	} else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		list_add_tail(&req->list, &ac->queue);
	} else {
		artpec6_crypto_common_destroy(req);
	}

	spin_unlock_bh(&ac->queue_lock);

	return ret;
}

static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	u32 ind, statd, outd;

	/* Make descriptor content visible to the DMA before starting it. */
	wmb();

	ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) |
	      FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6);

	statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) |
		FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6);

	outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) |
	       FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
	} else {
		writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
		writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
		writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
	}

	writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
	writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);

	ac->pending_count++;
}
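
/* For illustration: with three descriptors at the 64-byte-aligned bus
 * address 0x1000, the push word above becomes
 *
 *	FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, 3 - 1) |
 *	FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, 0x1000 >> 6)
 *
 * i.e. bits 5:0 hold the descriptor count minus one and bits 31:6 hold
 * the address divided by 64.
 */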

static void
artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;

	dma->out_cnt = 0;
	dma->in_cnt = 0;
	dma->map_count = 0;
	INIT_LIST_HEAD(&dma->bounce_buffers);
}

static bool fault_inject_dma_descr(void)
{
#ifdef CONFIG_FAULT_INJECTION
	return should_fail(&artpec6_crypto_fail_dma_array_full, 1);
#else
	return false;
#endif
}

/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
 *                                        physical address
 *
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @eop:  True if this is the last buffer in the packet
 *
 * @return 0 on success or -ENOSPC if there are no more descriptors available
 */
static int
artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
				    dma_addr_t addr, size_t len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	}

	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 0;
	d->ctrl.eop = eop;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}

/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor
 *
 * @dst: The virtual address of the data
 * @len: The length of the data, must be between 1 and 7 bytes
 * @eop: True if this is the last buffer in the packet
 *
 * @return 0 on success
 *	-ENOSPC if no more descriptors are available
 *	-EINVAL if the data length exceeds 7 bytes
 */
static int
artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common,
				     void *dst, unsigned int len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	} else if (len > 7 || len < 1) {
		return -EINVAL;
	}
	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.short_descr = 1;
	d->ctrl.short_len = len;
	d->ctrl.eop = eop;
	memcpy(d->shrt.data, dst, len);
	return 0;
}
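
/* For illustration: the 4-byte metadata words used throughout this driver
 * fit in a short descriptor, so they can be queued without a DMA mapping:
 *
 *	u32 md = ...;
 *
 *	artpec6_crypto_setup_out_descr_short(common, &md, sizeof(md), false);
 */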

static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common,
				       struct page *page, size_t offset,
				       size_t size,
				       enum dma_data_direction dir,
				       dma_addr_t *dma_addr_out)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	struct artpec6_crypto_dma_map *map;
	dma_addr_t dma_addr;

	*dma_addr_out = 0;

	if (dma->map_count >= ARRAY_SIZE(dma->maps))
		return -ENOMEM;

	dma_addr = dma_map_page(dev, page, offset, size, dir);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	map = &dma->maps[dma->map_count++];
	map->size = size;
	map->dma_addr = dma_addr;
	map->dir = dir;

	*dma_addr_out = dma_addr;

	return 0;
}

static int
artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common,
			      void *ptr, size_t size,
			      enum dma_data_direction dir,
			      dma_addr_t *dma_addr_out)
{
	struct page *page = virt_to_page(ptr);
	size_t offset = (uintptr_t)ptr & ~PAGE_MASK;

	return artpec6_crypto_dma_map_page(common, page, offset, size, dir,
					   dma_addr_out);
}

static int
artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, dma->in,
				sizeof(dma->in[0]) * dma->in_cnt,
				DMA_TO_DEVICE, &dma->in_dma_addr);
	if (ret)
		return ret;

	ret = artpec6_crypto_dma_map_single(common, dma->out,
				sizeof(dma->out[0]) * dma->out_cnt,
				DMA_TO_DEVICE, &dma->out_dma_addr);
	if (ret)
		return ret;

	/* We only read one stat descriptor */
	dma->stat[dma->in_cnt - 1] = 0;

	/*
	 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
	 * to be written in place.
	 */
	return artpec6_crypto_dma_map_single(common,
				dma->stat,
				sizeof(dma->stat[0]) * dma->in_cnt,
				DMA_BIDIRECTIONAL,
				&dma->stat_dma_addr);
}

static void
artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct device *dev = artpec6_crypto_dev;
	int i;

	for (i = 0; i < dma->map_count; i++) {
		struct artpec6_crypto_dma_map *map = &dma->maps[i];

		dma_unmap_page(dev, map->dma_addr, map->size, map->dir);
	}

	dma->map_count = 0;
}

/** artpec6_crypto_setup_out_descr - Setup an out descriptor
 *
 * @dst: The virtual address of the data
 * @len: The length of the data
 * @eop: True if this is the last buffer in the packet
 * @use_short: If this is true and the data length is 7 bytes or less then
 *	a short descriptor will be used
 *
 * @return 0 on success
 *	Any errors from artpec6_crypto_setup_out_descr_short() or
 *	artpec6_crypto_setup_out_descr_phys()
 */
static int
artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common,
			       void *dst, unsigned int len, bool eop,
			       bool use_short)
{
	if (use_short && len < 7) {
		return artpec6_crypto_setup_out_descr_short(common, dst, len,
							    eop);
	} else {
		int ret;
		dma_addr_t dma_addr;

		ret = artpec6_crypto_dma_map_single(common, dst, len,
						    DMA_TO_DEVICE,
						    &dma_addr);
		if (ret)
			return ret;

		return artpec6_crypto_setup_out_descr_phys(common, dma_addr,
							   len, eop);
	}
}

/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
 *                                       physical address
 *
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @intr: True if an interrupt should be fired after HW processing of this
 *	  descriptor
 *
 */
static int
artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
				   dma_addr_t addr, unsigned int len, bool intr)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->in_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free IN DMA descriptors available!\n");
		return -ENOSPC;
	}
	d = &dma->in[dma->in_cnt++];
	memset(d, 0, sizeof(*d));

	d->ctrl.intr = intr;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}

/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor
 *
 * @buffer: The virtual address of the data buffer
 * @len:    The length of the data buffer
 * @last:   If this is the last data buffer in the request (i.e. an interrupt
 *	    is needed)
 *
 * Short descriptors are not used for the in channel
 */
static int
artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common,
			      void *buffer, unsigned int len, bool last)
{
	dma_addr_t dma_addr;
	int ret;

	ret = artpec6_crypto_dma_map_single(common, buffer, len,
					    DMA_FROM_DEVICE, &dma_addr);
	if (ret)
		return ret;

	return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last);
}

static struct artpec6_crypto_bounce_buffer *
artpec6_crypto_alloc_bounce(gfp_t flags)
{
	void *base;
	size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) +
			    2 * ARTPEC_CACHE_LINE_MAX;
	struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags);

	if (!bbuf)
		return NULL;

	base = bbuf + 1;
	bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX);
	return bbuf;
}

static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk, size_t size)
{
	struct artpec6_crypto_bounce_buffer *bbuf;
	int ret;

	bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags);
	if (!bbuf)
		return -ENOMEM;

	bbuf->length = size;
	bbuf->sg = walk->sg;
	bbuf->offset = walk->offset;

	ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false);
	if (ret) {
		kfree(bbuf);
		return ret;
	}

	pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset);
	list_add_tail(&bbuf->list, &common->dma->bounce_buffers);
	return 0;
}

static int
artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common,
				  struct artpec6_crypto_walk *walk,
				  size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		/* When destination buffers are not aligned to the cache line
		 * size we need bounce buffers. The DMA-API requires that the
		 * entire line is owned by the DMA buffer and this holds also
		 * for the case when coherent DMA is used.
		 */
		if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
			chunk = min_t(dma_addr_t, chunk,
				      ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
				      addr);

			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else if (chunk < ARTPEC_CACHE_LINE_MAX) {
			pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk);
			ret = setup_bounce_buffer_in(common, walk, chunk);
		} else {
			dma_addr_t dma_addr;

			chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1);

			pr_debug("CHUNK %pad:%zu\n", &addr, chunk);

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->sg->offset +
							  walk->offset,
							  chunk,
							  DMA_FROM_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr_phys(common,
								 dma_addr,
								 chunk, false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}

static int
artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common,
				   struct artpec6_crypto_walk *walk,
				   size_t count)
{
	size_t chunk;
	int ret;
	dma_addr_t addr;

	while (walk->sg && count) {
		chunk = min(count, artpec6_crypto_walk_chunklen(walk));
		addr = artpec6_crypto_walk_chunk_phys(walk);

		pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk);

		if (addr & 3) {
			char buf[3];

			chunk = min_t(size_t, chunk, (4-(addr&3)));

			sg_pcopy_to_buffer(walk->sg, 1, buf, chunk,
					   walk->offset);

			ret = artpec6_crypto_setup_out_descr_short(common, buf,
								   chunk,
								   false);
		} else {
			dma_addr_t dma_addr;

			ret = artpec6_crypto_dma_map_page(common,
							  sg_page(walk->sg),
							  walk->sg->offset +
							  walk->offset,
							  chunk,
							  DMA_TO_DEVICE,
							  &dma_addr);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_out_descr_phys(common,
								  dma_addr,
								  chunk,
								  false);
		}

		if (ret)
			return ret;

		count = count - chunk;
		artpec6_crypto_walk_advance(walk, chunk);
	}

	if (count)
		pr_err("EOL unexpected %zu bytes left\n", count);

	return count ? -EINVAL : 0;
}

/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
 *
 * If the out descriptor list is non-empty, then the eop flag on the
 * last used out descriptor will be set.
 *
 * @return 0 on success
 *	-EINVAL if the out descriptor is empty or has overflown
 */
static int
artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: OUT descriptor list is %s\n",
			MODULE_NAME, dma->out_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->out[dma->out_cnt-1];
	d->ctrl.eop = 1;

	return 0;
}

/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
 *                                       in descriptor
 *
 * See artpec6_crypto_terminate_out_descrs() for return values
 */
static int
artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
		pr_err("%s: IN descriptor list is %s\n",
			MODULE_NAME, dma->in_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->in[dma->in_cnt-1];
	d->ctrl.intr = 1;
	return 0;
}

/** create_hash_pad - Create a Secure Hash conformant pad
 *
 * @dst:      The destination buffer to write the pad. Must be at least 64 bytes
 * @dgstlen:  The total length of the hash digest in bytes
 * @bitcount: The total length of the digest in bits
 *
 * @return The total number of padding bytes written to @dst
 */
static size_t
create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
{
	unsigned int mod, target, diff, pad_bytes, size_bytes;
	__be64 bits = __cpu_to_be64(bitcount);

	switch (oper) {
	case regk_crypto_sha1:
	case regk_crypto_sha256:
	case regk_crypto_hmac_sha1:
	case regk_crypto_hmac_sha256:
		target = 448 / 8;
		mod = 512 / 8;
		size_bytes = 8;
		break;
	default:
		target = 896 / 8;
		mod = 1024 / 8;
		size_bytes = 16;
		break;
	}

	target -= 1;
	diff = dgstlen & (mod - 1);
	pad_bytes = diff > target ? target + mod - diff : target - diff;

	memset(dst + 1, 0, pad_bytes);
	dst[0] = 0x80;

	if (size_bytes == 16) {
		memset(dst + 1 + pad_bytes, 0, 8);
		memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
	} else {
		memcpy(dst + 1 + pad_bytes, &bits, 8);
	}

	return pad_bytes + size_bytes + 1;
}
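
/* Worked example: for SHA-256, mod = 64, target = 55 and size_bytes = 8.
 * With dgstlen = 3, diff = 3 and pad_bytes = 52, so the pad is one 0x80
 * byte, 52 zero bytes and the 8-byte big-endian bit count: 61 bytes in
 * total, which rounds the 3-byte message up to one 64-byte block.
 */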

static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common,
				      struct crypto_async_request *parent,
				      void (*complete)(struct crypto_async_request *req),
				      struct scatterlist *dstsg, unsigned int nbytes)
{
	gfp_t flags;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		 GFP_KERNEL : GFP_ATOMIC;

	common->gfp_flags = flags;
	common->dma = kmem_cache_alloc(ac->dma_cache, flags);
	if (!common->dma)
		return -ENOMEM;

	common->req = parent;
	common->complete = complete;
	return 0;
}

static void
artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma)
{
	struct artpec6_crypto_bounce_buffer *b;
	struct artpec6_crypto_bounce_buffer *next;

	list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) {
		kfree(b);
	}
}

static int
artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);

	artpec6_crypto_dma_unmap_all(common);
	artpec6_crypto_bounce_destroy(common->dma);
	kmem_cache_free(ac->dma_cache, common->dma);
	common->dma = NULL;
	return 0;
}

/*
 * Ciphering functions.
 */
static int artpec6_crypto_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);
	int ret;

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 0;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_encrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_decrypt(struct skcipher_request *req)
{
	int ret;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	void (*complete)(struct crypto_async_request *req);

	req_ctx = skcipher_request_ctx(req);

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		req_ctx->decrypt = 1;
		break;
	default:
		break;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		complete = artpec6_crypto_complete_cbc_decrypt;
		break;
	default:
		complete = artpec6_crypto_complete_crypto;
		break;
	}

	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 complete,
					 req->dst, req->cryptlen);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_crypto(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int
artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
{
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	unsigned int counter = be32_to_cpup((__be32 *)
					    (req->iv + iv_len - 4));
	unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) /
			     AES_BLOCK_SIZE;

	/*
	 * The hardware uses only the last 32-bits as the counter while the
	 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
	 * the whole IV is a counter. So fallback if the counter is going to
	 * overflow.
	 */
	if (counter + nblks < counter) {
		int ret;

		pr_debug("counter %x will overflow (nblks %u), falling back\n",
			 counter, counter + nblks);

		ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
						  ctx->key_length);
		if (ret)
			return ret;

		{
			SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

			skcipher_request_set_sync_tfm(subreq, ctx->fallback);
			skcipher_request_set_callback(subreq, req->base.flags,
						      NULL, NULL);
			skcipher_request_set_crypt(subreq, req->src, req->dst,
						   req->cryptlen, req->iv);
			ret = encrypt ? crypto_skcipher_encrypt(subreq)
				      : crypto_skcipher_decrypt(subreq);
			skcipher_request_zero(subreq);
		}
		return ret;
	}

	return encrypt ? artpec6_crypto_encrypt(req)
		       : artpec6_crypto_decrypt(req);
}
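
/* For illustration: with cryptlen = 32 (nblks = 2) and a counter word of
 * 0xffffffff, counter + nblks wraps to 1, which is less than counter, so
 * the request above is handed to the software fallback instead of the
 * hardware.
 */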

static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, true);
}

static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req)
{
	return artpec6_crypto_ctr_crypt(req, false);
}

/*
 * AEAD functions
 */
static int artpec6_crypto_aead_init(struct crypto_aead *tfm)
{
	struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm);

	memset(tfm_ctx, 0, sizeof(*tfm_ctx));

	crypto_aead_set_reqsize(tfm,
				sizeof(struct artpec6_crypto_aead_req_ctx));

	return 0;
}

static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key,
				       unsigned int len)
{
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base);

	if (len != 16 && len != 24 && len != 32) {
		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->key_length = len;

	memcpy(ctx->aes_key, key, len);
	return 0;
}

static int artpec6_crypto_aead_encrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = false;
	ret = artpec6_crypto_common_init(&req_ctx->common, &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_aead_decrypt(struct aead_request *req)
{
	int ret;
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req);

	req_ctx->decrypt = true;
	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	ret = artpec6_crypto_common_init(&req_ctx->common,
					 &req->base,
					 artpec6_crypto_complete_aead,
					 NULL, 0);
	if (ret)
		return ret;

	ret = artpec6_crypto_prepare_aead(req);
	if (ret) {
		artpec6_crypto_common_destroy(&req_ctx->common);
		return ret;
	}

	return artpec6_crypto_submit(&req_ctx->common);
}

static int artpec6_crypto_prepare_hash(struct ahash_request *areq)
{
	struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq);
	size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
	size_t contextsize = digestsize;
	size_t blocksize = crypto_tfm_alg_blocksize(
		crypto_ahash_tfm(crypto_ahash_reqtfm(areq)));
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 sel_ctx;
	bool ext_ctx = false;
	bool run_hw = false;
	int error = 0;

	artpec6_crypto_init_dma_operation(common);

	/* Upload HMAC key, must be in the first packet */
	if (req_ctx->hash_flags & HASH_FLAG_HMAC) {
		if (variant == ARTPEC6_CRYPTO) {
			req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
						     a6_regk_crypto_dlkey);
		} else {
			req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
						     a7_regk_crypto_dlkey);
		}

		/* Copy and pad up the key */
		memcpy(req_ctx->key_buffer, ctx->hmac_key,
		       ctx->hmac_key_length);
		memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
		       blocksize - ctx->hmac_key_length);

		error = artpec6_crypto_setup_out_descr(common,
					(void *)&req_ctx->key_md,
					sizeof(req_ctx->key_md), false, false);
		if (error)
			return error;

		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->key_buffer, blocksize,
					true, false);
		if (error)
			return error;
	}

	if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) {
		/* Restore context */
		sel_ctx = regk_crypto_ext;
		ext_ctx = true;
	} else {
		sel_ctx = regk_crypto_init;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
	} else {
		req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
		req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);

		/* If this is the final round, set the final flag */
		if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
			req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
	}

	/* Set up the metadata descriptors */
	error = artpec6_crypto_setup_out_descr(common,
				(void *)&req_ctx->hash_md,
				sizeof(req_ctx->hash_md), false, false);
	if (error)
		return error;

	error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (error)
		return error;

	if (ext_ctx) {
		error = artpec6_crypto_setup_out_descr(common,
					req_ctx->digeststate,
					contextsize, false, false);

		if (error)
			return error;
	}

	if (req_ctx->hash_flags & HASH_FLAG_UPDATE) {
		size_t done_bytes = 0;
		size_t total_bytes = areq->nbytes + req_ctx->partial_bytes;
		size_t ready_bytes = round_down(total_bytes, blocksize);
		struct artpec6_crypto_walk walk;

		run_hw = ready_bytes > 0;
		if (req_ctx->partial_bytes && ready_bytes) {
			/* We have a partial buffer and will send at least some
			 * bytes to the HW. Empty this partial buffer before
			 * tackling the SG lists
			 */
			memcpy(req_ctx->partial_buffer_out,
			       req_ctx->partial_buffer,
			       req_ctx->partial_bytes);

			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			/* Reset partial buffer */
			done_bytes += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}

		artpec6_crypto_walk_init(&walk, areq->src);

		error = artpec6_crypto_setup_sg_descrs_out(common, &walk,
							   ready_bytes -
							   done_bytes);
		if (error)
			return error;

		if (walk.sg) {
			size_t sg_skip = ready_bytes - done_bytes;
			size_t sg_rem = areq->nbytes - sg_skip;

			sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
					   req_ctx->partial_buffer +
					   req_ctx->partial_bytes,
					   sg_rem, sg_skip);

			req_ctx->partial_bytes += sg_rem;
		}

		req_ctx->digcnt += ready_bytes;
		req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE);
	}

	if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) {
		size_t hash_pad_len;
		u64 digest_bits;
		u32 oper;

		if (variant == ARTPEC6_CRYPTO)
			oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md);
		else
			oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md);

		/* Write out the partial buffer if present */
		if (req_ctx->partial_bytes) {
			memcpy(req_ctx->partial_buffer_out,
			       req_ctx->partial_buffer,
			       req_ctx->partial_bytes);
			error = artpec6_crypto_setup_out_descr(common,
						req_ctx->partial_buffer_out,
						req_ctx->partial_bytes,
						false, true);
			if (error)
				return error;

			req_ctx->digcnt += req_ctx->partial_bytes;
			req_ctx->partial_bytes = 0;
		}

		if (req_ctx->hash_flags & HASH_FLAG_HMAC)
			digest_bits = 8 * (req_ctx->digcnt + blocksize);
		else
			digest_bits = 8 * req_ctx->digcnt;

		/* Add the hash pad */
		hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer,
					       req_ctx->digcnt, digest_bits);
		error = artpec6_crypto_setup_out_descr(common,
						       req_ctx->pad_buffer,
						       hash_pad_len, false,
						       false);
		req_ctx->digcnt = 0;

		if (error)
			return error;

		/* Descriptor for the final result */
		error = artpec6_crypto_setup_in_descr(common, areq->result,
						      digestsize,
						      true);
		if (error)
			return error;

	} else { /* This is not the final operation for this request */
		if (!run_hw)
			return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;

		/* Save the result to the context */
		error = artpec6_crypto_setup_in_descr(common,
						      req_ctx->digeststate,
						      contextsize, false);
		if (error)
			return error;
	}

	req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE |
				 HASH_FLAG_FINALIZE);

	error = artpec6_crypto_terminate_in_descrs(common);
	if (error)
		return error;

	error = artpec6_crypto_terminate_out_descrs(common);
	if (error)
		return error;

	error = artpec6_crypto_dma_map_descs(common);
	if (error)
		return error;

	return ARTPEC6_CRYPTO_PREPARE_HASH_START;
}

static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB;

	return 0;
}

static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	ctx->fallback =
		crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
					   0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback))
		return PTR_ERR(ctx->fallback);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR;

	return 0;
}

static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC;

	return 0;
}

static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	tfm->reqsize = sizeof(struct artpec6_crypto_request_context);
	ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS;

	return 0;
}

static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	memset(ctx, 0, sizeof(*ctx));
}

static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
{
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_sync_skcipher(ctx->fallback);
	artpec6_crypto_aes_exit(tfm);
}

static int
artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int keylen)
{
	struct artpec6_cryptotfm_context *ctx =
		crypto_skcipher_ctx(cipher);

	switch (keylen) {
	case 16:
	case 24:
	case 32:
		break;
	default:
		crypto_skcipher_set_flags(cipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->aes_key, key, keylen);
	ctx->key_length = keylen;
	return 0;
}

static int
artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
			   unsigned int keylen)
{
	struct artpec6_cryptotfm_context *ctx =
		crypto_skcipher_ctx(cipher);
	int ret;

	ret = xts_check_key(&cipher->base, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case 32:
	case 48:
	case 64:
		break;
	default:
		crypto_skcipher_set_flags(cipher,
					  CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->aes_key, key, keylen);
	ctx->key_length = keylen;
	return 0;
}

/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
 *
 * @req: The async request to process
 *
 * @return 0 if the dma job was successfully prepared
 *	  <0 on error
 *
 * This function sets up the PDMA descriptors for a block cipher request.
 *
 * The required padding is added for AES-CTR using a statically defined
 * buffer.
 *
 * The PDMA descriptor list will be as follows:
 *
 * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
 * IN:  <CIPHER_MD><data_0>...[data_n]<intr>
 *
 */
static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
{
	int ret;
	struct artpec6_crypto_walk walk;
	struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq);
	struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher);
	struct artpec6_crypto_request_context *req_ctx = NULL;
	size_t iv_len = crypto_skcipher_ivsize(cipher);
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	struct artpec6_crypto_req_common *common;
	bool cipher_decr = false;
	size_t cipher_klen;
	u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
	u32 oper;

	req_ctx = skcipher_request_ctx(areq);
	common = &req_ctx->common;

	artpec6_crypto_init_dma_operation(common);

	if (variant == ARTPEC6_CRYPTO)
		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey);
	else
		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey);

	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
					     sizeof(ctx->key_md), false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
					     ctx->key_length, true, false);
	if (ret)
		return ret;

	req_ctx->cipher_md = 0;

	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS)
		cipher_klen = ctx->key_length/2;
	else
		cipher_klen = ctx->key_length;

	/* Metadata */
	switch (cipher_klen) {
	case 16:
		cipher_len = regk_crypto_key_128;
		break;
	case 24:
		cipher_len = regk_crypto_key_192;
		break;
	case 32:
		cipher_len = regk_crypto_key_256;
		break;
	default:
		pr_err("%s: Invalid key length %d!\n",
			MODULE_NAME, ctx->key_length);
		return -EINVAL;
	}

	switch (ctx->crypto_type) {
	case ARTPEC6_CRYPTO_CIPHER_AES_ECB:
		oper = regk_crypto_aes_ecb;
		cipher_decr = req_ctx->decrypt;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_CBC:
		oper = regk_crypto_aes_cbc;
		cipher_decr = req_ctx->decrypt;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_CTR:
		oper = regk_crypto_aes_ctr;
		cipher_decr = false;
		break;

	case ARTPEC6_CRYPTO_CIPHER_AES_XTS:
		oper = regk_crypto_aes_xts;
		cipher_decr = req_ctx->decrypt;

		if (variant == ARTPEC6_CRYPTO)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ;
		else
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ;
		break;

	default:
		pr_err("%s: Invalid cipher mode %d!\n",
			MODULE_NAME, ctx->crypto_type);
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper);
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
						 cipher_len);
		if (cipher_decr)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
	} else {
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper);
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
						 cipher_len);
		if (cipher_decr)
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
	}

	ret = artpec6_crypto_setup_out_descr(common,
					     &req_ctx->cipher_md,
					     sizeof(req_ctx->cipher_md),
					     false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (ret)
		return ret;

	if (iv_len) {
		ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len,
						     false, false);
		if (ret)
			return ret;
	}
	/* Data out */
	artpec6_crypto_walk_init(&walk, areq->src);
	ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen);
	if (ret)
		return ret;

	/* Data in */
	artpec6_crypto_walk_init(&walk, areq->dst);
	ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen);
	if (ret)
		return ret;

	/* CTR-mode padding required by the HW. */
	if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
	    ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
		size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
			     areq->cryptlen;

		if (pad) {
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->pad_buffer,
							     pad, false, false);
			if (ret)
				return ret;

			ret = artpec6_crypto_setup_in_descr(common,
							    ac->pad_buffer, pad,
							    false);
			if (ret)
				return ret;
		}
	}

	ret = artpec6_crypto_terminate_out_descrs(common);
	if (ret)
		return ret;

	ret = artpec6_crypto_terminate_in_descrs(common);
	if (ret)
		return ret;

	return artpec6_crypto_dma_map_descs(common);
}

static int artpec6_crypto_prepare_aead(struct aead_request *areq)
{
	size_t count;
	int ret;
	size_t input_length;
	struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm);
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);
	struct crypto_aead *cipher = crypto_aead_reqtfm(areq);
	struct artpec6_crypto_req_common *common = &req_ctx->common;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	u32 md_cipher_len;

	artpec6_crypto_init_dma_operation(common);

	/* Key */
	if (variant == ARTPEC6_CRYPTO) {
		ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
					 a6_regk_crypto_dlkey);
	} else {
		ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
					 a7_regk_crypto_dlkey);
	}
	ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md,
					     sizeof(ctx->key_md), false, false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key,
					     ctx->key_length, true, false);
	if (ret)
		return ret;

	req_ctx->cipher_md = 0;

	switch (ctx->key_length) {
	case 16:
		md_cipher_len = regk_crypto_key_128;
		break;
	case 24:
		md_cipher_len = regk_crypto_key_192;
		break;
	case 32:
		md_cipher_len = regk_crypto_key_256;
		break;
	default:
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO) {
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER,
						 regk_crypto_aes_gcm);
		req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN,
						 md_cipher_len);
		if (req_ctx->decrypt)
			req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR;
	} else {
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER,
						 regk_crypto_aes_gcm);
		req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN,
						 md_cipher_len);
		if (req_ctx->decrypt)
			req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR;
	}

	ret = artpec6_crypto_setup_out_descr(common,
					     (void *) &req_ctx->cipher_md,
					     sizeof(req_ctx->cipher_md), false,
					     false);
	if (ret)
		return ret;

	ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false);
	if (ret)
		return ret;

	/* For the decryption, cryptlen includes the tag. */
	input_length = areq->cryptlen;
	if (req_ctx->decrypt)
		input_length -= crypto_aead_authsize(cipher);

	/* Prepare the context buffer */
	req_ctx->hw_ctx.aad_length_bits =
		__cpu_to_be64(8*areq->assoclen);

	req_ctx->hw_ctx.text_length_bits =
		__cpu_to_be64(8*input_length);

	memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher));
	// The HW omits the initial increment of the counter field.
	memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
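
	/*
	 * For illustration: with the usual 12-byte GCM IV this yields
	 * J0 = IV || 0x00000001, the J0 defined for 96-bit IVs in the GCM
	 * specification; the first counter value actually applied to the
	 * ciphertext blocks is then J0 + 1.
	 */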

	ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx,
		sizeof(struct artpec6_crypto_aead_hw_ctx), false, false);
	if (ret)
		return ret;

	{
		struct artpec6_crypto_walk walk;

		artpec6_crypto_walk_init(&walk, areq->src);

		/* Associated data */
		count = areq->assoclen;
		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
		if (ret)
			return ret;

		if (!IS_ALIGNED(areq->assoclen, 16)) {
			size_t assoc_pad = 16 - (areq->assoclen % 16);
			/* The HW mandates zero padding here */
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->zero_buffer,
							     assoc_pad, false,
							     false);
			if (ret)
				return ret;
		}

		/* Data to crypto */
		count = input_length;
		ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count);
		if (ret)
			return ret;

		if (!IS_ALIGNED(input_length, 16)) {
			size_t crypto_pad = 16 - (input_length % 16);
			/* The HW mandates zero padding here */
			ret = artpec6_crypto_setup_out_descr(common,
							     ac->zero_buffer,
							     crypto_pad,
							     false,
							     false);
			if (ret)
				return ret;
		}
	}

	/* Data from crypto */
	{
		struct artpec6_crypto_walk walk;
		size_t output_len = areq->cryptlen;

		if (req_ctx->decrypt)
			output_len -= crypto_aead_authsize(cipher);

		artpec6_crypto_walk_init(&walk, areq->dst);

		/* skip associated data in the output */
		count = artpec6_crypto_walk_advance(&walk, areq->assoclen);
		if (count)
			return -EINVAL;

		count = output_len;
		ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count);
		if (ret)
			return ret;

		/* Put padding between the cryptotext and the auth tag */
		if (!IS_ALIGNED(output_len, 16)) {
			size_t crypto_pad = 16 - (output_len % 16);

			ret = artpec6_crypto_setup_in_descr(common,
							    ac->pad_buffer,
							    crypto_pad, false);
			if (ret)
				return ret;
		}

		/* The authentication tag shall follow immediately after
		 * the output ciphertext. For decryption it is put in a context
		 * buffer for later compare against the input tag.
		 */
		if (req_ctx->decrypt) {
			ret = artpec6_crypto_setup_in_descr(common,
				req_ctx->decryption_tag, AES_BLOCK_SIZE, false);
			if (ret)
				return ret;

		} else {
			/* For encryption the requested tag size may be smaller
			 * than the hardware's generated tag.
			 */
			size_t authsize = crypto_aead_authsize(cipher);

			ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
								authsize);
			if (ret)
				return ret;

			if (authsize < AES_BLOCK_SIZE) {
				count = AES_BLOCK_SIZE - authsize;
				ret = artpec6_crypto_setup_in_descr(common,
								    ac->pad_buffer,
								    count,
								    false);
				if (ret)
					return ret;
			}
		}
	}

	ret = artpec6_crypto_terminate_in_descrs(common);
	if (ret)
		return ret;

	ret = artpec6_crypto_terminate_out_descrs(common);
	if (ret)
		return ret;

	return artpec6_crypto_dma_map_descs(common);
}

static void artpec6_crypto_process_queue(struct artpec6_crypto *ac,
					 struct list_head *completions)
{
	struct artpec6_crypto_req_common *req;

	while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) {
		req = list_first_entry(&ac->queue,
				       struct artpec6_crypto_req_common,
				       list);
		list_move_tail(&req->list, &ac->pending);
		artpec6_crypto_start_dma(req);

		list_add_tail(&req->complete_in_progress, completions);
	}

	/*
	 * In some cases, the hardware can raise an in_eop_flush interrupt
	 * before actually updating the status, so we have a timer which will
	 * recheck the status on timeout. Since the cases are expected to be
	 * very rare, we use a relatively large timeout value. There should be
	 * no noticeable negative effect if we timeout spuriously.
	 */
	if (ac->pending_count)
		mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100));
	else
		del_timer(&ac->timer);
}

static void artpec6_crypto_timeout(struct timer_list *t)
{
	struct artpec6_crypto *ac = from_timer(ac, t, timer);

	dev_info_ratelimited(artpec6_crypto_dev, "timeout\n");

	tasklet_schedule(&ac->task);
}

static void artpec6_crypto_task(unsigned long data)
{
	struct artpec6_crypto *ac = (struct artpec6_crypto *)data;
	struct artpec6_crypto_req_common *req;
	struct artpec6_crypto_req_common *n;
	struct list_head complete_done;
	struct list_head complete_in_progress;

	INIT_LIST_HEAD(&complete_done);
	INIT_LIST_HEAD(&complete_in_progress);

	if (list_empty(&ac->pending)) {
		pr_debug("Spurious IRQ\n");
		return;
	}

	spin_lock_bh(&ac->queue_lock);

	list_for_each_entry_safe(req, n, &ac->pending, list) {
		struct artpec6_crypto_dma_descriptors *dma = req->dma;
		u32 stat;
		dma_addr_t stataddr;

		stataddr = dma->stat_dma_addr + 4 * (req->dma->in_cnt - 1);
		dma_sync_single_for_cpu(artpec6_crypto_dev,
					stataddr,
					4,
					DMA_BIDIRECTIONAL);

		stat = req->dma->stat[req->dma->in_cnt-1];

		/* A non-zero final status descriptor indicates
		 * this job has finished.
		 */
		pr_debug("Request %p status is %X\n", req, stat);
		if (!stat)
			break;

		/* Allow testing of timeout handling with fault injection */
#ifdef CONFIG_FAULT_INJECTION
		if (should_fail(&artpec6_crypto_fail_status_read, 1))
			break;
#endif

		pr_debug("Completing request %p\n", req);

		list_move_tail(&req->list, &complete_done);

		ac->pending_count--;
	}

	artpec6_crypto_process_queue(ac, &complete_in_progress);

	spin_unlock_bh(&ac->queue_lock);

	/* Perform the completion callbacks without holding the queue lock
	 * to allow new request submissions from the callbacks.
	 */
	list_for_each_entry_safe(req, n, &complete_done, list) {
		artpec6_crypto_dma_unmap_all(req);
		artpec6_crypto_copy_bounce_buffers(req);
		artpec6_crypto_common_destroy(req);

		req->complete(req->req);
	}

	list_for_each_entry_safe(req, n, &complete_in_progress,
				 complete_in_progress) {
		req->req->complete(req->req, -EINPROGRESS);
	}
}

static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
{
	req->complete(req, 0);
}

static void
artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
{
	struct skcipher_request *cipher_req = container_of(req,
		struct skcipher_request, base);

	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
				 cipher_req->cryptlen - AES_BLOCK_SIZE,
				 AES_BLOCK_SIZE, 0);
	req->complete(req, 0);
}

static void
artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
{
	struct skcipher_request *cipher_req = container_of(req,
		struct skcipher_request, base);

	scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
				 cipher_req->cryptlen - AES_BLOCK_SIZE,
				 AES_BLOCK_SIZE, 0);
	req->complete(req, 0);
}

static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
{
	int result = 0;

	/* Verify GCM hashtag. */
	struct aead_request *areq = container_of(req,
		struct aead_request, base);
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq);

	if (req_ctx->decrypt) {
		u8 input_tag[AES_BLOCK_SIZE];
		unsigned int authsize = crypto_aead_authsize(aead);

		sg_pcopy_to_buffer(areq->src,
				   sg_nents(areq->src),
				   input_tag,
				   authsize,
				   areq->assoclen + areq->cryptlen -
				   authsize);

		if (crypto_memneq(req_ctx->decryption_tag,
				  input_tag,
				  authsize)) {
			pr_debug("***EBADMSG:\n");
			print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1,
					     input_tag, authsize, true);
			print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1,
					     req_ctx->decryption_tag,
					     authsize, true);

			result = -EBADMSG;
		}
	}

	req->complete(req, result);
}

static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
{
	req->complete(req, 0);
}

/*------------------- Hash functions -----------------------------------------*/
static int
artpec6_crypto_hash_set_key(struct crypto_ahash *tfm,
			    const u8 *key, unsigned int keylen)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base);
	size_t blocksize;
	int ret;

	if (!keylen) {
		pr_err("Invalid length (%d) of HMAC key\n",
			keylen);
		return -EINVAL;
	}

	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	if (keylen > blocksize) {
		SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash);

		hdesc->tfm = tfm_ctx->child_hash;

		tfm_ctx->hmac_key_length = blocksize;
		ret = crypto_shash_digest(hdesc, key, keylen,
					  tfm_ctx->hmac_key);
		if (ret)
			return ret;

	} else {
		memcpy(tfm_ctx->hmac_key, key, keylen);
		tfm_ctx->hmac_key_length = keylen;
	}

	return 0;
}

static int
artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac)
{
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
	u32 oper;

	memset(req_ctx, 0, sizeof(*req_ctx));

	req_ctx->hash_flags = HASH_FLAG_INIT_CTX;
	if (hmac)
		req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY);

	switch (type) {
	case ARTPEC6_CRYPTO_HASH_SHA1:
		oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1;
		break;
	case ARTPEC6_CRYPTO_HASH_SHA256:
		oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256;
		break;
	default:
		pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type);
		return -EINVAL;
	}

	if (variant == ARTPEC6_CRYPTO)
		req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper);
	else
		req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper);

	return 0;
}

static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);
	int ret;

	if (!req_ctx->common.dma) {
		ret = artpec6_crypto_common_init(&req_ctx->common,
						 &req->base,
						 artpec6_crypto_complete_hash,
						 NULL, 0);

		if (ret)
			return ret;
	}

	ret = artpec6_crypto_prepare_hash(req);
	switch (ret) {
	case ARTPEC6_CRYPTO_PREPARE_HASH_START:
		ret = artpec6_crypto_submit(&req_ctx->common);
		break;

	case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START:
		ret = 0;
		break;

	default:
		artpec6_crypto_common_destroy(&req_ctx->common);
		break;
	}

	return ret;
}

static int artpec6_crypto_hash_final(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	req_ctx->hash_flags |= HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int artpec6_crypto_hash_update(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	req_ctx->hash_flags |= HASH_FLAG_UPDATE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int artpec6_crypto_sha1_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);
}

static int artpec6_crypto_sha1_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0);

	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int artpec6_crypto_sha256_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
}

static int artpec6_crypto_sha256_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0);
	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}

static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req)
{
	return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
}

static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req)
{
	struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req);

	artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1);
	req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE;

	return artpec6_crypto_prepare_submit_hash(req);
}
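
/* For illustration: these entry points back the ahash implementations this
 * driver registers (e.g. "sha256", "hmac(sha256)"), so a user reaches them
 * through the generic ahash API:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *
 *	crypto_ahash_setkey(tfm, key, keylen);
 */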
2397 static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm,
2398 const char *base_hash_name)
2400 struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);
2402 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2403 sizeof(struct artpec6_hash_request_context));
2404 memset(tfm_ctx, 0, sizeof(*tfm_ctx));
2406 if (base_hash_name) {
2407 struct crypto_shash *child;
2409 child = crypto_alloc_shash(base_hash_name, 0,
2410 CRYPTO_ALG_NEED_FALLBACK);
2413 return PTR_ERR(child);
2415 tfm_ctx->child_hash = child;
2421 static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm)
2423 return artpec6_crypto_ahash_init_common(tfm, NULL);
2426 static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm)
2428 return artpec6_crypto_ahash_init_common(tfm, "sha256");
static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm)
{
	struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm);

	if (tfm_ctx->child_hash)
		crypto_free_shash(tfm_ctx->child_hash);

	memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key));
	tfm_ctx->hmac_key_length = 0;
}
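/*
 * export/import serialize the partial hash state so an ongoing hash can be
 * suspended and later resumed. Only the software-visible state is copied:
 * the byte counter, flags, the partial block and the intermediate digest,
 * plus the operation code extracted from the variant-specific metadata
 * word.
 */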
static int artpec6_crypto_hash_export(struct ahash_request *req, void *out)
{
	const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
	struct artpec6_hash_export_state *state = out;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;

	BUILD_BUG_ON(sizeof(state->partial_buffer) !=
		     sizeof(ctx->partial_buffer));
	BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate));

	state->digcnt = ctx->digcnt;
	state->partial_bytes = ctx->partial_bytes;
	state->hash_flags = ctx->hash_flags;

	if (variant == ARTPEC6_CRYPTO)
		state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md);
	else
		state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md);

	memcpy(state->partial_buffer, ctx->partial_buffer,
	       sizeof(state->partial_buffer));
	memcpy(state->digeststate, ctx->digeststate,
	       sizeof(state->digeststate));

	return 0;
}
static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in)
{
	struct artpec6_hash_request_context *ctx = ahash_request_ctx(req);
	const struct artpec6_hash_export_state *state = in;
	struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev);
	enum artpec6_crypto_variant variant = ac->variant;

	memset(ctx, 0, sizeof(*ctx));

	ctx->digcnt = state->digcnt;
	ctx->partial_bytes = state->partial_bytes;
	ctx->hash_flags = state->hash_flags;

	if (variant == ARTPEC6_CRYPTO)
		ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper);
	else
		ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper);

	memcpy(ctx->partial_buffer, state->partial_buffer,
	       sizeof(state->partial_buffer));
	memcpy(ctx->digeststate, state->digeststate,
	       sizeof(state->digeststate));

	return 0;
}
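/*
 * Split the PDMA elastic memory between data, descriptor and status
 * buffers, then enable both channels and unmask the IN data and IN
 * EOP-flush interrupts.
 */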
static int init_crypto_hw(struct artpec6_crypto *ac)
{
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	u32 out_descr_buf_size;
	u32 out_data_buf_size;
	u32 in_data_buf_size;
	u32 in_descr_buf_size;
	u32 in_stat_buf_size;
	u32 in, out;

	/*
	 * The PDMA unit contains 1984 bytes of internal memory for the OUT
	 * channels and 1024 bytes for the IN channel. This is an elastic
	 * memory used to internally store the descriptors and data. The
	 * values are specified in 64-byte increments. Trustzone buffers are
	 * not used at this stage.
	 */
	out_data_buf_size = 16;		/* 1024 bytes for data */
	out_descr_buf_size = 15;	/* 960 bytes for descriptors */
	in_data_buf_size = 8;		/* 512 bytes for data */
	in_descr_buf_size = 4;		/* 256 bytes for descriptors */
	in_stat_buf_size = 4;		/* 256 bytes for stat descrs */
	BUILD_BUG_ON_MSG((out_data_buf_size
				+ out_descr_buf_size) * 64 > 1984,
			 "Invalid OUT configuration");

	BUILD_BUG_ON_MSG((in_data_buf_size
				+ in_descr_buf_size
				+ in_stat_buf_size) * 64 > 1024,
			 "Invalid IN configuration");
	in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) |
	     FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) |
	     FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size);

	out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) |
	      FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size);
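	/*
	 * Worked example for the sizes above: the data size occupies bits
	 * 4:0, the descriptor size bits 9:5 and (IN channel only) the stat
	 * size bits 14:10, so in = 8 | (4 << 5) | (4 << 10) = 0x1088 and
	 * out = 16 | (15 << 5) = 0x01f0.
	 */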
	writel_relaxed(out, base + PDMA_OUT_BUF_CFG);
	writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG);

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG);
		writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG);
		writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA |
			       A6_PDMA_INTR_MASK_IN_EOP_FLUSH,
			       base + A6_PDMA_INTR_MASK);
	} else {
		writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG);
		writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG);
		writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA |
			       A7_PDMA_INTR_MASK_IN_EOP_FLUSH,
			       base + A7_PDMA_INTR_MASK);
	}

	return 0;
}
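/*
 * Stop and disable both PDMA channels; only the IN-channel register
 * offsets and the OUT stop bit differ between the ARTPEC-6 and ARTPEC-7
 * variants.
 */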
static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac)
{
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;

	if (variant == ARTPEC6_CRYPTO) {
		writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD);
		writel_relaxed(0, base + A6_PDMA_IN_CFG);
		writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
	} else {
		writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD);
		writel_relaxed(0, base + A7_PDMA_IN_CFG);
		writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD);
	}

	writel_relaxed(0, base + PDMA_OUT_CFG);
}
static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id)
{
	struct artpec6_crypto *ac = dev_id;
	enum artpec6_crypto_variant variant = ac->variant;
	void __iomem *base = ac->base;
	u32 mask_in_data, mask_in_eop_flush;
	u32 in_cmd_flush_stat, in_cmd_reg;
	u32 ack_intr_reg;
	u32 ack = 0;
	u32 intr;

	if (variant == ARTPEC6_CRYPTO) {
		intr = readl_relaxed(base + A6_PDMA_MASKED_INTR);
		mask_in_data = A6_PDMA_INTR_MASK_IN_DATA;
		mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH;
		in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT;
		in_cmd_reg = A6_PDMA_IN_CMD;
		ack_intr_reg = A6_PDMA_ACK_INTR;
	} else {
		intr = readl_relaxed(base + A7_PDMA_MASKED_INTR);
		mask_in_data = A7_PDMA_INTR_MASK_IN_DATA;
		mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH;
		in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT;
		in_cmd_reg = A7_PDMA_IN_CMD;
		ack_intr_reg = A7_PDMA_ACK_INTR;
	}
	/* We get two interrupt notifications from each job.
	 * The in_data interrupt means that all data was sent to memory, and
	 * in response we issue a status flush command so that the per-job
	 * status is written to its status vector. This ensures that the
	 * tasklet can detect exactly how many of the submitted jobs have
	 * finished.
	 */
	if (intr & mask_in_data)
		ack |= mask_in_data;

	if (intr & mask_in_eop_flush)
		ack |= mask_in_eop_flush;
	else
		writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg);

	writel_relaxed(ack, base + ack_intr_reg);

	if (intr & mask_in_eop_flush)
		tasklet_schedule(&ac->task);

	return IRQ_HANDLED;
}
/*------------------- Algorithm definitions ----------------------------------*/

/* ahash */
static struct ahash_alg hash_algos[] = {
	/* SHA-1 */
	{
		.init = artpec6_crypto_sha1_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_sha1_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.halg.digestsize = SHA1_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "sha1",
			.cra_driver_name = "artpec-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
	/* SHA-256 */
	{
		.init = artpec6_crypto_sha256_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_sha256_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "sha256",
			.cra_driver_name = "artpec-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
	/* HMAC SHA-256 */
	{
		.init = artpec6_crypto_hmac_sha256_init,
		.update = artpec6_crypto_hash_update,
		.final = artpec6_crypto_hash_final,
		.digest = artpec6_crypto_hmac_sha256_digest,
		.import = artpec6_crypto_hash_import,
		.export = artpec6_crypto_hash_export,
		.setkey = artpec6_crypto_hash_set_key,
		.halg.digestsize = SHA256_DIGEST_SIZE,
		.halg.statesize = sizeof(struct artpec6_hash_export_state),
		.halg.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "artpec-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_hashalg_context),
			.cra_module = THIS_MODULE,
			.cra_init = artpec6_crypto_ahash_init_hmac_sha256,
			.cra_exit = artpec6_crypto_ahash_exit,
		}
	},
};
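/*
 * AES skcipher modes. ECB and CBC run entirely in hardware; CTR is
 * registered with CRYPTO_ALG_NEED_FALLBACK so that requests the engine
 * cannot process can be handed to a software implementation, and XTS
 * takes the usual double-length key covering both tweak and data keys.
 */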
static struct skcipher_alg crypto_algos[] = {
	/* AES - ECB */
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "artpec6-ecb-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_ecb_init,
		.exit = artpec6_crypto_aes_exit,
	},
	/* AES - CTR */
	{
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "artpec6-ctr-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_NEED_FALLBACK,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_ctr_encrypt,
		.decrypt = artpec6_crypto_ctr_decrypt,
		.init = artpec6_crypto_aes_ctr_init,
		.exit = artpec6_crypto_aes_ctr_exit,
	},
	/* AES - CBC */
	{
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "artpec6-cbc-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = artpec6_crypto_cipher_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_cbc_init,
		.exit = artpec6_crypto_aes_exit,
	},
	/* AES - XTS */
	{
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "artpec6-xts-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = 2*AES_MIN_KEY_SIZE,
		.max_keysize = 2*AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = artpec6_crypto_xts_set_key,
		.encrypt = artpec6_crypto_encrypt,
		.decrypt = artpec6_crypto_decrypt,
		.init = artpec6_crypto_aes_xts_init,
		.exit = artpec6_crypto_aes_exit,
	},
};
static struct aead_alg aead_algos[] = {
	{
		.init = artpec6_crypto_aead_init,
		.setkey = artpec6_crypto_aead_set_key,
		.encrypt = artpec6_crypto_aead_encrypt,
		.decrypt = artpec6_crypto_aead_decrypt,
		.ivsize = GCM_AES_IV_SIZE,
		.maxauthsize = AES_BLOCK_SIZE,

		.base = {
			.cra_name = "gcm(aes)",
			.cra_driver_name = "artpec-gcm-aes",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct artpec6_cryptotfm_context),
			.cra_module = THIS_MODULE,
		},
	}
};
#ifdef CONFIG_DEBUG_FS

static struct dentry *dbgfs_root;

static void artpec6_crypto_init_debugfs(void)
{
	dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL);

#ifdef CONFIG_FAULT_INJECTION
	fault_create_debugfs_attr("fail_status_read", dbgfs_root,
				  &artpec6_crypto_fail_status_read);

	fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root,
				  &artpec6_crypto_fail_dma_array_full);
#endif
}

static void artpec6_crypto_free_debugfs(void)
{
	debugfs_remove_recursive(dbgfs_root);
	dbgfs_root = NULL;
}

#endif /* CONFIG_DEBUG_FS */
static const struct of_device_id artpec6_crypto_of_match[] = {
	{ .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO },
	{ .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO },
	{}
};
MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match);
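/*
 * Only a single device instance is supported. Probe sets up the DMA
 * descriptor cache, the cache-line aligned pad/zero bounce buffers and
 * the hardware itself before registering any algorithms, so transforms
 * can be allocated as soon as registration succeeds.
 */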
static int artpec6_crypto_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	enum artpec6_crypto_variant variant;
	struct artpec6_crypto *ac;
	struct device *dev = &pdev->dev;
	void __iomem *base;
	struct resource *res;
	int irq;
	int err;

	if (artpec6_crypto_dev)
		return -ENODEV;

	match = of_match_node(artpec6_crypto_of_match, dev->of_node);
	if (!match)
		return -EINVAL;

	variant = (enum artpec6_crypto_variant)match->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto),
			  GFP_KERNEL);
	if (!ac)
		return -ENOMEM;

	platform_set_drvdata(pdev, ac);
	ac->variant = variant;

	spin_lock_init(&ac->queue_lock);
	INIT_LIST_HEAD(&ac->queue);
	INIT_LIST_HEAD(&ac->pending);
	timer_setup(&ac->timer, artpec6_crypto_timeout, 0);

	ac->base = base;

	ac->dma_cache = kmem_cache_create("artpec6_crypto_dma",
		sizeof(struct artpec6_crypto_dma_descriptors),
		64, 0, NULL);
	if (!ac->dma_cache)
		return -ENOMEM;

#ifdef CONFIG_DEBUG_FS
	artpec6_crypto_init_debugfs();
#endif

	tasklet_init(&ac->task, artpec6_crypto_task,
		     (unsigned long)ac);

	ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
				      GFP_KERNEL);
	if (!ac->pad_buffer)
		return -ENOMEM;
	ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX);

	ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX,
				       GFP_KERNEL);
	if (!ac->zero_buffer)
		return -ENOMEM;
	ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX);

	err = init_crypto_hw(ac);
	if (err)
		goto free_cache;

	err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0,
			       "artpec6-crypto", ac);
	if (err)
		goto disable_hw;

	artpec6_crypto_dev = &pdev->dev;

	err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
	if (err) {
		dev_err(dev, "Failed to register ahashes\n");
		goto disable_hw;
	}

	err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
	if (err) {
		dev_err(dev, "Failed to register ciphers\n");
		goto unregister_ahashes;
	}

	err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos));
	if (err) {
		dev_err(dev, "Failed to register aeads\n");
		goto unregister_algs;
	}

	return 0;

unregister_algs:
	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
unregister_ahashes:
	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
disable_hw:
	artpec6_crypto_disable_hw(ac);
free_cache:
	kmem_cache_destroy(ac->dma_cache);
	return err;
}
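/*
 * Teardown mirrors probe in reverse: unregister the algorithms first so
 * no new requests can arrive, quiesce the tasklet, IRQ and timeout timer,
 * then disable the hardware and free the DMA descriptor cache.
 */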
static int artpec6_crypto_remove(struct platform_device *pdev)
{
	struct artpec6_crypto *ac = platform_get_drvdata(pdev);
	int irq = platform_get_irq(pdev, 0);

	crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos));
	crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos));
	crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos));

	tasklet_disable(&ac->task);
	devm_free_irq(&pdev->dev, irq, ac);
	tasklet_kill(&ac->task);
	del_timer_sync(&ac->timer);

	artpec6_crypto_disable_hw(ac);

	kmem_cache_destroy(ac->dma_cache);
#ifdef CONFIG_DEBUG_FS
	artpec6_crypto_free_debugfs();
#endif
	return 0;
}
static struct platform_driver artpec6_crypto_driver = {
	.probe   = artpec6_crypto_probe,
	.remove  = artpec6_crypto_remove,
	.driver  = {
		.name  = "artpec6-crypto",
		.of_match_table = artpec6_crypto_of_match,
	},
};
module_platform_driver(artpec6_crypto_driver);
MODULE_AUTHOR("Axis Communications AB");
MODULE_DESCRIPTION("ARTPEC-6 Crypto driver");
MODULE_LICENSE("GPL");