Commit | Line | Data |
---|---|---|
324429d7 HS |
1 | /* |
2 | * This file is part of the Chelsio T6 Crypto driver for Linux. | |
3 | * | |
4 | * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. | |
5 | * | |
6 | * This software is available to you under a choice of one of two | |
7 | * licenses. You may choose to be licensed under the terms of the GNU | |
8 | * General Public License (GPL) Version 2, available from the file | |
9 | * COPYING in the main directory of this source tree, or the | |
10 | * OpenIB.org BSD license below: | |
11 | * | |
12 | * Redistribution and use in source and binary forms, with or | |
13 | * without modification, are permitted provided that the following | |
14 | * conditions are met: | |
15 | * | |
16 | * - Redistributions of source code must retain the above | |
17 | * copyright notice, this list of conditions and the following | |
18 | * disclaimer. | |
19 | * | |
20 | * - Redistributions in binary form must reproduce the above | |
21 | * copyright notice, this list of conditions and the following | |
22 | * disclaimer in the documentation and/or other materials | |
23 | * provided with the distribution. | |
24 | * | |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
32 | * SOFTWARE. | |
33 | * | |
34 | * Written and Maintained by: | |
35 | * Manoj Malviya (manojmalviya@chelsio.com) | |
36 | * Atul Gupta (atul.gupta@chelsio.com) | |
37 | * Jitendra Lulla (jlulla@chelsio.com) | |
38 | * Yeshaswi M R Gowda (yeshaswi@chelsio.com) | |
39 | * Harsh Jain (harsh@chelsio.com) | |
40 | */ | |
41 | ||
42 | #define pr_fmt(fmt) "chcr:" fmt | |
43 | ||
44 | #include <linux/kernel.h> | |
45 | #include <linux/module.h> | |
46 | #include <linux/crypto.h> | |
47 | #include <linux/cryptohash.h> | |
48 | #include <linux/skbuff.h> | |
49 | #include <linux/rtnetlink.h> | |
50 | #include <linux/highmem.h> | |
51 | #include <linux/scatterlist.h> | |
52 | ||
53 | #include <crypto/aes.h> | |
54 | #include <crypto/algapi.h> | |
55 | #include <crypto/hash.h> | |
8f6acb7f | 56 | #include <crypto/gcm.h> |
324429d7 | 57 | #include <crypto/sha.h> |
2debd332 | 58 | #include <crypto/authenc.h> |
b8fd1f41 HJ |
59 | #include <crypto/ctr.h> |
60 | #include <crypto/gf128mul.h> | |
2debd332 HJ |
61 | #include <crypto/internal/aead.h> |
62 | #include <crypto/null.h> | |
63 | #include <crypto/internal/skcipher.h> | |
64 | #include <crypto/aead.h> | |
65 | #include <crypto/scatterwalk.h> | |
324429d7 HS |
66 | #include <crypto/internal/hash.h> |
67 | ||
68 | #include "t4fw_api.h" | |
69 | #include "t4_msg.h" | |
70 | #include "chcr_core.h" | |
71 | #include "chcr_algo.h" | |
72 | #include "chcr_crypto.h" | |
73 | ||
2f47d580 HJ |
74 | #define IV AES_BLOCK_SIZE |
75 | ||
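/* Work-request space, in bytes, consumed by a source ULPTX SGL and a
 * destination PHYS_DSGL with the indexed number of scatter-gather entries;
 * used below to bound how much payload fits in a single work request.
 */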
8579e076 CIK |
76 | static unsigned int sgl_ent_len[] = { |
77 | 0, 0, 16, 24, 40, 48, 64, 72, 88, | |
78 | 96, 112, 120, 136, 144, 160, 168, 184, | |
79 | 192, 208, 216, 232, 240, 256, 264, 280, | |
80 | 288, 304, 312, 328, 336, 352, 360, 376 | |
81 | }; | |
6dad4e8a | 82 | |
8579e076 CIK |
83 | static unsigned int dsgl_ent_len[] = { |
84 | 0, 32, 32, 48, 48, 64, 64, 80, 80, | |
85 | 112, 112, 128, 128, 144, 144, 160, 160, | |
86 | 192, 192, 208, 208, 224, 224, 240, 240, | |
87 | 272, 272, 288, 288, 304, 304, 320, 320 | |
88 | }; | |
6dad4e8a AG |
89 | |
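/* AES key-schedule round constants (Rcon), each held in the most significant
 * byte of a 32-bit word, consumed by get_aes_decrypt_key() below.
 */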
90 | static u32 round_constant[11] = { | |
91 | 0x01000000, 0x02000000, 0x04000000, 0x08000000, | |
92 | 0x10000000, 0x20000000, 0x40000000, 0x80000000, | |
93 | 0x1B000000, 0x36000000, 0x6C000000 | |
94 | }; | |
95 | ||
96 | static int chcr_handle_cipher_resp(struct ablkcipher_request *req, | |
97 | unsigned char *input, int err); | |
98 | ||
2debd332 HJ |
99 | static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx) |
100 | { | |
101 | return ctx->crypto_ctx->aeadctx; | |
102 | } | |
103 | ||
324429d7 HS |
104 | static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx) |
105 | { | |
106 | return ctx->crypto_ctx->ablkctx; | |
107 | } | |
108 | ||
109 | static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx) | |
110 | { | |
111 | return ctx->crypto_ctx->hmacctx; | |
112 | } | |
113 | ||
2debd332 HJ |
114 | static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx) |
115 | { | |
116 | return gctx->ctx->gcm; | |
117 | } | |
118 | ||
119 | static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx) | |
120 | { | |
121 | return gctx->ctx->authenc; | |
122 | } | |
123 | ||
324429d7 HS |
124 | static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx) |
125 | { | |
126 | return ctx->dev->u_ctx; | |
127 | } | |
128 | ||
129 | static inline int is_ofld_imm(const struct sk_buff *skb) | |
130 | { | |
2f47d580 | 131 | return (skb->len <= SGE_MAX_WR_LEN); |
324429d7 HS |
132 | } |
133 | ||
5110e655 HJ |
134 | static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx) |
135 | { | |
136 | memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr)); | |
137 | } | |
138 | ||
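/* Count the SGL entries needed to cover @reqlen bytes of @sg, ignoring the
 * first @skip bytes and splitting any DMA segment larger than @entlen into
 * multiple entries.
 */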
2f47d580 HJ |
139 | static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen, |
140 | unsigned int entlen, | |
141 | unsigned int skip) | |
2956f36c HJ |
142 | { |
143 | int nents = 0; | |
144 | unsigned int less; | |
2f47d580 | 145 | unsigned int skip_len = 0; |
2956f36c | 146 | |
2f47d580 HJ |
147 | while (sg && skip) { |
148 | if (sg_dma_len(sg) <= skip) { | |
149 | skip -= sg_dma_len(sg); | |
150 | skip_len = 0; | |
151 | sg = sg_next(sg); | |
152 | } else { | |
153 | skip_len = skip; | |
154 | skip = 0; | |
155 | } | |
2956f36c HJ |
156 | } |
157 | ||
2f47d580 HJ |
158 | while (sg && reqlen) { |
159 | less = min(reqlen, sg_dma_len(sg) - skip_len); | |
160 | nents += DIV_ROUND_UP(less, entlen); | |
161 | reqlen -= less; | |
162 | skip_len = 0; | |
163 | sg = sg_next(sg); | |
164 | } | |
2956f36c HJ |
165 | return nents; |
166 | } | |
167 | ||
6dad4e8a | 168 | static inline int get_aead_subtype(struct crypto_aead *aead) |
2f47d580 | 169 | { |
6dad4e8a AG |
170 | struct aead_alg *alg = crypto_aead_alg(aead); |
171 | struct chcr_alg_template *chcr_crypto_alg = | |
172 | container_of(alg, struct chcr_alg_template, alg.aead); | |
173 | return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; | |
2f47d580 | 174 | } |
2f47d580 | 175 | |
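/* Software verification of the AEAD authentication tag: compare the tag
 * computed by the hardware (returned in the CPL_FW6_PLD completion) with the
 * expected tag, which for GCM also travels in the completion payload and for
 * other modes sits at the tail of req->src; set *err to -EBADMSG on mismatch.
 */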
6dad4e8a | 176 | void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) |
2debd332 HJ |
177 | { |
178 | u8 temp[SHA512_DIGEST_SIZE]; | |
179 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
180 | int authsize = crypto_aead_authsize(tfm); | |
181 | struct cpl_fw6_pld *fw6_pld; | |
182 | int cmp = 0; | |
183 | ||
184 | fw6_pld = (struct cpl_fw6_pld *)input; | |
185 | if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) || | |
186 | (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) { | |
d600fc8a | 187 | cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize); |
2debd332 HJ |
188 | } else { |
189 | ||
190 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp, | |
191 | authsize, req->assoclen + | |
192 | req->cryptlen - authsize); | |
d600fc8a | 193 | cmp = crypto_memneq(temp, (fw6_pld + 1), authsize); |
2debd332 HJ |
194 | } |
195 | if (cmp) | |
196 | *err = -EBADMSG; | |
197 | else | |
198 | *err = 0; | |
199 | } | |
200 | ||
6dad4e8a AG |
201 | static inline void chcr_handle_aead_resp(struct aead_request *req, |
202 | unsigned char *input, | |
203 | int err) | |
204 | { | |
205 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
206 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
207 | struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm)); | |
208 | ||
209 | chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op); | |
210 | if (reqctx->b0_dma) | |
211 | dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma, | |
212 | reqctx->b0_len, DMA_BIDIRECTIONAL); | |
213 | if (reqctx->verify == VERIFY_SW) { | |
214 | chcr_verify_tag(req, input, &err); | |
215 | reqctx->verify = VERIFY_HW; | |
216 | } | |
217 | req->base.complete(&req->base, err); | |
218 | } | |
219 | ||
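/* Software AES key expansion that keeps only a rolling window of the last
 * nk schedule words and then writes them out newest first, producing the
 * reversed round key ("rrkey") that generate_copy_rrkey() places in the key
 * context for CBC decryption.
 */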
2f47d580 | 220 | static void get_aes_decrypt_key(unsigned char *dec_key, |
39f91a34 HJ |
221 | const unsigned char *key, |
222 | unsigned int keylength) | |
223 | { | |
224 | u32 temp; | |
225 | u32 w_ring[MAX_NK]; | |
226 | int i, j, k; | |
227 | u8 nr, nk; | |
228 | ||
229 | switch (keylength) { | |
230 | case AES_KEYLENGTH_128BIT: | |
231 | nk = KEYLENGTH_4BYTES; | |
232 | nr = NUMBER_OF_ROUNDS_10; | |
233 | break; | |
234 | case AES_KEYLENGTH_192BIT: | |
235 | nk = KEYLENGTH_6BYTES; | |
236 | nr = NUMBER_OF_ROUNDS_12; | |
237 | break; | |
238 | case AES_KEYLENGTH_256BIT: | |
239 | nk = KEYLENGTH_8BYTES; | |
240 | nr = NUMBER_OF_ROUNDS_14; | |
241 | break; | |
242 | default: | |
243 | return; | |
244 | } | |
245 | for (i = 0; i < nk; i++) | |
246 | w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]); | |
247 | ||
248 | i = 0; | |
249 | temp = w_ring[nk - 1]; | |
250 | while (i + nk < (nr + 1) * 4) { | |
251 | if (!(i % nk)) { | |
252 | /* RotWord(temp) */ | |
253 | temp = (temp << 8) | (temp >> 24); | |
254 | temp = aes_ks_subword(temp); | |
255 | temp ^= round_constant[i / nk]; | |
256 | } else if (nk == 8 && (i % 4 == 0)) { | |
257 | temp = aes_ks_subword(temp); | |
258 | } | |
259 | w_ring[i % nk] ^= temp; | |
260 | temp = w_ring[i % nk]; | |
261 | i++; | |
262 | } | |
263 | i--; | |
264 | for (k = 0, j = i % nk; k < nk; k++) { | |
265 | *((u32 *)dec_key + k) = htonl(w_ring[j]); | |
266 | j--; | |
267 | if (j < 0) | |
268 | j += nk; | |
269 | } | |
270 | } | |
271 | ||
e7922729 | 272 | static struct crypto_shash *chcr_alloc_shash(unsigned int ds) |
324429d7 | 273 | { |
ec1bca94 | 274 | struct crypto_shash *base_hash = ERR_PTR(-EINVAL); |
324429d7 HS |
275 | |
276 | switch (ds) { | |
277 | case SHA1_DIGEST_SIZE: | |
e7922729 | 278 | base_hash = crypto_alloc_shash("sha1", 0, 0); |
324429d7 HS |
279 | break; |
280 | case SHA224_DIGEST_SIZE: | |
e7922729 | 281 | base_hash = crypto_alloc_shash("sha224", 0, 0); |
324429d7 HS |
282 | break; |
283 | case SHA256_DIGEST_SIZE: | |
e7922729 | 284 | base_hash = crypto_alloc_shash("sha256", 0, 0); |
324429d7 HS |
285 | break; |
286 | case SHA384_DIGEST_SIZE: | |
e7922729 | 287 | base_hash = crypto_alloc_shash("sha384", 0, 0); |
324429d7 HS |
288 | break; |
289 | case SHA512_DIGEST_SIZE: | |
e7922729 | 290 | base_hash = crypto_alloc_shash("sha512", 0, 0); |
324429d7 HS |
291 | break; |
292 | } | |
324429d7 | 293 | |
e7922729 | 294 | return base_hash; |
324429d7 HS |
295 | } |
296 | ||
297 | static int chcr_compute_partial_hash(struct shash_desc *desc, | |
298 | char *iopad, char *result_hash, | |
299 | int digest_size) | |
300 | { | |
301 | struct sha1_state sha1_st; | |
302 | struct sha256_state sha256_st; | |
303 | struct sha512_state sha512_st; | |
304 | int error; | |
305 | ||
306 | if (digest_size == SHA1_DIGEST_SIZE) { | |
307 | error = crypto_shash_init(desc) ?: | |
308 | crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?: | |
309 | crypto_shash_export(desc, (void *)&sha1_st); | |
310 | memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE); | |
311 | } else if (digest_size == SHA224_DIGEST_SIZE) { | |
312 | error = crypto_shash_init(desc) ?: | |
313 | crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: | |
314 | crypto_shash_export(desc, (void *)&sha256_st); | |
315 | memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); | |
316 | ||
317 | } else if (digest_size == SHA256_DIGEST_SIZE) { | |
318 | error = crypto_shash_init(desc) ?: | |
319 | crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: | |
320 | crypto_shash_export(desc, (void *)&sha256_st); | |
321 | memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); | |
322 | ||
323 | } else if (digest_size == SHA384_DIGEST_SIZE) { | |
324 | error = crypto_shash_init(desc) ?: | |
325 | crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: | |
326 | crypto_shash_export(desc, (void *)&sha512_st); | |
327 | memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); | |
328 | ||
329 | } else if (digest_size == SHA512_DIGEST_SIZE) { | |
330 | error = crypto_shash_init(desc) ?: | |
331 | crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: | |
332 | crypto_shash_export(desc, (void *)&sha512_st); | |
333 | memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); | |
334 | } else { | |
335 | error = -EINVAL; | |
336 | pr_err("Unknown digest size %d\n", digest_size); | |
337 | } | |
338 | return error; | |
339 | } | |
340 | ||
341 | static void chcr_change_order(char *buf, int ds) | |
342 | { | |
343 | int i; | |
344 | ||
345 | if (ds == SHA512_DIGEST_SIZE) { | |
346 | for (i = 0; i < (ds / sizeof(u64)); i++) | |
347 | *((__be64 *)buf + i) = | |
348 | cpu_to_be64(*((u64 *)buf + i)); | |
349 | } else { | |
350 | for (i = 0; i < (ds / sizeof(u32)); i++) | |
351 | *((__be32 *)buf + i) = | |
352 | cpu_to_be32(*((u32 *)buf + i)); | |
353 | } | |
354 | } | |
355 | ||
356 | static inline int is_hmac(struct crypto_tfm *tfm) | |
357 | { | |
358 | struct crypto_alg *alg = tfm->__crt_alg; | |
359 | struct chcr_alg_template *chcr_crypto_alg = | |
360 | container_of(__crypto_ahash_alg(alg), struct chcr_alg_template, | |
361 | alg.hash); | |
5c86a8ff | 362 | if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC) |
324429d7 HS |
363 | return 1; |
364 | return 0; | |
365 | } | |
366 | ||
2f47d580 HJ |
367 | static inline void dsgl_walk_init(struct dsgl_walk *walk, |
368 | struct cpl_rx_phys_dsgl *dsgl) | |
324429d7 | 369 | { |
2f47d580 HJ |
370 | walk->dsgl = dsgl; |
371 | walk->nents = 0; | |
372 | walk->to = (struct phys_sge_pairs *)(dsgl + 1); | |
373 | } | |
374 | ||
375 | static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid) | |
376 | { | |
377 | struct cpl_rx_phys_dsgl *phys_cpl; | |
378 | ||
379 | phys_cpl = walk->dsgl; | |
324429d7 HS |
380 | |
381 | phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) | |
382 | | CPL_RX_PHYS_DSGL_ISRDMA_V(0)); | |
2f47d580 HJ |
383 | phys_cpl->pcirlxorder_to_noofsgentr = |
384 | htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) | | |
385 | CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) | | |
386 | CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) | | |
387 | CPL_RX_PHYS_DSGL_PCITPHNT_V(0) | | |
388 | CPL_RX_PHYS_DSGL_DCAID_V(0) | | |
389 | CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents)); | |
390 | phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; | |
391 | phys_cpl->rss_hdr_int.qid = htons(qid); | |
392 | phys_cpl->rss_hdr_int.hash_val = 0; | |
393 | } | |
394 | ||
395 | static inline void dsgl_walk_add_page(struct dsgl_walk *walk, | |
396 | size_t size, | |
397 | dma_addr_t *addr) | |
398 | { | |
399 | int j; | |
400 | ||
401 | if (!size) | |
402 | return; | |
403 | j = walk->nents; | |
404 | walk->to->len[j % 8] = htons(size); | |
405 | walk->to->addr[j % 8] = cpu_to_be64(*addr); | |
406 | j++; | |
407 | if ((j % 8) == 0) | |
408 | walk->to++; | |
409 | walk->nents = j; | |
410 | } | |
411 | ||
412 | static void dsgl_walk_add_sg(struct dsgl_walk *walk, | |
413 | struct scatterlist *sg, | |
414 | unsigned int slen, | |
415 | unsigned int skip) | |
416 | { | |
417 | int skip_len = 0; | |
418 | unsigned int left_size = slen, len = 0; | |
419 | unsigned int j = walk->nents; | |
420 | int offset, ent_len; | |
421 | ||
422 | if (!slen) | |
423 | return; | |
424 | while (sg && skip) { | |
425 | if (sg_dma_len(sg) <= skip) { | |
426 | skip -= sg_dma_len(sg); | |
427 | skip_len = 0; | |
428 | sg = sg_next(sg); | |
429 | } else { | |
430 | skip_len = skip; | |
431 | skip = 0; | |
432 | } | |
433 | } | |
434 | ||
2956f36c | 435 | while (left_size && sg) { |
2f47d580 | 436 | len = min_t(u32, left_size, sg_dma_len(sg) - skip_len); |
2956f36c HJ |
437 | offset = 0; |
438 | while (len) { | |
2f47d580 HJ |
439 | ent_len = min_t(u32, len, CHCR_DST_SG_SIZE); |
440 | walk->to->len[j % 8] = htons(ent_len); | |
441 | walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) + | |
442 | offset + skip_len); | |
2956f36c HJ |
443 | offset += ent_len; |
444 | len -= ent_len; | |
445 | j++; | |
446 | if ((j % 8) == 0) | |
2f47d580 | 447 | walk->to++; |
2956f36c | 448 | } |
2f47d580 HJ |
449 | walk->last_sg = sg; |
450 | walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) - | |
451 | skip_len) + skip_len; | |
452 | left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len); | |
453 | skip_len = 0; | |
2956f36c HJ |
454 | sg = sg_next(sg); |
455 | } | |
2f47d580 HJ |
456 | walk->nents = j; |
457 | } | |
458 | ||
459 | static inline void ulptx_walk_init(struct ulptx_walk *walk, | |
460 | struct ulptx_sgl *ulp) | |
461 | { | |
462 | walk->sgl = ulp; | |
463 | walk->nents = 0; | |
464 | walk->pair_idx = 0; | |
465 | walk->pair = ulp->sge; | |
466 | walk->last_sg = NULL; | |
467 | walk->last_sg_len = 0; | |
468 | } | |
469 | ||
470 | static inline void ulptx_walk_end(struct ulptx_walk *walk) | |
471 | { | |
472 | walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | | |
473 | ULPTX_NSGE_V(walk->nents)); | |
474 | } | |
2956f36c | 475 | |
2f47d580 HJ |
476 | |
477 | static inline void ulptx_walk_add_page(struct ulptx_walk *walk, | |
478 | size_t size, | |
479 | dma_addr_t *addr) | |
480 | { | |
481 | if (!size) | |
482 | return; | |
483 | ||
484 | if (walk->nents == 0) { | |
485 | walk->sgl->len0 = cpu_to_be32(size); | |
486 | walk->sgl->addr0 = cpu_to_be64(*addr); | |
487 | } else { | |
488 | walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr); | |
489 | walk->pair->len[walk->pair_idx] = cpu_to_be32(size); | |
490 | walk->pair_idx = !walk->pair_idx; | |
491 | if (!walk->pair_idx) | |
492 | walk->pair++; | |
493 | } | |
494 | walk->nents++; | |
324429d7 HS |
495 | } |
496 | ||
2f47d580 | 497 | static void ulptx_walk_add_sg(struct ulptx_walk *walk, |
adf1ca61 | 498 | struct scatterlist *sg, |
2f47d580 HJ |
499 | unsigned int len, |
500 | unsigned int skip) | |
324429d7 | 501 | { |
2f47d580 HJ |
502 | int small; |
503 | int skip_len = 0; | |
504 | unsigned int sgmin; | |
324429d7 | 505 | |
2f47d580 HJ |
506 | if (!len) |
507 | return; | |
2f47d580 HJ |
508 | while (sg && skip) { |
509 | if (sg_dma_len(sg) <= skip) { | |
510 | skip -= sg_dma_len(sg); | |
511 | skip_len = 0; | |
512 | sg = sg_next(sg); | |
513 | } else { | |
514 | skip_len = skip; | |
515 | skip = 0; | |
516 | } | |
517 | } | |
8daa32b9 HJ |
518 | WARN(!sg, "SG should not be null here\n"); |
519 | if (sg && (walk->nents == 0)) { | |
2f47d580 HJ |
520 | small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len); |
521 | sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE); | |
522 | walk->sgl->len0 = cpu_to_be32(sgmin); | |
523 | walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len); | |
524 | walk->nents++; | |
525 | len -= sgmin; | |
526 | walk->last_sg = sg; | |
527 | walk->last_sg_len = sgmin + skip_len; | |
528 | skip_len += sgmin; | |
529 | if (sg_dma_len(sg) == skip_len) { | |
530 | sg = sg_next(sg); | |
531 | skip_len = 0; | |
532 | } | |
533 | } | |
534 | ||
535 | while (sg && len) { | |
536 | small = min(sg_dma_len(sg) - skip_len, len); | |
537 | sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE); | |
538 | walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin); | |
539 | walk->pair->addr[walk->pair_idx] = | |
540 | cpu_to_be64(sg_dma_address(sg) + skip_len); | |
541 | walk->pair_idx = !walk->pair_idx; | |
542 | walk->nents++; | |
543 | if (!walk->pair_idx) | |
544 | walk->pair++; | |
545 | len -= sgmin; | |
546 | skip_len += sgmin; | |
547 | walk->last_sg = sg; | |
548 | walk->last_sg_len = skip_len; | |
549 | if (sg_dma_len(sg) == skip_len) { | |
550 | sg = sg_next(sg); | |
551 | skip_len = 0; | |
552 | } | |
324429d7 | 553 | } |
324429d7 HS |
554 | } |
555 | ||
556 | static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm) | |
557 | { | |
558 | struct crypto_alg *alg = tfm->__crt_alg; | |
559 | struct chcr_alg_template *chcr_crypto_alg = | |
560 | container_of(alg, struct chcr_alg_template, alg.crypto); | |
561 | ||
562 | return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; | |
563 | } | |
564 | ||
b8fd1f41 HJ |
565 | static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx) |
566 | { | |
567 | struct adapter *adap = netdev2adap(dev); | |
568 | struct sge_uld_txq_info *txq_info = | |
569 | adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; | |
570 | struct sge_uld_txq *txq; | |
571 | int ret = 0; | |
572 | ||
573 | local_bh_disable(); | |
574 | txq = &txq_info->uldtxq[idx]; | |
575 | spin_lock(&txq->sendq.lock); | |
576 | if (txq->full) | |
577 | ret = -1; | |
578 | spin_unlock(&txq->sendq.lock); | |
579 | local_bh_enable(); | |
580 | return ret; | |
581 | } | |
582 | ||
324429d7 HS |
583 | static int generate_copy_rrkey(struct ablk_ctx *ablkctx, |
584 | struct _key_ctx *key_ctx) | |
585 | { | |
586 | if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) { | |
cc1b156d | 587 | memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len); |
324429d7 HS |
588 | } else { |
589 | memcpy(key_ctx->key, | |
590 | ablkctx->key + (ablkctx->enckey_len >> 1), | |
591 | ablkctx->enckey_len >> 1); | |
cc1b156d HJ |
592 | memcpy(key_ctx->key + (ablkctx->enckey_len >> 1), |
593 | ablkctx->rrkey, ablkctx->enckey_len >> 1); | |
324429d7 HS |
594 | } |
595 | return 0; | |
596 | } | |
5110e655 HJ |
597 | |
598 | static int chcr_hash_ent_in_wr(struct scatterlist *src, | |
599 | unsigned int minsg, | |
600 | unsigned int space, | |
601 | unsigned int srcskip) | |
602 | { | |
603 | int srclen = 0; | |
604 | int srcsg = minsg; | |
605 | int soffset = 0, sless; | |
606 | ||
607 | if (sg_dma_len(src) == srcskip) { | |
608 | src = sg_next(src); | |
609 | srcskip = 0; | |
610 | } | |
611 | while (src && space > (sgl_ent_len[srcsg + 1])) { | |
612 | sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip, | |
613 | CHCR_SRC_SG_SIZE); | |
614 | srclen += sless; | |
615 | soffset += sless; | |
616 | srcsg++; | |
617 | if (sg_dma_len(src) == (soffset + srcskip)) { | |
618 | src = sg_next(src); | |
619 | soffset = 0; | |
620 | srcskip = 0; | |
621 | } | |
622 | } | |
623 | return srclen; | |
624 | } | |
625 | ||
b8fd1f41 HJ |
626 | static int chcr_sg_ent_in_wr(struct scatterlist *src, |
627 | struct scatterlist *dst, | |
628 | unsigned int minsg, | |
2f47d580 HJ |
629 | unsigned int space, |
630 | unsigned int srcskip, | |
631 | unsigned int dstskip) | |
b8fd1f41 HJ |
632 | { |
633 | int srclen = 0, dstlen = 0; | |
2f47d580 | 634 | int srcsg = minsg, dstsg = minsg; |
1d693cf6 | 635 | int offset = 0, soffset = 0, less, sless = 0; |
b8fd1f41 | 636 | |
2f47d580 HJ |
637 | if (sg_dma_len(src) == srcskip) { |
638 | src = sg_next(src); | |
639 | srcskip = 0; | |
640 | } | |
2f47d580 HJ |
641 | if (sg_dma_len(dst) == dstskip) { |
642 | dst = sg_next(dst); | |
643 | dstskip = 0; | |
644 | } | |
645 | ||
646 | while (src && dst && | |
b8fd1f41 | 647 | space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) { |
1d693cf6 HJ |
648 | sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset, |
649 | CHCR_SRC_SG_SIZE); | |
650 | srclen += sless; | |
b8fd1f41 | 651 | srcsg++; |
2956f36c | 652 | offset = 0; |
b8fd1f41 HJ |
653 | while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) && |
654 | space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) { | |
655 | if (srclen <= dstlen) | |
656 | break; | |
2f47d580 | 657 | less = min_t(unsigned int, sg_dma_len(dst) - offset - |
db6deea4 | 658 | dstskip, CHCR_DST_SG_SIZE); |
2956f36c HJ |
659 | dstlen += less; |
660 | offset += less; | |
1d693cf6 | 661 | if ((offset + dstskip) == sg_dma_len(dst)) { |
2956f36c HJ |
662 | dst = sg_next(dst); |
663 | offset = 0; | |
664 | } | |
b8fd1f41 | 665 | dstsg++; |
2f47d580 | 666 | dstskip = 0; |
b8fd1f41 | 667 | } |
1d693cf6 HJ |
668 | soffset += sless; |
669 | if ((soffset + srcskip) == sg_dma_len(src)) { | |
670 | src = sg_next(src); | |
671 | srcskip = 0; | |
672 | soffset = 0; | |
673 | } | |
674 | ||
b8fd1f41 | 675 | } |
b8fd1f41 HJ |
676 | return min(srclen, dstlen); |
677 | } | |
678 | ||
679 | static int chcr_cipher_fallback(struct crypto_skcipher *cipher, | |
680 | u32 flags, | |
681 | struct scatterlist *src, | |
682 | struct scatterlist *dst, | |
683 | unsigned int nbytes, | |
684 | u8 *iv, | |
685 | unsigned short op_type) | |
686 | { | |
687 | int err; | |
688 | ||
689 | SKCIPHER_REQUEST_ON_STACK(subreq, cipher); | |
6faa0f57 | 690 | |
b8fd1f41 HJ |
691 | skcipher_request_set_tfm(subreq, cipher); |
692 | skcipher_request_set_callback(subreq, flags, NULL, NULL); | |
693 | skcipher_request_set_crypt(subreq, src, dst, | |
694 | nbytes, iv); | |
695 | ||
696 | err = op_type ? crypto_skcipher_decrypt(subreq) : | |
697 | crypto_skcipher_encrypt(subreq); | |
698 | skcipher_request_zero(subreq); | |
699 | ||
700 | return err; | |
324429d7 | 701 | |
b8fd1f41 | 702 | } |
324429d7 | 703 | static inline void create_wreq(struct chcr_context *ctx, |
358961d1 | 704 | struct chcr_wr *chcr_req, |
2f47d580 HJ |
705 | struct crypto_async_request *req, |
706 | unsigned int imm, | |
570265bf | 707 | int hash_sz, |
2f47d580 | 708 | unsigned int len16, |
2512a624 HJ |
709 | unsigned int sc_len, |
710 | unsigned int lcb) | |
324429d7 HS |
711 | { |
712 | struct uld_ctx *u_ctx = ULD_CTX(ctx); | |
72a56ca9 | 713 | int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx]; |
324429d7 | 714 | |
324429d7 | 715 | |
570265bf | 716 | chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE; |
358961d1 | 717 | chcr_req->wreq.pld_size_hash_size = |
570265bf | 718 | htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz)); |
358961d1 | 719 | chcr_req->wreq.len16_pkd = |
2f47d580 | 720 | htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16))); |
358961d1 HJ |
721 | chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req); |
722 | chcr_req->wreq.rx_chid_to_rx_q_id = | |
8a13449f | 723 | FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid, |
570265bf | 724 | !!lcb, ctx->tx_qidx); |
324429d7 | 725 | |
8a13449f HJ |
726 | chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id, |
727 | qid); | |
2f47d580 HJ |
728 | chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) - |
729 | ((sizeof(chcr_req->wreq)) >> 4))); | |
324429d7 | 730 | |
2f47d580 | 731 | chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm); |
358961d1 | 732 | chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + |
2f47d580 | 733 | sizeof(chcr_req->key_ctx) + sc_len); |
324429d7 HS |
734 | } |
735 | ||
736 | /** | |
737 | * create_cipher_wr - form the WR for cipher operations | |
738 | * @wrparam: work request parameters, carrying the ablkcipher request |
739 | * (@wrparam->req), the ingress qid where the response of this WR |
740 | * should be received (@wrparam->qid), and the number of bytes to |
741 | * process in this WR (@wrparam->bytes). |
742 | */ | |
b8fd1f41 | 743 | static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) |
324429d7 | 744 | { |
b8fd1f41 | 745 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req); |
2f47d580 | 746 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); |
324429d7 | 747 | struct sk_buff *skb = NULL; |
358961d1 | 748 | struct chcr_wr *chcr_req; |
324429d7 | 749 | struct cpl_rx_phys_dsgl *phys_cpl; |
2f47d580 | 750 | struct ulptx_sgl *ulptx; |
b8fd1f41 HJ |
751 | struct chcr_blkcipher_req_ctx *reqctx = |
752 | ablkcipher_request_ctx(wrparam->req); | |
2f47d580 | 753 | unsigned int temp = 0, transhdr_len, dst_size; |
b8fd1f41 | 754 | int error; |
2956f36c | 755 | int nents; |
2f47d580 | 756 | unsigned int kctx_len; |
b8fd1f41 HJ |
757 | gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? |
758 | GFP_KERNEL : GFP_ATOMIC; | |
2f47d580 | 759 | struct adapter *adap = padap(c_ctx(tfm)->dev); |
324429d7 | 760 | |
2f47d580 HJ |
761 | nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE, |
762 | reqctx->dst_ofst); | |
335bcc4a | 763 | dst_size = get_space_for_phys_dsgl(nents); |
125d01ca | 764 | kctx_len = roundup(ablkctx->enckey_len, 16); |
2f47d580 HJ |
765 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); |
766 | nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes, | |
767 | CHCR_SRC_SG_SIZE, reqctx->src_ofst); | |
335bcc4a HJ |
768 | temp = reqctx->imm ? roundup(wrparam->bytes, 16) : |
769 | (sgl_len(nents) * 8); | |
2f47d580 | 770 | transhdr_len += temp; |
125d01ca | 771 | transhdr_len = roundup(transhdr_len, 16); |
2f47d580 | 772 | skb = alloc_skb(SGE_MAX_WR_LEN, flags); |
b8fd1f41 HJ |
773 | if (!skb) { |
774 | error = -ENOMEM; | |
775 | goto err; | |
776 | } | |
de77b966 | 777 | chcr_req = __skb_put_zero(skb, transhdr_len); |
358961d1 | 778 | chcr_req->sec_cpl.op_ivinsrtofst = |
2f47d580 | 779 | FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1); |
358961d1 | 780 | |
2f47d580 | 781 | chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes); |
358961d1 | 782 | chcr_req->sec_cpl.aadstart_cipherstop_hi = |
2f47d580 | 783 | FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0); |
358961d1 HJ |
784 | |
785 | chcr_req->sec_cpl.cipherstop_lo_authinsert = | |
786 | FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0); | |
b8fd1f41 | 787 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0, |
324429d7 | 788 | ablkctx->ciph_mode, |
2f47d580 | 789 | 0, 0, IV >> 1); |
358961d1 | 790 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, |
335bcc4a | 791 | 0, 1, dst_size); |
324429d7 | 792 | |
358961d1 | 793 | chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr; |
b8fd1f41 HJ |
794 | if ((reqctx->op == CHCR_DECRYPT_OP) && |
795 | (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == | |
796 | CRYPTO_ALG_SUB_TYPE_CTR)) && | |
797 | (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == | |
798 | CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) { | |
358961d1 | 799 | generate_copy_rrkey(ablkctx, &chcr_req->key_ctx); |
324429d7 | 800 | } else { |
b8fd1f41 HJ |
801 | if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) || |
802 | (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) { | |
358961d1 HJ |
803 | memcpy(chcr_req->key_ctx.key, ablkctx->key, |
804 | ablkctx->enckey_len); | |
324429d7 | 805 | } else { |
358961d1 | 806 | memcpy(chcr_req->key_ctx.key, ablkctx->key + |
324429d7 HS |
807 | (ablkctx->enckey_len >> 1), |
808 | ablkctx->enckey_len >> 1); | |
358961d1 | 809 | memcpy(chcr_req->key_ctx.key + |
324429d7 HS |
810 | (ablkctx->enckey_len >> 1), |
811 | ablkctx->key, | |
812 | ablkctx->enckey_len >> 1); | |
813 | } | |
814 | } | |
358961d1 | 815 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
2f47d580 HJ |
816 | ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); |
817 | chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam); | |
818 | chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid); | |
324429d7 | 819 | |
ee0863ba | 820 | atomic_inc(&adap->chcr_stats.cipher_rqst); |
335bcc4a HJ |
821 | temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV |
822 | + (reqctx->imm ? (wrparam->bytes) : 0); | |
2f47d580 HJ |
823 | create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0, |
824 | transhdr_len, temp, | |
2512a624 | 825 | ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC); |
5c86a8ff | 826 | reqctx->skb = skb; |
5fb78dba HJ |
827 | |
828 | if (reqctx->op && (ablkctx->ciph_mode == | |
829 | CHCR_SCMD_CIPHER_MODE_AES_CBC)) | |
830 | sg_pcopy_to_buffer(wrparam->req->src, | |
831 | sg_nents(wrparam->req->src), wrparam->req->info, 16, | |
832 | reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE); | |
833 | ||
324429d7 | 834 | return skb; |
b8fd1f41 HJ |
835 | err: |
836 | return ERR_PTR(error); | |
837 | } | |
838 | ||
839 | static inline int chcr_keyctx_ck_size(unsigned int keylen) | |
840 | { | |
841 | int ck_size = 0; | |
842 | ||
843 | if (keylen == AES_KEYSIZE_128) | |
844 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; | |
845 | else if (keylen == AES_KEYSIZE_192) | |
846 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; | |
847 | else if (keylen == AES_KEYSIZE_256) | |
848 | ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; | |
849 | else | |
850 | ck_size = 0; | |
851 | ||
852 | return ck_size; | |
853 | } | |
854 | static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher, | |
855 | const u8 *key, | |
856 | unsigned int keylen) | |
857 | { | |
858 | struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); | |
2f47d580 | 859 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); |
b8fd1f41 HJ |
860 | int err = 0; |
861 | ||
862 | crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK); | |
863 | crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags & | |
864 | CRYPTO_TFM_REQ_MASK); | |
865 | err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen); | |
866 | tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; | |
867 | tfm->crt_flags |= | |
868 | crypto_skcipher_get_flags(ablkctx->sw_cipher) & | |
869 | CRYPTO_TFM_RES_MASK; | |
870 | return err; | |
324429d7 HS |
871 | } |
872 | ||
b8fd1f41 HJ |
873 | static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher, |
874 | const u8 *key, | |
324429d7 HS |
875 | unsigned int keylen) |
876 | { | |
2f47d580 | 877 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); |
324429d7 HS |
878 | unsigned int ck_size, context_size; |
879 | u16 alignment = 0; | |
b8fd1f41 | 880 | int err; |
324429d7 | 881 | |
b8fd1f41 HJ |
882 | err = chcr_cipher_fallback_setkey(cipher, key, keylen); |
883 | if (err) | |
324429d7 | 884 | goto badkey_err; |
b8fd1f41 HJ |
885 | |
886 | ck_size = chcr_keyctx_ck_size(keylen); | |
887 | alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0; | |
cc1b156d HJ |
888 | memcpy(ablkctx->key, key, keylen); |
889 | ablkctx->enckey_len = keylen; | |
890 | get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3); | |
324429d7 HS |
891 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + |
892 | keylen + alignment) >> 4; | |
893 | ||
894 | ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, | |
895 | 0, 0, context_size); | |
896 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC; | |
897 | return 0; | |
898 | badkey_err: | |
b8fd1f41 | 899 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); |
324429d7 | 900 | ablkctx->enckey_len = 0; |
b8fd1f41 HJ |
901 | |
902 | return err; | |
324429d7 HS |
903 | } |
904 | ||
b8fd1f41 HJ |
905 | static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher, |
906 | const u8 *key, | |
907 | unsigned int keylen) | |
324429d7 | 908 | { |
2f47d580 | 909 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); |
b8fd1f41 HJ |
910 | unsigned int ck_size, context_size; |
911 | u16 alignment = 0; | |
912 | int err; | |
913 | ||
914 | err = chcr_cipher_fallback_setkey(cipher, key, keylen); | |
915 | if (err) | |
916 | goto badkey_err; | |
917 | ck_size = chcr_keyctx_ck_size(keylen); | |
918 | alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0; | |
919 | memcpy(ablkctx->key, key, keylen); | |
920 | ablkctx->enckey_len = keylen; | |
921 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + | |
922 | keylen + alignment) >> 4; | |
923 | ||
924 | ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, | |
925 | 0, 0, context_size); | |
926 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR; | |
927 | ||
928 | return 0; | |
929 | badkey_err: | |
930 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | |
931 | ablkctx->enckey_len = 0; | |
932 | ||
933 | return err; | |
934 | } | |
935 | ||
936 | static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher, | |
937 | const u8 *key, | |
938 | unsigned int keylen) | |
939 | { | |
2f47d580 | 940 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); |
b8fd1f41 HJ |
941 | unsigned int ck_size, context_size; |
942 | u16 alignment = 0; | |
943 | int err; | |
944 | ||
945 | if (keylen < CTR_RFC3686_NONCE_SIZE) | |
946 | return -EINVAL; | |
947 | memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE), | |
948 | CTR_RFC3686_NONCE_SIZE); | |
949 | ||
950 | keylen -= CTR_RFC3686_NONCE_SIZE; | |
951 | err = chcr_cipher_fallback_setkey(cipher, key, keylen); | |
952 | if (err) | |
953 | goto badkey_err; | |
954 | ||
955 | ck_size = chcr_keyctx_ck_size(keylen); | |
956 | alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0; | |
957 | memcpy(ablkctx->key, key, keylen); | |
958 | ablkctx->enckey_len = keylen; | |
959 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + | |
960 | keylen + alignment) >> 4; | |
961 | ||
962 | ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, | |
963 | 0, 0, context_size); | |
964 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR; | |
965 | ||
966 | return 0; | |
967 | badkey_err: | |
968 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | |
969 | ablkctx->enckey_len = 0; | |
970 | ||
971 | return err; | |
972 | } | |
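/* Add @add to the 128-bit big-endian counter block in @srciv, storing the
 * result in @dstiv and propagating any carry from the low word upwards.
 */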
973 | static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add) | |
974 | { | |
975 | unsigned int size = AES_BLOCK_SIZE; | |
976 | __be32 *b = (__be32 *)(dstiv + size); | |
977 | u32 c, prev; | |
978 | ||
979 | memcpy(dstiv, srciv, AES_BLOCK_SIZE); | |
980 | for (; size >= 4; size -= 4) { | |
981 | prev = be32_to_cpu(*--b); | |
982 | c = prev + add; | |
983 | *b = cpu_to_be32(c); | |
984 | if (prev < c) | |
985 | break; | |
986 | add = 1; | |
987 | } | |
988 | ||
989 | } | |
990 | ||
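/* Clamp @bytes so that the 32-bit big-endian counter in the last four bytes
 * of @iv cannot wrap within a single request.
 */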
991 | static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes) | |
992 | { | |
993 | __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE); | |
994 | u64 c; | |
995 | u32 temp = be32_to_cpu(*--b); | |
996 | ||
997 | temp = ~temp; | |
998 | c = (u64)temp + 1; // Number of blocks that can be processed without overflow |
999 | if ((bytes / AES_BLOCK_SIZE) > c) | |
1000 | bytes = c * AES_BLOCK_SIZE; | |
1001 | return bytes; | |
1002 | } | |
1003 | ||
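/* Recompute the XTS tweak after the blocks already sent: encrypt the original
 * IV with the second half of the key, multiply by x in GF(2^128) once per
 * processed AES block (x^8 steps for groups of eight), and for non-final
 * chunks decrypt the result back with the same key.
 */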
209897d5 HJ |
1004 | static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv, |
1005 | u32 isfinal) | |
b8fd1f41 HJ |
1006 | { |
1007 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | |
2f47d580 | 1008 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); |
b8fd1f41 HJ |
1009 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); |
1010 | struct crypto_cipher *cipher; | |
1011 | int ret, i; | |
1012 | u8 *key; | |
1013 | unsigned int keylen; | |
de1a00ac HJ |
1014 | int round = reqctx->last_req_len / AES_BLOCK_SIZE; |
1015 | int round8 = round / 8; | |
b8fd1f41 | 1016 | |
d3f1d2f7 | 1017 | cipher = ablkctx->aes_generic; |
de1a00ac | 1018 | memcpy(iv, reqctx->iv, AES_BLOCK_SIZE); |
b8fd1f41 | 1019 | |
b8fd1f41 HJ |
1020 | keylen = ablkctx->enckey_len / 2; |
1021 | key = ablkctx->key + keylen; | |
1022 | ret = crypto_cipher_setkey(cipher, key, keylen); | |
1023 | if (ret) | |
d3f1d2f7 | 1024 | goto out; |
335bcc4a | 1025 | crypto_cipher_encrypt_one(cipher, iv, iv); |
de1a00ac HJ |
1026 | for (i = 0; i < round8; i++) |
1027 | gf128mul_x8_ble((le128 *)iv, (le128 *)iv); | |
1028 | ||
1029 | for (i = 0; i < (round % 8); i++) | |
b8fd1f41 HJ |
1030 | gf128mul_x_ble((le128 *)iv, (le128 *)iv); |
1031 | ||
209897d5 HJ |
1032 | if (!isfinal) |
1033 | crypto_cipher_decrypt_one(cipher, iv, iv); | |
b8fd1f41 HJ |
1034 | out: |
1035 | return ret; | |
1036 | } | |
1037 | ||
1038 | static int chcr_update_cipher_iv(struct ablkcipher_request *req, | |
1039 | struct cpl_fw6_pld *fw6_pld, u8 *iv) | |
1040 | { | |
1041 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | |
1042 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | |
1043 | int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)); | |
ab677ff4 | 1044 | int ret = 0; |
324429d7 | 1045 | |
b8fd1f41 HJ |
1046 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) |
1047 | ctr_add_iv(iv, req->info, (reqctx->processed / | |
1048 | AES_BLOCK_SIZE)); | |
1049 | else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) | |
1050 | *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + | |
1051 | CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed / | |
1052 | AES_BLOCK_SIZE) + 1); | |
1053 | else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) | |
209897d5 | 1054 | ret = chcr_update_tweak(req, iv, 0); |
b8fd1f41 HJ |
1055 | else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { |
1056 | if (reqctx->op) | |
5fb78dba HJ |
1057 | /* Updated before sending the last WR */ |
1058 | memcpy(iv, req->info, AES_BLOCK_SIZE); | |
b8fd1f41 HJ |
1059 | else |
1060 | memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE); | |
1061 | } | |
1062 | ||
324429d7 | 1063 | return ret; |
b8fd1f41 | 1064 | |
324429d7 HS |
1065 | } |
1066 | ||
b8fd1f41 HJ |
1067 | /* We need a separate function for the final IV because in RFC 3686 the |
1068 | * initial counter starts from 1 and the IV buffer is only 8 bytes, which |
1069 | * remains constant across subsequent update requests. |
1070 | */ | |
1071 | ||
1072 | static int chcr_final_cipher_iv(struct ablkcipher_request *req, | |
1073 | struct cpl_fw6_pld *fw6_pld, u8 *iv) | |
1074 | { | |
1075 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | |
1076 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | |
1077 | int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)); | |
1078 | int ret = 0; | |
1079 | ||
1080 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) | |
1081 | ctr_add_iv(iv, req->info, (reqctx->processed / | |
1082 | AES_BLOCK_SIZE)); | |
1083 | else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) | |
209897d5 | 1084 | ret = chcr_update_tweak(req, iv, 1); |
b8fd1f41 | 1085 | else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { |
5fb78dba HJ |
1086 | /*Already updated for Decrypt*/ |
1087 | if (!reqctx->op) | |
b8fd1f41 HJ |
1088 | memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE); |
1089 | ||
1090 | } | |
1091 | return ret; | |
1092 | ||
1093 | } | |
1094 | ||
b8fd1f41 HJ |
1095 | static int chcr_handle_cipher_resp(struct ablkcipher_request *req, |
1096 | unsigned char *input, int err) | |
324429d7 HS |
1097 | { |
1098 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | |
2f47d580 HJ |
1099 | struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); |
1100 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); | |
324429d7 | 1101 | struct sk_buff *skb; |
b8fd1f41 HJ |
1102 | struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input; |
1103 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | |
1104 | struct cipher_wr_param wrparam; | |
1105 | int bytes; | |
1106 | ||
b8fd1f41 | 1107 | if (err) |
2f47d580 | 1108 | goto unmap; |
b8fd1f41 | 1109 | if (req->nbytes == reqctx->processed) { |
2f47d580 HJ |
1110 | chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, |
1111 | req); | |
b8fd1f41 HJ |
1112 | err = chcr_final_cipher_iv(req, fw6_pld, req->info); |
1113 | goto complete; | |
1114 | } | |
1115 | ||
2f47d580 | 1116 | if (!reqctx->imm) { |
335bcc4a | 1117 | bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0, |
5110e655 | 1118 | CIP_SPACE_LEFT(ablkctx->enckey_len), |
2f47d580 | 1119 | reqctx->src_ofst, reqctx->dst_ofst); |
db6deea4 HJ |
1120 | if ((bytes + reqctx->processed) >= req->nbytes) |
1121 | bytes = req->nbytes - reqctx->processed; | |
1122 | else | |
125d01ca | 1123 | bytes = rounddown(bytes, 16); |
2f47d580 HJ |
1124 | } else { |
1125 | /* CTR mode counter overflow */ |
1126 | bytes = req->nbytes - reqctx->processed; | |
1127 | } | |
b8fd1f41 HJ |
1128 | err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv); |
1129 | if (err) | |
2f47d580 | 1130 | goto unmap; |
b8fd1f41 HJ |
1131 | |
1132 | if (unlikely(bytes == 0)) { | |
2f47d580 HJ |
1133 | chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, |
1134 | req); | |
b8fd1f41 HJ |
1135 | err = chcr_cipher_fallback(ablkctx->sw_cipher, |
1136 | req->base.flags, | |
2f47d580 HJ |
1137 | req->src, |
1138 | req->dst, | |
1139 | req->nbytes, | |
1140 | req->info, | |
b8fd1f41 HJ |
1141 | reqctx->op); |
1142 | goto complete; | |
1143 | } | |
1144 | ||
1145 | if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == | |
1146 | CRYPTO_ALG_SUB_TYPE_CTR) | |
1147 | bytes = adjust_ctr_overflow(reqctx->iv, bytes); | |
2f47d580 | 1148 | wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx]; |
b8fd1f41 HJ |
1149 | wrparam.req = req; |
1150 | wrparam.bytes = bytes; | |
1151 | skb = create_cipher_wr(&wrparam); | |
1152 | if (IS_ERR(skb)) { | |
1153 | pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); | |
1154 | err = PTR_ERR(skb); | |
2f47d580 | 1155 | goto unmap; |
b8fd1f41 HJ |
1156 | } |
1157 | skb->dev = u_ctx->lldi.ports[0]; | |
2f47d580 | 1158 | set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx); |
b8fd1f41 | 1159 | chcr_send_wr(skb); |
2f47d580 HJ |
1160 | reqctx->last_req_len = bytes; |
1161 | reqctx->processed += bytes; | |
b8fd1f41 | 1162 | return 0; |
2f47d580 HJ |
1163 | unmap: |
1164 | chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); | |
b8fd1f41 HJ |
1165 | complete: |
1166 | req->base.complete(&req->base, err); | |
1167 | return err; | |
1168 | } | |
1169 | ||
1170 | static int process_cipher(struct ablkcipher_request *req, | |
1171 | unsigned short qid, | |
1172 | struct sk_buff **skb, | |
1173 | unsigned short op_type) | |
1174 | { | |
1175 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | |
1176 | unsigned int ivsize = crypto_ablkcipher_ivsize(tfm); | |
1177 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | |
2f47d580 | 1178 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); |
b8fd1f41 | 1179 | struct cipher_wr_param wrparam; |
2956f36c | 1180 | int bytes, err = -EINVAL; |
b8fd1f41 | 1181 | |
b8fd1f41 HJ |
1182 | reqctx->processed = 0; |
1183 | if (!req->info) | |
1184 | goto error; | |
1185 | if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) || | |
1186 | (req->nbytes == 0) || | |
1187 | (req->nbytes % crypto_ablkcipher_blocksize(tfm))) { | |
1188 | pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n", | |
1189 | ablkctx->enckey_len, req->nbytes, ivsize); | |
1190 | goto error; | |
1191 | } | |
2f47d580 HJ |
1192 | chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); |
1193 | if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) + | |
1194 | AES_MIN_KEY_SIZE + | |
1195 | sizeof(struct cpl_rx_phys_dsgl) + | |
1196 | /*Min dsgl size*/ | |
1197 | 32))) { | |
1198 | /* Can be sent as Imm*/ | |
1199 | unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len; | |
1200 | ||
1201 | dnents = sg_nents_xlen(req->dst, req->nbytes, | |
1202 | CHCR_DST_SG_SIZE, 0); | |
2f47d580 | 1203 | phys_dsgl = get_space_for_phys_dsgl(dnents); |
125d01ca | 1204 | kctx_len = roundup(ablkctx->enckey_len, 16); |
2f47d580 HJ |
1205 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); |
1206 | reqctx->imm = (transhdr_len + IV + req->nbytes) <= | |
1207 | SGE_MAX_WR_LEN; | |
1208 | bytes = IV + req->nbytes; | |
1209 | ||
1210 | } else { | |
1211 | reqctx->imm = 0; | |
1212 | } | |
1213 | ||
1214 | if (!reqctx->imm) { | |
335bcc4a | 1215 | bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0, |
5110e655 | 1216 | CIP_SPACE_LEFT(ablkctx->enckey_len), |
2f47d580 | 1217 | 0, 0); |
db6deea4 HJ |
1218 | if ((bytes + reqctx->processed) >= req->nbytes) |
1219 | bytes = req->nbytes - reqctx->processed; | |
1220 | else | |
125d01ca | 1221 | bytes = rounddown(bytes, 16); |
2f47d580 | 1222 | } else { |
b8fd1f41 | 1223 | bytes = req->nbytes; |
2f47d580 | 1224 | } |
b8fd1f41 | 1225 | if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == |
db6deea4 | 1226 | CRYPTO_ALG_SUB_TYPE_CTR) { |
b8fd1f41 HJ |
1227 | bytes = adjust_ctr_overflow(req->info, bytes); |
1228 | } | |
1229 | if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) == | |
1230 | CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) { | |
1231 | memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE); | |
1232 | memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info, | |
1233 | CTR_RFC3686_IV_SIZE); | |
1234 | ||
1235 | /* initialize counter portion of counter block */ | |
1236 | *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + | |
1237 | CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); | |
1238 | ||
1239 | } else { | |
1240 | ||
2f47d580 | 1241 | memcpy(reqctx->iv, req->info, IV); |
b8fd1f41 HJ |
1242 | } |
1243 | if (unlikely(bytes == 0)) { | |
2f47d580 HJ |
1244 | chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, |
1245 | req); | |
b8fd1f41 HJ |
1246 | err = chcr_cipher_fallback(ablkctx->sw_cipher, |
1247 | req->base.flags, | |
1248 | req->src, | |
1249 | req->dst, | |
1250 | req->nbytes, | |
7ffb9118 | 1251 | reqctx->iv, |
b8fd1f41 HJ |
1252 | op_type); |
1253 | goto error; | |
1254 | } | |
b8fd1f41 | 1255 | reqctx->op = op_type; |
2f47d580 HJ |
1256 | reqctx->srcsg = req->src; |
1257 | reqctx->dstsg = req->dst; | |
1258 | reqctx->src_ofst = 0; | |
1259 | reqctx->dst_ofst = 0; | |
b8fd1f41 HJ |
1260 | wrparam.qid = qid; |
1261 | wrparam.req = req; | |
1262 | wrparam.bytes = bytes; | |
1263 | *skb = create_cipher_wr(&wrparam); | |
1264 | if (IS_ERR(*skb)) { | |
1265 | err = PTR_ERR(*skb); | |
2f47d580 | 1266 | goto unmap; |
b8fd1f41 | 1267 | } |
2f47d580 HJ |
1268 | reqctx->processed = bytes; |
1269 | reqctx->last_req_len = bytes; | |
b8fd1f41 HJ |
1270 | |
1271 | return 0; | |
2f47d580 HJ |
1272 | unmap: |
1273 | chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); | |
b8fd1f41 HJ |
1274 | error: |
1275 | return err; | |
1276 | } | |
1277 | ||
1278 | static int chcr_aes_encrypt(struct ablkcipher_request *req) | |
1279 | { | |
1280 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | |
b8fd1f41 | 1281 | struct sk_buff *skb = NULL; |
6faa0f57 | 1282 | int err, isfull = 0; |
2f47d580 | 1283 | struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); |
324429d7 HS |
1284 | |
1285 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | |
2f47d580 | 1286 | c_ctx(tfm)->tx_qidx))) { |
6faa0f57 | 1287 | isfull = 1; |
324429d7 | 1288 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
6faa0f57 | 1289 | return -ENOSPC; |
324429d7 HS |
1290 | } |
1291 | ||
2f47d580 HJ |
1292 | err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx], |
1293 | &skb, CHCR_ENCRYPT_OP); | |
b8fd1f41 HJ |
1294 | if (err || !skb) |
1295 | return err; | |
324429d7 | 1296 | skb->dev = u_ctx->lldi.ports[0]; |
2f47d580 | 1297 | set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx); |
324429d7 | 1298 | chcr_send_wr(skb); |
6faa0f57 | 1299 | return isfull ? -EBUSY : -EINPROGRESS; |
324429d7 HS |
1300 | } |
1301 | ||
1302 | static int chcr_aes_decrypt(struct ablkcipher_request *req) | |
1303 | { | |
1304 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | |
2f47d580 | 1305 | struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); |
b8fd1f41 | 1306 | struct sk_buff *skb = NULL; |
6faa0f57 | 1307 | int err, isfull = 0; |
324429d7 HS |
1308 | |
1309 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | |
2f47d580 | 1310 | c_ctx(tfm)->tx_qidx))) { |
6faa0f57 | 1311 | isfull = 1; |
324429d7 | 1312 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
6faa0f57 | 1313 | return -ENOSPC; |
324429d7 HS |
1314 | } |
1315 | ||
2f47d580 HJ |
1316 | err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx], |
1317 | &skb, CHCR_DECRYPT_OP); | |
b8fd1f41 HJ |
1318 | if (err || !skb) |
1319 | return err; | |
324429d7 | 1320 | skb->dev = u_ctx->lldi.ports[0]; |
2f47d580 | 1321 | set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx); |
324429d7 | 1322 | chcr_send_wr(skb); |
6faa0f57 | 1323 | return isfull ? -EBUSY : -EINPROGRESS; |
324429d7 HS |
1324 | } |
1325 | ||
1326 | static int chcr_device_init(struct chcr_context *ctx) | |
1327 | { | |
14c19b17 | 1328 | struct uld_ctx *u_ctx = NULL; |
72a56ca9 | 1329 | struct adapter *adap; |
324429d7 | 1330 | unsigned int id; |
72a56ca9 | 1331 | int txq_perchan, txq_idx, ntxq; |
324429d7 HS |
1332 | int err = 0, rxq_perchan, rxq_idx; |
1333 | ||
1334 | id = smp_processor_id(); | |
1335 | if (!ctx->dev) { | |
14c19b17 HJ |
1336 | u_ctx = assign_chcr_device(); |
1337 | if (!u_ctx) { | |
324429d7 HS |
1338 | pr_err("chcr device assignment fails\n"); |
1339 | goto out; | |
1340 | } | |
14c19b17 | 1341 | ctx->dev = u_ctx->dev; |
72a56ca9 HJ |
1342 | adap = padap(ctx->dev); |
1343 | ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq, | |
1344 | adap->vres.ncrypto_fc); | |
324429d7 | 1345 | rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan; |
72a56ca9 | 1346 | txq_perchan = ntxq / u_ctx->lldi.nchan; |
324429d7 HS |
1347 | rxq_idx = ctx->dev->tx_channel_id * rxq_perchan; |
1348 | rxq_idx += id % rxq_perchan; | |
72a56ca9 HJ |
1349 | txq_idx = ctx->dev->tx_channel_id * txq_perchan; |
1350 | txq_idx += id % txq_perchan; | |
324429d7 | 1351 | spin_lock(&ctx->dev->lock_chcr_dev); |
72a56ca9 HJ |
1352 | ctx->rx_qidx = rxq_idx; |
1353 | ctx->tx_qidx = txq_idx; | |
ab677ff4 | 1354 | ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id; |
8a13449f | 1355 | ctx->dev->rx_channel_id = 0; |
324429d7 HS |
1356 | spin_unlock(&ctx->dev->lock_chcr_dev); |
1357 | } | |
1358 | out: | |
1359 | return err; | |
1360 | } | |
1361 | ||
1362 | static int chcr_cra_init(struct crypto_tfm *tfm) | |
1363 | { | |
b8fd1f41 HJ |
1364 | struct crypto_alg *alg = tfm->__crt_alg; |
1365 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | |
1366 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | |
1367 | ||
1368 | ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0, | |
1369 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); | |
1370 | if (IS_ERR(ablkctx->sw_cipher)) { | |
1371 | pr_err("failed to allocate fallback for %s\n", alg->cra_name); | |
1372 | return PTR_ERR(ablkctx->sw_cipher); | |
1373 | } | |
d3f1d2f7 HJ |
1374 | |
1375 | if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) { | |
1376 | /* Software AES cipher used to update the XTS tweak */ |
1377 | ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0); | |
1378 | if (IS_ERR(ablkctx->aes_generic)) { | |
1379 | pr_err("failed to allocate aes cipher for tweak\n"); | |
1380 | return PTR_ERR(ablkctx->aes_generic); | |
1381 | } | |
1382 | } else | |
1383 | ablkctx->aes_generic = NULL; | |
1384 | ||
324429d7 HS |
1385 | tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); |
1386 | return chcr_device_init(crypto_tfm_ctx(tfm)); | |
1387 | } | |
1388 | ||
b8fd1f41 HJ |
1389 | static int chcr_rfc3686_init(struct crypto_tfm *tfm) |
1390 | { | |
1391 | struct crypto_alg *alg = tfm->__crt_alg; | |
1392 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | |
1393 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | |
1394 | ||
1395 | /* RFC 3686 initialises the IV counter value to 1, so rfc3686(ctr(aes)) |
1396 | * cannot be used as the fallback in chcr_handle_cipher_resp() |
1397 | */ | |
1398 | ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0, | |
1399 | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); | |
1400 | if (IS_ERR(ablkctx->sw_cipher)) { | |
1401 | pr_err("failed to allocate fallback for %s\n", alg->cra_name); | |
1402 | return PTR_ERR(ablkctx->sw_cipher); | |
1403 | } | |
1404 | tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); | |
1405 | return chcr_device_init(crypto_tfm_ctx(tfm)); | |
1406 | } | |
1407 | ||
1408 | ||
1409 | static void chcr_cra_exit(struct crypto_tfm *tfm) | |
1410 | { | |
1411 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | |
1412 | struct ablk_ctx *ablkctx = ABLK_CTX(ctx); | |
1413 | ||
1414 | crypto_free_skcipher(ablkctx->sw_cipher); | |
d3f1d2f7 HJ |
1415 | if (ablkctx->aes_generic) |
1416 | crypto_free_cipher(ablkctx->aes_generic); | |
b8fd1f41 HJ |
1417 | } |
1418 | ||
324429d7 HS |
1419 | static int get_alg_config(struct algo_param *params, |
1420 | unsigned int auth_size) | |
1421 | { | |
1422 | switch (auth_size) { | |
1423 | case SHA1_DIGEST_SIZE: | |
1424 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160; | |
1425 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1; | |
1426 | params->result_size = SHA1_DIGEST_SIZE; | |
1427 | break; | |
1428 | case SHA224_DIGEST_SIZE: | |
1429 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; | |
1430 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224; | |
1431 | params->result_size = SHA256_DIGEST_SIZE; | |
1432 | break; | |
1433 | case SHA256_DIGEST_SIZE: | |
1434 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; | |
1435 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256; | |
1436 | params->result_size = SHA256_DIGEST_SIZE; | |
1437 | break; | |
1438 | case SHA384_DIGEST_SIZE: | |
1439 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; | |
1440 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384; | |
1441 | params->result_size = SHA512_DIGEST_SIZE; | |
1442 | break; | |
1443 | case SHA512_DIGEST_SIZE: | |
1444 | params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; | |
1445 | params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512; | |
1446 | params->result_size = SHA512_DIGEST_SIZE; | |
1447 | break; | |
1448 | default: | |
1449 | pr_err("chcr : ERROR, unsupported digest size\n"); | |
1450 | return -EINVAL; | |
1451 | } | |
1452 | return 0; | |
1453 | } | |
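/*
 * Note: for SHA-224 and SHA-384, result_size above is the size of the
 * SHA-256/SHA-512 internal state the hardware carries between work
 * requests, not the truncated digest length reported to the caller.
 */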
1454 | ||
e7922729 | 1455 | static inline void chcr_free_shash(struct crypto_shash *base_hash) |
324429d7 | 1456 | { |
e7922729 | 1457 | crypto_free_shash(base_hash); |
324429d7 HS |
1458 | } |
1459 | ||
1460 | /** | |
358961d1 | 1461 | * create_hash_wr - Create hash work request |
324429d7 HS |
1462 | * @req: hash request base | 
1463 | */ | |
358961d1 | 1464 | static struct sk_buff *create_hash_wr(struct ahash_request *req, |
2debd332 | 1465 | struct hash_wr_param *param) |
324429d7 HS |
1466 | { |
1467 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
1468 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
2f47d580 | 1469 | struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm)); |
324429d7 | 1470 | struct sk_buff *skb = NULL; |
2f47d580 | 1471 | struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm)); |
358961d1 | 1472 | struct chcr_wr *chcr_req; |
2f47d580 | 1473 | struct ulptx_sgl *ulptx; |
5110e655 HJ |
1474 | unsigned int nents = 0, transhdr_len; |
1475 | unsigned int temp = 0; | |
358961d1 HJ |
1476 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : |
1477 | GFP_ATOMIC; | |
2f47d580 HJ |
1478 | struct adapter *adap = padap(h_ctx(tfm)->dev); |
1479 | int error = 0; | |
324429d7 | 1480 | |
5110e655 HJ |
1481 | transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len); |
1482 | req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len + | |
1483 | param->sg_len) <= SGE_MAX_WR_LEN; | |
1484 | nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len, | |
1485 | CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst); | |
2f47d580 | 1486 | nents += param->bfr_len ? 1 : 0; |
5110e655 HJ |
1487 | transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len + |
1488 | param->sg_len, 16) : (sgl_len(nents) * 8); | |
125d01ca | 1489 | transhdr_len = roundup(transhdr_len, 16); |
2f47d580 | 1490 | |
5110e655 | 1491 | skb = alloc_skb(transhdr_len, flags); |
324429d7 | 1492 | if (!skb) |
2f47d580 | 1493 | return ERR_PTR(-ENOMEM); |
de77b966 | 1494 | chcr_req = __skb_put_zero(skb, transhdr_len); |
324429d7 | 1495 | |
358961d1 | 1496 | chcr_req->sec_cpl.op_ivinsrtofst = |
2f47d580 | 1497 | FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0); |
358961d1 | 1498 | chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len); |
324429d7 | 1499 | |
358961d1 | 1500 | chcr_req->sec_cpl.aadstart_cipherstop_hi = |
324429d7 | 1501 | FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0); |
358961d1 | 1502 | chcr_req->sec_cpl.cipherstop_lo_authinsert = |
324429d7 | 1503 | FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0); |
358961d1 | 1504 | chcr_req->sec_cpl.seqno_numivs = |
324429d7 | 1505 | FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode, |
358961d1 | 1506 | param->opad_needed, 0); |
324429d7 | 1507 | |
358961d1 | 1508 | chcr_req->sec_cpl.ivgen_hdrlen = |
324429d7 HS |
1509 | FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0); |
1510 | ||
358961d1 HJ |
1511 | memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash, |
1512 | param->alg_prm.result_size); | |
324429d7 HS |
1513 | |
1514 | if (param->opad_needed) | |
358961d1 HJ |
1515 | memcpy(chcr_req->key_ctx.key + |
1516 | ((param->alg_prm.result_size <= 32) ? 32 : | |
1517 | CHCR_HASH_MAX_DIGEST_SIZE), | |
324429d7 HS |
1518 | hmacctx->opad, param->alg_prm.result_size); |
1519 | ||
358961d1 | 1520 | chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY, |
324429d7 HS |
1521 | param->alg_prm.mk_size, 0, |
1522 | param->opad_needed, | |
5110e655 | 1523 | ((param->kctx_len + |
358961d1 HJ |
1524 | sizeof(chcr_req->key_ctx)) >> 4)); |
1525 | chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1); | |
5110e655 | 1526 | ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len + |
2f47d580 HJ |
1527 | DUMMY_BYTES); |
1528 | if (param->bfr_len != 0) { | |
5110e655 HJ |
1529 | req_ctx->hctx_wr.dma_addr = |
1530 | dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr, | |
1531 | param->bfr_len, DMA_TO_DEVICE); | |
2f47d580 | 1532 | if (dma_mapping_error(&u_ctx->lldi.pdev->dev, |
5110e655 | 1533 | req_ctx->hctx_wr.dma_addr)) { | 
2f47d580 HJ |
1534 | error = -ENOMEM; |
1535 | goto err; | |
1536 | } | |
5110e655 | 1537 | req_ctx->hctx_wr.dma_len = param->bfr_len; |
2f47d580 | 1538 | } else { |
5110e655 | 1539 | req_ctx->hctx_wr.dma_addr = 0; |
2f47d580 HJ |
1540 | } |
1541 | chcr_add_hash_src_ent(req, ulptx, param); | |
1542 | /* Request up to the max WR size */ | 
5110e655 HJ |
1543 | temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ? |
1544 | (param->sg_len + param->bfr_len) : 0); | |
ee0863ba | 1545 | atomic_inc(&adap->chcr_stats.digest_rqst); |
5110e655 HJ |
1546 | create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm, |
1547 | param->hash_size, transhdr_len, | |
2f47d580 | 1548 | temp, 0); |
5110e655 | 1549 | req_ctx->hctx_wr.skb = skb; |
324429d7 | 1550 | return skb; |
2f47d580 HJ |
1551 | err: |
1552 | kfree_skb(skb); | |
1553 | return ERR_PTR(error); | |
324429d7 HS |
1554 | } |
1555 | ||
1556 | static int chcr_ahash_update(struct ahash_request *req) | |
1557 | { | |
1558 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
1559 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); | |
324429d7 HS |
1560 | struct uld_ctx *u_ctx = NULL; |
1561 | struct sk_buff *skb; | |
1562 | u8 remainder = 0, bs; | |
1563 | unsigned int nbytes = req->nbytes; | |
1564 | struct hash_wr_param params; | |
6faa0f57 | 1565 | int error, isfull = 0; |
324429d7 HS |
1566 | |
1567 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | |
2f47d580 | 1568 | u_ctx = ULD_CTX(h_ctx(rtfm)); |
324429d7 | 1569 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
2f47d580 | 1570 | h_ctx(rtfm)->tx_qidx))) { |
6faa0f57 | 1571 | isfull = 1; |
324429d7 | 1572 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
6faa0f57 | 1573 | return -ENOSPC; |
324429d7 HS |
1574 | } |
1575 | ||
44fce12a HJ |
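/* Buffer sub-block-size input in reqbfr; only whole blocks are sent
 * to the hardware, and the remainder is carried into the next call.
 */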
1576 | if (nbytes + req_ctx->reqlen >= bs) { |
1577 | remainder = (nbytes + req_ctx->reqlen) % bs; | |
1578 | nbytes = nbytes + req_ctx->reqlen - remainder; | |
324429d7 | 1579 | } else { |
44fce12a HJ |
1580 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr |
1581 | + req_ctx->reqlen, nbytes, 0); | |
1582 | req_ctx->reqlen += nbytes; | |
324429d7 HS |
1583 | return 0; |
1584 | } | |
5110e655 | 1585 | chcr_init_hctx_per_wr(req_ctx); |
2f47d580 HJ |
1586 | error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); |
1587 | if (error) | |
1588 | return -ENOMEM; | |
5110e655 HJ |
1589 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); |
1590 | params.kctx_len = roundup(params.alg_prm.result_size, 16); | |
1591 | params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen, | |
1592 | HASH_SPACE_LEFT(params.kctx_len), 0); | |
1593 | if (params.sg_len > req->nbytes) | |
1594 | params.sg_len = req->nbytes; | |
1595 | params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) - | |
1596 | req_ctx->reqlen; | |
324429d7 HS |
1597 | params.opad_needed = 0; |
1598 | params.more = 1; | |
1599 | params.last = 0; | |
44fce12a | 1600 | params.bfr_len = req_ctx->reqlen; |
324429d7 | 1601 | params.scmd1 = 0; |
5110e655 HJ |
1602 | req_ctx->hctx_wr.srcsg = req->src; |
1603 | ||
1604 | params.hash_size = params.alg_prm.result_size; | |
324429d7 | 1605 | req_ctx->data_len += params.sg_len + params.bfr_len; |
358961d1 | 1606 | skb = create_hash_wr(req, ¶ms); |
2f47d580 HJ |
1607 | if (IS_ERR(skb)) { |
1608 | error = PTR_ERR(skb); | |
1609 | goto unmap; | |
1610 | } | |
324429d7 | 1611 | |
5110e655 | 1612 | req_ctx->hctx_wr.processed += params.sg_len; |
44fce12a | 1613 | if (remainder) { |
44fce12a | 1614 | /* Swap buffers */ |
abfa2b37 | 1615 | swap(req_ctx->reqbfr, req_ctx->skbfr); |
324429d7 | 1616 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), |
44fce12a | 1617 | req_ctx->reqbfr, remainder, req->nbytes - |
324429d7 | 1618 | remainder); |
44fce12a HJ |
1619 | } |
1620 | req_ctx->reqlen = remainder; | |
324429d7 | 1621 | skb->dev = u_ctx->lldi.ports[0]; |
2f47d580 | 1622 | set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); |
324429d7 HS |
1623 | chcr_send_wr(skb); |
1624 | ||
6faa0f57 | 1625 | return isfull ? -EBUSY : -EINPROGRESS; |
2f47d580 HJ |
1626 | unmap: |
1627 | chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); | |
1628 | return error; | |
324429d7 HS |
1629 | } |
1630 | ||
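/*
 * create_last_hash_block - build an MD-style trailer block: a 0x80
 * byte, zero padding, and the total length in bits (scmd1 << 3)
 * stored big-endian in the last 8 bytes (offset 56 for 64-byte
 * blocks, 120 for 128-byte blocks). E.g. for SHA-256 (bs == 64) with
 * scmd1 == 192 bytes hashed, bytes 56..63 hold 192 * 8 == 1536.
 */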
1631 | static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1) | |
1632 | { | |
1633 | memset(bfr_ptr, 0, bs); | |
1634 | *bfr_ptr = 0x80; | |
1635 | if (bs == 64) | |
1636 | *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3); | |
1637 | else | |
1638 | *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3); | |
1639 | } | |
1640 | ||
1641 | static int chcr_ahash_final(struct ahash_request *req) | |
1642 | { | |
1643 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
1644 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); | |
324429d7 HS |
1645 | struct hash_wr_param params; |
1646 | struct sk_buff *skb; | |
1647 | struct uld_ctx *u_ctx = NULL; | |
1648 | u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | |
1649 | ||
5110e655 | 1650 | chcr_init_hctx_per_wr(req_ctx); |
2f47d580 | 1651 | u_ctx = ULD_CTX(h_ctx(rtfm)); |
324429d7 HS |
1652 | if (is_hmac(crypto_ahash_tfm(rtfm))) |
1653 | params.opad_needed = 1; | |
1654 | else | |
1655 | params.opad_needed = 0; | |
1656 | params.sg_len = 0; | |
5110e655 | 1657 | req_ctx->hctx_wr.isfinal = 1; |
324429d7 | 1658 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); |
5110e655 HJ |
1659 | params.kctx_len = roundup(params.alg_prm.result_size, 16); |
1660 | if (is_hmac(crypto_ahash_tfm(rtfm))) { | |
1661 | params.opad_needed = 1; | |
1662 | params.kctx_len *= 2; | |
1663 | } else { | |
1664 | params.opad_needed = 0; | |
1665 | } | |
1666 | ||
1667 | req_ctx->hctx_wr.result = 1; | |
44fce12a | 1668 | params.bfr_len = req_ctx->reqlen; |
324429d7 | 1669 | req_ctx->data_len += params.bfr_len + params.sg_len; |
5110e655 | 1670 | req_ctx->hctx_wr.srcsg = req->src; |
44fce12a HJ |
1671 | if (req_ctx->reqlen == 0) { |
1672 | create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); | |
324429d7 HS |
1673 | params.last = 0; |
1674 | params.more = 1; | |
1675 | params.scmd1 = 0; | |
1676 | params.bfr_len = bs; | |
1677 | ||
1678 | } else { | |
1679 | params.scmd1 = req_ctx->data_len; | |
1680 | params.last = 1; | |
1681 | params.more = 0; | |
1682 | } | |
5110e655 | 1683 | params.hash_size = crypto_ahash_digestsize(rtfm); |
358961d1 | 1684 | skb = create_hash_wr(req, ¶ms); |
40cdbe1a YG |
1685 | if (IS_ERR(skb)) |
1686 | return PTR_ERR(skb); | |
5110e655 | 1687 | req_ctx->reqlen = 0; |
324429d7 | 1688 | skb->dev = u_ctx->lldi.ports[0]; |
2f47d580 | 1689 | set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); |
324429d7 HS |
1690 | chcr_send_wr(skb); |
1691 | return -EINPROGRESS; | |
1692 | } | |
1693 | ||
1694 | static int chcr_ahash_finup(struct ahash_request *req) | |
1695 | { | |
1696 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
1697 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); | |
324429d7 HS |
1698 | struct uld_ctx *u_ctx = NULL; |
1699 | struct sk_buff *skb; | |
1700 | struct hash_wr_param params; | |
1701 | u8 bs; | |
6faa0f57 | 1702 | int error, isfull = 0; |
324429d7 HS |
1703 | |
1704 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | |
2f47d580 | 1705 | u_ctx = ULD_CTX(h_ctx(rtfm)); |
324429d7 HS |
1706 | |
1707 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], | |
2f47d580 | 1708 | h_ctx(rtfm)->tx_qidx))) { |
6faa0f57 | 1709 | isfull = 1; |
324429d7 | 1710 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
6faa0f57 | 1711 | return -ENOSPC; |
324429d7 | 1712 | } |
5110e655 HJ |
1713 | chcr_init_hctx_per_wr(req_ctx); |
1714 | error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); | |
1715 | if (error) | |
1716 | return -ENOMEM; | |
324429d7 | 1717 | |
5110e655 HJ |
1718 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); |
1719 | params.kctx_len = roundup(params.alg_prm.result_size, 16); | |
1720 | if (is_hmac(crypto_ahash_tfm(rtfm))) { | |
1721 | params.kctx_len *= 2; | |
324429d7 | 1722 | params.opad_needed = 1; |
5110e655 | 1723 | } else { |
324429d7 | 1724 | params.opad_needed = 0; |
5110e655 | 1725 | } |
324429d7 | 1726 | |
5110e655 HJ |
1727 | params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen, |
1728 | HASH_SPACE_LEFT(params.kctx_len), 0); | |
1729 | if (params.sg_len < req->nbytes) { | |
1730 | if (is_hmac(crypto_ahash_tfm(rtfm))) { | |
1731 | params.kctx_len /= 2; | |
1732 | params.opad_needed = 0; | |
1733 | } | |
1734 | params.last = 0; | |
1735 | params.more = 1; | |
1736 | params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) | |
1737 | - req_ctx->reqlen; | |
1738 | params.hash_size = params.alg_prm.result_size; | |
1739 | params.scmd1 = 0; | |
1740 | } else { | |
1741 | params.last = 1; | |
1742 | params.more = 0; | |
1743 | params.sg_len = req->nbytes; | |
1744 | params.hash_size = crypto_ahash_digestsize(rtfm); | |
1745 | params.scmd1 = req_ctx->data_len + req_ctx->reqlen + | |
1746 | params.sg_len; | |
1747 | } | |
44fce12a | 1748 | params.bfr_len = req_ctx->reqlen; |
324429d7 | 1749 | req_ctx->data_len += params.bfr_len + params.sg_len; |
5110e655 HJ |
1750 | req_ctx->hctx_wr.result = 1; |
1751 | req_ctx->hctx_wr.srcsg = req->src; | |
44fce12a HJ |
1752 | if ((req_ctx->reqlen + req->nbytes) == 0) { |
1753 | create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); | |
324429d7 HS |
1754 | params.last = 0; |
1755 | params.more = 1; | |
1756 | params.scmd1 = 0; | |
1757 | params.bfr_len = bs; | |
324429d7 | 1758 | } |
358961d1 | 1759 | skb = create_hash_wr(req, ¶ms); |
2f47d580 HJ |
1760 | if (IS_ERR(skb)) { |
1761 | error = PTR_ERR(skb); | |
1762 | goto unmap; | |
1763 | } | |
5110e655 HJ |
1764 | req_ctx->reqlen = 0; |
1765 | req_ctx->hctx_wr.processed += params.sg_len; | |
324429d7 | 1766 | skb->dev = u_ctx->lldi.ports[0]; |
2f47d580 | 1767 | set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); |
324429d7 HS |
1768 | chcr_send_wr(skb); |
1769 | ||
6faa0f57 | 1770 | return isfull ? -EBUSY : -EINPROGRESS; |
2f47d580 HJ |
1771 | unmap: |
1772 | chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); | |
1773 | return error; | |
324429d7 HS |
1774 | } |
1775 | ||
1776 | static int chcr_ahash_digest(struct ahash_request *req) | |
1777 | { | |
1778 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
1779 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); | |
324429d7 HS |
1780 | struct uld_ctx *u_ctx = NULL; |
1781 | struct sk_buff *skb; | |
1782 | struct hash_wr_param params; | |
1783 | u8 bs; | |
6faa0f57 | 1784 | int error, isfull = 0; |
324429d7 HS |
1785 | |
1786 | rtfm->init(req); | |
1787 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | |
1788 | ||
2f47d580 | 1789 | u_ctx = ULD_CTX(h_ctx(rtfm)); |
324429d7 | 1790 | if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], |
2f47d580 | 1791 | h_ctx(rtfm)->tx_qidx))) { |
6faa0f57 | 1792 | isfull = 1; |
324429d7 | 1793 | if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) |
6faa0f57 | 1794 | return -ENOSPC; |
324429d7 HS |
1795 | } |
1796 | ||
5110e655 | 1797 | chcr_init_hctx_per_wr(req_ctx); |
2f47d580 HJ |
1798 | error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); |
1799 | if (error) | |
1800 | return -ENOMEM; | |
324429d7 | 1801 | |
324429d7 | 1802 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); |
5110e655 HJ |
1803 | params.kctx_len = roundup(params.alg_prm.result_size, 16); |
1804 | if (is_hmac(crypto_ahash_tfm(rtfm))) { | |
1805 | params.kctx_len *= 2; | |
1806 | params.opad_needed = 1; | |
1807 | } else { | |
1808 | params.opad_needed = 0; | |
1809 | } | |
1810 | params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen, | |
1811 | HASH_SPACE_LEFT(params.kctx_len), 0); | |
1812 | if (params.sg_len < req->nbytes) { | |
1813 | if (is_hmac(crypto_ahash_tfm(rtfm))) { | |
1814 | params.kctx_len /= 2; | |
1815 | params.opad_needed = 0; | |
1816 | } | |
1817 | params.last = 0; | |
1818 | params.more = 1; | |
1819 | params.scmd1 = 0; | |
1820 | params.sg_len = rounddown(params.sg_len, bs); | |
1821 | params.hash_size = params.alg_prm.result_size; | |
1822 | } else { | |
1823 | params.sg_len = req->nbytes; | |
1824 | params.hash_size = crypto_ahash_digestsize(rtfm); | |
1825 | params.last = 1; | |
1826 | params.more = 0; | |
1827 | params.scmd1 = req->nbytes + req_ctx->data_len; | |
1828 | ||
1829 | } | |
1830 | params.bfr_len = 0; | |
1831 | req_ctx->hctx_wr.result = 1; | |
1832 | req_ctx->hctx_wr.srcsg = req->src; | |
324429d7 HS |
1833 | req_ctx->data_len += params.bfr_len + params.sg_len; |
1834 | ||
44fce12a HJ |
1835 | if (req->nbytes == 0) { |
1836 | create_last_hash_block(req_ctx->reqbfr, bs, 0); | |
324429d7 HS |
1837 | params.more = 1; |
1838 | params.bfr_len = bs; | |
1839 | } | |
1840 | ||
358961d1 | 1841 | skb = create_hash_wr(req, ¶ms); |
2f47d580 HJ |
1842 | if (IS_ERR(skb)) { |
1843 | error = PTR_ERR(skb); | |
1844 | goto unmap; | |
1845 | } | |
5110e655 | 1846 | req_ctx->hctx_wr.processed += params.sg_len; |
324429d7 | 1847 | skb->dev = u_ctx->lldi.ports[0]; |
2f47d580 | 1848 | set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); |
324429d7 | 1849 | chcr_send_wr(skb); |
6faa0f57 | 1850 | return isfull ? -EBUSY : -EINPROGRESS; |
2f47d580 HJ |
1851 | unmap: |
1852 | chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); | |
1853 | return error; | |
324429d7 HS |
1854 | } |
1855 | ||
6f76672b HJ |
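/*
 * chcr_ahash_continue - issue the next work request for a hash too
 * large for a single WR. The partial hash returned by the previous
 * WR is already in reqctx->partial_hash, so create_hash_wr() loads it
 * as the key context and hashing resumes hctx_wr->processed bytes
 * into the source scatterlist.
 */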
1856 | static int chcr_ahash_continue(struct ahash_request *req) |
1857 | { | |
1858 | struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); | |
1859 | struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr; | |
1860 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); | |
1861 | struct uld_ctx *u_ctx = NULL; | |
1862 | struct sk_buff *skb; | |
1863 | struct hash_wr_param params; | |
1864 | u8 bs; | |
1865 | int error; | |
1866 | ||
1867 | bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | |
1868 | u_ctx = ULD_CTX(h_ctx(rtfm)); | |
6f76672b HJ |
1869 | get_alg_config(¶ms.alg_prm, crypto_ahash_digestsize(rtfm)); |
1870 | params.kctx_len = roundup(params.alg_prm.result_size, 16); | |
1871 | if (is_hmac(crypto_ahash_tfm(rtfm))) { | |
1872 | params.kctx_len *= 2; | |
1873 | params.opad_needed = 1; | |
1874 | } else { | |
1875 | params.opad_needed = 0; | |
1876 | } | |
1877 | params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0, | |
1878 | HASH_SPACE_LEFT(params.kctx_len), | |
1879 | hctx_wr->src_ofst); | |
1880 | if ((params.sg_len + hctx_wr->processed) > req->nbytes) | |
1881 | params.sg_len = req->nbytes - hctx_wr->processed; | |
1882 | if (!hctx_wr->result || | |
1883 | ((params.sg_len + hctx_wr->processed) < req->nbytes)) { | |
1884 | if (is_hmac(crypto_ahash_tfm(rtfm))) { | |
1885 | params.kctx_len /= 2; | |
1886 | params.opad_needed = 0; | |
1887 | } | |
1888 | params.last = 0; | |
1889 | params.more = 1; | |
1890 | params.sg_len = rounddown(params.sg_len, bs); | |
1891 | params.hash_size = params.alg_prm.result_size; | |
1892 | params.scmd1 = 0; | |
1893 | } else { | |
1894 | params.last = 1; | |
1895 | params.more = 0; | |
1896 | params.hash_size = crypto_ahash_digestsize(rtfm); | |
1897 | params.scmd1 = reqctx->data_len + params.sg_len; | |
1898 | } | |
1899 | params.bfr_len = 0; | |
1900 | reqctx->data_len += params.sg_len; | |
1901 | skb = create_hash_wr(req, ¶ms); | |
1902 | if (IS_ERR(skb)) { | |
1903 | error = PTR_ERR(skb); | |
1904 | goto err; | |
1905 | } | |
1906 | hctx_wr->processed += params.sg_len; | |
1907 | skb->dev = u_ctx->lldi.ports[0]; | |
1908 | set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx); | |
1909 | chcr_send_wr(skb); | |
1910 | return 0; | |
1911 | err: | |
1912 | return error; | |
1913 | } | |
1914 | ||
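/*
 * On completion the handler copies either the final digest
 * (hctx_wr->result set) or the updated partial hash out of the
 * CPL_FW6_PLD payload; if source bytes remain, the request is
 * re-issued via chcr_ahash_continue() instead of being completed.
 */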
1915 | static inline void chcr_handle_ahash_resp(struct ahash_request *req, | |
1916 | unsigned char *input, | |
1917 | int err) | |
1918 | { | |
1919 | struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); | |
1920 | struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr; | |
1921 | int digestsize, updated_digestsize; | |
1922 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
1923 | struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm)); | |
1924 | ||
1925 | if (input == NULL) | |
1926 | goto out; | |
1927 | digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); | |
1928 | updated_digestsize = digestsize; | |
1929 | if (digestsize == SHA224_DIGEST_SIZE) | |
1930 | updated_digestsize = SHA256_DIGEST_SIZE; | |
1931 | else if (digestsize == SHA384_DIGEST_SIZE) | |
1932 | updated_digestsize = SHA512_DIGEST_SIZE; | |
1933 | ||
1934 | if (hctx_wr->dma_addr) { | |
1935 | dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr, | |
1936 | hctx_wr->dma_len, DMA_TO_DEVICE); | |
1937 | hctx_wr->dma_addr = 0; | |
1938 | } | |
1939 | if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) == | |
1940 | req->nbytes)) { | |
1941 | if (hctx_wr->result == 1) { | |
1942 | hctx_wr->result = 0; | |
1943 | memcpy(req->result, input + sizeof(struct cpl_fw6_pld), | |
1944 | digestsize); | |
1945 | } else { | |
1946 | memcpy(reqctx->partial_hash, | |
1947 | input + sizeof(struct cpl_fw6_pld), | |
1948 | updated_digestsize); | |
1949 | ||
1950 | } | |
1951 | goto unmap; | |
1952 | } | |
1953 | memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld), | |
1954 | updated_digestsize); | |
1955 | ||
1956 | err = chcr_ahash_continue(req); | |
1957 | if (err) | |
1958 | goto unmap; | |
1959 | return; | |
1960 | unmap: | |
1961 | if (hctx_wr->is_sg_map) | |
1962 | chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); | |
1963 | | 
1965 | out: | |
1966 | req->base.complete(&req->base, err); | |
1967 | } | |
1968 | ||
1969 | /* | |
1970 | * chcr_handle_resp - Unmap the DMA buffers associated with the request | |
1971 | * @req: crypto request | |
1972 | */ | |
1973 | int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, | |
1974 | int err) | |
1975 | { | |
1976 | struct crypto_tfm *tfm = req->tfm; | |
1977 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | |
1978 | struct adapter *adap = padap(ctx->dev); | |
1979 | ||
1980 | switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { | |
1981 | case CRYPTO_ALG_TYPE_AEAD: | |
1982 | chcr_handle_aead_resp(aead_request_cast(req), input, err); | |
1983 | break; | |
1984 | ||
1985 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | |
1986 | err = chcr_handle_cipher_resp(ablkcipher_request_cast(req), | |
1987 | input, err); | |
1988 | break; | |
1989 | ||
1990 | case CRYPTO_ALG_TYPE_AHASH: | |
1991 | chcr_handle_ahash_resp(ahash_request_cast(req), input, err); | |
1992 | } | |
1993 | atomic_inc(&adap->chcr_stats.complete); | |
1994 | return err; | |
1995 | } | |
324429d7 HS |
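/*
 * export/import serialise the software-visible hash state (the
 * buffered partial block plus the running partial hash) so a request
 * can be suspended and resumed; the per-WR bookkeeping is reset
 * rather than preserved.
 */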
1996 | static int chcr_ahash_export(struct ahash_request *areq, void *out) |
1997 | { | |
1998 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | |
1999 | struct chcr_ahash_req_ctx *state = out; | |
2000 | ||
44fce12a | 2001 | state->reqlen = req_ctx->reqlen; |
324429d7 | 2002 | state->data_len = req_ctx->data_len; |
44fce12a | 2003 | memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen); |
324429d7 HS |
2004 | memcpy(state->partial_hash, req_ctx->partial_hash, |
2005 | CHCR_HASH_MAX_DIGEST_SIZE); | |
5110e655 | 2006 | chcr_init_hctx_per_wr(state); |
44fce12a | 2007 | return 0; |
324429d7 HS |
2008 | } |
2009 | ||
2010 | static int chcr_ahash_import(struct ahash_request *areq, const void *in) | |
2011 | { | |
2012 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | |
2013 | struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in; | |
2014 | ||
44fce12a | 2015 | req_ctx->reqlen = state->reqlen; |
324429d7 | 2016 | req_ctx->data_len = state->data_len; |
44fce12a HJ |
2017 | req_ctx->reqbfr = req_ctx->bfr1; |
2018 | req_ctx->skbfr = req_ctx->bfr2; | |
2019 | memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128); | |
324429d7 HS |
2020 | memcpy(req_ctx->partial_hash, state->partial_hash, |
2021 | CHCR_HASH_MAX_DIGEST_SIZE); | |
5110e655 | 2022 | chcr_init_hctx_per_wr(req_ctx); |
324429d7 HS |
2023 | return 0; |
2024 | } | |
2025 | ||
2026 | static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, | |
2027 | unsigned int keylen) | |
2028 | { | |
2f47d580 | 2029 | struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm)); |
324429d7 HS |
2030 | unsigned int digestsize = crypto_ahash_digestsize(tfm); |
2031 | unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); | |
2032 | unsigned int i, err = 0, updated_digestsize; | |
2033 | ||
e7922729 HJ |
2034 | SHASH_DESC_ON_STACK(shash, hmacctx->base_hash); |
2035 | ||
2036 | /* Use the key to calculate the ipad and opad. ipad will be sent with the | 
324429d7 HS |
2037 | * first request's data. opad will be sent with the final hash result |
2038 | * ipad is kept in hmacctx->ipad and opad in hmacctx->opad. | 
2039 | */ | |
e7922729 HJ |
2040 | shash->tfm = hmacctx->base_hash; |
2041 | shash->flags = crypto_shash_get_flags(hmacctx->base_hash); | |
324429d7 | 2042 | if (keylen > bs) { |
e7922729 | 2043 | err = crypto_shash_digest(shash, key, keylen, |
324429d7 HS |
2044 | hmacctx->ipad); |
2045 | if (err) | |
2046 | goto out; | |
2047 | keylen = digestsize; | |
2048 | } else { | |
2049 | memcpy(hmacctx->ipad, key, keylen); | |
2050 | } | |
2051 | memset(hmacctx->ipad + keylen, 0, bs - keylen); | |
2052 | memcpy(hmacctx->opad, hmacctx->ipad, bs); | |
2053 | ||
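/* Standard HMAC key schedule: XOR the zero-padded key with the 0x36
 * (ipad) and 0x5c (opad) patterns, one word at a time.
 */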
2054 | for (i = 0; i < bs / sizeof(int); i++) { | |
2055 | *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA; | |
2056 | *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA; | |
2057 | } | |
2058 | ||
2059 | updated_digestsize = digestsize; | |
2060 | if (digestsize == SHA224_DIGEST_SIZE) | |
2061 | updated_digestsize = SHA256_DIGEST_SIZE; | |
2062 | else if (digestsize == SHA384_DIGEST_SIZE) | |
2063 | updated_digestsize = SHA512_DIGEST_SIZE; | |
e7922729 | 2064 | err = chcr_compute_partial_hash(shash, hmacctx->ipad, |
324429d7 HS |
2065 | hmacctx->ipad, digestsize); |
2066 | if (err) | |
2067 | goto out; | |
2068 | chcr_change_order(hmacctx->ipad, updated_digestsize); | |
2069 | ||
e7922729 | 2070 | err = chcr_compute_partial_hash(shash, hmacctx->opad, |
324429d7 HS |
2071 | hmacctx->opad, digestsize); |
2072 | if (err) | |
2073 | goto out; | |
2074 | chcr_change_order(hmacctx->opad, updated_digestsize); | |
2075 | out: | |
2076 | return err; | |
2077 | } | |
2078 | ||
b8fd1f41 | 2079 | static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key, |
324429d7 HS |
2080 | unsigned int key_len) |
2081 | { | |
2f47d580 | 2082 | struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); |
324429d7 | 2083 | unsigned short context_size = 0; |
b8fd1f41 | 2084 | int err; |
324429d7 | 2085 | |
b8fd1f41 HJ |
2086 | err = chcr_cipher_fallback_setkey(cipher, key, key_len); |
2087 | if (err) | |
2088 | goto badkey_err; | |
cc1b156d HJ |
2089 | |
2090 | memcpy(ablkctx->key, key, key_len); | |
2091 | ablkctx->enckey_len = key_len; | |
2092 | get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2); | |
2093 | context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4; | |
2094 | ablkctx->key_ctx_hdr = | |
2095 | FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ? | |
2096 | CHCR_KEYCTX_CIPHER_KEY_SIZE_128 : | |
2097 | CHCR_KEYCTX_CIPHER_KEY_SIZE_256, | |
2098 | CHCR_KEYCTX_NO_KEY, 1, | |
2099 | 0, context_size); | |
2100 | ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; | |
2101 | return 0; | |
b8fd1f41 HJ |
2102 | badkey_err: |
2103 | crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); | |
2104 | ablkctx->enckey_len = 0; | |
2105 | ||
2106 | return err; | |
324429d7 HS |
2107 | } |
2108 | ||
2109 | static int chcr_sha_init(struct ahash_request *areq) | |
2110 | { | |
2111 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | |
2112 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); | |
2113 | int digestsize = crypto_ahash_digestsize(tfm); | |
2114 | ||
2115 | req_ctx->data_len = 0; | |
44fce12a HJ |
2116 | req_ctx->reqlen = 0; |
2117 | req_ctx->reqbfr = req_ctx->bfr1; | |
2118 | req_ctx->skbfr = req_ctx->bfr2; | |
324429d7 | 2119 | copy_hash_init_values(req_ctx->partial_hash, digestsize); |
5110e655 | 2120 | |
324429d7 HS |
2121 | return 0; |
2122 | } | |
2123 | ||
2124 | static int chcr_sha_cra_init(struct crypto_tfm *tfm) | |
2125 | { | |
2126 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | |
2127 | sizeof(struct chcr_ahash_req_ctx)); | |
2128 | return chcr_device_init(crypto_tfm_ctx(tfm)); | |
2129 | } | |
2130 | ||
2131 | static int chcr_hmac_init(struct ahash_request *areq) | |
2132 | { | |
2133 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); | |
2134 | struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq); | |
2f47d580 | 2135 | struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm)); |
324429d7 HS |
2136 | unsigned int digestsize = crypto_ahash_digestsize(rtfm); |
2137 | unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); | |
2138 | ||
2139 | chcr_sha_init(areq); | |
2140 | req_ctx->data_len = bs; | |
2141 | if (is_hmac(crypto_ahash_tfm(rtfm))) { | |
2142 | if (digestsize == SHA224_DIGEST_SIZE) | |
2143 | memcpy(req_ctx->partial_hash, hmacctx->ipad, | |
2144 | SHA256_DIGEST_SIZE); | |
2145 | else if (digestsize == SHA384_DIGEST_SIZE) | |
2146 | memcpy(req_ctx->partial_hash, hmacctx->ipad, | |
2147 | SHA512_DIGEST_SIZE); | |
2148 | else | |
2149 | memcpy(req_ctx->partial_hash, hmacctx->ipad, | |
2150 | digestsize); | |
2151 | } | |
2152 | return 0; | |
2153 | } | |
2154 | ||
2155 | static int chcr_hmac_cra_init(struct crypto_tfm *tfm) | |
2156 | { | |
2157 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | |
2158 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); | |
2159 | unsigned int digestsize = | |
2160 | crypto_ahash_digestsize(__crypto_ahash_cast(tfm)); | |
2161 | ||
2162 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | |
2163 | sizeof(struct chcr_ahash_req_ctx)); | |
e7922729 HJ |
2164 | hmacctx->base_hash = chcr_alloc_shash(digestsize); |
2165 | if (IS_ERR(hmacctx->base_hash)) | |
2166 | return PTR_ERR(hmacctx->base_hash); | |
324429d7 HS |
2167 | return chcr_device_init(crypto_tfm_ctx(tfm)); |
2168 | } | |
2169 | ||
324429d7 HS |
2170 | static void chcr_hmac_cra_exit(struct crypto_tfm *tfm) |
2171 | { | |
2172 | struct chcr_context *ctx = crypto_tfm_ctx(tfm); | |
2173 | struct hmac_ctx *hmacctx = HMAC_CTX(ctx); | |
2174 | ||
e7922729 HJ |
2175 | if (hmacctx->base_hash) { |
2176 | chcr_free_shash(hmacctx->base_hash); | |
2177 | hmacctx->base_hash = NULL; | |
324429d7 HS |
2178 | } |
2179 | } | |
2180 | ||
2f47d580 HJ |
2181 | static int chcr_aead_common_init(struct aead_request *req, |
2182 | unsigned short op_type) | |
2debd332 | 2183 | { |
2f47d580 HJ |
2184 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
2185 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); | |
2186 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2187 | int error = -EINVAL; | |
2f47d580 | 2188 | unsigned int authsize = crypto_aead_authsize(tfm); |
2debd332 | 2189 | |
2f47d580 HJ |
2190 | /* validate key size */ |
2191 | if (aeadctx->enckey_len == 0) | |
2192 | goto err; | |
2193 | if (op_type && req->cryptlen < authsize) | |
2194 | goto err; | |
2195 | error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, | |
2196 | op_type); | |
2197 | if (error) { | |
2198 | error = -ENOMEM; | |
2199 | goto err; | |
2200 | } | |
2201 | reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen, | |
2202 | CHCR_SRC_SG_SIZE, 0); | |
2203 | reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen, | |
2204 | CHCR_SRC_SG_SIZE, req->assoclen); | |
2205 | return 0; | |
2206 | err: | |
2207 | return error; | |
2debd332 | 2208 | } |
2f47d580 HJ |
2209 | |
2210 | static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents, | |
0e93708d HJ |
2211 | int aadmax, int wrlen, |
2212 | unsigned short op_type) | |
2213 | { | |
2214 | unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); | |
2215 | ||
2216 | if (((req->cryptlen - (op_type ? authsize : 0)) == 0) || | |
2f47d580 | 2217 | dst_nents > MAX_DSGL_ENT || |
0e93708d | 2218 | (req->assoclen > aadmax) || |
2f47d580 | 2219 | (wrlen > SGE_MAX_WR_LEN)) |
0e93708d HJ |
2220 | return 1; |
2221 | return 0; | |
2222 | } | |
2debd332 | 2223 | |
0e93708d HJ |
2224 | static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type) |
2225 | { | |
2226 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2f47d580 | 2227 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
0e93708d HJ |
2228 | struct aead_request *subreq = aead_request_ctx(req); |
2229 | ||
2230 | aead_request_set_tfm(subreq, aeadctx->sw_cipher); | |
2231 | aead_request_set_callback(subreq, req->base.flags, | |
2232 | req->base.complete, req->base.data); | |
2233 | aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, | |
2234 | req->iv); | |
2235 | aead_request_set_ad(subreq, req->assoclen); | |
2236 | return op_type ? crypto_aead_decrypt(subreq) : | |
2237 | crypto_aead_encrypt(subreq); | |
2238 | } | |
2debd332 HJ |
2239 | |
2240 | static struct sk_buff *create_authenc_wr(struct aead_request *req, | |
2241 | unsigned short qid, | |
2242 | int size, | |
2243 | unsigned short op_type) | |
2244 | { | |
2245 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2f47d580 | 2246 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2debd332 HJ |
2247 | struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); |
2248 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2249 | struct sk_buff *skb = NULL; | |
2250 | struct chcr_wr *chcr_req; | |
2251 | struct cpl_rx_phys_dsgl *phys_cpl; | |
2f47d580 HJ |
2252 | struct ulptx_sgl *ulptx; |
2253 | unsigned int transhdr_len; | |
3d64bd67 | 2254 | unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm); |
2f47d580 | 2255 | unsigned int kctx_len = 0, dnents; |
2debd332 HJ |
2256 | unsigned int assoclen = req->assoclen; |
2257 | unsigned int authsize = crypto_aead_authsize(tfm); | |
2f47d580 | 2258 | int error = -EINVAL; |
2debd332 HJ |
2259 | int null = 0; |
2260 | gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : | |
2261 | GFP_ATOMIC; | |
2f47d580 | 2262 | struct adapter *adap = padap(a_ctx(tfm)->dev); |
2debd332 | 2263 | |
2f47d580 HJ |
2264 | if (req->cryptlen == 0) |
2265 | return NULL; | |
2debd332 | 2266 | |
2f47d580 | 2267 | reqctx->b0_dma = 0; |
3d64bd67 HJ |
2268 | if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL || |
2269 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { | |
2debd332 HJ |
2270 | null = 1; |
2271 | assoclen = 0; | |
2272 | } | |
2f47d580 HJ |
2273 | error = chcr_aead_common_init(req, op_type); |
2274 | if (error) | |
2275 | return ERR_PTR(error); | |
5abc8db0 HJ |
2276 | dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0); |
2277 | dnents += sg_nents_xlen(req->dst, req->cryptlen + | |
2278 | (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE, | |
2279 | req->assoclen); | |
2280 | dnents += MIN_AUTH_SG; // For IV | |
2f47d580 HJ |
2281 | |
2282 | dst_size = get_space_for_phys_dsgl(dnents); | |
2debd332 HJ |
2283 | kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4) |
2284 | - sizeof(chcr_req->key_ctx); | |
2285 | transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); | |
2f47d580 HJ |
2286 | reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) < |
2287 | SGE_MAX_WR_LEN; | |
125d01ca HJ |
2288 | temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) |
2289 | : (sgl_len(reqctx->src_nents + reqctx->aad_nents | |
2f47d580 HJ |
2290 | + MIN_GCM_SG) * 8); |
2291 | transhdr_len += temp; | |
125d01ca | 2292 | transhdr_len = roundup(transhdr_len, 16); |
2f47d580 HJ |
2293 | |
2294 | if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, | |
2295 | transhdr_len, op_type)) { | |
ee0863ba | 2296 | atomic_inc(&adap->chcr_stats.fallback); |
2f47d580 HJ |
2297 | chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, |
2298 | op_type); | |
0e93708d HJ |
2299 | return ERR_PTR(chcr_aead_fallback(req, op_type)); |
2300 | } | |
2f47d580 | 2301 | skb = alloc_skb(SGE_MAX_WR_LEN, flags); |
5fe8c711 HJ |
2302 | if (!skb) { |
2303 | error = -ENOMEM; | |
2debd332 | 2304 | goto err; |
5fe8c711 | 2305 | } |
2debd332 | 2306 | |
de77b966 | 2307 | chcr_req = __skb_put_zero(skb, transhdr_len); |
2debd332 | 2308 | |
2f47d580 | 2309 | temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize; |
2debd332 HJ |
2310 | |
2311 | /* | |
2312 | * Input order is AAD, IV and payload, where the IV is included as | 
2313 | * part of the authdata. All other fields are filled according | 
2314 | * to the hardware spec. | 
2315 | */ | |
2316 | chcr_req->sec_cpl.op_ivinsrtofst = | |
2f47d580 HJ |
2317 | FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2, |
2318 | assoclen + 1); | |
2319 | chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen); | |
2debd332 HJ |
2320 | chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( |
2321 | assoclen ? 1 : 0, assoclen, | |
2f47d580 HJ |
2322 | assoclen + IV + 1, |
2323 | (temp & 0x1F0) >> 4); | |
2debd332 | 2324 | chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT( |
2f47d580 HJ |
2325 | temp & 0xF, |
2326 | null ? 0 : assoclen + IV + 1, | |
2327 | temp, temp); | |
3d64bd67 HJ |
2328 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL || |
2329 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA) | |
2330 | temp = CHCR_SCMD_CIPHER_MODE_AES_CTR; | |
2331 | else | |
2332 | temp = CHCR_SCMD_CIPHER_MODE_AES_CBC; | |
2debd332 HJ |
2333 | chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, |
2334 | (op_type == CHCR_ENCRYPT_OP) ? 1 : 0, | |
3d64bd67 | 2335 | temp, |
2debd332 | 2336 | actx->auth_mode, aeadctx->hmac_ctrl, |
2f47d580 | 2337 | IV >> 1); |
2debd332 | 2338 | chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, |
2f47d580 | 2339 | 0, 0, dst_size); |
2debd332 HJ |
2340 | |
2341 | chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; | |
3d64bd67 HJ |
2342 | if (op_type == CHCR_ENCRYPT_OP || |
2343 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || | |
2344 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) | |
2debd332 HJ |
2345 | memcpy(chcr_req->key_ctx.key, aeadctx->key, |
2346 | aeadctx->enckey_len); | |
2347 | else | |
2348 | memcpy(chcr_req->key_ctx.key, actx->dec_rrkey, | |
2349 | aeadctx->enckey_len); | |
2350 | ||
125d01ca HJ |
2351 | memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), |
2352 | actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16)); | |
3d64bd67 HJ |
2353 | if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || |
2354 | subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { | |
2355 | memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE); | |
2356 | memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv, | |
2357 | CTR_RFC3686_IV_SIZE); | |
2358 | *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + | |
2359 | CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); | |
2360 | } else { | |
2361 | memcpy(reqctx->iv, req->iv, IV); | |
2362 | } | |
2debd332 | 2363 | phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); |
2f47d580 HJ |
2364 | ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); |
2365 | chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid); | |
2366 | chcr_add_aead_src_ent(req, ulptx, assoclen, op_type); | |
ee0863ba | 2367 | atomic_inc(&adap->chcr_stats.cipher_rqst); |
2f47d580 HJ |
2368 | temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + |
2369 | kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0); | |
2370 | create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, | |
2371 | transhdr_len, temp, 0); | |
2debd332 | 2372 | reqctx->skb = skb; |
2f47d580 | 2373 | reqctx->op = op_type; |
2debd332 HJ |
2374 | |
2375 | return skb; | |
2debd332 | 2376 | err: |
2f47d580 HJ |
2377 | chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, |
2378 | op_type); | |
2379 | ||
5fe8c711 | 2380 | return ERR_PTR(error); |
2debd332 HJ |
2381 | } |
2382 | ||
6dad4e8a AG |
2383 | int chcr_aead_dma_map(struct device *dev, |
2384 | struct aead_request *req, | |
2385 | unsigned short op_type) | |
2f47d580 HJ |
2386 | { |
2387 | int error; | |
2388 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2389 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2390 | unsigned int authsize = crypto_aead_authsize(tfm); | |
2391 | int dst_size; | |
2392 | ||
2393 | dst_size = req->assoclen + req->cryptlen + (op_type ? | |
2394 | -authsize : authsize); | |
2395 | if (!req->cryptlen || !dst_size) | |
2396 | return 0; | |
2397 | reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV, | |
2398 | DMA_BIDIRECTIONAL); | |
2399 | if (dma_mapping_error(dev, reqctx->iv_dma)) | |
2400 | return -ENOMEM; | |
2401 | ||
2402 | if (req->src == req->dst) { | |
2403 | error = dma_map_sg(dev, req->src, sg_nents(req->src), | |
2404 | DMA_BIDIRECTIONAL); | |
2405 | if (!error) | |
2406 | goto err; | |
2407 | } else { | |
2408 | error = dma_map_sg(dev, req->src, sg_nents(req->src), | |
2409 | DMA_TO_DEVICE); | |
2410 | if (!error) | |
2411 | goto err; | |
2412 | error = dma_map_sg(dev, req->dst, sg_nents(req->dst), | |
2413 | DMA_FROM_DEVICE); | |
2414 | if (!error) { | |
2415 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | |
2416 | DMA_TO_DEVICE); | |
2417 | goto err; | |
2418 | } | |
2419 | } | |
2420 | ||
2421 | return 0; | |
2422 | err: | |
2423 | dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); | |
2424 | return -ENOMEM; | |
2425 | } | |
2426 | ||
6dad4e8a AG |
2427 | void chcr_aead_dma_unmap(struct device *dev, |
2428 | struct aead_request *req, | |
2429 | unsigned short op_type) | |
2f47d580 HJ |
2430 | { |
2431 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2432 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2433 | unsigned int authsize = crypto_aead_authsize(tfm); | |
2434 | int dst_size; | |
2435 | ||
2436 | dst_size = req->assoclen + req->cryptlen + (op_type ? | |
2437 | -authsize : authsize); | |
2438 | if (!req->cryptlen || !dst_size) | |
2439 | return; | |
2440 | ||
2441 | dma_unmap_single(dev, reqctx->iv_dma, IV, | |
2442 | DMA_BIDIRECTIONAL); | |
2443 | if (req->src == req->dst) { | |
2444 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | |
2445 | DMA_BIDIRECTIONAL); | |
2446 | } else { | |
2447 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | |
2448 | DMA_TO_DEVICE); | |
2449 | dma_unmap_sg(dev, req->dst, sg_nents(req->dst), | |
2450 | DMA_FROM_DEVICE); | |
2451 | } | |
2452 | } | |
2453 | ||
6dad4e8a AG |
2454 | void chcr_add_aead_src_ent(struct aead_request *req, |
2455 | struct ulptx_sgl *ulptx, | |
2456 | unsigned int assoclen, | |
2457 | unsigned short op_type) | |
2f47d580 HJ |
2458 | { |
2459 | struct ulptx_walk ulp_walk; | |
2460 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2461 | ||
2462 | if (reqctx->imm) { | |
2463 | u8 *buf = (u8 *)ulptx; | |
2464 | ||
2465 | if (reqctx->b0_dma) { | |
2466 | memcpy(buf, reqctx->scratch_pad, reqctx->b0_len); | |
2467 | buf += reqctx->b0_len; | |
2468 | } | |
2469 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), | |
2470 | buf, assoclen, 0); | |
2471 | buf += assoclen; | |
2472 | memcpy(buf, reqctx->iv, IV); | |
2473 | buf += IV; | |
2474 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), | |
2475 | buf, req->cryptlen, req->assoclen); | |
2476 | } else { | |
2477 | ulptx_walk_init(&ulp_walk, ulptx); | |
2478 | if (reqctx->b0_dma) | |
2479 | ulptx_walk_add_page(&ulp_walk, reqctx->b0_len, | |
2480 | &reqctx->b0_dma); | |
2481 | ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0); | |
2482 | ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma); | |
2483 | ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen, | |
2484 | req->assoclen); | |
2485 | ulptx_walk_end(&ulp_walk); | |
2486 | } | |
2487 | } | |
2488 | ||
6dad4e8a AG |
2489 | void chcr_add_aead_dst_ent(struct aead_request *req, |
2490 | struct cpl_rx_phys_dsgl *phys_cpl, | |
2491 | unsigned int assoclen, | |
2492 | unsigned short op_type, | |
2493 | unsigned short qid) | |
2f47d580 HJ |
2494 | { |
2495 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2496 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2497 | struct dsgl_walk dsgl_walk; | |
2498 | unsigned int authsize = crypto_aead_authsize(tfm); | |
2499 | u32 temp; | |
2500 | ||
2501 | dsgl_walk_init(&dsgl_walk, phys_cpl); | |
2502 | if (reqctx->b0_dma) | |
2503 | dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma); | |
2504 | dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0); | |
2505 | dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma); | |
2506 | temp = req->cryptlen + (op_type ? -authsize : authsize); | |
2507 | dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen); | |
2508 | dsgl_walk_end(&dsgl_walk, qid); | |
2509 | } | |
2510 | ||
6dad4e8a | 2511 | void chcr_add_cipher_src_ent(struct ablkcipher_request *req, |
335bcc4a | 2512 | void *ulptx, |
6dad4e8a | 2513 | struct cipher_wr_param *wrparam) |
2f47d580 HJ |
2514 | { |
2515 | struct ulptx_walk ulp_walk; | |
2516 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | |
335bcc4a | 2517 | u8 *buf = ulptx; |
2f47d580 | 2518 | |
335bcc4a HJ |
2519 | memcpy(buf, reqctx->iv, IV); |
2520 | buf += IV; | |
2f47d580 | 2521 | if (reqctx->imm) { |
2f47d580 HJ |
2522 | sg_pcopy_to_buffer(req->src, sg_nents(req->src), |
2523 | buf, wrparam->bytes, reqctx->processed); | |
2524 | } else { | |
335bcc4a | 2525 | ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf); |
2f47d580 HJ |
2526 | ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes, |
2527 | reqctx->src_ofst); | |
2528 | reqctx->srcsg = ulp_walk.last_sg; | |
2529 | reqctx->src_ofst = ulp_walk.last_sg_len; | |
2530 | ulptx_walk_end(&ulp_walk); | |
2531 | } | |
2532 | } | |
2533 | ||
6dad4e8a AG |
2534 | void chcr_add_cipher_dst_ent(struct ablkcipher_request *req, |
2535 | struct cpl_rx_phys_dsgl *phys_cpl, | |
2536 | struct cipher_wr_param *wrparam, | |
2537 | unsigned short qid) | |
2f47d580 HJ |
2538 | { |
2539 | struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req); | |
2540 | struct dsgl_walk dsgl_walk; | |
2541 | ||
2542 | dsgl_walk_init(&dsgl_walk, phys_cpl); | |
2f47d580 HJ |
2543 | dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes, |
2544 | reqctx->dst_ofst); | |
2545 | reqctx->dstsg = dsgl_walk.last_sg; | |
2546 | reqctx->dst_ofst = dsgl_walk.last_sg_len; | |
2547 | ||
2548 | dsgl_walk_end(&dsgl_walk, qid); | |
2549 | } | |
2550 | ||
6dad4e8a AG |
2551 | void chcr_add_hash_src_ent(struct ahash_request *req, |
2552 | struct ulptx_sgl *ulptx, | |
2553 | struct hash_wr_param *param) | |
2f47d580 HJ |
2554 | { |
2555 | struct ulptx_walk ulp_walk; | |
2556 | struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); | |
2557 | ||
5110e655 | 2558 | if (reqctx->hctx_wr.imm) { |
2f47d580 HJ |
2559 | u8 *buf = (u8 *)ulptx; |
2560 | ||
2561 | if (param->bfr_len) { | |
2562 | memcpy(buf, reqctx->reqbfr, param->bfr_len); | |
2563 | buf += param->bfr_len; | |
2564 | } | |
5110e655 HJ |
2565 | |
2566 | sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg, | |
2567 | sg_nents(reqctx->hctx_wr.srcsg), buf, | |
2568 | param->sg_len, 0); | |
2f47d580 HJ |
2569 | } else { |
2570 | ulptx_walk_init(&ulp_walk, ulptx); | |
2571 | if (param->bfr_len) | |
2572 | ulptx_walk_add_page(&ulp_walk, param->bfr_len, | |
5110e655 HJ |
2573 | &reqctx->hctx_wr.dma_addr); |
2574 | ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg, | |
2575 | param->sg_len, reqctx->hctx_wr.src_ofst); | |
2576 | reqctx->hctx_wr.srcsg = ulp_walk.last_sg; | |
2577 | reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len; | |
db6deea4 | 2578 | ulptx_walk_end(&ulp_walk); |
2f47d580 HJ |
2579 | } |
2580 | } | |
2581 | ||
6dad4e8a AG |
2582 | int chcr_hash_dma_map(struct device *dev, |
2583 | struct ahash_request *req) | |
2f47d580 HJ |
2584 | { |
2585 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
2586 | int error = 0; | |
2587 | ||
2588 | if (!req->nbytes) | |
2589 | return 0; | |
2590 | error = dma_map_sg(dev, req->src, sg_nents(req->src), | |
2591 | DMA_TO_DEVICE); | |
2592 | if (!error) | |
7814f552 | 2593 | return -ENOMEM; |
5110e655 | 2594 | req_ctx->hctx_wr.is_sg_map = 1; |
2f47d580 HJ |
2595 | return 0; |
2596 | } | |
2597 | ||
6dad4e8a AG |
2598 | void chcr_hash_dma_unmap(struct device *dev, |
2599 | struct ahash_request *req) | |
2f47d580 HJ |
2600 | { |
2601 | struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); | |
2602 | ||
2603 | if (!req->nbytes) | |
2604 | return; | |
2605 | ||
2606 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | |
2607 | DMA_TO_DEVICE); | |
5110e655 | 2608 | req_ctx->hctx_wr.is_sg_map = 0; |
2f47d580 HJ |
2609 | |
2610 | } | |
2611 | ||
6dad4e8a AG |
2612 | int chcr_cipher_dma_map(struct device *dev, |
2613 | struct ablkcipher_request *req) | |
2f47d580 HJ |
2614 | { |
2615 | int error; | |
2f47d580 HJ |
2616 | |
2617 | if (req->src == req->dst) { | |
2618 | error = dma_map_sg(dev, req->src, sg_nents(req->src), | |
2619 | DMA_BIDIRECTIONAL); | |
2620 | if (!error) | |
2621 | goto err; | |
2622 | } else { | |
2623 | error = dma_map_sg(dev, req->src, sg_nents(req->src), | |
2624 | DMA_TO_DEVICE); | |
2625 | if (!error) | |
2626 | goto err; | |
2627 | error = dma_map_sg(dev, req->dst, sg_nents(req->dst), | |
2628 | DMA_FROM_DEVICE); | |
2629 | if (!error) { | |
2630 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | |
2631 | DMA_TO_DEVICE); | |
2632 | goto err; | |
2633 | } | |
2634 | } | |
2635 | ||
2636 | return 0; | |
2637 | err: | |
2f47d580 HJ |
2638 | return -ENOMEM; |
2639 | } | |
6dad4e8a AG |
2640 | |
2641 | void chcr_cipher_dma_unmap(struct device *dev, | |
2642 | struct ablkcipher_request *req) | |
2f47d580 | 2643 | { |
2f47d580 HJ |
2644 | if (req->src == req->dst) { |
2645 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | |
2646 | DMA_BIDIRECTIONAL); | |
2647 | } else { | |
2648 | dma_unmap_sg(dev, req->src, sg_nents(req->src), | |
2649 | DMA_TO_DEVICE); | |
2650 | dma_unmap_sg(dev, req->dst, sg_nents(req->dst), | |
2651 | DMA_FROM_DEVICE); | |
2652 | } | |
2653 | } | |
2654 | ||
2debd332 HJ |
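/*
 * set_msg_len - write the CCM payload length big-endian into the last
 * csize octets of the field (the final L octets of B0 per RFC 3610).
 * E.g. msglen 0x0102 with csize 4 produces the bytes 00 00 01 02.
 */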
2655 | static int set_msg_len(u8 *block, unsigned int msglen, int csize) |
2656 | { | |
2657 | __be32 data; | |
2658 | ||
2659 | memset(block, 0, csize); | |
2660 | block += csize; | |
2661 | ||
2662 | if (csize >= 4) | |
2663 | csize = 4; | |
2664 | else if (msglen > (unsigned int)(1 << (8 * csize))) | |
2665 | return -EOVERFLOW; | |
2666 | ||
2667 | data = cpu_to_be32(msglen); | |
2668 | memcpy(block - csize, (u8 *)&data + 4 - csize, csize); | |
2669 | ||
2670 | return 0; | |
2671 | } | |
2672 | ||
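/*
 * generate_b0 - build the CCM B0 block per RFC 3610: a flags octet
 * (Adata in bit 6, M' = (tag_len - 2)/2 in bits 3-5, L' in bits 0-2
 * taken from iv[0]), followed by the nonce, with the payload length
 * in the last l = iv[0] + 1 octets.
 */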
2673 | static void generate_b0(struct aead_request *req, | |
2674 | struct chcr_aead_ctx *aeadctx, | |
2675 | unsigned short op_type) | |
2676 | { | |
2677 | unsigned int l, lp, m; | |
2678 | int rc; | |
2679 | struct crypto_aead *aead = crypto_aead_reqtfm(req); | |
2680 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2681 | u8 *b0 = reqctx->scratch_pad; | |
2682 | ||
2683 | m = crypto_aead_authsize(aead); | |
2684 | ||
2685 | memcpy(b0, reqctx->iv, 16); | |
2686 | ||
2687 | lp = b0[0]; | |
2688 | l = lp + 1; | |
2689 | ||
2690 | /* set m, bits 3-5 */ | |
2691 | *b0 |= (8 * ((m - 2) / 2)); | |
2692 | ||
2693 | /* set adata, bit 6, if associated data is used */ | |
2694 | if (req->assoclen) | |
2695 | *b0 |= 64; | |
2696 | rc = set_msg_len(b0 + 16 - l, | |
2697 | (op_type == CHCR_DECRYPT_OP) ? | |
2698 | req->cryptlen - m : req->cryptlen, l); | |
2699 | } | |
2700 | ||
2701 | static inline int crypto_ccm_check_iv(const u8 *iv) | |
2702 | { | |
2703 | /* 2 <= L <= 8, so 1 <= L' <= 7. */ | |
2704 | if (iv[0] < 1 || iv[0] > 7) | |
2705 | return -EINVAL; | |
2706 | ||
2707 | return 0; | |
2708 | } | |
2709 | ||
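/*
 * For RFC 4309 the 11-byte CCM nonce is the 3-byte salt saved at
 * setkey time followed by the 8-byte per-request IV, with flags octet
 * 3 (so L = 4); the AAD length (assoclen minus the 8 IV bytes it
 * includes) is stored big-endian just after B0 in the scratch pad.
 */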
2710 | static int ccm_format_packet(struct aead_request *req, | |
2711 | struct chcr_aead_ctx *aeadctx, | |
2712 | unsigned int sub_type, | |
2713 | unsigned short op_type) | |
2714 | { | |
2715 | struct chcr_aead_reqctx *reqctx = aead_request_ctx(req); | |
2716 | int rc = 0; | |
2717 | ||
2debd332 HJ |
2718 | if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { |
2719 | reqctx->iv[0] = 3; | |
2720 | memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3); | |
2721 | memcpy(reqctx->iv + 4, req->iv, 8); | |
2722 | memset(reqctx->iv + 12, 0, 4); | |
2723 | *((unsigned short *)(reqctx->scratch_pad + 16)) = | |
2724 | htons(req->assoclen - 8); | |
2725 | } else { | |
2726 | memcpy(reqctx->iv, req->iv, 16); | |
2727 | *((unsigned short *)(reqctx->scratch_pad + 16)) = | |
2728 | htons(req->assoclen); | |
2729 | } | |
2730 | generate_b0(req, aeadctx, op_type); | |
2731 | /* zero the ctr value */ | |
2732 | memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1); | |
2733 | return rc; | |
2734 | } | |
2735 | ||
2736 | static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, | |
2737 | unsigned int dst_size, | |
2738 | struct aead_request *req, | |
2f47d580 | 2739 | unsigned short op_type) |
2debd332 HJ |
2740 | { |
2741 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
2f47d580 | 2742 | struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); |
2debd332 HJ |
2743 | unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM; |
2744 | unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; | |
2f47d580 | 2745 | unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id; |
2debd332 HJ |
2746 | unsigned int ccm_xtra; |
2747 | unsigned char tag_offset = 0, auth_offset = 0; | |
2debd332 HJ |
2748 | unsigned int assoclen; |
2749 | ||
2750 | if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) | |
2751 | assoclen = req->assoclen - 8; | |
2752 | else | |
2753 | assoclen = req->assoclen; | |
2754 | ccm_xtra = CCM_B0_SIZE + | |
2755 | ((assoclen) ? CCM_AAD_FIELD_SIZE : 0); | |
2756 | ||
2757 | auth_offset = req->cryptlen ? | |
2f47d580 | 2758 | (assoclen + IV + 1 + ccm_xtra) : 0; |
2debd332 HJ |
2759 | if (op_type == CHCR_DECRYPT_OP) { |
2760 | if (crypto_aead_authsize(tfm) != req->cryptlen) | |
2761 | tag_offset = crypto_aead_authsize(tfm); | |
2762 | else | |
2763 | auth_offset = 0; | |
2764 | } | |
2765 | ||
2766 | ||
2767 | sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id, | |
2f47d580 | 2768 | 2, assoclen + 1 + ccm_xtra); |
2debd332 | 2769 | sec_cpl->pldlen = |
2f47d580 | 2770 | htonl(assoclen + IV + req->cryptlen + ccm_xtra); |
2debd332 HJ |
2771 | /* For CCM there will always be a B0 block, so AAD start is always 1 */ | 
2772 | sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( | |
2773 | 1, assoclen + ccm_xtra, assoclen | |
2f47d580 | 2774 | + IV + 1 + ccm_xtra, 0); |
2debd332 HJ |
2775 | |
2776 | sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, | |
2777 | auth_offset, tag_offset, | |
2778 | (op_type == CHCR_ENCRYPT_OP) ? 0 : | |
2779 | crypto_aead_authsize(tfm)); | |
2780 | sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, | |
2781 | (op_type == CHCR_ENCRYPT_OP) ? 0 : 1, | |
0a7bd30c | 2782 | cipher_mode, mac_mode, |
2f47d580 | 2783 | aeadctx->hmac_ctrl, IV >> 1); |
2debd332 HJ |
2784 | |
2785 | sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0, | |
2f47d580 | 2786 | 0, dst_size); |
2debd332 HJ |
2787 | } |
2788 | ||
1efb892b CIK |
2789 | static int aead_ccm_validate_input(unsigned short op_type, |
2790 | struct aead_request *req, | |
2791 | struct chcr_aead_ctx *aeadctx, | |
2792 | unsigned int sub_type) | |
2debd332 HJ |
2793 | { |
2794 | if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { | |
2795 | if (crypto_ccm_check_iv(req->iv)) { | |
2796 | pr_err("CCM: IV check fails\n"); | |
2797 | return -EINVAL; | |
2798 | } | |
2799 | } else { | |
2800 | if (req->assoclen != 16 && req->assoclen != 20) { | |
2801 | pr_err("RFC4309: Invalid AAD length %d\n", | |
2802 | req->assoclen); | |
2803 | return -EINVAL; | |
2804 | } | |
2805 | } | |
2debd332 HJ |
2806 | return 0; |
2807 | } | |
static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
					  unsigned short qid,
					  int size,
					  unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len;
	unsigned int dst_size = 0, kctx_len, dnents, temp;
	unsigned int sub_type, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	reqctx->b0_dma = 0;
	sub_type = get_aead_subtype(tfm);
	if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
		assoclen -= 8;
	error = chcr_aead_common_init(req, op_type);
	if (error)
		return ERR_PTR(error);

	reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
	error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
	if (error)
		goto err;
	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
	dnents += sg_nents_xlen(req->dst, req->cryptlen
				+ (op_type ? -authsize : authsize),
				CHCR_DST_SG_SIZE, req->assoclen);
	dnents += MIN_CCM_SG; // For IV and B0
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
		       reqctx->b0_len) <= SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen +
				     reqctx->b0_len, 16) :
		(sgl_len(reqctx->src_nents + reqctx->aad_nents +
			 MIN_CCM_SG) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);

	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
				    reqctx->b0_len, transhdr_len, op_type)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
				    op_type);
		return ERR_PTR(chcr_aead_fallback(req, op_type));
	}
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);

	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = (struct chcr_wr *)__skb_put_zero(skb, transhdr_len);

	fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type);

	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       aeadctx->key, aeadctx->enckey_len);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
	error = ccm_format_packet(req, aeadctx, sub_type, op_type);
	if (error)
		goto dstmap_fail;

	reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
					&reqctx->scratch_pad, reqctx->b0_len,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
			      reqctx->b0_dma)) {
		error = -ENOMEM;
		goto dstmap_fail;
	}

	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
	chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);

	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
		reqctx->b0_len) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
		    transhdr_len, temp, 0);
	reqctx->skb = skb;
	reqctx->op = op_type;

	return skb;
dstmap_fail:
	kfree_skb(skb);
err:
	chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
	return ERR_PTR(error);
}
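
/*
 * Build a GCM work request.  The key context holds the AES key
 * followed by the GHASH subkey H cached at setkey time, and the
 * 16-byte IV handed to the hardware is the initial counter block J0
 * for 96-bit IVs: either salt || 8-byte IV (rfc4106) or the 12-byte
 * IV, followed by the 32-bit counter value 0x00000001.
 */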
static struct sk_buff *create_gcm_wr(struct aead_request *req,
				     unsigned short qid,
				     int size,
				     unsigned short op_type)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	struct sk_buff *skb = NULL;
	struct chcr_wr *chcr_req;
	struct cpl_rx_phys_dsgl *phys_cpl;
	struct ulptx_sgl *ulptx;
	unsigned int transhdr_len, dnents = 0;
	unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
	unsigned int authsize = crypto_aead_authsize(tfm);
	int error = -EINVAL;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		GFP_ATOMIC;
	struct adapter *adap = padap(a_ctx(tfm)->dev);

	if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
		assoclen = req->assoclen - 8;

	reqctx->b0_dma = 0;
	error = chcr_aead_common_init(req, op_type);
	if (error)
		return ERR_PTR(error);
	dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
	dnents += sg_nents_xlen(req->dst, req->cryptlen +
				(op_type ? -authsize : authsize),
				CHCR_DST_SG_SIZE, req->assoclen);
	dnents += MIN_GCM_SG; // For IV
	dst_size = get_space_for_phys_dsgl(dnents);
	kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
	transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
	reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
			SGE_MAX_WR_LEN;
	temp = reqctx->imm ? roundup(assoclen + IV + req->cryptlen, 16) :
		(sgl_len(reqctx->src_nents +
			 reqctx->aad_nents + MIN_GCM_SG) * 8);
	transhdr_len += temp;
	transhdr_len = roundup(transhdr_len, 16);
	if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
				    transhdr_len, op_type)) {
		atomic_inc(&adap->chcr_stats.fallback);
		chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
				    op_type);
		return ERR_PTR(chcr_aead_fallback(req, op_type));
	}
	skb = alloc_skb(SGE_MAX_WR_LEN, flags);
	if (!skb) {
		error = -ENOMEM;
		goto err;
	}

	chcr_req = __skb_put_zero(skb, transhdr_len);

	// Offset of tag from end
	temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
	chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
					a_ctx(tfm)->dev->rx_channel_id, 2,
					(assoclen + 1));
	chcr_req->sec_cpl.pldlen =
		htonl(assoclen + IV + req->cryptlen);
	chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
					assoclen ? 1 : 0, assoclen,
					assoclen + IV + 1, 0);
	chcr_req->sec_cpl.cipherstop_lo_authinsert =
			FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
						temp, temp);
	chcr_req->sec_cpl.seqno_numivs =
			FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
					CHCR_ENCRYPT_OP) ? 1 : 0,
					CHCR_SCMD_CIPHER_MODE_AES_GCM,
					CHCR_SCMD_AUTH_MODE_GHASH,
					aeadctx->hmac_ctrl, IV >> 1);
	chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
					0, 0, dst_size);
	chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
	memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
	memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
	       GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);

	/* prepare a 16 byte iv */
	/* S A L T | IV | 0x00000001 */
	if (get_aead_subtype(tfm) ==
	    CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
		memcpy(reqctx->iv, aeadctx->salt, 4);
		memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
	} else {
		memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
	}
	*((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);

	phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
	ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);

	chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
	chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
	atomic_inc(&adap->chcr_stats.aead_rqst);
	temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
		kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
	create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
		    transhdr_len, temp, reqctx->verify);
	reqctx->skb = skb;
	reqctx->op = op_type;
	return skb;

err:
	chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
	return ERR_PTR(error);
}
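
/*
 * Allocate a software AEAD of the same name to fall back on for
 * requests the hardware cannot handle (see chcr_aead_need_fallback()),
 * and size the request context for whichever of the two paths needs
 * more room.
 */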
static int chcr_aead_cra_init(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct aead_alg *alg = crypto_aead_alg(tfm);

	aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
					       CRYPTO_ALG_NEED_FALLBACK |
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(aeadctx->sw_cipher))
		return PTR_ERR(aeadctx->sw_cipher);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
				 sizeof(struct aead_request) +
				 crypto_aead_reqsize(aeadctx->sw_cipher)));
	return chcr_device_init(a_ctx(tfm));
}

static void chcr_aead_cra_exit(struct crypto_aead *tfm)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	crypto_free_aead(aeadctx->sw_cipher);
}
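
/*
 * The setauthsize() handlers below map the requested ICV length onto
 * the hardware HMAC_CTRL truncation modes.  Tag lengths the hardware
 * cannot emit directly (e.g. 13 or 15 bytes for GCM) fall back to an
 * untruncated tag that is verified in software (VERIFY_SW).
 */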
static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
					 unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
	aeadctx->mayverify = VERIFY_HW;
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
				    unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	u32 maxauth = crypto_aead_maxauthsize(tfm);

	/* The IPsec authsize for SHA1 is 12 bytes rather than 10, i.e.
	 * maxauthsize / 2 does not hold for SHA1, so the authsize == 12
	 * check must come before the authsize == (maxauth >> 1) check.
	 */
	if (authsize == ICV_4) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_6) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_10) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_12) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == ICV_14) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == (maxauth >> 1)) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
	} else if (authsize == maxauth) {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
	} else {
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_13:
	case ICV_15:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_SW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}

static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));

	switch (authsize) {
	case ICV_4:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_6:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_8:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_10:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_12:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_14:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
		aeadctx->mayverify = VERIFY_HW;
		break;
	case ICV_16:
		aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		aeadctx->mayverify = VERIFY_HW;
		break;
	default:
		crypto_tfm_set_flags((struct crypto_tfm *)tfm,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
}
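
/*
 * Program the AES key for CCM.  The key context is sized for two
 * 16-byte-aligned copies of the key because create_aead_ccm_wr()
 * writes it once for the cipher and once for the CBC-MAC.
 */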
static int chcr_ccm_common_setkey(struct crypto_aead *aead,
				  const u8 *key,
				  unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	unsigned char ck_size, mk_size;
	int key_ctx_size = 0;

	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
		mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
						key_ctx_size >> 4);
	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;

	return 0;
}

static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
				const u8 *key,
				unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	return chcr_ccm_common_setkey(aead, key, keylen);
}
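
/*
 * rfc4309 keys carry a 3-byte nonce/salt after the AES key proper;
 * strip and save it before programming the remaining key material.
 */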
static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
				    unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	int error;

	if (keylen < 3) {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		aeadctx->enckey_len = 0;
		return -EINVAL;
	}
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
			      CRYPTO_TFM_REQ_MASK);
	error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (error)
		return error;
	keylen -= 3;
	memcpy(aeadctx->salt, key + keylen, 3);
	return chcr_ccm_common_setkey(aead, key, keylen);
}
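
/*
 * Program a GCM key.  For rfc4106 the trailing 4 bytes of the key are
 * the nonce/salt.  The GHASH subkey H = AES_K(0^128) is computed once
 * here with a software AES cipher and cached in the gcm context for
 * create_gcm_wr() to copy into each key context.
 */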
static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			   unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
	struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
	struct crypto_cipher *cipher;
	unsigned int ck_size;
	int ret = 0, key_ctx_size = 0;

	aeadctx->enckey_len = 0;
	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
			      & CRYPTO_TFM_REQ_MASK);
	ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
			      CRYPTO_TFM_RES_MASK);
	if (ret)
		goto out;

	if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
	    keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(aeadctx->salt, key + keylen, 4);
	}
	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		crypto_tfm_set_flags((struct crypto_tfm *)aead,
				     CRYPTO_TFM_RES_BAD_KEY_LEN);
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(aeadctx->key, key, keylen);
	aeadctx->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
		AEAD_H_SIZE;
	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						CHCR_KEYCTX_MAC_KEY_SIZE_128,
						0, 0,
						key_ctx_size >> 4);
	/* Compute the hash subkey H = CIPH(K, 0 repeated 16 times); it is
	 * stored in the key context after the AES key.
	 */
	cipher = crypto_alloc_cipher("aes-generic", 0, 0);
	if (IS_ERR(cipher)) {
		aeadctx->enckey_len = 0;
		ret = -ENOMEM;
		goto out;
	}

	ret = crypto_cipher_setkey(cipher, key, keylen);
	if (ret) {
		aeadctx->enckey_len = 0;
		goto out1;
	}
	memset(gctx->ghash_h, 0, AEAD_H_SIZE);
	crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);

out1:
	crypto_free_cipher(cipher);
out:
	return ret;
}
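
/*
 * Program an authenc() key: split it into its cipher and auth parts,
 * then precompute the HMAC inner and outer partial digests over
 * K' ^ ipad and K' ^ opad (the usual 0x36/0x5c HMAC constants) with a
 * software shash, so the hardware only has to continue the hash over
 * the message.  Both partial digests are stored in actx->h_iopad.
 */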
static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
			       unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* holds both the authentication and cipher keys */
	struct crypto_authenc_keys keys;
	unsigned int bs, subtype;
	unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
	int err = 0, i, key_ctx_len = 0;
	unsigned char ck_size = 0;
	unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
	struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
	struct algo_param param;
	int align;
	u8 *o_ptr = NULL;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}

	if (get_alg_config(&param, max_authsize)) {
		pr_err("chcr : Unsupported digest size\n");
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
		       - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key\n");
		goto out;
	}

	/* Copy only the encryption key.  The auth key is used here only to
	 * generate h(ipad) and h(opad) and is not needed afterwards; auth
	 * keys longer than the hash block size are first digested down.
	 */
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	base_hash = chcr_alloc_shash(max_authsize);
	if (IS_ERR(base_hash)) {
		pr_err("chcr : Base driver cannot be loaded\n");
		aeadctx->enckey_len = 0;
		memzero_explicit(&keys, sizeof(keys));
		return -EINVAL;
	}
	{
		SHASH_DESC_ON_STACK(shash, base_hash);

		shash->tfm = base_hash;
		shash->flags = crypto_shash_get_flags(base_hash);
		bs = crypto_shash_blocksize(base_hash);
		align = KEYCTX_ALIGN_PAD(max_authsize);
		o_ptr = actx->h_iopad + param.result_size + align;

		if (keys.authkeylen > bs) {
			err = crypto_shash_digest(shash, keys.authkey,
						  keys.authkeylen,
						  o_ptr);
			if (err) {
				pr_err("chcr : Base driver cannot be loaded\n");
				goto out;
			}
			keys.authkeylen = max_authsize;
		} else
			memcpy(o_ptr, keys.authkey, keys.authkeylen);

		/* Compute the ipad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= IPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
					      max_authsize))
			goto out;
		/* Compute the opad-digest */
		memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
		memcpy(pad, o_ptr, keys.authkeylen);
		for (i = 0; i < bs >> 2; i++)
			*((unsigned int *)pad + i) ^= OPAD_DATA;

		if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
			goto out;

		/* convert the ipad and opad digest to network order */
		chcr_change_order(actx->h_iopad, param.result_size);
		chcr_change_order(o_ptr, param.result_size);
		key_ctx_len = sizeof(struct _key_ctx) +
			roundup(keys.enckeylen, 16) +
			(param.result_size + align) * 2;
		aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
							0, 1, key_ctx_len >> 4);
		actx->auth_mode = param.auth_mode;
		chcr_free_shash(base_hash);

		memzero_explicit(&keys, sizeof(keys));
		return 0;
	}
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	if (!IS_ERR(base_hash))
		chcr_free_shash(base_hash);
	return -EINVAL;
}
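
/*
 * authenc(digest_null,...) variant of the above: only the cipher key
 * is programmed and the MAC is a NOP.
 */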
static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
					const u8 *key, unsigned int keylen)
{
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
	struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
	/* holds both the authentication and cipher keys */
	struct crypto_authenc_keys keys;
	int err;
	unsigned int subtype;
	int key_ctx_len = 0;
	unsigned char ck_size = 0;

	crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
	crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
			      & CRYPTO_TFM_REQ_MASK);
	err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
	crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
	crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
			      & CRYPTO_TFM_RES_MASK);
	if (err)
		goto out;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
		crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
		goto out;
	}
	subtype = get_aead_subtype(authenc);
	if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
		if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
			goto out;
		memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
		       - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
		keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
	}
	if (keys.enckeylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keys.enckeylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keys.enckeylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
		goto out;
	}
	memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
	aeadctx->enckey_len = keys.enckeylen;
	if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
	    subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
		get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
				    aeadctx->enckey_len << 3);
	}
	key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);

	aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
						0, key_ctx_len >> 4);
	actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
	memzero_explicit(&keys, sizeof(keys));
	return 0;
out:
	aeadctx->enckey_len = 0;
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
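
/*
 * Common AEAD submission path: check ring occupancy (honouring
 * CRYPTO_TFM_REQ_MAY_BACKLOG), build the work request with the
 * mode-specific constructor and post it.  Returns -EINPROGRESS on
 * success, or -EBUSY when the request was accepted on an already
 * full ring.
 */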
static int chcr_aead_op(struct aead_request *req,
			unsigned short op_type,
			int size,
			create_wr_t create_wr_fn)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct uld_ctx *u_ctx;
	struct sk_buff *skb;
	int isfull = 0;

	if (!a_ctx(tfm)->dev) {
		pr_err("chcr : %s : No crypto device.\n", __func__);
		return -ENXIO;
	}
	u_ctx = ULD_CTX(a_ctx(tfm));
	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
				   a_ctx(tfm)->tx_qidx)) {
		isfull = 1;
		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -ENOSPC;
	}

	/* Form a WR from req */
	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
			   op_type);

	if (IS_ERR(skb) || !skb)
		return PTR_ERR(skb);

	skb->dev = u_ctx->lldi.ports[0];
	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
	chcr_send_wr(skb);
	return isfull ? -EBUSY : -EINPROGRESS;
}

static int chcr_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);

	reqctx->verify = VERIFY_HW;

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
				    create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
				    create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
				    create_gcm_wr);
	}
}

static int chcr_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
	int size;

	if (aeadctx->mayverify == VERIFY_SW) {
		size = crypto_aead_maxauthsize(tfm);
		reqctx->verify = VERIFY_SW;
	} else {
		size = 0;
		reqctx->verify = VERIFY_HW;
	}

	switch (get_aead_subtype(tfm)) {
	case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
	case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
	case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
	case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
				    create_authenc_wr);
	case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
	case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
				    create_aead_ccm_wr);
	default:
		return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
				    create_gcm_wr);
	}
}
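
/*
 * Template table for every algorithm the driver exposes.  The subtype
 * bits in .type pick the work-request constructor used by
 * chcr_aead_encrypt()/chcr_aead_decrypt(); chcr_register_alg() fills
 * in the remaining common fields before registration.
 *
 * These algorithms are reached through the regular kernel crypto API;
 * as a sketch (hypothetical caller, error handling omitted):
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_aead_setauthsize(tfm, GHASH_DIGEST_SIZE);
 *
 * binds to "gcm-aes-chcr" whenever its priority wins the selection.
 */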
static struct chcr_alg_template driver_algs[] = {
	/* AES-CBC */
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-chcr",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_init = chcr_cra_init,
			.cra_exit = chcr_cra_exit,
			.cra_u.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = chcr_aes_cbc_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "xts-aes-chcr",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_init = chcr_cra_init,
			.cra_exit = NULL,
			.cra_u.ablkcipher = {
				.min_keysize = 2 * AES_MIN_KEY_SIZE,
				.max_keysize = 2 * AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = chcr_aes_xts_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-chcr",
			.cra_blocksize = 1,
			.cra_init = chcr_cra_init,
			.cra_exit = chcr_cra_exit,
			.cra_u.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = chcr_aes_ctr_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER |
			CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
		.is_registered = 0,
		.alg.crypto = {
			.cra_name = "rfc3686(ctr(aes))",
			.cra_driver_name = "rfc3686-ctr-aes-chcr",
			.cra_blocksize = 1,
			.cra_init = chcr_rfc3686_init,
			.cra_exit = chcr_cra_exit,
			.cra_u.ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE +
					CTR_RFC3686_NONCE_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE +
					CTR_RFC3686_NONCE_SIZE,
				.ivsize = CTR_RFC3686_IV_SIZE,
				.setkey = chcr_aes_rfc3686_setkey,
				.encrypt = chcr_aes_encrypt,
				.decrypt = chcr_aes_decrypt,
				.geniv = "seqiv",
			}
		}
	},
	/* SHA */
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AHASH,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* HMAC */
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-chcr",
				.cra_blocksize = SHA1_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-chcr",
				.cra_blocksize = SHA224_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-chcr",
				.cra_blocksize = SHA256_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-chcr",
				.cra_blocksize = SHA384_BLOCK_SIZE,
			}
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_HMAC,
		.is_registered = 0,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-chcr",
				.cra_blocksize = SHA512_BLOCK_SIZE,
			}
		}
	},
	/* Add AEAD Algorithms */
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "gcm(aes)",
				.cra_driver_name = "gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_AES_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_gcm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4106(gcm(aes))",
				.cra_driver_name = "rfc4106-gcm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_gcm_ctx),
			},
			.ivsize = GCM_RFC4106_IV_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_gcm_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "ccm(aes)",
				.cra_driver_name = "ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_ccm_setkey,
			.setauthsize = chcr_ccm_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "rfc4309(ccm(aes))",
				.cra_driver_name = "rfc4309-ccm-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY + 1,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx),
			},
			.ivsize = 8,
			.maxauthsize = GHASH_DIGEST_SIZE,
			.setkey = chcr_aead_rfc4309_setkey,
			.setauthsize = chcr_4106_4309_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha1-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha256-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha224-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha384-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name =
					"authenc-hmac-sha512-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,cbc(aes))",
				.cra_driver_name =
					"authenc-digest_null-cbc-aes-chcr",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = chcr_authenc_setkey,
			.setauthsize = chcr_authenc_setauthsize,
		}
	},
	{
		.type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
		.is_registered = 0,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
				.cra_driver_name =
					"authenc-digest_null-rfc3686-ctr-aes-chcr",
				.cra_blocksize = 1,
				.cra_priority = CHCR_AEAD_PRIORITY,
				.cra_ctxsize = sizeof(struct chcr_context) +
					       sizeof(struct chcr_aead_ctx) +
					       sizeof(struct chcr_authenc_ctx),
			},
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = 0,
			.setkey = chcr_aead_digest_null_setkey,
			.setauthsize = chcr_authenc_null_setauthsize,
		}
	},
};

/*
 * chcr_unregister_alg - Deregister crypto algorithms with
 * kernel framework.
 */
static int chcr_unregister_alg(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			if (driver_algs[i].is_registered)
				crypto_unregister_alg(
						&driver_algs[i].alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			if (driver_algs[i].is_registered)
				crypto_unregister_aead(
						&driver_algs[i].alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			if (driver_algs[i].is_registered)
				crypto_unregister_ahash(
						&driver_algs[i].alg.hash);
			break;
		}
		driver_algs[i].is_registered = 0;
	}
	return 0;
}
4218 | ||
4219 | #define SZ_AHASH_CTX sizeof(struct chcr_context) | |
4220 | #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx)) | |
4221 | #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx) | |
4222 | #define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC) | |
4223 | ||
4224 | /* | |
4225 | * chcr_register_alg - Register crypto algorithms with kernel framework. | |
4226 | */ | |
4227 | static int chcr_register_alg(void) | |
4228 | { | |
4229 | struct crypto_alg ai; | |
4230 | struct ahash_alg *a_hash; | |
4231 | int err = 0, i; | |
4232 | char *name = NULL; | |
4233 | ||
4234 | for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { | |
4235 | if (driver_algs[i].is_registered) | |
4236 | continue; | |
4237 | switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { | |
4238 | case CRYPTO_ALG_TYPE_ABLKCIPHER: | |
b8fd1f41 HJ |
4239 | driver_algs[i].alg.crypto.cra_priority = |
4240 | CHCR_CRA_PRIORITY; | |
4241 | driver_algs[i].alg.crypto.cra_module = THIS_MODULE; | |
4242 | driver_algs[i].alg.crypto.cra_flags = | |
4243 | CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | | |
4244 | CRYPTO_ALG_NEED_FALLBACK; | |
4245 | driver_algs[i].alg.crypto.cra_ctxsize = | |
4246 | sizeof(struct chcr_context) + | |
4247 | sizeof(struct ablk_ctx); | |
4248 | driver_algs[i].alg.crypto.cra_alignmask = 0; | |
4249 | driver_algs[i].alg.crypto.cra_type = | |
4250 | &crypto_ablkcipher_type; | |
324429d7 HS |
4251 | err = crypto_register_alg(&driver_algs[i].alg.crypto); |
4252 | name = driver_algs[i].alg.crypto.cra_driver_name; | |
4253 | break; | |
2debd332 | 4254 | case CRYPTO_ALG_TYPE_AEAD: |
2debd332 | 4255 | driver_algs[i].alg.aead.base.cra_flags = |
0e93708d HJ |
4256 | CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC | |
4257 | CRYPTO_ALG_NEED_FALLBACK; | |
2debd332 HJ |
4258 | driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt; |
4259 | driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt; | |
4260 | driver_algs[i].alg.aead.init = chcr_aead_cra_init; | |
4261 | driver_algs[i].alg.aead.exit = chcr_aead_cra_exit; | |
4262 | driver_algs[i].alg.aead.base.cra_module = THIS_MODULE; | |
4263 | err = crypto_register_aead(&driver_algs[i].alg.aead); | |
4264 | name = driver_algs[i].alg.aead.base.cra_driver_name; | |
4265 | break; | |
324429d7 HS |
4266 | case CRYPTO_ALG_TYPE_AHASH: |
4267 | a_hash = &driver_algs[i].alg.hash; | |
4268 | a_hash->update = chcr_ahash_update; | |
4269 | a_hash->final = chcr_ahash_final; | |
4270 | a_hash->finup = chcr_ahash_finup; | |
4271 | a_hash->digest = chcr_ahash_digest; | |
4272 | a_hash->export = chcr_ahash_export; | |
4273 | a_hash->import = chcr_ahash_import; | |
4274 | a_hash->halg.statesize = SZ_AHASH_REQ_CTX; | |
4275 | a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY; | |
4276 | a_hash->halg.base.cra_module = THIS_MODULE; | |
4277 | a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS; | |
4278 | a_hash->halg.base.cra_alignmask = 0; | |
4279 | a_hash->halg.base.cra_exit = NULL; | |
4280 | a_hash->halg.base.cra_type = &crypto_ahash_type; | |
4281 | ||
4282 | if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) { | |
4283 | a_hash->halg.base.cra_init = chcr_hmac_cra_init; | |
4284 | a_hash->halg.base.cra_exit = chcr_hmac_cra_exit; | |
4285 | a_hash->init = chcr_hmac_init; | |
4286 | a_hash->setkey = chcr_ahash_setkey; | |
4287 | a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX; | |
4288 | } else { | |
4289 | a_hash->init = chcr_sha_init; | |
4290 | a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX; | |
4291 | a_hash->halg.base.cra_init = chcr_sha_cra_init; | |
4292 | } | |
4293 | err = crypto_register_ahash(&driver_algs[i].alg.hash); | |
4294 | ai = driver_algs[i].alg.hash.halg.base; | |
4295 | name = ai.cra_driver_name; | |
4296 | break; | |
4297 | } | |
4298 | if (err) { | |
4299 | pr_err("%s : Algorithm registration failed\n", | |
4300 | name); | |
4301 | goto register_err; | |
4302 | } else { | |
4303 | driver_algs[i].is_registered = 1; | |
4304 | } | |
4305 | } | |
4306 | return 0; | |
4307 | ||
4308 | register_err: | |
4309 | chcr_unregister_alg(); | |
4310 | return err; | |
4311 | } | |
4312 | ||
4313 | /* | |
4314 | * start_crypto - Register the crypto algorithms. | |
4315 | * This should be called once when the first device comes up. After this, | |
4316 | * the kernel will start calling the driver APIs for crypto operations. | |
4317 | */ | |
4318 | int start_crypto(void) | |
4319 | { | |
4320 | return chcr_register_alg(); | |
4321 | } | |
4322 | ||
4323 | /* | |
4324 | * stop_crypto - Deregister all the crypto algorithms from the kernel. | |
4325 | * This should be called once when the last device goes down. After this, | |
4326 | * the kernel will not call the driver APIs for crypto operations. | |
4327 | */ | |
4328 | int stop_crypto(void) | |
4329 | { | |
4330 | chcr_unregister_alg(); | |
4331 | return 0; | |
4332 | } |
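/*
 * Illustrative sketch (editorial addition, not part of the driver): per the
 * comments above, start_crypto() is meant to run once when the first device
 * comes up and stop_crypto() once when the last device goes down. The counter
 * and helpers below are hypothetical names that only show that gating; the
 * real call sites are expected to live in chcr_core.c.
 */
static atomic_t example_dev_count = ATOMIC_INIT(0);

static int example_device_add(void)
{
	/* Register the algorithms only for the first device. */
	if (atomic_inc_return(&example_dev_count) == 1)
		return start_crypto();
	return 0;
}

static void example_device_remove(void)
{
	/* Deregister only after the last device has gone down. */
	if (atomic_dec_and_test(&example_dev_count))
		stop_crypto();
}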