Commit | Line | Data |
---|---|---|
d2912cb1 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
d3123599 TL |
2 | /* |
3 | * AMD Cryptographic Coprocessor (CCP) crypto API support | |
4 | * | |
68cc652f | 5 | * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. |
d3123599 TL |
6 | * |
7 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | |
d3123599 TL |
8 | */ |
9 | ||
10 | #include <linux/module.h> | |
d81ed653 | 11 | #include <linux/moduleparam.h> |
d3123599 TL |
12 | #include <linux/kernel.h> |
13 | #include <linux/list.h> | |
14 | #include <linux/ccp.h> | |
15 | #include <linux/scatterlist.h> | |
16 | #include <crypto/internal/hash.h> | |
ceeec0af | 17 | #include <crypto/internal/akcipher.h> |
d3123599 TL |
18 | |
19 | #include "ccp-crypto.h" | |
20 | ||
/* Module identification metadata exposed via modinfo */
MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");
25 | ||
d81ed653 TL |
/* Load-time knobs: any non-zero value prevents the corresponding
 * algorithm family from being registered with the crypto API.
 * Permissions 0444 make each parameter read-only via sysfs.
 */
static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

static unsigned int des3_disable;
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");

static unsigned int rsa_disable;
module_param(rsa_disable, uint, 0444);
MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
41 | ||
d3123599 TL |
/* List heads for the supported algorithms; populated by
 * ccp_register_algs() and torn down by ccp_unregister_algs().
 */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);
static LIST_HEAD(aead_algs);
static LIST_HEAD(akcipher_algs);

/* For any tfm, requests for that tfm must be returned on the order
 * received. With multiple queues available, the CCP can process more
 * than one cmd at a time. Therefore we must maintain a cmd list to insure
 * the proper ordering of requests on a given tfm.
 */
struct ccp_crypto_queue {
	struct list_head cmds;		/* all queued ccp_crypto_cmd entries */
	struct list_head *backlog;	/* first backlogged entry, or &cmds when none */
	unsigned int cmd_count;		/* number of entries on cmds */
};

/* Queue depth at which new cmds are backlogged (or rejected) */
#define CCP_CRYPTO_MAX_QLEN	100

static struct ccp_crypto_queue req_queue;
static spinlock_t req_queue_lock;	/* protects req_queue; irqsave users */
d3123599 TL |
63 | |
/* Per-request bookkeeping wrapper that links a crypto API request to
 * the CCP cmd executing it, and carries the queue-ordering state.
 */
struct ccp_crypto_cmd {
	struct list_head entry;		/* linkage on req_queue.cmds */

	struct ccp_cmd *cmd;		/* the CCP command being run */

	/* Save the crypto_tfm and crypto_async_request addresses
	 * separately to avoid any reference to a possibly invalid
	 * crypto_async_request structure after invoking the request
	 * callback
	 */
	struct crypto_async_request *req;
	struct crypto_tfm *tfm;

	/* Used for held command processing to determine state */
	int ret;
};

/* NOTE(review): ccp_crypto_cpu is not referenced anywhere in the visible
 * portion of this file; it looks like a leftover from an earlier
 * implementation — confirm against the full file and consider removing.
 */
struct ccp_crypto_cpu {
	struct work_struct work;
	struct completion completion;
	struct ccp_crypto_cmd *crypto_cmd;
	int err;
};
87 | ||
d3123599 TL |
/* Report whether an enqueue/completion status counts as success.
 * -EINPROGRESS and -EBUSY mean the cmd was accepted (possibly
 * backlogged), so they are not treated as failures.
 */
static inline bool ccp_crypto_success(int err)
{
	return !err || (err == -EINPROGRESS) || (err == -EBUSY);
}
95 | ||
d3123599 TL |
/* Remove a finished cmd from the request queue.
 *
 * Returns the next queued cmd sharing the same tfm (a "held" cmd that
 * can now be submitted), or NULL if none. *backlog is set to the next
 * backlogged cmd whose owner must be notified with -EINPROGRESS, or
 * NULL. Caller must NOT hold req_queue_lock; it is taken here.
 */
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
	struct ccp_crypto_cmd *held = NULL, *tmp;
	unsigned long flags;

	*backlog = NULL;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Held cmds will be after the current cmd in the queue so start
	 * searching for a cmd with a matching tfm for submission.
	 */
	tmp = crypto_cmd;
	list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		held = tmp;
		break;
	}

	/* Process the backlog:
	 * Because cmds can be executed from any point in the cmd list
	 * special precautions have to be taken when handling the backlog.
	 */
	if (req_queue.backlog != &req_queue.cmds) {
		/* Skip over this cmd if it is the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;

		*backlog = container_of(req_queue.backlog,
					struct ccp_crypto_cmd, entry);
		req_queue.backlog = req_queue.backlog->next;

		/* Skip over this cmd if it is now the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;
	}

	/* Remove the cmd entry from the list of cmds */
	req_queue.cmd_count--;
	list_del(&crypto_cmd->entry);

	spin_unlock_irqrestore(&req_queue_lock, flags);

	return held;
}
143 | ||
/* CCP cmd completion callback (installed as cmd->callback).
 *
 * Propagates state transitions (-EBUSY -> -EINPROGRESS) to the crypto
 * API request, runs the tfm-specific completion hook, then submits the
 * next queued cmd for the same tfm, draining failures as it goes.
 * Frees the ccp_crypto_cmd wrapper(s) it consumes.
 */
static void ccp_crypto_complete(void *data, int err)
{
	struct ccp_crypto_cmd *crypto_cmd = data;
	struct ccp_crypto_cmd *held, *next, *backlog;
	struct crypto_async_request *req = crypto_cmd->req;
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
	int ret;

	if (err == -EINPROGRESS) {
		/* Only propagate the -EINPROGRESS if necessary */
		if (crypto_cmd->ret == -EBUSY) {
			crypto_cmd->ret = -EINPROGRESS;
			req->complete(req, -EINPROGRESS);
		}

		return;
	}

	/* Operation has completed - update the queue before invoking
	 * the completion callbacks and retrieve the next cmd (cmd with
	 * a matching tfm) that can be submitted to the CCP.
	 */
	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
	if (backlog) {
		backlog->ret = -EINPROGRESS;
		backlog->req->complete(backlog->req, -EINPROGRESS);
	}

	/* Transition the state from -EBUSY to -EINPROGRESS first */
	if (crypto_cmd->ret == -EBUSY)
		req->complete(req, -EINPROGRESS);

	/* Completion callbacks */
	ret = err;
	if (ctx->complete)
		ret = ctx->complete(req, ret);
	req->complete(req, ret);

	/* Submit the next cmd */
	while (held) {
		/* Since we have already queued the cmd, we must indicate that
		 * we can backlog so as not to "lose" this request.
		 */
		held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
		ret = ccp_enqueue_cmd(held->cmd);
		if (ccp_crypto_success(ret))
			break;

		/* Error occurred, report it and get the next entry */
		ctx = crypto_tfm_ctx(held->req->tfm);
		if (ctx->complete)
			ret = ctx->complete(held->req, ret);
		held->req->complete(held->req, ret);

		next = ccp_crypto_cmd_complete(held, &backlog);
		if (backlog) {
			backlog->ret = -EINPROGRESS;
			backlog->req->complete(backlog->req, -EINPROGRESS);
		}

		kfree(held);
		held = next;
	}

	kfree(crypto_cmd);
}
210 | ||
/* Queue a wrapped cmd, submitting it to the CCP immediately when no
 * other cmd for the same tfm is pending (per-tfm ordering guarantee).
 *
 * Returns -EINPROGRESS (queued/submitted), -EBUSY (backlogged),
 * -ENOSPC (queue full and backlog not permitted), or a submission
 * error from ccp_enqueue_cmd(). Frees crypto_cmd on failure paths
 * where it was never queued.
 */
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
	struct ccp_crypto_cmd *active = NULL, *tmp;
	unsigned long flags;
	bool free_cmd = true;
	int ret;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Check if the cmd can/should be queued */
	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
			ret = -ENOSPC;
			goto e_lock;
		}
	}

	/* Look for an entry with the same tfm. If there is a cmd
	 * with the same tfm in the list then the current cmd cannot
	 * be submitted to the CCP yet.
	 */
	list_for_each_entry(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		active = tmp;
		break;
	}

	ret = -EINPROGRESS;
	if (!active) {
		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
		if (!ccp_crypto_success(ret))
			goto e_lock;	/* Error, don't queue it */
	}

	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		/* Start the backlog at this entry if there is none yet */
		if (req_queue.backlog == &req_queue.cmds)
			req_queue.backlog = &crypto_cmd->entry;
	}
	crypto_cmd->ret = ret;

	req_queue.cmd_count++;
	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);

	/* Ownership passed to the queue; completion path will free it */
	free_cmd = false;

e_lock:
	spin_unlock_irqrestore(&req_queue_lock, flags);

	if (free_cmd)
		kfree(crypto_cmd);

	return ret;
}
266 | ||
267 | /** | |
268 | * ccp_crypto_enqueue_request - queue an crypto async request for processing | |
269 | * by the CCP | |
270 | * | |
271 | * @req: crypto_async_request struct to be processed | |
272 | * @cmd: ccp_cmd struct to be sent to the CCP | |
273 | */ | |
274 | int ccp_crypto_enqueue_request(struct crypto_async_request *req, | |
275 | struct ccp_cmd *cmd) | |
276 | { | |
277 | struct ccp_crypto_cmd *crypto_cmd; | |
278 | gfp_t gfp; | |
d3123599 TL |
279 | |
280 | gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; | |
281 | ||
282 | crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp); | |
283 | if (!crypto_cmd) | |
284 | return -ENOMEM; | |
285 | ||
286 | /* The tfm pointer must be saved and not referenced from the | |
287 | * crypto_async_request (req) pointer because it is used after | |
288 | * completion callback for the request and the req pointer | |
289 | * might not be valid anymore. | |
290 | */ | |
291 | crypto_cmd->cmd = cmd; | |
292 | crypto_cmd->req = req; | |
293 | crypto_cmd->tfm = req->tfm; | |
294 | ||
295 | cmd->callback = ccp_crypto_complete; | |
296 | cmd->data = crypto_cmd; | |
297 | ||
298 | if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) | |
299 | cmd->flags |= CCP_CMD_MAY_BACKLOG; | |
300 | else | |
301 | cmd->flags &= ~CCP_CMD_MAY_BACKLOG; | |
302 | ||
c65a52f8 | 303 | return ccp_crypto_enqueue_cmd(crypto_cmd); |
d3123599 TL |
304 | } |
305 | ||
306 | struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table, | |
307 | struct scatterlist *sg_add) | |
308 | { | |
309 | struct scatterlist *sg, *sg_last = NULL; | |
310 | ||
311 | for (sg = table->sgl; sg; sg = sg_next(sg)) | |
312 | if (!sg_page(sg)) | |
313 | break; | |
355eba5d TL |
314 | if (WARN_ON(!sg)) |
315 | return NULL; | |
d3123599 TL |
316 | |
317 | for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) { | |
318 | sg_set_page(sg, sg_page(sg_add), sg_add->length, | |
319 | sg_add->offset); | |
320 | sg_last = sg; | |
321 | } | |
355eba5d TL |
322 | if (WARN_ON(sg_add)) |
323 | return NULL; | |
d3123599 TL |
324 | |
325 | return sg_last; | |
326 | } | |
327 | ||
328 | static int ccp_register_algs(void) | |
329 | { | |
330 | int ret; | |
331 | ||
d81ed653 TL |
332 | if (!aes_disable) { |
333 | ret = ccp_register_aes_algs(&cipher_algs); | |
334 | if (ret) | |
335 | return ret; | |
d3123599 | 336 | |
d81ed653 TL |
337 | ret = ccp_register_aes_cmac_algs(&hash_algs); |
338 | if (ret) | |
339 | return ret; | |
d3123599 | 340 | |
d81ed653 TL |
341 | ret = ccp_register_aes_xts_algs(&cipher_algs); |
342 | if (ret) | |
343 | return ret; | |
36cf515b GH |
344 | |
345 | ret = ccp_register_aes_aeads(&aead_algs); | |
346 | if (ret) | |
347 | return ret; | |
d81ed653 | 348 | } |
990672d4 GH |
349 | |
350 | if (!des3_disable) { | |
351 | ret = ccp_register_des3_algs(&cipher_algs); | |
352 | if (ret) | |
353 | return ret; | |
354 | } | |
d3123599 | 355 | |
d81ed653 TL |
356 | if (!sha_disable) { |
357 | ret = ccp_register_sha_algs(&hash_algs); | |
358 | if (ret) | |
359 | return ret; | |
360 | } | |
d3123599 | 361 | |
ceeec0af GH |
362 | if (!rsa_disable) { |
363 | ret = ccp_register_rsa_algs(&akcipher_algs); | |
364 | if (ret) | |
365 | return ret; | |
366 | } | |
367 | ||
d3123599 TL |
368 | return 0; |
369 | } | |
370 | ||
/* Unregister and free every algorithm that was registered, across all
 * four alg lists. Safe to call on partially-populated lists (used by
 * the init error path as well as module exit).
 */
static void ccp_unregister_algs(void)
{
	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
	struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
	struct ccp_crypto_aead *aead_alg, *aead_tmp;
	struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;

	/* Hash (SHA, AES-CMAC) algorithms */
	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
		crypto_unregister_ahash(&ahash_alg->alg);
		list_del(&ahash_alg->entry);
		kfree(ahash_alg);
	}

	/* Block cipher (AES, AES-XTS, 3DES) algorithms */
	list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
		crypto_unregister_alg(&ablk_alg->alg);
		list_del(&ablk_alg->entry);
		kfree(ablk_alg);
	}

	/* AEAD (AES-GCM style) algorithms */
	list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
		crypto_unregister_aead(&aead_alg->alg);
		list_del(&aead_alg->entry);
		kfree(aead_alg);
	}

	/* Asymmetric (RSA) algorithms */
	list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
		crypto_unregister_akcipher(&akc_alg->alg);
		list_del(&akc_alg->entry);
		kfree(akc_alg);
	}
}
402 | ||
d3123599 TL |
403 | static int ccp_crypto_init(void) |
404 | { | |
405 | int ret; | |
406 | ||
c9f21cb6 TL |
407 | ret = ccp_present(); |
408 | if (ret) | |
409 | return ret; | |
410 | ||
bc385447 TL |
411 | spin_lock_init(&req_queue_lock); |
412 | INIT_LIST_HEAD(&req_queue.cmds); | |
413 | req_queue.backlog = &req_queue.cmds; | |
414 | req_queue.cmd_count = 0; | |
d3123599 TL |
415 | |
416 | ret = ccp_register_algs(); | |
bc385447 | 417 | if (ret) |
d3123599 | 418 | ccp_unregister_algs(); |
d3123599 TL |
419 | |
420 | return ret; | |
421 | } | |
422 | ||
/* Module exit: unregister all algorithms registered at init time */
static void ccp_crypto_exit(void)
{
	ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);