Commit | Line | Data |
---|---|---|
d3123599 TL |
1 | /* |
2 | * AMD Cryptographic Coprocessor (CCP) crypto API support | |
3 | * | |
4 | * Copyright (C) 2013 Advanced Micro Devices, Inc. | |
5 | * | |
6 | * Author: Tom Lendacky <thomas.lendacky@amd.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License version 2 as | |
10 | * published by the Free Software Foundation. | |
11 | */ | |
12 | ||
13 | #include <linux/module.h> | |
d81ed653 | 14 | #include <linux/moduleparam.h> |
d3123599 TL |
15 | #include <linux/kernel.h> |
16 | #include <linux/list.h> | |
17 | #include <linux/ccp.h> | |
18 | #include <linux/scatterlist.h> | |
19 | #include <crypto/internal/hash.h> | |
20 | ||
21 | #include "ccp-crypto.h" | |
22 | ||
23 | MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>"); | |
24 | MODULE_LICENSE("GPL"); | |
25 | MODULE_VERSION("1.0.0"); | |
26 | MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support"); | |
27 | ||
d81ed653 TL |
/* Module parameters: allow individual algorithm families to be disabled
 * at module load time (perms 0444: visible but read-only via sysfs).
 */
static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);
40 | ||
/* For any tfm, requests for that tfm must be returned in the order
 * received.  With multiple queues available, the CCP can process more
 * than one cmd at a time.  Therefore we must maintain a cmd list to ensure
 * the proper ordering of requests on a given tfm.
 */
struct ccp_crypto_queue {
	struct list_head cmds;		/* pending/active ccp_crypto_cmd entries */
	struct list_head *backlog;	/* oldest backlogged entry; &cmds when none */
	unsigned int cmd_count;		/* number of entries on cmds */
};
#define CCP_CRYPTO_MAX_QLEN	100	/* queue depth before cmds are backlogged */

/* Single global request queue, protected by req_queue_lock (IRQ-safe:
 * ccp_crypto_complete() may run from interrupt context).
 */
static struct ccp_crypto_queue req_queue;
static spinlock_t req_queue_lock;
d3123599 TL |
55 | |
/* Queue entry wrapping one CCP cmd submitted through the crypto API */
struct ccp_crypto_cmd {
	struct list_head entry;		/* linkage on req_queue.cmds */

	struct ccp_cmd *cmd;		/* the cmd handed to ccp_enqueue_cmd() */

	/* Save the crypto_tfm and crypto_async_request addresses
	 * separately to avoid any reference to a possibly invalid
	 * crypto_async_request structure after invoking the request
	 * callback
	 */
	struct crypto_async_request *req;
	struct crypto_tfm *tfm;

	/* Used for held command processing to determine state */
	int ret;
};

/* NOTE(review): ccp_crypto_cpu is not referenced anywhere in this file
 * as shown; it looks like a leftover from an earlier per-CPU submission
 * scheme.  Confirm against the rest of the driver before removing.
 */
struct ccp_crypto_cpu {
	struct work_struct work;
	struct completion completion;
	struct ccp_crypto_cmd *crypto_cmd;
	int err;
};
79 | ||
80 | ||
/* A submission result counts as success when the cmd completed (0),
 * is being processed (-EINPROGRESS), or was backlogged (-EBUSY).
 * Any other value means the cmd was never queued.
 */
static inline bool ccp_crypto_success(int err)
{
	return (err == 0) || (err == -EINPROGRESS) || (err == -EBUSY);
}
88 | ||
d3123599 TL |
/* Dequeue @crypto_cmd and locate (without removing) the next queued cmd
 * sharing the same tfm (the "held" cmd) so the caller can submit it to
 * the CCP, preserving per-tfm ordering.  Also advances req_queue.backlog;
 * the cmd that thereby becomes eligible for -EINPROGRESS notification is
 * returned through @backlog (NULL if none).
 *
 * Returns the held cmd, or NULL when no other queued cmd uses this tfm.
 */
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
	struct ccp_crypto_cmd *held = NULL, *tmp;
	unsigned long flags;

	*backlog = NULL;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Held cmds will be after the current cmd in the queue so start
	 * searching for a cmd with a matching tfm for submission.
	 */
	tmp = crypto_cmd;
	list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		held = tmp;
		break;
	}

	/* Process the backlog:
	 * Because cmds can be executed from any point in the cmd list
	 * special precautions have to be taken when handling the backlog.
	 */
	if (req_queue.backlog != &req_queue.cmds) {
		/* Skip over this cmd if it is the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;

		*backlog = container_of(req_queue.backlog,
					struct ccp_crypto_cmd, entry);
		req_queue.backlog = req_queue.backlog->next;

		/* Skip over this cmd if it is now the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;
	}

	/* Remove the cmd entry from the list of cmds */
	req_queue.cmd_count--;
	list_del(&crypto_cmd->entry);

	spin_unlock_irqrestore(&req_queue_lock, flags);

	return held;
}
136 | ||
/* Completion callback invoked by the CCP driver core.  @data is the
 * ccp_crypto_cmd being completed; @err is the cmd result, or
 * -EINPROGRESS when a backlogged cmd transitions to active.
 *
 * On final completion: dequeues the cmd, notifies any newly-eligible
 * backlog owner, invokes the per-ctx and crypto-API completion
 * callbacks, then submits the next same-tfm cmd (if any) to the CCP.
 */
static void ccp_crypto_complete(void *data, int err)
{
	struct ccp_crypto_cmd *crypto_cmd = data;
	struct ccp_crypto_cmd *held, *next, *backlog;
	struct crypto_async_request *req = crypto_cmd->req;
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
	int ret;

	if (err == -EINPROGRESS) {
		/* Only propagate the -EINPROGRESS if necessary */
		if (crypto_cmd->ret == -EBUSY) {
			crypto_cmd->ret = -EINPROGRESS;
			req->complete(req, -EINPROGRESS);
		}

		return;
	}

	/* Operation has completed - update the queue before invoking
	 * the completion callbacks and retrieve the next cmd (cmd with
	 * a matching tfm) that can be submitted to the CCP.
	 */
	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
	if (backlog) {
		backlog->ret = -EINPROGRESS;
		backlog->req->complete(backlog->req, -EINPROGRESS);
	}

	/* Transition the state from -EBUSY to -EINPROGRESS first */
	if (crypto_cmd->ret == -EBUSY)
		req->complete(req, -EINPROGRESS);

	/* Completion callbacks */
	ret = err;
	if (ctx->complete)
		ret = ctx->complete(req, ret);
	req->complete(req, ret);

	/* Submit the next cmd */
	while (held) {
		ret = ccp_enqueue_cmd(held->cmd);
		if (ccp_crypto_success(ret))
			break;

		/* Error occurred, report it and get the next entry */
		held->req->complete(held->req, ret);

		next = ccp_crypto_cmd_complete(held, &backlog);
		if (backlog) {
			backlog->ret = -EINPROGRESS;
			backlog->req->complete(backlog->req, -EINPROGRESS);
		}

		kfree(held);
		held = next;
	}

	kfree(crypto_cmd);
}
196 | ||
197 | static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) | |
198 | { | |
d3123599 | 199 | struct ccp_crypto_cmd *active = NULL, *tmp; |
bc385447 TL |
200 | unsigned long flags; |
201 | int ret; | |
d3123599 | 202 | |
bc385447 | 203 | spin_lock_irqsave(&req_queue_lock, flags); |
d3123599 TL |
204 | |
205 | /* Check if the cmd can/should be queued */ | |
bc385447 | 206 | if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { |
d3123599 TL |
207 | ret = -EBUSY; |
208 | if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) | |
bc385447 | 209 | goto e_lock; |
d3123599 TL |
210 | } |
211 | ||
212 | /* Look for an entry with the same tfm. If there is a cmd | |
bc385447 TL |
213 | * with the same tfm in the list then the current cmd cannot |
214 | * be submitted to the CCP yet. | |
d3123599 | 215 | */ |
bc385447 | 216 | list_for_each_entry(tmp, &req_queue.cmds, entry) { |
d3123599 TL |
217 | if (crypto_cmd->tfm != tmp->tfm) |
218 | continue; | |
219 | active = tmp; | |
220 | break; | |
221 | } | |
222 | ||
223 | ret = -EINPROGRESS; | |
224 | if (!active) { | |
225 | ret = ccp_enqueue_cmd(crypto_cmd->cmd); | |
226 | if (!ccp_crypto_success(ret)) | |
bc385447 | 227 | goto e_lock; |
d3123599 TL |
228 | } |
229 | ||
bc385447 | 230 | if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { |
d3123599 | 231 | ret = -EBUSY; |
bc385447 TL |
232 | if (req_queue.backlog == &req_queue.cmds) |
233 | req_queue.backlog = &crypto_cmd->entry; | |
d3123599 TL |
234 | } |
235 | crypto_cmd->ret = ret; | |
236 | ||
bc385447 TL |
237 | req_queue.cmd_count++; |
238 | list_add_tail(&crypto_cmd->entry, &req_queue.cmds); | |
d3123599 | 239 | |
bc385447 TL |
240 | e_lock: |
241 | spin_unlock_irqrestore(&req_queue_lock, flags); | |
d3123599 TL |
242 | |
243 | return ret; | |
244 | } | |
245 | ||
/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *				by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 *
 * Return: -EINPROGRESS or -EBUSY if the request was queued; otherwise a
 * negative error code - the request was not queued and no completion
 * callback will be invoked for it.
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
			       struct ccp_cmd *cmd)
{
	struct ccp_crypto_cmd *crypto_cmd;
	gfp_t gfp;
	int ret;

	/* Only sleep in the allocation if the caller allows it */
	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
	if (!crypto_cmd)
		return -ENOMEM;

	/* The tfm pointer must be saved and not referenced from the
	 * crypto_async_request (req) pointer because it is used after
	 * completion callback for the request and the req pointer
	 * might not be valid anymore.
	 */
	crypto_cmd->cmd = cmd;
	crypto_cmd->req = req;
	crypto_cmd->tfm = req->tfm;

	cmd->callback = ccp_crypto_complete;
	cmd->data = crypto_cmd;

	/* Propagate the request's backlog preference to the CCP cmd */
	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		cmd->flags |= CCP_CMD_MAY_BACKLOG;
	else
		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

	ret = ccp_crypto_enqueue_cmd(crypto_cmd);
	if (!ccp_crypto_success(ret))
		kfree(crypto_cmd);	/* not queued; no completion coming */

	return ret;
}
289 | ||
290 | struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table, | |
291 | struct scatterlist *sg_add) | |
292 | { | |
293 | struct scatterlist *sg, *sg_last = NULL; | |
294 | ||
295 | for (sg = table->sgl; sg; sg = sg_next(sg)) | |
296 | if (!sg_page(sg)) | |
297 | break; | |
298 | BUG_ON(!sg); | |
299 | ||
300 | for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) { | |
301 | sg_set_page(sg, sg_page(sg_add), sg_add->length, | |
302 | sg_add->offset); | |
303 | sg_last = sg; | |
304 | } | |
305 | BUG_ON(sg_add); | |
306 | ||
307 | return sg_last; | |
308 | } | |
309 | ||
310 | static int ccp_register_algs(void) | |
311 | { | |
312 | int ret; | |
313 | ||
d81ed653 TL |
314 | if (!aes_disable) { |
315 | ret = ccp_register_aes_algs(&cipher_algs); | |
316 | if (ret) | |
317 | return ret; | |
d3123599 | 318 | |
d81ed653 TL |
319 | ret = ccp_register_aes_cmac_algs(&hash_algs); |
320 | if (ret) | |
321 | return ret; | |
d3123599 | 322 | |
d81ed653 TL |
323 | ret = ccp_register_aes_xts_algs(&cipher_algs); |
324 | if (ret) | |
325 | return ret; | |
326 | } | |
d3123599 | 327 | |
d81ed653 TL |
328 | if (!sha_disable) { |
329 | ret = ccp_register_sha_algs(&hash_algs); | |
330 | if (ret) | |
331 | return ret; | |
332 | } | |
d3123599 TL |
333 | |
334 | return 0; | |
335 | } | |
336 | ||
337 | static void ccp_unregister_algs(void) | |
338 | { | |
339 | struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp; | |
340 | struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp; | |
341 | ||
342 | list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) { | |
343 | crypto_unregister_ahash(&ahash_alg->alg); | |
344 | list_del(&ahash_alg->entry); | |
345 | kfree(ahash_alg); | |
346 | } | |
347 | ||
348 | list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) { | |
349 | crypto_unregister_alg(&ablk_alg->alg); | |
350 | list_del(&ablk_alg->entry); | |
351 | kfree(ablk_alg); | |
352 | } | |
353 | } | |
354 | ||
d3123599 TL |
355 | static int ccp_crypto_init(void) |
356 | { | |
357 | int ret; | |
358 | ||
bc385447 TL |
359 | spin_lock_init(&req_queue_lock); |
360 | INIT_LIST_HEAD(&req_queue.cmds); | |
361 | req_queue.backlog = &req_queue.cmds; | |
362 | req_queue.cmd_count = 0; | |
d3123599 TL |
363 | |
364 | ret = ccp_register_algs(); | |
bc385447 | 365 | if (ret) |
d3123599 | 366 | ccp_unregister_algs(); |
d3123599 TL |
367 | |
368 | return ret; | |
369 | } | |
370 | ||
/* Module exit: unregister all algorithms registered at init time */
static void ccp_crypto_exit(void)
{
	ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);