Commit | Line | Data |
---|---|---|
400c40cf SM |
1 | /* |
2 | * algif_aead: User-space interface for AEAD algorithms | |
3 | * | |
4 | * Copyright (C) 2014, Stephan Mueller <smueller@chronox.de> | |
5 | * | |
6 | * This file provides the user-space API for AEAD ciphers. | |
7 | * | |
400c40cf SM |
8 | * This program is free software; you can redistribute it and/or modify it |
9 | * under the terms of the GNU General Public License as published by the Free | |
10 | * Software Foundation; either version 2 of the License, or (at your option) | |
11 | * any later version. | |
d887c52d SM |
12 | * |
13 | * The following concept of the memory management is used: | |
14 | * | |
15 | * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is | |
16 | * filled by user space with the data submitted via sendpage/sendmsg. Filling | |
17 | * up the TX SGL does not cause a crypto operation -- the data will only be | |
18 | * tracked by the kernel. Upon receipt of one recvmsg call, the caller must | |
19 | * provide a buffer which is tracked with the RX SGL. | |
20 | * | |
21 | * During the processing of the recvmsg operation, the cipher request is | |
22 | * allocated and prepared. As part of the recvmsg operation, the processed | |
23 | * TX buffers are extracted from the TX SGL into a separate SGL. | |
24 | * | |
25 | * After the completion of the crypto operation, the RX SGL and the cipher | |
26 | * request is released. The extracted TX SGL parts are released together with | |
27 | * the RX SGL release. | |
400c40cf SM |
28 | */ |
29 | ||
83094e5e | 30 | #include <crypto/internal/aead.h> |
400c40cf SM |
31 | #include <crypto/scatterwalk.h> |
32 | #include <crypto/if_alg.h> | |
33 | #include <linux/init.h> | |
34 | #include <linux/list.h> | |
35 | #include <linux/kernel.h> | |
174cd4b1 | 36 | #include <linux/sched/signal.h> |
400c40cf SM |
37 | #include <linux/mm.h> |
38 | #include <linux/module.h> | |
39 | #include <linux/net.h> | |
40 | #include <net/sock.h> | |
41 | ||
d887c52d SM |
42 | struct aead_tsgl { |
43 | struct list_head list; | |
44 | unsigned int cur; /* Last processed SG entry */ | |
45 | struct scatterlist sg[0]; /* Array of SGs forming the SGL */ | |
400c40cf SM |
46 | }; |
47 | ||
/*
 * One RX SGL segment: wraps the user-space output buffer pages pinned for
 * a single iovec of the recvmsg call.
 */
struct aead_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;		/* Link into areq->rsgl_list */
	size_t sg_num_bytes;		/* Bytes of data in that SGL */
};
53 | ||
/*
 * State of one recvmsg-triggered cipher operation. The aead_request (plus
 * the tfm-specific request context) trails this struct in the same
 * sock_kmalloc() allocation of areqlen bytes.
 * NOTE(review): the allocation is not zeroed; iocb is only assigned on the
 * AIO path and must not be read for synchronous requests.
 */
struct aead_async_req {
	struct kiocb *iocb;		/* AIO kiocb, set only for async ops */
	struct sock *sk;		/* Socket the request belongs to */

	struct aead_rsgl first_rsgl;	/* First RX SG */
	struct list_head rsgl_list;	/* Track RX SGs */

	struct scatterlist *tsgl;	/* priv. TX SGL of buffers to process */
	unsigned int tsgl_entries;	/* number of entries in priv. TX SGL */

	unsigned int outlen;		/* Filled output buf length */

	unsigned int areqlen;		/* Length of this data struct */
	struct aead_request aead_req;	/* req ctx trails this struct */
};
69 | ||
/* Parent (bound) socket state: the transform and whether a key was set. */
struct aead_tfm {
	struct crypto_aead *aead;
	bool has_key;		/* Gate for the nokey ops, see aead_setkey() */
};
74 | ||
/* Per accept()ed-socket cipher state. */
struct aead_ctx {
	struct list_head tsgl_list;	/* Link to TX SGL */

	void *iv;			/* IV buffer (tfm's ivsize bytes) */
	size_t aead_assoclen;		/* Length of associated data (AAD) */

	struct af_alg_completion completion;	/* sync work queue */

	size_t used;	/* TX bytes sent to kernel */
	size_t rcvused;	/* total RX bytes to be processed by kernel */

	bool more;	/* More data to be expected? */
	bool merge;	/* Merge new data into existing SG */
	bool enc;	/* Crypto operation: enc, dec */

	unsigned int len;	/* Length of allocated memory for this struct */
};
92 | ||
/*
 * Usable SG entries per aead_tsgl bucket: as many scatterlist entries as
 * fit in one 4 KiB allocation alongside the bucket header, minus one entry
 * reserved for chaining to the next bucket (see sg_chain() usage).
 * NOTE(review): literal 4096 rather than PAGE_SIZE -- buckets stay 4 KiB
 * even on larger-page architectures; presumably intentional, confirm.
 */
#define MAX_SGL_ENTS ((4096 - sizeof(struct aead_tsgl)) / \
		      sizeof(struct scatterlist) - 1)
95 | ||
400c40cf SM |
96 | static inline int aead_sndbuf(struct sock *sk) |
97 | { | |
98 | struct alg_sock *ask = alg_sk(sk); | |
99 | struct aead_ctx *ctx = ask->private; | |
100 | ||
101 | return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - | |
102 | ctx->used, 0); | |
103 | } | |
104 | ||
105 | static inline bool aead_writable(struct sock *sk) | |
106 | { | |
107 | return PAGE_SIZE <= aead_sndbuf(sk); | |
108 | } | |
109 | ||
d887c52d | 110 | static inline int aead_rcvbuf(struct sock *sk) |
400c40cf | 111 | { |
d887c52d SM |
112 | struct alg_sock *ask = alg_sk(sk); |
113 | struct aead_ctx *ctx = ask->private; | |
114 | ||
115 | return max_t(int, max_t(int, sk->sk_rcvbuf & PAGE_MASK, PAGE_SIZE) - | |
116 | ctx->rcvused, 0); | |
117 | } | |
118 | ||
119 | static inline bool aead_readable(struct sock *sk) | |
120 | { | |
121 | return PAGE_SIZE <= aead_rcvbuf(sk); | |
122 | } | |
123 | ||
124 | static inline bool aead_sufficient_data(struct sock *sk) | |
125 | { | |
126 | struct alg_sock *ask = alg_sk(sk); | |
127 | struct sock *psk = ask->parent; | |
128 | struct alg_sock *pask = alg_sk(psk); | |
129 | struct aead_ctx *ctx = ask->private; | |
130 | struct aead_tfm *aeadc = pask->private; | |
131 | struct crypto_aead *tfm = aeadc->aead; | |
132 | unsigned int as = crypto_aead_authsize(tfm); | |
400c40cf | 133 | |
0c1e16cd SM |
134 | /* |
135 | * The minimum amount of memory needed for an AEAD cipher is | |
136 | * the AAD and in case of decryption the tag. | |
137 | */ | |
138 | return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as); | |
400c40cf SM |
139 | } |
140 | ||
/*
 * Ensure the TX SGL has room for at least one more entry: if the list is
 * empty or the tail bucket is full, allocate a fresh bucket of
 * MAX_SGL_ENTS usable entries (+1 reserved for chaining), chain it to the
 * previous bucket and append it to ctx->tsgl_list.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int aead_alloc_tsgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_tsgl *sgl;
	struct scatterlist *sg = NULL;

	/*
	 * list_entry() on an empty list yields a bogus pointer; it is only
	 * dereferenced below when the list is known to be non-empty.
	 */
	sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl, list);
	if (!list_empty(&ctx->tsgl_list))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		/* Chain the old tail bucket into the new one. */
		if (sg)
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl_list);
	}

	return 0;
}
170 | ||
171 | static unsigned int aead_count_tsgl(struct sock *sk, size_t bytes) | |
172 | { | |
173 | struct alg_sock *ask = alg_sk(sk); | |
174 | struct aead_ctx *ctx = ask->private; | |
175 | struct aead_tsgl *sgl, *tmp; | |
176 | unsigned int i; | |
177 | unsigned int sgl_count = 0; | |
178 | ||
179 | if (!bytes) | |
180 | return 0; | |
181 | ||
182 | list_for_each_entry_safe(sgl, tmp, &ctx->tsgl_list, list) { | |
183 | struct scatterlist *sg = sgl->sg; | |
184 | ||
185 | for (i = 0; i < sgl->cur; i++) { | |
186 | sgl_count++; | |
187 | if (sg[i].length >= bytes) | |
188 | return sgl_count; | |
189 | ||
190 | bytes -= sg[i].length; | |
191 | } | |
192 | } | |
193 | ||
194 | return sgl_count; | |
83094e5e TS |
195 | } |
196 | ||
/*
 * Consume @used bytes from the front of the TX SGL. If @dst is non-NULL,
 * page ownership for fully consumed entries is transferred into @dst
 * (caller must have created aead_count_tsgl(used) entries there); otherwise
 * the pages are released. Fully drained buckets are freed.
 *
 * NOTE(review): @dst is indexed with the per-bucket entry index i, which
 * restarts at 0 for each bucket on the list -- this looks like it assumes
 * all pulled entries live in the first bucket; confirm for requests that
 * span multiple buckets.
 */
static void aead_pull_tsgl(struct sock *sk, size_t used,
			   struct scatterlist *dst)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_tsgl *sgl;
	struct scatterlist *sg;
	unsigned int i;

	while (!list_empty(&ctx->tsgl_list)) {
		sgl = list_first_entry(&ctx->tsgl_list, struct aead_tsgl,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t plen = min_t(size_t, used, sg[i].length);
			struct page *page = sg_page(sg + i);

			if (!page)
				continue;

			/*
			 * Assumption: caller created aead_count_tsgl(len)
			 * SG entries in dst.
			 */
			if (dst)
				sg_set_page(dst + i, page, plen, sg[i].offset);

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			/* Partially consumed entry: stop, keep the bucket. */
			if (sg[i].length)
				return;

			/* Page reference is either moved to dst or dropped. */
			if (!dst)
				put_page(page);
			sg_assign_page(sg + i, NULL);
		}

		list_del(&sgl->list);
		sock_kfree_s(sk, sgl, sizeof(*sgl) + sizeof(sgl->sg[0]) *
				      (MAX_SGL_ENTS + 1));
	}

	/* No queued data left: next sendmsg starts a fresh page. */
	if (!ctx->used)
		ctx->merge = 0;
}
247 | ||
/*
 * Release all RX SGLs of a request (unpinning user pages and returning
 * their bytes to the RX accounting) and drop the page references held by
 * the request's private TX SGL, then free the TX SGL array itself.
 */
static void aead_free_areq_sgls(struct aead_async_req *areq)
{
	struct sock *sk = areq->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_rsgl *rsgl, *tmp;
	struct scatterlist *tsgl;
	struct scatterlist *sg;
	unsigned int i;

	list_for_each_entry_safe(rsgl, tmp, &areq->rsgl_list, list) {
		ctx->rcvused -= rsgl->sg_num_bytes;
		af_alg_free_sg(&rsgl->sgl);
		list_del(&rsgl->list);
		/* first_rsgl is embedded in areq, not separately allocated */
		if (rsgl != &areq->first_rsgl)
			sock_kfree_s(sk, rsgl, sizeof(*rsgl));
	}

	tsgl = areq->tsgl;
	for_each_sg(tsgl, sg, areq->tsgl_entries, i) {
		if (!sg_page(sg))
			continue;
		put_page(sg_page(sg));
	}

	if (areq->tsgl && areq->tsgl_entries)
		sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl));
}
276 | ||
/*
 * Block until the TX buffer becomes writable (at least one page free).
 *
 * Returns 0 once writable, -EAGAIN for MSG_DONTWAIT callers, and
 * -ERESTARTSYS when interrupted by a signal.
 */
static int aead_wait_for_wmem(struct sock *sk, unsigned int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int err = -ERESTARTSYS;
	long timeout;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, aead_writable(sk), &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}
302 | ||
/*
 * Wake sleepers once TX buffer space is available again (called from the
 * recvmsg path after queued data was consumed).
 * NOTE(review): wakes with POLLIN/POLL_IN rather than POLLOUT; this
 * mirrors other af_alg front ends but looks inverted -- confirm intended.
 */
static void aead_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!aead_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}
319 | ||
/*
 * Block until the sender signalled the end of the TX data stream
 * (ctx->more cleared). Returns 0 on success, -EAGAIN for MSG_DONTWAIT,
 * -ERESTARTSYS on signal.
 */
static int aead_wait_for_data(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, !ctx->more, &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}
349 | ||
/*
 * Wake sleepers once TX data is queued for processing (called at the end
 * of sendmsg/sendpage).
 * NOTE(review): wakes with POLLOUT/POLL_OUT although data became readable;
 * mirrors other af_alg front ends -- confirm intended.
 */
static void aead_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
368 | ||
/*
 * Queue plaintext/ciphertext from user space into the TX SGL.
 *
 * An optional control message selects the operation (encrypt/decrypt),
 * supplies the IV and the AAD length; it may only arrive at the start of
 * a message (i.e. when no unprocessed data is pending). Data is copied
 * into kernel pages, merging into the last partially filled page where
 * possible. Returns the number of bytes accepted or a negative error.
 */
static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	struct aead_tsgl *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err = 0;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		/* The IV must match the transform's IV size exactly. */
		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	lock_sock(sk);
	/* New parameters may not be set while old data is still queued. */
	if (!ctx->more && ctx->used) {
		err = -EINVAL;
		goto unlock;
	}

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);

		ctx->aead_assoclen = con.aead_assoclen;
	}

	while (size) {
		struct scatterlist *sg;
		size_t len = size;
		size_t plen;

		/* use the existing memory in an allocated page */
		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl_list.prev,
					 struct aead_tsgl, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);
			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			/* Keep merging while the page is not yet full. */
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!aead_writable(sk)) {
			err = aead_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		/* allocate a new page */
		len = min_t(unsigned long, size, aead_sndbuf(sk));

		err = aead_alloc_tsgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl,
				 list);
		sg = sgl->sg;
		if (sgl->cur)
			sg_unmark_end(sg + sgl->cur - 1);

		do {
			unsigned int i = sgl->cur;

			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			if (!sg_page(sg + i)) {
				err = -ENOMEM;
				goto unlock;
			}

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		/* A partially filled last page can absorb the next chunk. */
		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: copied;
}
511 | ||
/*
 * Zero-copy variant of aead_sendmsg(): append a reference to the caller's
 * page to the TX SGL instead of copying the data. Cipher parameters must
 * have been set by a prior sendmsg. Returns @size or a negative error.
 */
static ssize_t aead_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct aead_ctx *ctx = ask->private;
	struct aead_tsgl *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	/* No new data may be queued once a message has been finalized. */
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!aead_writable(sk)) {
		err = aead_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = aead_alloc_tsgl(sk);
	if (err)
		goto unlock;

	/* A foreign page cannot be merged into; force a fresh entry. */
	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl_list.prev, struct aead_tsgl, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);

	/* Take a reference; released in aead_pull_tsgl()/free paths. */
	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

	err = 0;

done:
	ctx->more = flags & MSG_MORE;
unlock:
	aead_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}
564 | ||
/*
 * Completion callback for AIO cipher operations: release the per-request
 * SGLs and the request itself, drop the socket reference taken at
 * submission time, and report the result via the caller's kiocb.
 * NOTE(review): iocb->ki_complete() runs while the socket lock is held --
 * verify the completion path cannot re-enter this socket and deadlock.
 */
static void aead_async_cb(struct crypto_async_request *_req, int err)
{
	struct aead_async_req *areq = _req->data;
	struct sock *sk = areq->sk;
	struct kiocb *iocb = areq->iocb;
	unsigned int resultlen;

	lock_sock(sk);

	/* Buffer size written by crypto operation. */
	resultlen = areq->outlen;

	aead_free_areq_sgls(areq);
	sock_kfree_s(sk, areq, areq->areqlen);
	__sock_put(sk);

	iocb->ki_complete(iocb, err ? err : resultlen, 0);

	release_sock(sk);
}
585 | ||
/*
 * Perform one cipher operation: build the RX SGL from the caller's
 * iovecs, carve the processed TX data out of the global TX SGL into a
 * per-request SGL, and run the AEAD transform synchronously or as AIO.
 *
 * Returns the number of output bytes, -EIOCBQUEUED when an AIO request
 * was queued, or a negative error. Caller holds the socket lock.
 */
static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
			 size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct aead_ctx *ctx = ask->private;
	struct aead_tfm *aeadc = pask->private;
	struct crypto_aead *tfm = aeadc->aead;
	unsigned int as = crypto_aead_authsize(tfm);
	unsigned int areqlen =
		sizeof(struct aead_async_req) + crypto_aead_reqsize(tfm);
	struct aead_async_req *areq;
	struct aead_rsgl *last_rsgl = NULL;
	int err = 0;
	size_t used = 0;	/* [in] TX bufs to be en/decrypted */
	size_t outlen = 0;	/* [out] RX bufs produced by kernel */
	size_t usedpages = 0;	/* [in] RX bufs to be used from user */
	size_t processed = 0;	/* [in] TX bufs to be consumed */

	/*
	 * Data length provided by caller via sendmsg/sendpage that has not
	 * yet been processed.
	 */
	used = ctx->used;

	/*
	 * Make sure sufficient data is present -- note, the same check is
	 * also present in sendmsg/sendpage. The checks in sendpage/sendmsg
	 * shall provide an information to the data sender that something is
	 * wrong, but they are irrelevant to maintain the kernel integrity.
	 * We need this check here too in case user space decides to not honor
	 * the error message in sendmsg/sendpage and still call recvmsg. This
	 * check here protects the kernel integrity.
	 */
	if (!aead_sufficient_data(sk))
		return -EINVAL;

	/*
	 * Calculate the minimum output buffer size holding the result of the
	 * cipher operation. When encrypting data, the receiving buffer is
	 * larger by the tag length compared to the input buffer as the
	 * encryption operation generates the tag. For decryption, the input
	 * buffer provides the tag which is consumed resulting in only the
	 * plaintext without a buffer for the tag returned to the caller.
	 */
	if (ctx->enc)
		outlen = used + as;
	else
		outlen = used - as;

	/*
	 * The cipher operation input data is reduced by the associated data
	 * length as this data is processed separately later on.
	 */
	used -= ctx->aead_assoclen;

	/* Allocate cipher request for current operation. */
	areq = sock_kmalloc(sk, areqlen, GFP_KERNEL);
	if (unlikely(!areq))
		return -ENOMEM;
	areq->areqlen = areqlen;
	areq->sk = sk;
	INIT_LIST_HEAD(&areq->rsgl_list);
	areq->tsgl = NULL;
	areq->tsgl_entries = 0;

	/* convert iovecs of output buffers into RX SGL */
	while (outlen > usedpages && msg_data_left(msg)) {
		struct aead_rsgl *rsgl;
		size_t seglen;

		/* limit the amount of readable buffers */
		if (!aead_readable(sk))
			break;

		/* Wait until the sender finalized the message. */
		if (!ctx->used) {
			err = aead_wait_for_data(sk, flags);
			if (err)
				goto free;
		}

		seglen = min_t(size_t, (outlen - usedpages),
			       msg_data_left(msg));

		/* First segment uses the rsgl embedded in areq. */
		if (list_empty(&areq->rsgl_list)) {
			rsgl = &areq->first_rsgl;
		} else {
			rsgl = sock_kmalloc(sk, sizeof(*rsgl), GFP_KERNEL);
			if (unlikely(!rsgl)) {
				err = -ENOMEM;
				goto free;
			}
		}

		rsgl->sgl.npages = 0;
		list_add_tail(&rsgl->list, &areq->rsgl_list);

		/* make one iovec available as scatterlist */
		err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
		if (err < 0)
			goto free;

		/* chain the new scatterlist with previous one */
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;
		usedpages += err;
		ctx->rcvused += err;
		rsgl->sg_num_bytes = err;
		iov_iter_advance(&msg->msg_iter, err);
	}

	/*
	 * Ensure output buffer is sufficiently large. If the caller provides
	 * less buffer space, only use the relative required input size. This
	 * allows AIO operation where the caller sent all data to be processed
	 * and the AIO operation performs the operation on the different chunks
	 * of the input data.
	 */
	if (usedpages < outlen) {
		size_t less = outlen - usedpages;

		if (used < less) {
			err = -EINVAL;
			goto free;
		}
		used -= less;
		outlen -= less;
	}

	/*
	 * Create a per request TX SGL for this request which tracks the
	 * SG entries from the global TX SGL.
	 */
	processed = used + ctx->aead_assoclen;
	areq->tsgl_entries = aead_count_tsgl(sk, processed);
	if (!areq->tsgl_entries)
		areq->tsgl_entries = 1;
	areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
				  GFP_KERNEL);
	if (!areq->tsgl) {
		err = -ENOMEM;
		goto free;
	}
	sg_init_table(areq->tsgl, areq->tsgl_entries);
	aead_pull_tsgl(sk, processed, areq->tsgl);

	/* Initialize the crypto operation */
	aead_request_set_crypt(&areq->aead_req, areq->tsgl,
			       areq->first_rsgl.sgl.sg, used, ctx->iv);
	aead_request_set_ad(&areq->aead_req, ctx->aead_assoclen);
	aead_request_set_tfm(&areq->aead_req, tfm);

	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
		/* AIO operation */
		areq->iocb = msg->msg_iocb;
		aead_request_set_callback(&areq->aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  aead_async_cb, areq);
		err = ctx->enc ? crypto_aead_encrypt(&areq->aead_req) :
				 crypto_aead_decrypt(&areq->aead_req);
	} else {
		/* Synchronous operation */
		aead_request_set_callback(&areq->aead_req,
					  CRYPTO_TFM_REQ_MAY_BACKLOG,
					  af_alg_complete, &ctx->completion);
		err = af_alg_wait_for_completion(ctx->enc ?
				crypto_aead_encrypt(&areq->aead_req) :
				crypto_aead_decrypt(&areq->aead_req),
						 &ctx->completion);
	}

	/* AIO operation in progress */
	if (err == -EINPROGRESS) {
		/* Reference dropped by aead_async_cb() on completion. */
		sock_hold(sk);

		/* Remember output size that will be generated. */
		areq->outlen = outlen;

		return -EIOCBQUEUED;
	}

free:
	aead_free_areq_sgls(areq);
	if (areq)
		sock_kfree_s(sk, areq, areqlen);

	return err ? err : outlen;
}
778 | ||
/*
 * recvmsg entry point: repeatedly invoke _aead_recvmsg() until the
 * caller's buffer is exhausted or an error occurs. Only -EIOCBQUEUED and
 * -EBADMSG are propagated once some data was produced; other errors after
 * partial progress return the byte count gathered so far.
 */
static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
			size_t ignored, int flags)
{
	struct sock *sk = sock->sk;
	int ret = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		int err = _aead_recvmsg(sock, msg, ignored, flags);

		/*
		 * This error covers -EIOCBQUEUED which implies that we can
		 * only handle one AIO request. If the caller wants to have
		 * multiple AIO requests in parallel, he must make multiple
		 * separate AIO calls.
		 */
		if (err <= 0) {
			if (err == -EIOCBQUEUED || err == -EBADMSG)
				ret = err;
			goto out;
		}

		ret += err;
	}

out:
	aead_wmem_wakeup(sk);
	release_sock(sk);
	return ret;
}
809 | ||
400c40cf SM |
810 | static unsigned int aead_poll(struct file *file, struct socket *sock, |
811 | poll_table *wait) | |
812 | { | |
813 | struct sock *sk = sock->sk; | |
814 | struct alg_sock *ask = alg_sk(sk); | |
815 | struct aead_ctx *ctx = ask->private; | |
816 | unsigned int mask; | |
817 | ||
818 | sock_poll_wait(file, sk_sleep(sk), wait); | |
819 | mask = 0; | |
820 | ||
821 | if (!ctx->more) | |
822 | mask |= POLLIN | POLLRDNORM; | |
823 | ||
824 | if (aead_writable(sk)) | |
825 | mask |= POLLOUT | POLLWRNORM | POLLWRBAND; | |
826 | ||
827 | return mask; | |
828 | } | |
829 | ||
/* Socket operations for sockets whose transform has a key set. */
static struct proto_ops algif_aead_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg,
	.sendpage	=	aead_sendpage,
	.recvmsg	=	aead_recvmsg,
	.poll		=	aead_poll,
};
851 | ||
/*
 * Verify that a key was set on the parent tfm socket before permitting
 * data operations through the nokey ops. On first success, shift this
 * child's reference to the parent (pask->refcnt / sock_hold) and mark the
 * child checked (ask->refcnt) so the test runs only once per socket.
 */
static int aead_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct aead_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	/* Already validated earlier; nothing to do. */
	if (ask->refcnt)
		goto unlock_child;

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (!tfm->has_key)
		goto unlock;

	if (!pask->refcnt++)
		sock_hold(psk);

	ask->refcnt = 1;
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}
889 | ||
890 | static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg, | |
891 | size_t size) | |
892 | { | |
893 | int err; | |
894 | ||
895 | err = aead_check_key(sock); | |
896 | if (err) | |
897 | return err; | |
898 | ||
899 | return aead_sendmsg(sock, msg, size); | |
900 | } | |
901 | ||
902 | static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page, | |
903 | int offset, size_t size, int flags) | |
904 | { | |
905 | int err; | |
906 | ||
907 | err = aead_check_key(sock); | |
908 | if (err) | |
909 | return err; | |
910 | ||
911 | return aead_sendpage(sock, page, offset, size, flags); | |
912 | } | |
913 | ||
914 | static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg, | |
915 | size_t ignored, int flags) | |
916 | { | |
917 | int err; | |
918 | ||
919 | err = aead_check_key(sock); | |
920 | if (err) | |
921 | return err; | |
922 | ||
923 | return aead_recvmsg(sock, msg, ignored, flags); | |
924 | } | |
925 | ||
/*
 * Socket operations used while no key is set; data paths are wrapped with
 * aead_check_key() and fail with -ENOKEY until a key is provided.
 */
static struct proto_ops algif_aead_ops_nokey = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	aead_sendmsg_nokey,
	.sendpage	=	aead_sendpage_nokey,
	.recvmsg	=	aead_recvmsg_nokey,
	.poll		=	aead_poll,
};
947 | ||
400c40cf SM |
948 | static void *aead_bind(const char *name, u32 type, u32 mask) |
949 | { | |
2a2a251f SM |
950 | struct aead_tfm *tfm; |
951 | struct crypto_aead *aead; | |
952 | ||
953 | tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); | |
954 | if (!tfm) | |
955 | return ERR_PTR(-ENOMEM); | |
956 | ||
957 | aead = crypto_alloc_aead(name, type, mask); | |
958 | if (IS_ERR(aead)) { | |
959 | kfree(tfm); | |
960 | return ERR_CAST(aead); | |
961 | } | |
962 | ||
963 | tfm->aead = aead; | |
964 | ||
965 | return tfm; | |
400c40cf SM |
966 | } |
967 | ||
968 | static void aead_release(void *private) | |
969 | { | |
2a2a251f SM |
970 | struct aead_tfm *tfm = private; |
971 | ||
972 | crypto_free_aead(tfm->aead); | |
973 | kfree(tfm); | |
400c40cf SM |
974 | } |
975 | ||
976 | static int aead_setauthsize(void *private, unsigned int authsize) | |
977 | { | |
2a2a251f SM |
978 | struct aead_tfm *tfm = private; |
979 | ||
980 | return crypto_aead_setauthsize(tfm->aead, authsize); | |
400c40cf SM |
981 | } |
982 | ||
983 | static int aead_setkey(void *private, const u8 *key, unsigned int keylen) | |
984 | { | |
2a2a251f SM |
985 | struct aead_tfm *tfm = private; |
986 | int err; | |
987 | ||
988 | err = crypto_aead_setkey(tfm->aead, key, keylen); | |
989 | tfm->has_key = !err; | |
990 | ||
991 | return err; | |
400c40cf SM |
992 | } |
993 | ||
994 | static void aead_sock_destruct(struct sock *sk) | |
995 | { | |
996 | struct alg_sock *ask = alg_sk(sk); | |
997 | struct aead_ctx *ctx = ask->private; | |
d887c52d SM |
998 | struct sock *psk = ask->parent; |
999 | struct alg_sock *pask = alg_sk(psk); | |
1000 | struct aead_tfm *aeadc = pask->private; | |
1001 | struct crypto_aead *tfm = aeadc->aead; | |
1002 | unsigned int ivlen = crypto_aead_ivsize(tfm); | |
400c40cf | 1003 | |
d887c52d | 1004 | aead_pull_tsgl(sk, ctx->used, NULL); |
400c40cf SM |
1005 | sock_kzfree_s(sk, ctx->iv, ivlen); |
1006 | sock_kfree_s(sk, ctx, ctx->len); | |
1007 | af_alg_release_parent(sk); | |
1008 | } | |
1009 | ||
2a2a251f | 1010 | static int aead_accept_parent_nokey(void *private, struct sock *sk) |
400c40cf SM |
1011 | { |
1012 | struct aead_ctx *ctx; | |
1013 | struct alg_sock *ask = alg_sk(sk); | |
2a2a251f SM |
1014 | struct aead_tfm *tfm = private; |
1015 | struct crypto_aead *aead = tfm->aead; | |
d887c52d | 1016 | unsigned int len = sizeof(*ctx); |
2a2a251f | 1017 | unsigned int ivlen = crypto_aead_ivsize(aead); |
400c40cf SM |
1018 | |
1019 | ctx = sock_kmalloc(sk, len, GFP_KERNEL); | |
1020 | if (!ctx) | |
1021 | return -ENOMEM; | |
1022 | memset(ctx, 0, len); | |
1023 | ||
1024 | ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL); | |
1025 | if (!ctx->iv) { | |
1026 | sock_kfree_s(sk, ctx, len); | |
1027 | return -ENOMEM; | |
1028 | } | |
1029 | memset(ctx->iv, 0, ivlen); | |
1030 | ||
d887c52d | 1031 | INIT_LIST_HEAD(&ctx->tsgl_list); |
400c40cf SM |
1032 | ctx->len = len; |
1033 | ctx->used = 0; | |
d887c52d | 1034 | ctx->rcvused = 0; |
400c40cf SM |
1035 | ctx->more = 0; |
1036 | ctx->merge = 0; | |
1037 | ctx->enc = 0; | |
400c40cf SM |
1038 | ctx->aead_assoclen = 0; |
1039 | af_alg_init_completion(&ctx->completion); | |
400c40cf SM |
1040 | |
1041 | ask->private = ctx; | |
1042 | ||
400c40cf SM |
1043 | sk->sk_destruct = aead_sock_destruct; |
1044 | ||
1045 | return 0; | |
1046 | } | |
1047 | ||
2a2a251f SM |
1048 | static int aead_accept_parent(void *private, struct sock *sk) |
1049 | { | |
1050 | struct aead_tfm *tfm = private; | |
1051 | ||
1052 | if (!tfm->has_key) | |
1053 | return -ENOKEY; | |
1054 | ||
1055 | return aead_accept_parent_nokey(private, sk); | |
1056 | } | |
1057 | ||
400c40cf SM |
/*
 * af_alg type descriptor registering the "aead" socket family.  The
 * accept/accept_nokey and ops/ops_nokey pairs switch behavior based on
 * whether a key has been set (see aead_setkey()/aead_accept_parent()).
 */
static const struct af_alg_type algif_type_aead = {
	.bind = aead_bind,
	.release = aead_release,
	.setkey = aead_setkey,
	.setauthsize = aead_setauthsize,
	.accept = aead_accept_parent,
	.accept_nokey = aead_accept_parent_nokey,
	.ops = &algif_aead_ops,
	.ops_nokey = &algif_aead_ops_nokey,
	.name = "aead",
	.owner = THIS_MODULE
};
1070 | ||
/* Module init: register the "aead" socket type with the af_alg core. */
static int __init algif_aead_init(void)
{
	return af_alg_register_type(&algif_type_aead);
}
1075 | ||
/* Module exit: unregister the "aead" type registered at init time. */
static void __exit algif_aead_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_aead);
	/*
	 * NOTE(review): BUG_ON here presumably means unregistration can
	 * only fail if the type was never registered — confirm against
	 * af_alg_unregister_type() before relaxing this.
	 */
	BUG_ON(err);
}
1081 | ||
/* Standard module registration and metadata. */
module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");