Commit | Line | Data |
---|---|---|
8ff59090 HX |
1 | /* |
2 | * algif_skcipher: User-space interface for skcipher algorithms | |
3 | * | |
4 | * This file provides the user-space API for symmetric key ciphers. | |
5 | * | |
6 | * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify it | |
9 | * under the terms of the GNU General Public License as published by the Free | |
10 | * Software Foundation; either version 2 of the License, or (at your option) | |
11 | * any later version. | |
12 | * | |
13 | */ | |
14 | ||
15 | #include <crypto/scatterwalk.h> | |
16 | #include <crypto/skcipher.h> | |
17 | #include <crypto/if_alg.h> | |
18 | #include <linux/init.h> | |
19 | #include <linux/list.h> | |
20 | #include <linux/kernel.h> | |
174cd4b1 | 21 | #include <linux/sched/signal.h> |
8ff59090 HX |
22 | #include <linux/mm.h> |
23 | #include <linux/module.h> | |
24 | #include <linux/net.h> | |
25 | #include <net/sock.h> | |
26 | ||
27 | struct skcipher_sg_list { | |
28 | struct list_head list; | |
29 | ||
30 | int cur; | |
31 | ||
32 | struct scatterlist sg[0]; | |
33 | }; | |
34 | ||
/*
 * Private data of the parent (tfm) socket: the cipher handle shared by
 * all accept(2)ed request sockets, and whether a key has been set yet.
 */
struct skcipher_tfm {
	struct crypto_skcipher *skcipher;
	bool has_key;
};
39 | ||
/* Per-request-socket state. */
struct skcipher_ctx {
	struct list_head tsgl;		/* chain of skcipher_sg_list tx chunks */
	struct af_alg_sgl rsgl;		/* receive-side sgl for sync recvmsg */

	void *iv;			/* IV buffer, crypto_skcipher_ivsize() bytes */

	struct af_alg_completion completion;	/* sync-path crypto completion */

	atomic_t inflight;	/* async requests still pending completion */
	size_t used;		/* bytes currently queued on tsgl */

	unsigned int len;	/* size of this allocation, for sock_kfree_s() */
	bool more;		/* MSG_MORE seen: caller will send more data */
	bool merge;		/* last tx page partially filled, may append */
	bool enc;		/* true = encrypt, false = decrypt */

	/* must remain last: the tfm's request context trails this struct */
	struct skcipher_request req;
};
58 | ||
/* One node of the receive-side sgl list of an async request. */
struct skcipher_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

/* State of one in-flight asynchronous (AIO) recvmsg request. */
struct skcipher_async_req {
	struct kiocb *iocb;		/* completed via ki_complete() */
	struct skcipher_async_rsgl first_sgl;	/* embedded first rsgl node */
	struct list_head list;		/* head of the rsgl list */
	struct scatterlist *tsg;	/* tx sgl taken over from the ctx */
	atomic_t *inflight;		/* points at skcipher_ctx::inflight */
	/* must remain last: reqsize bytes of request context follow, then
	 * the per-request IV copy (see skcipher_recvmsg_async()) */
	struct skcipher_request req;
};
72 | ||
/* Data entries per skcipher_sg_list chunk, sized so one chunk fits in
 * 4096 bytes; one slot is reserved for chaining to the next chunk.
 * NOTE(review): 4096 looks like an assumed PAGE_SIZE — confirm intent
 * on architectures with larger pages. */
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
		      sizeof(struct scatterlist) - 1)
75 | ||
a596999b TS |
/*
 * Release everything an async request owns: the receive-side rsgl list
 * (the first node is embedded in sreq and must not be kfreed) and the
 * tx scatterlist whose page references were taken over from the socket
 * context in skcipher_recvmsg_async().
 */
static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
{
	struct skcipher_async_rsgl *rsgl, *tmp;
	struct scatterlist *sgl;
	struct scatterlist *sg;
	int i, n;

	list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		if (rsgl != &sreq->first_sgl)	/* embedded, not allocated */
			kfree(rsgl);
	}
	sgl = sreq->tsg;
	n = sg_nents(sgl);
	/* drop the references the request held on the tx pages */
	for_each_sg(sgl, sg, n, i)
		put_page(sg_page(sg));

	kfree(sreq->tsg);
}
95 | ||
/* Crypto-layer completion callback for async recvmsg requests: tear
 * down the request and complete the originating AIO iocb. */
static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
	struct skcipher_async_req *sreq = req->data;
	struct kiocb *iocb = sreq->iocb;

	atomic_dec(sreq->inflight);
	skcipher_free_async_sgls(sreq);
	kzfree(sreq);		/* sreq trails the IV copy: zero on free */
	iocb->ki_complete(iocb, err, err);
}
106 | ||
0f6bb83c | 107 | static inline int skcipher_sndbuf(struct sock *sk) |
8ff59090 HX |
108 | { |
109 | struct alg_sock *ask = alg_sk(sk); | |
110 | struct skcipher_ctx *ctx = ask->private; | |
111 | ||
0f6bb83c HX |
112 | return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) - |
113 | ctx->used, 0); | |
114 | } | |
115 | ||
116 | static inline bool skcipher_writable(struct sock *sk) | |
117 | { | |
118 | return PAGE_SIZE <= skcipher_sndbuf(sk); | |
8ff59090 HX |
119 | } |
120 | ||
/*
 * Make sure the tail chunk of ctx->tsgl has room for at least one more
 * scatterlist entry, allocating and chaining a fresh chunk when the
 * current one is full (or the list is empty).
 * Returns 0 on success or -ENOMEM.
 */
static int skcipher_alloc_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg = NULL;

	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
	if (!list_empty(&ctx->tsgl))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		/* +1 entry serves as the chain link to the next chunk */
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				       GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		if (sg)
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl);
	}

	return 0;
}
150 | ||
/*
 * Consume @used bytes from the front of the tx queue.  Partially
 * consumed entries are trimmed in place (offset/length adjusted);
 * fully drained chunks are unlinked and freed.  @put controls whether
 * page references are dropped here — the async path passes 0 because
 * the request has taken ownership of the pages.
 */
static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int i;

	while (!list_empty(&ctx->tsgl)) {
		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t plen = min_t(size_t, used, sg[i].length);

			if (!sg_page(sg + i))
				continue;

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			/* entry only partially consumed: all done */
			if (sg[i].length)
				return;
			if (put)
				put_page(sg_page(sg + i));
			sg_assign_page(sg + i, NULL);
		}

		/* whole chunk drained: unlink and free it */
		list_del(&sgl->list);
		sock_kfree_s(sk, sgl,
			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
					    (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}
192 | ||
/* Drop the entire tx queue, releasing page references. */
static void skcipher_free_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	skcipher_pull_sgl(sk, ctx->used, 1);
}
200 | ||
/*
 * Sleep until the socket becomes writable (a page of send budget is
 * free) or a signal arrives.  Returns 0, -EAGAIN for MSG_DONTWAIT, or
 * -ERESTARTSYS on signal.
 * NOTE(review): SOCKWQ_ASYNC_NOSPACE is set but never cleared here,
 * unlike the WAITDATA bit in skcipher_wait_for_data() — confirm this
 * asymmetry is intentional.
 */
static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int err = -ERESTARTSYS;
	long timeout;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, skcipher_writable(sk), &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	return err;
}
226 | ||
/*
 * Wake sleepers once the socket has become writable again.
 * NOTE(review): the wake mask is POLLIN-flavoured and the async wake
 * uses SOCK_WAKE_WAITD even though this is a write-space event —
 * presumably inherited behavior; confirm before changing.
 */
static void skcipher_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!skcipher_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}
243 | ||
/*
 * Sleep until the peer has queued data (ctx->used != 0) or a signal
 * arrives.  Returns 0, -EAGAIN for MSG_DONTWAIT, or -ERESTARTSYS.
 */
static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	long timeout;
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT) {
		return -EAGAIN;
	}

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	add_wait_queue(sk_sleep(sk), &wait);
	for (;;) {
		if (signal_pending(current))
			break;
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, ctx->used, &wait)) {
			err = 0;
			break;
		}
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}
274 | ||
/* Wake readers once data has been queued on the tx list. */
static void skcipher_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
293 | ||
/*
 * sendmsg(2): queue plaintext/ciphertext from user space onto the tx
 * scatterlist.  An optional control message selects the operation
 * (encrypt/decrypt) and supplies the IV; this is only honoured at the
 * start of a message (i.e. not while a previous one is incomplete).
 * Data is copied into freshly allocated pages; a partially filled tail
 * page may be appended to on the next call (ctx->merge).
 * Returns the number of bytes copied, or a negative errno if nothing
 * was copied.
 */
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned ivsize = crypto_skcipher_ivsize(tfm);
	struct skcipher_sg_list *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err;
	int i;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	err = -EINVAL;

	lock_sock(sk);
	/* a finished message must be fully read before a new one starts */
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);
	}

	while (size) {
		struct scatterlist *sg;
		unsigned long len = size;
		size_t plen;

		if (ctx->merge) {
			/* append into the partially filled tail page */
			sgl = list_entry(ctx->tsgl.prev,
					 struct skcipher_sg_list, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!skcipher_writable(sk)) {
			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		/* stay within the remaining send-buffer budget */
		len = min_t(unsigned long, len, skcipher_sndbuf(sk));

		err = skcipher_alloc_sgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
		sg = sgl->sg;
		if (sgl->cur)
			sg_unmark_end(sg + sgl->cur - 1);
		do {
			i = sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg + i))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		/* remember whether the tail page can take more data */
		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	/* partial progress wins over a late error */
	return copied ?: err;
}
431 | ||
/*
 * sendpage(2): splice a caller-provided page into the tx scatterlist
 * zero-copy (a reference is taken; no data is copied).  MSG_SENDPAGE_NOTLAST
 * is treated as MSG_MORE.  Returns @size on success or a negative errno.
 */
static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
				 int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	/* a finished message must be fully read before a new one starts */
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!skcipher_writable(sk)) {
		err = skcipher_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = skcipher_alloc_sgl(sk);
	if (err)
		goto unlock;

	/* a foreign page can never be appended to, so stop merging */
	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);
	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}
482 | ||
a596999b TS |
483 | static int skcipher_all_sg_nents(struct skcipher_ctx *ctx) |
484 | { | |
485 | struct skcipher_sg_list *sgl; | |
486 | struct scatterlist *sg; | |
487 | int nents = 0; | |
488 | ||
489 | list_for_each_entry(sgl, &ctx->tsgl, list) { | |
490 | sg = sgl->sg; | |
491 | ||
492 | while (!sg->length) | |
493 | sg++; | |
494 | ||
495 | nents += sg_nents(sg); | |
496 | } | |
497 | return nents; | |
498 | } | |
499 | ||
/*
 * AIO recvmsg path: build a standalone async request that takes over
 * the queued tx pages (references moved, not dropped — see the
 * skcipher_pull_sgl(sk, used, 0) call), maps the user's receive iovecs,
 * and submits the cipher operation.  On -EINPROGRESS ownership of sreq
 * passes to skcipher_async_cb(); the syscall returns -EIOCBQUEUED.
 * Otherwise everything is torn down before returning.
 */
static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
				  int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	struct skcipher_async_req *sreq;
	struct skcipher_request *req;
	struct skcipher_async_rsgl *last_rsgl = NULL;
	unsigned int txbufs = 0, len = 0, tx_nents;
	unsigned int reqsize = crypto_skcipher_reqsize(tfm);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	int err = -ENOMEM;
	bool mark = false;
	char *iv;

	/* one allocation: sreq, then reqsize of request ctx, then the IV */
	sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
	if (unlikely(!sreq))
		goto out;

	req = &sreq->req;
	iv = (char *)(req + 1) + reqsize;
	sreq->iocb = msg->msg_iocb;
	INIT_LIST_HEAD(&sreq->list);
	sreq->inflight = &ctx->inflight;

	lock_sock(sk);
	tx_nents = skcipher_all_sg_nents(ctx);
	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
	if (unlikely(!sreq->tsg))
		goto unlock;
	sg_init_table(sreq->tsg, tx_nents);
	/* private IV copy so the ctx IV can be reused immediately */
	memcpy(iv, ctx->iv, ivsize);
	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      skcipher_async_cb, sreq);

	while (iov_iter_count(&msg->msg_iter)) {
		struct skcipher_async_rsgl *rsgl;
		int used;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto free;
		}
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		used = min_t(unsigned long, ctx->used,
			     iov_iter_count(&msg->msg_iter));
		used = min_t(unsigned long, used, sg->length);

		if (txbufs == tx_nents) {
			struct scatterlist *tmp;
			int x;
			/* Ran out of tx slots in async request
			 * need to expand */
			tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
				      GFP_KERNEL);
			if (!tmp) {
				err = -ENOMEM;
				goto free;
			}

			sg_init_table(tmp, tx_nents * 2);
			for (x = 0; x < tx_nents; x++)
				sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
					    sreq->tsg[x].length,
					    sreq->tsg[x].offset);
			kfree(sreq->tsg);
			sreq->tsg = tmp;
			tx_nents *= 2;
			mark = true;
		}
		/* Need to take over the tx sgl from ctx
		 * to the asynch req - these sgls will be freed later */
		sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
			    sg->offset);

		if (list_empty(&sreq->list)) {
			rsgl = &sreq->first_sgl;
			list_add_tail(&rsgl->list, &sreq->list);
		} else {
			rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
			if (!rsgl) {
				err = -ENOMEM;
				goto free;
			}
			list_add_tail(&rsgl->list, &sreq->list);
		}

		used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
		err = used;
		if (used < 0)
			goto free;
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;
		len += used;
		/* put == 0: the request now owns these page refs */
		skcipher_pull_sgl(sk, used, 0);
		iov_iter_advance(&msg->msg_iter, used);
	}

	if (mark)
		sg_mark_end(sreq->tsg + txbufs - 1);

	skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
				   len, iv);
	err = ctx->enc ? crypto_skcipher_encrypt(req) :
			 crypto_skcipher_decrypt(req);
	if (err == -EINPROGRESS) {
		atomic_inc(&ctx->inflight);
		err = -EIOCBQUEUED;
		sreq = NULL;	/* ownership moved to the completion cb */
		goto unlock;
	}
free:
	skcipher_free_async_sgls(sreq);
unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);
	kzfree(sreq);		/* no-op when queued (sreq == NULL) */
out:
	return err;
}
637 | ||
/*
 * Synchronous recvmsg path: repeatedly map a chunk of the user's
 * receive buffer, run the cipher in place over the head of the tx
 * queue, and consume the processed bytes.  Unless this is the final
 * chunk of the message, only whole cipher blocks are processed.
 * Returns bytes produced, or a negative errno if none were.
 */
static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
				 int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct sock *psk = ask->parent;
	struct alg_sock *pask = alg_sk(psk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_tfm *skc = pask->private;
	struct crypto_skcipher *tfm = skc->skcipher;
	unsigned bs = crypto_skcipher_blocksize(tfm);
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int err = -EAGAIN;
	int used;
	long copied = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto unlock;
		}

		used = min_t(unsigned long, ctx->used, msg_data_left(msg));

		used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
		err = used;
		if (err < 0)
			goto unlock;

		/* mid-message: round down to whole cipher blocks */
		if (ctx->more || used < ctx->used)
			used -= used % bs;

		err = -EINVAL;
		if (!used)
			goto free;

		/* load the tx sgl only after the wait may have slept */
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
					   ctx->iv);

		err = af_alg_wait_for_completion(
			ctx->enc ?
				crypto_skcipher_encrypt(&ctx->req) :
				crypto_skcipher_decrypt(&ctx->req),
			&ctx->completion);

free:
		af_alg_free_sg(&ctx->rsgl);

		if (err)
			goto unlock;

		copied += used;
		skcipher_pull_sgl(sk, used, 1);
		iov_iter_advance(&msg->msg_iter, used);
	}

	err = 0;

unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}
712 | ||
a596999b TS |
713 | static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg, |
714 | size_t ignored, int flags) | |
715 | { | |
716 | return (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) ? | |
717 | skcipher_recvmsg_async(sock, msg, flags) : | |
718 | skcipher_recvmsg_sync(sock, msg, flags); | |
719 | } | |
8ff59090 HX |
720 | |
/* poll(2): readable while data is queued, writable while send-buffer
 * budget remains. */
static unsigned int skcipher_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (ctx->used)
		mask |= POLLIN | POLLRDNORM;

	if (skcipher_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}
740 | ||
/* proto_ops used once a key has been set on the parent socket. */
static struct proto_ops algif_skcipher_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg,
	.sendpage	=	skcipher_sendpage,
	.recvmsg	=	skcipher_recvmsg,
	.poll		=	skcipher_poll,
};
762 | ||
/*
 * Called from the *_nokey entry points: refuse I/O until the parent
 * tfm has a key.  On first success this socket takes its own reference
 * on the parent (ask->refcnt) and drops the implicit one from accept.
 * Child lock is taken first, then the parent lock (nested) — the same
 * order used elsewhere, avoiding lock inversion.
 */
static int skcipher_check_key(struct socket *sock)
{
	int err = 0;
	struct sock *psk;
	struct alg_sock *pask;
	struct skcipher_tfm *tfm;
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);

	lock_sock(sk);
	if (ask->refcnt)
		goto unlock_child;	/* already validated once */

	psk = ask->parent;
	pask = alg_sk(ask->parent);
	tfm = pask->private;

	err = -ENOKEY;
	lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
	if (!tfm->has_key)
		goto unlock;

	if (!pask->refcnt++)
		sock_hold(psk);

	ask->refcnt = 1;
	sock_put(psk);

	err = 0;

unlock:
	release_sock(psk);
unlock_child:
	release_sock(sk);

	return err;
}
800 | ||
801 | static int skcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg, | |
802 | size_t size) | |
803 | { | |
804 | int err; | |
805 | ||
806 | err = skcipher_check_key(sock); | |
807 | if (err) | |
808 | return err; | |
809 | ||
810 | return skcipher_sendmsg(sock, msg, size); | |
811 | } | |
812 | ||
813 | static ssize_t skcipher_sendpage_nokey(struct socket *sock, struct page *page, | |
814 | int offset, size_t size, int flags) | |
815 | { | |
816 | int err; | |
817 | ||
818 | err = skcipher_check_key(sock); | |
819 | if (err) | |
820 | return err; | |
821 | ||
822 | return skcipher_sendpage(sock, page, offset, size, flags); | |
823 | } | |
824 | ||
825 | static int skcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg, | |
826 | size_t ignored, int flags) | |
827 | { | |
828 | int err; | |
829 | ||
830 | err = skcipher_check_key(sock); | |
831 | if (err) | |
832 | return err; | |
833 | ||
834 | return skcipher_recvmsg(sock, msg, ignored, flags); | |
835 | } | |
836 | ||
/* proto_ops installed while no key is set: every data op goes through
 * skcipher_check_key() first. */
static struct proto_ops algif_skcipher_ops_nokey = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg_nokey,
	.sendpage	=	skcipher_sendpage_nokey,
	.recvmsg	=	skcipher_recvmsg_nokey,
	.poll		=	skcipher_poll,
};
858 | ||
8ff59090 HX |
859 | static void *skcipher_bind(const char *name, u32 type, u32 mask) |
860 | { | |
dd504589 HX |
861 | struct skcipher_tfm *tfm; |
862 | struct crypto_skcipher *skcipher; | |
863 | ||
864 | tfm = kzalloc(sizeof(*tfm), GFP_KERNEL); | |
865 | if (!tfm) | |
866 | return ERR_PTR(-ENOMEM); | |
867 | ||
868 | skcipher = crypto_alloc_skcipher(name, type, mask); | |
869 | if (IS_ERR(skcipher)) { | |
870 | kfree(tfm); | |
871 | return ERR_CAST(skcipher); | |
872 | } | |
873 | ||
874 | tfm->skcipher = skcipher; | |
875 | ||
876 | return tfm; | |
8ff59090 HX |
877 | } |
878 | ||
/* af_alg release callback: free the cipher handle and its wrapper. */
static void skcipher_release(void *private)
{
	struct skcipher_tfm *tfm = private;

	crypto_free_skcipher(tfm->skcipher);
	kfree(tfm);
}
886 | ||
/* af_alg setkey callback: program the key and record whether the tfm
 * is now usable (gates the nokey ops). */
static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct skcipher_tfm *tfm = private;
	int err;

	err = crypto_skcipher_setkey(tfm->skcipher, key, keylen);
	tfm->has_key = !err;

	return err;
}
897 | ||
/* Poll for outstanding async requests to drain before teardown,
 * giving up after 100 * 100ms (~10 seconds). */
static void skcipher_wait(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	int ctr = 0;

	while (atomic_read(&ctx->inflight) && ctr++ < 100)
		msleep(100);
}
907 | ||
/* Socket destructor: drain async requests, free queued tx pages, the
 * IV (zeroized — it may be key-adjacent material) and the ctx. */
static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);

	if (atomic_read(&ctx->inflight))
		skcipher_wait(sk);

	skcipher_free_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}
922 | ||
/*
 * Set up the per-request-socket context on accept(2): allocate the ctx
 * (with trailing request context) and a zeroed IV, and wire up the
 * synchronous completion callback.  Returns 0 or -ENOMEM.
 */
static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
{
	struct skcipher_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_tfm *tfm = private;
	struct crypto_skcipher *skcipher = tfm->skcipher;
	unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));

	INIT_LIST_HEAD(&ctx->tsgl);
	ctx->len = len;		/* remembered for sock_kfree_s() at destruct */
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	atomic_set(&ctx->inflight, 0);
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	skcipher_request_set_tfm(&ctx->req, skcipher);
	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
						 CRYPTO_TFM_REQ_MAY_BACKLOG,
				      af_alg_complete, &ctx->completion);

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}
964 | ||
/* accept(2) on the keyed path: reject ciphers that require a key but
 * don't have one yet, else defer to the common setup. */
static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct skcipher_tfm *tfm = private;

	if (!tfm->has_key && crypto_skcipher_has_setkey(tfm->skcipher))
		return -ENOKEY;

	return skcipher_accept_parent_nokey(private, sk);
}
974 | ||
/* AF_ALG type registration for "skcipher" sockets. */
static const struct af_alg_type algif_type_skcipher = {
	.bind		=	skcipher_bind,
	.release	=	skcipher_release,
	.setkey		=	skcipher_setkey,
	.accept		=	skcipher_accept_parent,
	.accept_nokey	=	skcipher_accept_parent_nokey,
	.ops		=	&algif_skcipher_ops,
	.ops_nokey	=	&algif_skcipher_ops_nokey,
	.name		=	"skcipher",
	.owner		=	THIS_MODULE
};
986 | ||
987 | static int __init algif_skcipher_init(void) | |
988 | { | |
989 | return af_alg_register_type(&algif_type_skcipher); | |
990 | } | |
991 | ||
992 | static void __exit algif_skcipher_exit(void) | |
993 | { | |
994 | int err = af_alg_unregister_type(&algif_type_skcipher); | |
995 | BUG_ON(err); | |
996 | } | |
997 | ||
998 | module_init(algif_skcipher_init); | |
999 | module_exit(algif_skcipher_exit); | |
1000 | MODULE_LICENSE("GPL"); |