// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "internal.h"

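/*
 * Internal flags for the skcipher walk state.  The meanings below are a
 * summary inferred from how the flags are used in this file:
 *
 * PHYS:  physical-address walk (skcipher_walk_async()); pages are not
 *        kmapped and write-backs are deferred to skcipher_walk_complete().
 * SLOW:  the current chunk straddles a page or scatterlist boundary and is
 *        processed in a bounce buffer.
 * COPY:  the current chunk is copied through walk->page to satisfy the
 *        algorithm's alignmask.
 * DIFF:  source and destination do not share a page and are mapped
 *        separately.
 * SLEEP: the caller allows sleeping, so GFP_KERNEL allocations may be used.
 */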
enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static inline struct skcipher_alg *__crypto_skcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct skcipher_alg, base);
}

static inline struct crypto_istat_cipher *skcipher_get_stat(
	struct skcipher_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
	return &alg->stat;
#else
	return NULL;
#endif
}

static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
{
	struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
		return err;

	if (err && err != -EINPROGRESS && err != -EBUSY)
		atomic64_inc(&istat->err_cnt);

	return err;
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

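/*
 * Finish one step of a walk: @err is either a negative errno, which aborts
 * the walk, or the number of bytes from walk->nbytes that the algorithm
 * did *not* process.  Leftover bytes are retried on the next step, except
 * for a slow-path (bounce-buffered) chunk, where leaving bytes behind is
 * treated as -EINVAL.
 */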
int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

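/*
 * Flush the write-back queue of a physical-address walk.  Called by async
 * implementations once all outstanding steps have finished; on success each
 * queued bounce buffer is copied back to its destination scatterlist
 * position before being freed.
 */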
void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

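/*
 * Slow path: the next chunk straddles a page or scatterlist boundary, so
 * it is bounced through a temporary buffer.  The size computation below
 * reserves, in order: kmalloc's minimum alignment, the struct header
 * (physical walks only), padding to reach the algorithm's alignmask, and
 * slack so the aligned buffer can be shifted past a page boundary if it
 * would otherwise straddle one.
 */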
static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

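/*
 * Copy path: the chunk is contiguous but misaligned for the algorithm, so
 * it is processed in place in the preallocated walk->page bounce buffer.
 * For physical walks the write-back is queued for skcipher_walk_complete();
 * for virtual walks skcipher_walk_done() copies the result back out.
 */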
static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

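/*
 * Fast path: source and destination are aligned and contiguous, so they
 * are mapped and handed to the algorithm directly.  If they live in
 * different pages, both are mapped and SKCIPHER_WALK_DIFF is set so that
 * skcipher_walk_done() knows to unmap both.
 */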
static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

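/*
 * Advance to the next chunk of the walk and pick a strategy for it: the
 * slow bounce-buffer path when less than a full block is contiguous, the
 * copy path when the buffers violate the alignmask, and the fast in-place
 * mapping path otherwise.
 */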
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

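/*
 * The caller's IV buffer does not satisfy the algorithm's alignmask, so
 * duplicate the IV into a properly aligned kmalloc'd buffer.  The original
 * pointer is kept in walk->oiv, and the updated IV is copied back to it
 * when the walk finishes.
 */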
static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_hardirq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);

		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->stride = crypto_skcipher_walksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	return skcipher_walk_first(walk);
}

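/*
 * Start a virtual-address walk over the request's scatterlists.  @atomic
 * forces atomic allocations even when the request says sleeping is allowed.
 * A minimal sketch of the caller pattern (illustrative only; crypt_chunk()
 * is a hypothetical helper that processes whole blocks):
 *
 *	struct skcipher_walk walk;
 *	unsigned int n;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while ((n = walk.nbytes) != 0) {
 *		if (n < walk.total)
 *			n = round_down(n, walk.blocksize);
 *		crypt_chunk(walk.dst.virt.addr, walk.src.virt.addr, n,
 *			    walk.iv);
 *		err = skcipher_walk_done(&walk, walk.nbytes - n);
 *	}
 *	return err;
 */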
int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

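/*
 * Start a physical-address walk, for implementations that operate on pages
 * rather than mapped virtual addresses (a summary inferred from the PHYS
 * flag handling in this file).  Deferred write-backs are queued on
 * walk->buffers and must be flushed with skcipher_walk_complete() once
 * processing finishes.
 */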
int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

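/*
 * AEAD variants of the walk: the associated data (req->assoclen bytes) is
 * skipped in both scatterlists first, so the walk covers only the
 * plaintext/ciphertext.  For decryption, the authentication tag at the end
 * is excluded from walk->total as well.
 */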
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

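/*
 * Set the key, bouncing it through an aligned temporary buffer if the
 * caller's pointer violates the algorithm's alignmask.  On failure the
 * NEED_KEY flag is (re)set so subsequent encrypt/decrypt calls fail with
 * -ENOKEY rather than running with a stale key.
 */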
int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

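/*
 * Typical caller sequence for the encrypt/decrypt entry points (a minimal
 * illustrative sketch, error handling mostly elided; sg, iv, key, keylen
 * and len are assumed to be set up by the caller):
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	crypto_skcipher_setkey(tfm, key, keylen);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, sg, sg, len, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */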
int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	int ret;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

		atomic64_inc(&istat->encrypt_cnt);
		atomic64_add(req->cryptlen, &istat->encrypt_tlen);
	}

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = alg->encrypt(req);

	return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	int ret;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

		atomic64_inc(&istat->decrypt_cnt);
		atomic64_add(req->cryptlen, &istat->decrypt_tlen);
	}

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else
		ret = alg->decrypt(req);

	return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
}

static int __maybe_unused crypto_skcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}

static int __maybe_unused crypto_skcipher_report_stat(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_istat_cipher *istat;
	struct crypto_stat_cipher rcipher;

	istat = skcipher_get_stat(skcipher);

	memset(&rcipher, 0, sizeof(rcipher));

	strscpy(rcipher.type, "cipher", sizeof(rcipher.type));

	rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
	rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
	rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
	rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
	rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);

	return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_alg_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_skcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
	.report_stat = crypto_skcipher_report_stat,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

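/*
 * Allocate a transform by algorithm name, e.g. "cbc(aes)" or "xts(aes)".
 * A minimal sketch of the call (illustrative; see the request example
 * above crypto_skcipher_encrypt() for the full flow):
 *
 *	struct crypto_skcipher *tfm;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_skcipher(tfm);
 */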
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
	const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);

int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_istat_cipher *istat = skcipher_get_stat(alg);
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;
	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
		memset(istat, 0, sizeof(*istat));

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

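/*
 * Bulk registration helper: registers @count algorithms and unwinds all
 * earlier registrations if any one of them fails, so callers never see a
 * partially registered array.
 */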
int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

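/*
 * Helpers for "simple" single-block-cipher templates such as ecb and cbc.
 * A template's ->create() typically does something like the following
 * (illustrative sketch; my_encrypt and my_decrypt are hypothetical
 * callbacks supplied by the template):
 *
 *	inst = skcipher_alloc_instance_simple(tmpl, tb);
 *	if (IS_ERR(inst))
 *		return PTR_ERR(inst);
 *	inst->alg.encrypt = my_encrypt;
 *	inst->alg.decrypt = my_decrypt;
 *	err = skcipher_register_instance(tmpl, inst);
 *	if (err)
 *		inst->free(inst);
 *	return err;
 */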
static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);