/*
 * seqiv: Sequence Number IV Generator
 *
 * This generator produces an IV by XORing a sequence number with a salt.
 * It is mainly useful for CTR and similar modes.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
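
/*
 * IV construction (see seqiv_geniv() below): the 64-bit sequence number is
 * stored big-endian in the trailing bytes of the IV, any leading bytes are
 * zeroed, and the result is XORed with a random, per-tfm salt.  For an
 * 8-byte IV this is simply
 *
 *	IV = salt ^ cpu_to_be64(seq)
 *
 * As with any counter-derived IV, uniqueness depends entirely on the caller
 * never reusing a sequence number under the same key.
 */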

#include <crypto/internal/geniv.h>
#include <crypto/internal/skcipher.h>
#include <crypto/null.h>
#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

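/*
 * Per-tfm state.  The salt[] flexible array members are sized at instance
 * creation time: the create functions below add the IV size to cra_ctxsize
 * so the salt is allocated directly behind the fixed part of the context.
 */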
struct seqiv_ctx {
	spinlock_t lock;
	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};

struct seqiv_aead_ctx {
	/* aead_geniv_ctx must be the first element */
	struct aead_geniv_ctx geniv;
	struct crypto_blkcipher *null;
	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};

static void seqiv_free(struct crypto_instance *inst);

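/*
 * Completion handlers.  They only come into play when a bounce IV buffer
 * was allocated because the caller's IV was misaligned: on success the
 * generated IV is copied back into the original request, and the buffer is
 * freed in all cases (kzfree on the new-style AEAD path, since it holds
 * live IV bytes).  The -EINPROGRESS notification is ignored so the copy
 * and free happen exactly once, on final completion.
 */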
static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err)
{
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	struct crypto_ablkcipher *geniv;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = skcipher_givcrypt_reqtfm(req);
	memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv));

out:
	kfree(subreq->info);
}

static void seqiv_complete(struct crypto_async_request *base, int err)
{
	struct skcipher_givcrypt_request *req = base->data;

	seqiv_complete2(req, err);
	skcipher_givcrypt_complete(req, err);
}

static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err)
{
	struct aead_request *subreq = aead_givcrypt_reqctx(req);
	struct crypto_aead *geniv;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = aead_givcrypt_reqtfm(req);
	memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv));

out:
	kfree(subreq->iv);
}

static void seqiv_aead_complete(struct crypto_async_request *base, int err)
{
	struct aead_givcrypt_request *req = base->data;

	seqiv_aead_complete2(req, err);
	aead_givcrypt_complete(req, err);
}

static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
{
	struct aead_request *subreq = aead_request_ctx(req);
	struct crypto_aead *geniv;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = crypto_aead_reqtfm(req);
	memcpy(req->iv, subreq->iv, crypto_aead_ivsize(geniv));

out:
	kzfree(subreq->iv);
}

static void seqiv_aead_encrypt_complete(struct crypto_async_request *base,
					int err)
{
	struct aead_request *req = base->data;

	seqiv_aead_encrypt_complete2(req, err);
	aead_request_complete(req, err);
}
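
/*
 * Build the IV from (salt, seq): zero any bytes beyond the 64-bit sequence
 * number, store the sequence number big-endian in the trailing bytes, then
 * XOR in the salt.
 */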
static void seqiv_geniv(struct seqiv_ctx *ctx, u8 *info, u64 seq,
			unsigned int ivsize)
{
	unsigned int len = ivsize;

	if (ivsize > sizeof(u64)) {
		memset(info, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(seq);
	memcpy(info + ivsize - len, &seq, len);
	crypto_xor(info, ctx->salt, ivsize);
}
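
/*
 * givencrypt for the old ablkcipher interface: generate the IV into the
 * caller's IV buffer if it is suitably aligned, otherwise into a bounce
 * buffer that seqiv_complete()/seqiv_complete2() copy back and free, then
 * hand the request on to the underlying cipher.
 */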
static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
{
	struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
	struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize;
	int err;

	ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));

	compl = req->creq.base.complete;
	data = req->creq.base.data;
	info = req->creq.info;

	ivsize = crypto_ablkcipher_ivsize(geniv);

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_ablkcipher_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, req->creq.base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		compl = seqiv_complete;
		data = req;
	}

	ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
					data);
	ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
				     req->creq.nbytes, info);

	seqiv_geniv(ctx, info, req->seq, ivsize);
	memcpy(req->giv, info, ivsize);

	err = crypto_ablkcipher_encrypt(subreq);
	if (unlikely(info != req->creq.info))
		seqiv_complete2(req, err);
	return err;
}
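
/*
 * givencrypt for the old AEAD interface.  Identical in structure to
 * seqiv_givencrypt() above: build the IV from (salt, seq), bounce through
 * an allocated buffer when the caller's IV is misaligned, and forward to
 * the underlying AEAD, additionally wiring up the associated data.
 */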
static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *geniv = aead_givcrypt_reqtfm(req);
	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *areq = &req->areq;
	struct aead_request *subreq = aead_givcrypt_reqctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize;
	int err;

	aead_request_set_tfm(subreq, aead_geniv_base(geniv));

	compl = areq->base.complete;
	data = areq->base.data;
	info = areq->iv;

	ivsize = crypto_aead_ivsize(geniv);

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, areq->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		compl = seqiv_aead_complete;
		data = req;
	}

	aead_request_set_callback(subreq, areq->base.flags, compl, data);
	aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
			       info);
	aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);

	seqiv_geniv(ctx, info, req->seq, ivsize);
	memcpy(req->giv, info, ivsize);

	err = crypto_aead_encrypt(subreq);
	if (unlikely(info != areq->iv))
		seqiv_aead_complete2(req, err);
	return err;
}
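
/*
 * Encrypt for the new AEAD interface.  Here req->iv carries the sequence
 * number material itself: if src != dst the data is first copied across
 * with the null cipher, the IV is XORed with the salt and written into the
 * destination at offset req->assoclen, and the inner AEAD then encrypts
 * the remaining req->cryptlen - ivsize bytes with the IV covered as part
 * of the associated data.
 */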
static int seqiv_aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize = 8;
	int err;

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->geniv.child);

	compl = req->base.complete;
	data = req->base.data;
	info = req->iv;

	if (req->src != req->dst) {
		struct blkcipher_desc desc = {
			.tfm = ctx->null,
		};

		err = crypto_blkcipher_encrypt(&desc, req->dst, req->src,
					       req->assoclen + req->cryptlen);
		if (err)
			return err;
	}

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		memcpy(info, req->iv, ivsize);
		compl = seqiv_aead_encrypt_complete;
		data = req;
	}

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->dst, req->dst,
			       req->cryptlen - ivsize, info);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	crypto_xor(info, ctx->salt, ivsize);
	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);

	err = crypto_aead_encrypt(subreq);
	if (unlikely(info != req->iv))
		seqiv_aead_encrypt_complete2(req, err);
	return err;
}
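
/*
 * Decrypt for the new AEAD interface: read the IV back out of the source
 * buffer at offset req->assoclen, use it as the subrequest IV, and let the
 * inner AEAD verify it as part of the associated data.
 */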
static int seqiv_aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize = 8;

	if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->geniv.child);

	compl = req->base.complete;
	data = req->base.data;

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);

	return crypto_aead_decrypt(subreq);
}
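
/*
 * Instance init for the old interfaces: the salt is drawn from the default
 * RNG, and givencrypt is only installed if the RNG is available.
 */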
static int seqiv_init(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher *geniv = __crypto_ablkcipher_cast(tfm);
	struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
	int err;

	spin_lock_init(&ctx->lock);

	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request);

	err = 0;
	if (!crypto_get_default_rng()) {
		crypto_ablkcipher_crt(geniv)->givencrypt = seqiv_givencrypt;
		err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
					   crypto_ablkcipher_ivsize(geniv));
		crypto_put_default_rng();
	}

	return err ?: skcipher_geniv_init(tfm);
}

static int seqiv_old_aead_init(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err;

	spin_lock_init(&ctx->lock);

	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
				sizeof(struct aead_request));

	err = 0;
	if (!crypto_get_default_rng()) {
		geniv->givencrypt = seqiv_aead_givencrypt;
		err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
					   crypto_aead_ivsize(geniv));
		crypto_put_default_rng();
	}

	return err ?: aead_geniv_init(tfm);
}
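
/*
 * Shared init for the new AEAD interface: salt from the default RNG, a
 * default null skcipher for the src != dst copy in seqiv_aead_encrypt(),
 * and the child tfm rewired: the real child is stashed in ctx->geniv.child
 * while geniv->child is pointed back at the geniv itself.
 */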
static int seqiv_aead_init_common(struct crypto_aead *geniv,
				  unsigned int reqsize)
{
	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(geniv);
	int err;

	spin_lock_init(&ctx->geniv.lock);

	crypto_aead_set_reqsize(geniv, reqsize);

	err = crypto_get_default_rng();
	if (err)
		goto out;

	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   crypto_aead_ivsize(geniv));
	crypto_put_default_rng();
	if (err)
		goto out;

	ctx->null = crypto_get_default_null_skcipher();
	err = PTR_ERR(ctx->null);
	if (IS_ERR(ctx->null))
		goto out;

	err = aead_geniv_init(crypto_aead_tfm(geniv));
	if (err)
		goto drop_null;

	ctx->geniv.child = geniv->child;
	geniv->child = geniv;

out:
	return err;

drop_null:
	crypto_put_default_null_skcipher();
	goto out;
}

static int seqiv_aead_init(struct crypto_aead *tfm)
{
	return seqiv_aead_init_common(tfm, sizeof(struct aead_request));
}

static void seqiv_aead_exit(struct crypto_aead *tfm)
{
	struct seqiv_aead_ctx *ctx = crypto_aead_ctx(tfm);

	crypto_free_aead(ctx->geniv.child);
	crypto_put_default_null_skcipher();
}
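
/*
 * Template constructors.  Each allocates a geniv instance around the
 * underlying algorithm, requires an IV of at least (or, on the new AEAD
 * path, exactly) 64 bits, and extends cra_ctxsize so the salt fits behind
 * the fixed context.
 */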
static int seqiv_ablkcipher_create(struct crypto_template *tmpl,
				   struct rtattr **tb)
{
	struct crypto_instance *inst;
	int err;

	inst = skcipher_geniv_alloc(tmpl, tb, 0, 0);

	if (IS_ERR(inst))
		return PTR_ERR(inst);

	err = -EINVAL;
	if (inst->alg.cra_ablkcipher.ivsize < sizeof(u64))
		goto free_inst;

	inst->alg.cra_init = seqiv_init;
	inst->alg.cra_exit = skcipher_geniv_exit;

	inst->alg.cra_ctxsize += inst->alg.cra_ablkcipher.ivsize;
	inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);

	inst->alg.cra_alignmask |= __alignof__(u32) - 1;

	err = crypto_register_instance(tmpl, inst);
	if (err)
		goto free_inst;

out:
	return err;

free_inst:
	skcipher_geniv_free(inst);
	goto out;
}

static int seqiv_old_aead_create(struct crypto_template *tmpl,
				 struct aead_instance *aead)
{
	struct crypto_instance *inst = aead_crypto_instance(aead);
	int err = -EINVAL;

	if (inst->alg.cra_aead.ivsize < sizeof(u64))
		goto free_inst;

	inst->alg.cra_init = seqiv_old_aead_init;
	inst->alg.cra_exit = aead_geniv_exit;

	inst->alg.cra_ctxsize = inst->alg.cra_aead.ivsize;
	inst->alg.cra_ctxsize += sizeof(struct seqiv_ctx);

	err = crypto_register_instance(tmpl, inst);
	if (err)
		goto free_inst;

out:
	return err;

free_inst:
	aead_geniv_free(aead);
	goto out;
}

static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct aead_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct aead_alg *alg;
	int err;

	inst = aead_geniv_alloc(tmpl, tb, 0, 0);

	if (IS_ERR(inst))
		return PTR_ERR(inst);

	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;

	if (inst->alg.base.cra_aead.encrypt)
		return seqiv_old_aead_create(tmpl, inst);

	spawn = aead_instance_ctx(inst);
	alg = crypto_spawn_aead_alg(spawn);

	if (alg->base.cra_aead.encrypt)
		goto done;

	err = -EINVAL;
	if (inst->alg.ivsize != sizeof(u64))
		goto free_inst;

	inst->alg.encrypt = seqiv_aead_encrypt;
	inst->alg.decrypt = seqiv_aead_decrypt;

	inst->alg.init = seqiv_aead_init;
	inst->alg.exit = seqiv_aead_exit;

	inst->alg.base.cra_ctxsize = sizeof(struct seqiv_aead_ctx);
	inst->alg.base.cra_ctxsize += inst->alg.ivsize;

done:
	err = aead_register_instance(tmpl, inst);
	if (err)
		goto free_inst;

out:
	return err;

free_inst:
	aead_geniv_free(inst);
	goto out;
}
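
/*
 * Entry point for the "seqiv" template: dispatch on the requested
 * algorithm type, AEAD or ablkcipher.
 */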
static int seqiv_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;
	int err;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	if ((algt->type ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
		err = seqiv_ablkcipher_create(tmpl, tb);
	else
		err = seqiv_aead_create(tmpl, tb);

	return err;
}

static void seqiv_free(struct crypto_instance *inst)
{
	if ((inst->alg.cra_flags ^ CRYPTO_ALG_TYPE_AEAD) & CRYPTO_ALG_TYPE_MASK)
		skcipher_geniv_free(inst);
	else
		aead_geniv_free(aead_instance(inst));
}

static struct crypto_template seqiv_tmpl = {
	.name = "seqiv",
	.create = seqiv_create,
	.free = seqiv_free,
	.module = THIS_MODULE,
};

static int __init seqiv_module_init(void)
{
	return crypto_register_template(&seqiv_tmpl);
}

static void __exit seqiv_module_exit(void)
{
	crypto_unregister_template(&seqiv_tmpl);
}

module_init(seqiv_module_init);
module_exit(seqiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Sequence Number IV Generator");
MODULE_ALIAS_CRYPTO("seqiv");