crypto: ccp - Remove user triggerable pr_err calls
[linux-2.6-block.git] drivers/crypto/ccp/ccp-crypto-aes-cmac.c
/*
 * AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"


static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
				 int ret)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	unsigned int digest_size = crypto_ahash_digestsize(tfm);

	if (ret)
		goto e_free;

	if (rctx->hash_rem) {
		/* Save remaining data to buffer */
		scatterwalk_map_and_copy(rctx->buf, rctx->cmd.u.aes.src,
					 rctx->hash_cnt, rctx->hash_rem, 0);
		rctx->buf_count = rctx->hash_rem;
	} else
		rctx->buf_count = 0;

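	/* The CCP carries the intermediate/final CMAC value in the IV
	 * buffer (see the cmd setup in ccp_do_cmac_update()), so the
	 * digest is copied out of rctx->iv on completion.
	 */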
	memcpy(req->result, rctx->iv, digest_size);

e_free:
	sg_free_table(&rctx->data_sg);

	return ret;
}

static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes,
			      unsigned int final)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
	struct scatterlist *sg, *cmac_key_sg = NULL;
	unsigned int block_size =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int len, need_pad, sg_count;
	int ret;
	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (nbytes)
		rctx->null_msg = 0;

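	/* Not enough data yet to fill a block (and this is not the final
	 * call): just accumulate it in the context buffer and return.
	 */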
	if (!final && ((nbytes + rctx->buf_count) <= block_size)) {
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
					 0, nbytes, 0);
		rctx->buf_count += nbytes;

		return 0;
	}

	len = rctx->buf_count + nbytes;

	rctx->final = final;
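	/* hash_cnt is the byte count handed to the CCP now (a multiple of
	 * the block size unless this is the final operation); hash_rem is
	 * the leftover carried forward into the next update.
	 */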
	rctx->hash_cnt = final ? len : len & ~(block_size - 1);
	rctx->hash_rem = final ? 0 : len & (block_size - 1);
	if (!final && (rctx->hash_cnt == len)) {
		/* CCP can't do zero length final, so keep some data around */
		rctx->hash_cnt -= block_size;
		rctx->hash_rem = block_size;
	}

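	/* Per the CMAC spec (NIST SP 800-38B), an incomplete final block
	 * (or an empty message) is padded with 0x80 followed by zeroes and
	 * processed with subkey K2; a complete final block uses K1.
	 */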
	if (final && (rctx->null_msg || (len & (block_size - 1))))
		need_pad = 1;
	else
		need_pad = 0;

	sg_init_one(&rctx->iv_sg, rctx->iv, sizeof(rctx->iv));

	/* Build the data scatterlist table - allocate enough entries for all
	 * possible data pieces (buffer, input data, padding)
	 */
	sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2;
	ret = sg_alloc_table(&rctx->data_sg, sg_count, GFP_KERNEL);
	if (ret)
		return ret;

	sg = NULL;
	if (rctx->buf_count) {
		sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg);
	}

	if (nbytes)
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src);

	if (need_pad) {
		int pad_length = block_size - (len & (block_size - 1));

		rctx->hash_cnt += pad_length;

		memset(rctx->pad, 0, sizeof(rctx->pad));
		rctx->pad[0] = 0x80;
		sg_init_one(&rctx->pad_sg, rctx->pad, pad_length);
		sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg);
	}
	if (sg)
		sg_mark_end(sg);

	/* Initialize the K1/K2 scatterlist */
	if (final)
		cmac_key_sg = (need_pad) ? &ctx->u.aes.k2_sg
					 : &ctx->u.aes.k1_sg;

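	/* The IV slot carries the running MAC between operations: the CCP
	 * reads the current value from rctx->iv and writes the updated MAC
	 * back, which ccp_aes_cmac_complete() copies out when final.
	 */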
	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_AES;
	rctx->cmd.u.aes.type = ctx->u.aes.type;
	rctx->cmd.u.aes.mode = ctx->u.aes.mode;
	rctx->cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
	rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.aes.iv = &rctx->iv_sg;
	rctx->cmd.u.aes.iv_len = AES_BLOCK_SIZE;
	rctx->cmd.u.aes.src = (sg) ? rctx->data_sg.sgl : NULL;
	rctx->cmd.u.aes.src_len = rctx->hash_cnt;
	rctx->cmd.u.aes.dst = NULL;
	rctx->cmd.u.aes.cmac_key = cmac_key_sg;
	rctx->cmd.u.aes.cmac_key_len = ctx->u.aes.kn_len;
	rctx->cmd.u.aes.cmac_final = final;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

static int ccp_aes_cmac_init(struct ahash_request *req)
{
	struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	rctx->null_msg = 1;

	return 0;
}

static int ccp_aes_cmac_update(struct ahash_request *req)
{
	return ccp_do_cmac_update(req, req->nbytes, 0);
}

static int ccp_aes_cmac_final(struct ahash_request *req)
{
	return ccp_do_cmac_update(req, 0, 1);
}

static int ccp_aes_cmac_finup(struct ahash_request *req)
{
	return ccp_do_cmac_update(req, req->nbytes, 1);
}

static int ccp_aes_cmac_digest(struct ahash_request *req)
{
	int ret;

	ret = ccp_aes_cmac_init(req);
	if (ret)
		return ret;

	return ccp_do_cmac_update(req, req->nbytes, 1);
}

static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct ccp_crypto_ahash_alg *alg =
		ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm));
	u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo;
	u64 rb_hi = 0x00, rb_lo = 0x87;
	__be64 *gk;
	int ret;

	switch (key_len) {
	case AES_KEYSIZE_128:
		ctx->u.aes.type = CCP_AES_TYPE_128;
		break;
	case AES_KEYSIZE_192:
		ctx->u.aes.type = CCP_AES_TYPE_192;
		break;
	case AES_KEYSIZE_256:
		ctx->u.aes.type = CCP_AES_TYPE_256;
		break;
	default:
		crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->u.aes.mode = alg->mode;

	/* Set to zero until complete */
	ctx->u.aes.key_len = 0;

	/* Set the key for the AES cipher used to generate the keys */
	ret = crypto_cipher_setkey(ctx->u.aes.tfm_cipher, key, key_len);
	if (ret)
		return ret;

	/* Encrypt a block of zeroes - use key area in context */
	memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
	crypto_cipher_encrypt_one(ctx->u.aes.tfm_cipher, ctx->u.aes.key,
				  ctx->u.aes.key);

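	/* Subkey derivation per NIST SP 800-38B: with L = AES-ENC(K, 0^128),
	 * K1 = L << 1, XORed with Rb = 0x87 if the MSB of L was set, and
	 * K2 = K1 << 1, XORed with Rb if the MSB of K1 was set. The 128-bit
	 * shift is performed on two 64-bit halves below.
	 */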
	/* Generate K1 and K2 */
	k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key));
	k0_lo = be64_to_cpu(*((__be64 *)ctx->u.aes.key + 1));

	k1_hi = (k0_hi << 1) | (k0_lo >> 63);
	k1_lo = k0_lo << 1;
	if (ctx->u.aes.key[0] & 0x80) {
		k1_hi ^= rb_hi;
		k1_lo ^= rb_lo;
	}
	gk = (__be64 *)ctx->u.aes.k1;
	*gk = cpu_to_be64(k1_hi);
	gk++;
	*gk = cpu_to_be64(k1_lo);

	k2_hi = (k1_hi << 1) | (k1_lo >> 63);
	k2_lo = k1_lo << 1;
	if (ctx->u.aes.k1[0] & 0x80) {
		k2_hi ^= rb_hi;
		k2_lo ^= rb_lo;
	}
	gk = (__be64 *)ctx->u.aes.k2;
	*gk = cpu_to_be64(k2_hi);
	gk++;
	*gk = cpu_to_be64(k2_lo);

	ctx->u.aes.kn_len = sizeof(ctx->u.aes.k1);
	sg_init_one(&ctx->u.aes.k1_sg, ctx->u.aes.k1, sizeof(ctx->u.aes.k1));
	sg_init_one(&ctx->u.aes.k2_sg, ctx->u.aes.k2, sizeof(ctx->u.aes.k2));

	/* Save the supplied key */
	memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key));
	memcpy(ctx->u.aes.key, key, key_len);
	ctx->u.aes.key_len = key_len;
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return ret;
}

static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_cipher *cipher_tfm;

	ctx->complete = ccp_aes_cmac_complete;
	ctx->u.aes.key_len = 0;

	crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_aes_cmac_req_ctx));

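	/* The mask clears ASYNC and NEED_FALLBACK, so this selects a
	 * synchronous AES cipher that does not itself require a fallback.
	 */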
	cipher_tfm = crypto_alloc_cipher("aes", 0,
					 CRYPTO_ALG_ASYNC |
					 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(cipher_tfm)) {
		pr_warn("could not load aes cipher driver\n");
		return PTR_ERR(cipher_tfm);
	}
	ctx->u.aes.tfm_cipher = cipher_tfm;

	return 0;
}

static void ccp_aes_cmac_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->u.aes.tfm_cipher)
		crypto_free_cipher(ctx->u.aes.tfm_cipher);
	ctx->u.aes.tfm_cipher = NULL;
}

int ccp_register_aes_cmac_algs(struct list_head *head)
{
	struct ccp_crypto_ahash_alg *ccp_alg;
	struct ahash_alg *alg;
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);
	ccp_alg->mode = CCP_AES_MODE_CMAC;

	alg = &ccp_alg->alg;
	alg->init = ccp_aes_cmac_init;
	alg->update = ccp_aes_cmac_update;
	alg->final = ccp_aes_cmac_final;
	alg->finup = ccp_aes_cmac_finup;
	alg->digest = ccp_aes_cmac_digest;
	alg->setkey = ccp_aes_cmac_setkey;

	halg = &alg->halg;
	halg->digestsize = AES_BLOCK_SIZE;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp");
	base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC |
			  CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = AES_BLOCK_SIZE;
	base->cra_ctxsize = sizeof(struct ccp_ctx);
	base->cra_priority = CCP_CRA_PRIORITY;
	base->cra_type = &crypto_ahash_type;
	base->cra_init = ccp_aes_cmac_cra_init;
	base->cra_exit = ccp_aes_cmac_cra_exit;
	base->cra_module = THIS_MODULE;

	ret = crypto_register_ahash(alg);
	if (ret) {
		pr_err("%s ahash algorithm registration error (%d)\n",
		       base->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}
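
/*
 * Illustrative only, not part of the original file: a minimal sketch of
 * how kernel code might drive the "cmac(aes)" ahash registered above.
 * The function name is hypothetical, the completion plumbing assumes the
 * DECLARE_CRYPTO_WAIT()/crypto_wait_req() helpers, and the key/msg/mac
 * buffers must be DMA-addressable (i.e. not on the stack).
 */
static int example_cmac_aes(const u8 *key, unsigned int key_len,
			    const u8 *msg, unsigned int msg_len,
			    u8 *mac /* AES_BLOCK_SIZE bytes */)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	/* Bind to any provider of cmac(aes), e.g. cmac-aes-ccp */
	tfm = crypto_alloc_ahash("cmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	ret = crypto_ahash_setkey(tfm, key, key_len);
	if (ret)
		goto out_free_req;

	sg_init_one(&sg, msg, msg_len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, mac, msg_len);

	/* digest = init + update + final in a single call */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

out_free_req:
	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}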