/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Mimi Zohar <zohar@us.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2 of the License.
 *
 * File: ima_crypto.c
 *	Calculates md5/sha1 file hash, template hash, boot-aggregate hash
 */
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/kernel.h>
19 #include <linux/moduleparam.h>
20 #include <linux/ratelimit.h>
21 #include <linux/file.h>
22 #include <linux/crypto.h>
23 #include <linux/scatterlist.h>
24 #include <linux/err.h>
25 #include <linux/slab.h>
26 #include <crypto/hash.h>
27 #include <crypto/hash_info.h>
30 struct ahash_completion {
31 struct completion completion;
35 /* minimum file size for ahash use */
36 static unsigned long ima_ahash_minsize;
37 module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
38 MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");
40 static struct crypto_shash *ima_shash_tfm;
41 static struct crypto_ahash *ima_ahash_tfm;
/*
 * ima_kernel_read - read file content
 *
 * This is a function for reading file content instead of kernel_read().
 * It does not perform locking checks to ensure it cannot be blocked.
 * It does not perform security checks because it is irrelevant for IMA.
 */
51 static int ima_kernel_read(struct file *file, loff_t offset,
52 char *addr, unsigned long count)
55 char __user *buf = addr;
58 if (!(file->f_mode & FMODE_READ))
60 if (!file->f_op->read && !file->f_op->aio_read)
66 ret = file->f_op->read(file, buf, count, &offset);
68 ret = do_sync_read(file, buf, count, &offset);
73 int ima_init_crypto(void)
77 ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
78 if (IS_ERR(ima_shash_tfm)) {
79 rc = PTR_ERR(ima_shash_tfm);
80 pr_err("Can not allocate %s (reason: %ld)\n",
81 hash_algo_name[ima_hash_algo], rc);
87 static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
89 struct crypto_shash *tfm = ima_shash_tfm;
92 if (algo != ima_hash_algo && algo < HASH_ALGO__LAST) {
93 tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
96 pr_err("Can not allocate %s (reason: %d)\n",
97 hash_algo_name[algo], rc);
103 static void ima_free_tfm(struct crypto_shash *tfm)
105 if (tfm != ima_shash_tfm)
106 crypto_free_shash(tfm);
109 static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
111 struct crypto_ahash *tfm = ima_ahash_tfm;
114 if ((algo != ima_hash_algo && algo < HASH_ALGO__LAST) || !tfm) {
115 tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
117 if (algo == ima_hash_algo)
121 pr_err("Can not allocate %s (reason: %d)\n",
122 hash_algo_name[algo], rc);
128 static void ima_free_atfm(struct crypto_ahash *tfm)
130 if (tfm != ima_ahash_tfm)
131 crypto_free_ahash(tfm);
134 static void ahash_complete(struct crypto_async_request *req, int err)
136 struct ahash_completion *res = req->data;
138 if (err == -EINPROGRESS)
141 complete(&res->completion);
144 static int ahash_wait(int err, struct ahash_completion *res)
151 wait_for_completion(&res->completion);
152 reinit_completion(&res->completion);
156 pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);
162 static int ima_calc_file_hash_atfm(struct file *file,
163 struct ima_digest_data *hash,
164 struct crypto_ahash *tfm)
166 loff_t i_size, offset;
168 int rc, read = 0, rbuf_len;
169 struct ahash_request *req;
170 struct scatterlist sg[1];
171 struct ahash_completion res;
173 hash->length = crypto_ahash_digestsize(tfm);
175 req = ahash_request_alloc(tfm, GFP_KERNEL);
179 init_completion(&res.completion);
180 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
181 CRYPTO_TFM_REQ_MAY_SLEEP,
182 ahash_complete, &res);
184 rc = ahash_wait(crypto_ahash_init(req), &res);
188 i_size = i_size_read(file_inode(file));
193 rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
199 if (!(file->f_mode & FMODE_READ)) {
200 file->f_mode |= FMODE_READ;
204 for (offset = 0; offset < i_size; offset += rbuf_len) {
205 rbuf_len = ima_kernel_read(file, offset, rbuf, PAGE_SIZE);
213 sg_init_one(&sg[0], rbuf, rbuf_len);
214 ahash_request_set_crypt(req, sg, NULL, rbuf_len);
216 rc = ahash_wait(crypto_ahash_update(req), &res);
221 file->f_mode &= ~FMODE_READ;
225 ahash_request_set_crypt(req, NULL, hash->digest, 0);
226 rc = ahash_wait(crypto_ahash_final(req), &res);
229 ahash_request_free(req);
233 static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
235 struct crypto_ahash *tfm;
238 tfm = ima_alloc_atfm(hash->algo);
242 rc = ima_calc_file_hash_atfm(file, hash, tfm);
249 static int ima_calc_file_hash_tfm(struct file *file,
250 struct ima_digest_data *hash,
251 struct crypto_shash *tfm)
253 loff_t i_size, offset = 0;
257 struct shash_desc shash;
258 char ctx[crypto_shash_descsize(tfm)];
261 desc.shash.tfm = tfm;
262 desc.shash.flags = 0;
264 hash->length = crypto_shash_digestsize(tfm);
266 rc = crypto_shash_init(&desc.shash);
270 i_size = i_size_read(file_inode(file));
275 rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
279 if (!(file->f_mode & FMODE_READ)) {
280 file->f_mode |= FMODE_READ;
284 while (offset < i_size) {
287 rbuf_len = ima_kernel_read(file, offset, rbuf, PAGE_SIZE);
296 rc = crypto_shash_update(&desc.shash, rbuf, rbuf_len);
301 file->f_mode &= ~FMODE_READ;
305 rc = crypto_shash_final(&desc.shash, hash->digest);
309 static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
311 struct crypto_shash *tfm;
314 tfm = ima_alloc_tfm(hash->algo);
318 rc = ima_calc_file_hash_tfm(file, hash, tfm);
/**
 * ima_calc_file_hash - calculate file hash
 *
 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 * a hash. ahash performance varies for different data sizes on different
 * crypto accelerators. shash performance might be better for smaller files.
 * The 'ima.ahash_minsize' module parameter allows specifying the best
 * minimum file size for using ahash on the system.
 *
 * If the ima.ahash_minsize parameter is not specified, this function uses
 * shash for the hash calculation. If ahash fails, it falls back to using
 * shash.
 */
338 int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
343 i_size = i_size_read(file_inode(file));
345 if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
346 rc = ima_calc_file_ahash(file, hash);
351 return ima_calc_file_shash(file, hash);
/* Calculate the hash of template data */
357 static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
358 struct ima_template_desc *td,
360 struct ima_digest_data *hash,
361 struct crypto_shash *tfm)
364 struct shash_desc shash;
365 char ctx[crypto_shash_descsize(tfm)];
369 desc.shash.tfm = tfm;
370 desc.shash.flags = 0;
372 hash->length = crypto_shash_digestsize(tfm);
374 rc = crypto_shash_init(&desc.shash);
378 for (i = 0; i < num_fields; i++) {
379 u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
380 u8 *data_to_hash = field_data[i].data;
381 u32 datalen = field_data[i].len;
383 if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
384 rc = crypto_shash_update(&desc.shash,
385 (const u8 *) &field_data[i].len,
386 sizeof(field_data[i].len));
389 } else if (strcmp(td->fields[i]->field_id, "n") == 0) {
390 memcpy(buffer, data_to_hash, datalen);
391 data_to_hash = buffer;
392 datalen = IMA_EVENT_NAME_LEN_MAX + 1;
394 rc = crypto_shash_update(&desc.shash, data_to_hash, datalen);
400 rc = crypto_shash_final(&desc.shash, hash->digest);
405 int ima_calc_field_array_hash(struct ima_field_data *field_data,
406 struct ima_template_desc *desc, int num_fields,
407 struct ima_digest_data *hash)
409 struct crypto_shash *tfm;
412 tfm = ima_alloc_tfm(hash->algo);
416 rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
424 static void __init ima_pcrread(int idx, u8 *pcr)
429 if (tpm_pcr_read(TPM_ANY_NUM, idx, pcr) != 0)
430 pr_err("Error Communicating to TPM chip\n");
/* Calculate the boot aggregate hash */
436 static int __init ima_calc_boot_aggregate_tfm(char *digest,
437 struct crypto_shash *tfm)
439 u8 pcr_i[TPM_DIGEST_SIZE];
442 struct shash_desc shash;
443 char ctx[crypto_shash_descsize(tfm)];
446 desc.shash.tfm = tfm;
447 desc.shash.flags = 0;
449 rc = crypto_shash_init(&desc.shash);
453 /* cumulative sha1 over tpm registers 0-7 */
454 for (i = TPM_PCR0; i < TPM_PCR8; i++) {
455 ima_pcrread(i, pcr_i);
456 /* now accumulate with current aggregate */
457 rc = crypto_shash_update(&desc.shash, pcr_i, TPM_DIGEST_SIZE);
460 crypto_shash_final(&desc.shash, digest);
464 int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
466 struct crypto_shash *tfm;
469 tfm = ima_alloc_tfm(hash->algo);
473 hash->length = crypto_shash_digestsize(tfm);
474 rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);