Commit | Line | Data |
---|---|---|
b886d83c | 1 | // SPDX-License-Identifier: GPL-2.0-only |
3323eec9 MZ |
2 | /* |
3 | * Copyright (C) 2005,2006,2007,2008 IBM Corporation | |
4 | * | |
5 | * Authors: | |
6 | * Serge Hallyn <serue@us.ibm.com> | |
7 | * Reiner Sailer <sailer@watson.ibm.com> | |
8 | * Mimi Zohar <zohar@us.ibm.com> | |
9 | * | |
3323eec9 MZ |
10 | * File: ima_queue.c |
11 | * Implements queues that store template measurements and | |
12 | * maintains aggregate over the stored measurements | |
13 | * in the pre-configured TPM PCR (if available). | |
14 | * The measurement list is append-only. No entry is | |
15 | * ever removed or changed during the boot-cycle. | |
16 | */ | |
20ee451f | 17 | |
3323eec9 | 18 | #include <linux/rculist.h> |
5a0e3ad6 | 19 | #include <linux/slab.h> |
3323eec9 MZ |
20 | #include "ima.h" |
21 | ||
#define AUDIT_CAUSE_LEN_MAX 32	/* buffer size for "TPM_error(%d)" audit cause strings */

/* pre-allocated array of tpm_digest structures to extend a PCR */
static struct tpm_digest *digests;

LIST_HEAD(ima_measurements);	/* list of all measurements */
#ifdef CONFIG_IMA_KEXEC
/* running total of serialized measurement-list bytes; starts at zero */
static unsigned long binary_runtime_size;
#else
/* ULONG_MAX disables size accounting when the list is not carried across kexec */
static unsigned long binary_runtime_size = ULONG_MAX;
#endif

/* key: inode (before secure-hashing a file) */
struct ima_h_table ima_htable = {
	.len = ATOMIC_LONG_INIT(0),
	.violations = ATOMIC_LONG_INIT(0),
	.queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT
};

/* mutex protects atomicity of extending measurement list
 * and extending the TPM PCR aggregate. Since tpm_extend can take
 * long (and the tpm driver uses a mutex), we can't use the spinlock.
 */
static DEFINE_MUTEX(ima_extend_list_mutex);
46 | ||
/*
 * ima_lookup_digest_entry - look up a measurement by digest value and PCR
 * @digest_value: digest to search for (the ima_hash_algo bank's digest)
 * @pcr: PCR index the candidate entry must also match
 *
 * Walks the hash-table bucket selected by ima_hash_key() under
 * rcu_read_lock(); the measurement list is append-only, so a lockless
 * RCU traversal is sufficient for readers.
 *
 * Returns the matching queue entry, or NULL if none is found.
 */
static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value,
						       int pcr)
{
	struct ima_queue_entry *qe, *ret = NULL;
	unsigned int key;
	int rc;

	key = ima_hash_key(digest_value);
	rcu_read_lock();
	hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
		/* compare only the ima_hash_algo bank of the entry's digests */
		rc = memcmp(qe->entry->digests[ima_hash_algo_idx].digest,
			    digest_value, hash_digest_size[ima_hash_algo]);
		/* both the digest and the target PCR must match */
		if ((rc == 0) && (qe->entry->pcr == pcr)) {
			ret = qe;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}
68 | ||
d158847a MZ |
69 | /* |
70 | * Calculate the memory required for serializing a single | |
71 | * binary_runtime_measurement list entry, which contains a | |
72 | * couple of variable length fields (e.g template name and data). | |
73 | */ | |
74 | static int get_binary_runtime_size(struct ima_template_entry *entry) | |
75 | { | |
76 | int size = 0; | |
77 | ||
78 | size += sizeof(u32); /* pcr */ | |
aa724fe1 | 79 | size += TPM_DIGEST_SIZE; |
d158847a | 80 | size += sizeof(int); /* template name size field */ |
e4586c79 | 81 | size += strlen(entry->template_desc->name); |
d158847a MZ |
82 | size += sizeof(entry->template_data_len); |
83 | size += entry->template_data_len; | |
84 | return size; | |
85 | } | |
86 | ||
/* ima_add_template_entry helper function:
 * - Add template entry to the measurement list and hash table, for
 *   all entries except those carried across kexec.
 *
 * (Called with ima_extend_list_mutex held.)
 */
static int ima_add_digest_entry(struct ima_template_entry *entry,
				bool update_htable)
{
	struct ima_queue_entry *qe;
	unsigned int key;

	qe = kmalloc(sizeof(*qe), GFP_KERNEL);
	if (qe == NULL) {
		pr_err("OUT OF MEMORY ERROR creating queue entry\n");
		return -ENOMEM;
	}
	qe->entry = entry;

	INIT_LIST_HEAD(&qe->later);
	/* RCU publication: readers may traverse ima_measurements locklessly */
	list_add_tail_rcu(&qe->later, &ima_measurements);

	atomic_long_inc(&ima_htable.len);
	/* entries restored across kexec skip the duplicate-lookup hash table */
	if (update_htable) {
		key = ima_hash_key(entry->digests[ima_hash_algo_idx].digest);
		hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
	}

	/* ULONG_MAX means size accounting is disabled (no kexec carry-over) */
	if (binary_runtime_size != ULONG_MAX) {
		int size;

		size = get_binary_runtime_size(entry);
		/* saturating add: clamp to ULONG_MAX instead of wrapping */
		binary_runtime_size = (binary_runtime_size < ULONG_MAX - size) ?
		     binary_runtime_size + size : ULONG_MAX;
	}
	return 0;
}
124 | ||
d158847a MZ |
125 | /* |
126 | * Return the amount of memory required for serializing the | |
127 | * entire binary_runtime_measurement list, including the ima_kexec_hdr | |
128 | * structure. | |
129 | */ | |
130 | unsigned long ima_get_binary_runtime_size(void) | |
131 | { | |
132 | if (binary_runtime_size >= (ULONG_MAX - sizeof(struct ima_kexec_hdr))) | |
133 | return ULONG_MAX; | |
134 | else | |
135 | return binary_runtime_size + sizeof(struct ima_kexec_hdr); | |
136 | }; | |
137 | ||
1ea973df | 138 | static int ima_pcr_extend(struct tpm_digest *digests_arg, int pcr) |
3323eec9 MZ |
139 | { |
140 | int result = 0; | |
141 | ||
ec403d8e | 142 | if (!ima_tpm_chip) |
3323eec9 MZ |
143 | return result; |
144 | ||
1ea973df | 145 | result = tpm_pcr_extend(ima_tpm_chip, pcr, digests_arg); |
3323eec9 | 146 | if (result != 0) |
20ee451f | 147 | pr_err("Error Communicating to TPM chip, result: %d\n", result); |
3323eec9 MZ |
148 | return result; |
149 | } | |
150 | ||
d158847a MZ |
151 | /* |
152 | * Add template entry to the measurement list and hash table, and | |
153 | * extend the pcr. | |
154 | * | |
155 | * On systems which support carrying the IMA measurement list across | |
156 | * kexec, maintain the total memory size required for serializing the | |
157 | * binary_runtime_measurements. | |
3323eec9 MZ |
158 | */ |
159 | int ima_add_template_entry(struct ima_template_entry *entry, int violation, | |
9803d413 RS |
160 | const char *op, struct inode *inode, |
161 | const unsigned char *filename) | |
3323eec9 | 162 | { |
2592677c | 163 | u8 *digest = entry->digests[ima_hash_algo_idx].digest; |
1ea973df | 164 | struct tpm_digest *digests_arg = entry->digests; |
3323eec9 | 165 | const char *audit_cause = "hash_added"; |
7b7e5916 | 166 | char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX]; |
3323eec9 | 167 | int audit_info = 1; |
7b7e5916 | 168 | int result = 0, tpmresult = 0; |
3323eec9 MZ |
169 | |
170 | mutex_lock(&ima_extend_list_mutex); | |
171 | if (!violation) { | |
67696f6d | 172 | if (ima_lookup_digest_entry(digest, entry->pcr)) { |
3323eec9 | 173 | audit_cause = "hash_exists"; |
45fae749 | 174 | result = -EEXIST; |
3323eec9 MZ |
175 | goto out; |
176 | } | |
177 | } | |
178 | ||
dcfc5693 | 179 | result = ima_add_digest_entry(entry, 1); |
3323eec9 MZ |
180 | if (result < 0) { |
181 | audit_cause = "ENOMEM"; | |
182 | audit_info = 0; | |
183 | goto out; | |
184 | } | |
185 | ||
186 | if (violation) /* invalidate pcr */ | |
1ea973df | 187 | digests_arg = digests; |
3323eec9 | 188 | |
1ea973df | 189 | tpmresult = ima_pcr_extend(digests_arg, entry->pcr); |
7b7e5916 RS |
190 | if (tpmresult != 0) { |
191 | snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)", | |
192 | tpmresult); | |
193 | audit_cause = tpm_audit_cause; | |
3323eec9 MZ |
194 | audit_info = 0; |
195 | } | |
196 | out: | |
197 | mutex_unlock(&ima_extend_list_mutex); | |
9803d413 | 198 | integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename, |
3323eec9 MZ |
199 | op, audit_cause, result, audit_info); |
200 | return result; | |
201 | } | |
94c3aac5 MZ |
202 | |
203 | int ima_restore_measurement_entry(struct ima_template_entry *entry) | |
204 | { | |
205 | int result = 0; | |
206 | ||
207 | mutex_lock(&ima_extend_list_mutex); | |
dcfc5693 | 208 | result = ima_add_digest_entry(entry, 0); |
94c3aac5 MZ |
209 | mutex_unlock(&ima_extend_list_mutex); |
210 | return result; | |
211 | } | |
0b6cf6b9 RS |
212 | |
213 | int __init ima_init_digests(void) | |
214 | { | |
1ea973df RS |
215 | u16 digest_size; |
216 | u16 crypto_id; | |
0b6cf6b9 RS |
217 | int i; |
218 | ||
219 | if (!ima_tpm_chip) | |
220 | return 0; | |
221 | ||
222 | digests = kcalloc(ima_tpm_chip->nr_allocated_banks, sizeof(*digests), | |
223 | GFP_NOFS); | |
224 | if (!digests) | |
225 | return -ENOMEM; | |
226 | ||
1ea973df | 227 | for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) { |
0b6cf6b9 | 228 | digests[i].alg_id = ima_tpm_chip->allocated_banks[i].alg_id; |
1ea973df RS |
229 | digest_size = ima_tpm_chip->allocated_banks[i].digest_size; |
230 | crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id; | |
231 | ||
232 | /* for unmapped TPM algorithms digest is still a padded SHA1 */ | |
233 | if (crypto_id == HASH_ALGO__LAST) | |
234 | digest_size = SHA1_DIGEST_SIZE; | |
235 | ||
236 | memset(digests[i].digest, 0xff, digest_size); | |
237 | } | |
0b6cf6b9 RS |
238 | |
239 | return 0; | |
240 | } |