1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2016-20 Intel Corporation. */
13 #include <sys/types.h>
15 #include <openssl/err.h>
16 #include <openssl/pem.h>
/*
 * Release the OpenSSL bignum state held in a q1q2_ctx.
 * Frees the shared BN_CTX; presumably the remaining BIGNUM members
 * (s, m, q1, q2, qr) are freed in lines not visible here — confirm.
 */
29 static void free_q1q2_ctx(struct q1q2_ctx *ctx)
31 BN_CTX_free(ctx->bn_ctx);
/*
 * Populate a q1q2_ctx from raw signature (s) and modulus (m) buffers.
 *
 * Converts the SGX_MODULUS_SIZE-byte buffers into BIGNUMs with
 * BN_bin2bn (which interprets input as big-endian) and allocates the
 * scratch BN_CTX.  Returns false if any allocation failed; on that
 * path the partially built context is presumably torn down via
 * free_q1q2_ctx — confirm against the elided failure branch.
 */
39 static bool alloc_q1q2_ctx(const uint8_t *s, const uint8_t *m,
42 ctx->bn_ctx = BN_CTX_new();
43 ctx->s = BN_bin2bn(s, SGX_MODULUS_SIZE, NULL);
44 ctx->m = BN_bin2bn(m, SGX_MODULUS_SIZE, NULL);
/* Any NULL here means an OpenSSL allocation failure. */
49 if (!ctx->bn_ctx || !ctx->s || !ctx->m || !ctx->q1 || !ctx->qr ||
/*
 * In-place byte-order reversal of a buffer of 'length' bytes.
 * NOTE(review): body not visible here; by name and by its use after
 * BN_bn2bin below, this appears to convert between OpenSSL's
 * big-endian export format and the little-endian layout SGX
 * sigstruct fields expect — confirm.
 */
58 static void reverse_bytes(void *data, int length)
/*
 * Compute the SIGSTRUCT helper values Q1 and Q2 from signature s and
 * modulus m:
 *
 *   q1 = s^2 / m           (integer division)
 *   q2 = (s * (s^2 % m)) / m
 *
 * Both results are exported with BN_bn2bin (big-endian) and then
 * byte-reversed via reverse_bytes before being stored into the
 * caller-supplied q1/q2 buffers.  Returns false on allocation
 * failure or if either value exceeds SGX_MODULUS_SIZE bytes.
 */
74 static bool calc_q1q2(const uint8_t *s, const uint8_t *m, uint8_t *q1,
80 if (!alloc_q1q2_ctx(s, m, &ctx)) {
81 fprintf(stderr, "Not enough memory for Q1Q2 calculation\n");
/* q1 = s * s */
85 if (!BN_mul(ctx.q1, ctx.s, ctx.s, ctx.bn_ctx))
/* q1 = s^2 / m, qr = s^2 % m (BN_div returns quotient and remainder) */
88 if (!BN_div(ctx.q1, ctx.qr, ctx.q1, ctx.m, ctx.bn_ctx))
/* Q1 must fit in its fixed-size sigstruct field. */
91 if (BN_num_bytes(ctx.q1) > SGX_MODULUS_SIZE) {
92 fprintf(stderr, "Too large Q1 %d bytes\n",
93 BN_num_bytes(ctx.q1));
/* q2 = s * (s^2 % m) */
97 if (!BN_mul(ctx.q2, ctx.s, ctx.qr, ctx.bn_ctx))
/* q2 = q2 / m (remainder discarded) */
100 if (!BN_div(ctx.q2, NULL, ctx.q2, ctx.m, ctx.bn_ctx))
103 if (BN_num_bytes(ctx.q2) > SGX_MODULUS_SIZE) {
104 fprintf(stderr, "Too large Q2 %d bytes\n",
105 BN_num_bytes(ctx.q2));
/* Export big-endian, then reverse into the layout the caller expects. */
109 len = BN_bn2bin(ctx.q1, q1);
110 reverse_bytes(q1, len);
111 len = BN_bn2bin(ctx.q2, q2);
112 reverse_bytes(q2, len);
/*
 * The portion of the SIGSTRUCT that is covered by the RSA signature:
 * header followed immediately by body (see encl_measure, which copies
 * both in and SHA-256 hashes this struct before signing).
 */
121 struct sgx_sigstruct_payload {
122 struct sgx_sigstruct_header header;
123 struct sgx_sigstruct_body body;
/*
 * Drain and print the OpenSSL error queue.
 * Returns true if any errors were pending (had_errors is presumably
 * set in the elided drain loop — confirm); returns false immediately
 * when the queue is empty.
 */
126 static bool check_crypto_errors(void)
129 bool had_errors = false;
130 const char *filename;
/* Fast path: nothing queued. */
135 if (ERR_peek_error() == 0)
/* Pop one error with its source location and render it as text. */
139 err = ERR_get_error_line(&filename, &line);
140 ERR_error_string_n(err, str, sizeof(str));
141 fprintf(stderr, "crypto: %s: %s:%d\n", str, filename, line);
/*
 * Return the public modulus n of an RSA key via the OpenSSL 1.1
 * accessor (exponents are not needed, hence the NULLs).
 */
147 static inline const BIGNUM *get_modulus(RSA *key)
151 RSA_get0_key(key, &n, NULL, NULL);
/*
 * Load the signing key from the PEM blob embedded in the binary
 * between the sign_key and sign_key_end symbols (presumably linked in
 * from a data object — confirm).  The blob is wrapped in a read-only
 * memory BIO and parsed as an unencrypted RSA private key.
 * Returns the RSA key, or NULL on failure (error paths elided).
 */
155 static RSA *gen_sign_key(void)
157 unsigned long sign_key_length;
/* Size of the embedded PEM data = distance between the two symbols. */
161 sign_key_length = (unsigned long)&sign_key_end -
162 (unsigned long)&sign_key;
164 bio = BIO_new_mem_buf(&sign_key, sign_key_length);
168 key = PEM_read_bio_RSAPrivateKey(bio, NULL, NULL, NULL);
/*
 * 64-bit tags identifying each measurement record.  Read as
 * little-endian bytes these are the ASCII strings "ECREATE\0",
 * "EADD\0\0\0\0" and "EEXTEND\0" — matching the SGX ENCLS leaf each
 * record mirrors.
 */
175 MRECREATE = 0x0045544145524345,
176 MREADD = 0x0000000044444145,
177 MREEXTEND = 0x00444E4554584545,
/*
 * Fold one fixed 64-byte measurement record (or data chunk) into the
 * running MRENCLAVE digest.  Returns false and reports to stderr if
 * the digest update fails.
 */
180 static bool mrenclave_update(EVP_MD_CTX *ctx, const void *data)
182 if (!EVP_DigestUpdate(ctx, data, 64)) {
183 fprintf(stderr, "digest update failed\n");
/*
 * Finalize the MRENCLAVE digest into the 'mrenclave' buffer.
 * Also validates the digest length reported by OpenSSL (presumably
 * against SHA256_DIGEST_LENGTH, per the check elided between these
 * lines — confirm); returns false on failure or size mismatch.
 */
190 static bool mrenclave_commit(EVP_MD_CTX *ctx, uint8_t *mrenclave)
194 if (!EVP_DigestFinal_ex(ctx, (unsigned char *)mrenclave, &size)) {
195 fprintf(stderr, "digest commit failed\n");
200 fprintf(stderr, "invalid digest size = %u\n", size);
/* Tail of the 64-byte ECREATE measurement record: SSA frame size plus
 * reserved padding; packed so the hashed layout is exact. */
209 uint32_t ssaframesize;
211 uint8_t reserved[44];
212 } __attribute__((__packed__));
/*
 * Start the MRENCLAVE measurement: initialize SHA-256 and hash the
 * ECREATE record for an enclave large enough to hold blob_size bytes.
 */
215 static bool mrenclave_ecreate(EVP_MD_CTX *ctx, uint64_t blob_size)
217 struct mrecreate mrecreate;
/* Grow encl_size from one page until it covers blob_size; the
 * increment step is elided here — presumably doubling, so the final
 * size is a power of two as SGX requires — confirm. */
220 for (encl_size = 0x1000; encl_size < blob_size; )
/* Zero the record so reserved bytes hash as zero. */
223 memset(&mrecreate, 0, sizeof(mrecreate));
224 mrecreate.tag = MRECREATE;
225 mrecreate.ssaframesize = 1;
226 mrecreate.size = encl_size;
228 if (!EVP_DigestInit_ex(ctx, EVP_sha256(), NULL))
231 return mrenclave_update(ctx, &mrecreate);
/* Tail of the 64-byte EADD measurement record: the page's SECINFO
 * flags plus reserved padding; packed for an exact hashed layout. */
237 uint64_t flags; /* SECINFO flags */
238 uint8_t reserved[40];
239 } __attribute__((__packed__));
/*
 * Hash the EADD measurement record for one page at 'offset' with the
 * given SECINFO flags.  The tag assignment (presumably MREADD) is in
 * an elided line — confirm.
 */
241 static bool mrenclave_eadd(EVP_MD_CTX *ctx, uint64_t offset, uint64_t flags)
243 struct mreadd mreadd;
/* Zero first so reserved bytes hash as zero. */
245 memset(&mreadd, 0, sizeof(mreadd));
247 mreadd.offset = offset;
248 mreadd.flags = flags;
250 return mrenclave_update(ctx, &mreadd);
/* Tail of the 64-byte EEXTEND measurement record: reserved padding;
 * packed for an exact hashed layout. */
256 uint8_t reserved[48];
257 } __attribute__((__packed__));
/*
 * Measure one 4 KiB page of enclave data: for each 256-byte chunk,
 * hash an EEXTEND record for the chunk's offset followed by the
 * chunk's four 64-byte quarters — mirroring how the EEXTEND ENCLS
 * leaf extends MRENCLAVE in hardware.
 */
259 static bool mrenclave_eextend(EVP_MD_CTX *ctx, uint64_t offset,
262 struct mreextend mreextend;
/* 4 KiB page, stepped in 256-byte EEXTEND-sized chunks. */
265 for (i = 0; i < 0x1000; i += 0x100) {
266 memset(&mreextend, 0, sizeof(mreextend));
267 mreextend.tag = MREEXTEND;
268 mreextend.offset = offset + i;
270 if (!mrenclave_update(ctx, &mreextend))
/* The chunk's data, hashed as four 64-byte blocks. */
273 if (!mrenclave_update(ctx, &data[i + 0x00]))
276 if (!mrenclave_update(ctx, &data[i + 0x40]))
279 if (!mrenclave_update(ctx, &data[i + 0x80]))
282 if (!mrenclave_update(ctx, &data[i + 0xC0]))
/*
 * Measure one enclave segment: EADD then EEXTEND every PAGE_SIZE page
 * from the segment's start to seg->size, sourcing page contents from
 * seg->src at the matching offset.
 */
289 static bool mrenclave_segment(EVP_MD_CTX *ctx, struct encl *encl,
290 struct encl_segment *seg)
292 uint64_t end = seg->size;
295 for (offset = 0; offset < end; offset += PAGE_SIZE) {
296 if (!mrenclave_eadd(ctx, seg->offset + offset, seg->flags))
300 if (!mrenclave_eextend(ctx, seg->offset + offset, seg->src + offset))
/*
 * Build and sign the enclave's SIGSTRUCT in encl->sigstruct.
 *
 * Flow: fill the fixed header fields, generate/load the RSA-3072
 * signing key, compute MRENCLAVE over ECREATE + every segment's
 * EADD/EEXTEND records, SHA-256 the header+body payload, RSA-sign it,
 * derive Q1/Q2, and byte-reverse signature and modulus into the
 * little-endian layout the sigstruct fields use.  Returns true on
 * success; error paths (elided here) clean up and return false.
 */
308 bool encl_measure(struct encl *encl)
/* Architecturally fixed SIGSTRUCT header magic values. */
310 uint64_t header1[2] = {0x000000E100000006, 0x0000000000010000};
311 uint64_t header2[2] = {0x0000006000000101, 0x0000000100000060};
312 struct sgx_sigstruct *sigstruct = &encl->sigstruct;
313 struct sgx_sigstruct_payload payload;
314 uint8_t digest[SHA256_DIGEST_LENGTH];
/* Start from all-zero so reserved fields are deterministic. */
320 memset(sigstruct, 0, sizeof(*sigstruct));
322 sigstruct->header.header1[0] = header1[0];
323 sigstruct->header.header1[1] = header1[1];
324 sigstruct->header.header2[0] = header2[0];
325 sigstruct->header.header2[1] = header2[1];
/* SGX requires public exponent 3. */
326 sigstruct->exponent = 3;
327 sigstruct->body.attributes = SGX_ATTR_MODE64BIT;
/* XFRM: x87 + SSE state (bits 0-1). */
328 sigstruct->body.xfrm = 3;
/* Bail out early if OpenSSL already has queued errors. */
331 if (check_crypto_errors())
334 key = gen_sign_key();
336 ERR_print_errors_fp(stdout);
/* Export the public modulus (big-endian here; reversed below). */
340 BN_bn2bin(get_modulus(key), sigstruct->modulus);
342 ctx = EVP_MD_CTX_create();
/* MRENCLAVE: ECREATE record first ... */
346 if (!mrenclave_ecreate(ctx, encl->src_size))
/* ... then every segment's pages. */
349 for (i = 0; i < encl->nr_segments; i++) {
350 struct encl_segment *seg = &encl->segment_tbl[i];
352 if (!mrenclave_segment(ctx, encl, seg))
356 if (!mrenclave_commit(ctx, sigstruct->body.mrenclave))
/* Sign SHA-256(header || body). */
359 memcpy(&payload.header, &sigstruct->header, sizeof(sigstruct->header));
360 memcpy(&payload.body, &sigstruct->body, sizeof(sigstruct->body));
362 SHA256((unsigned char *)&payload, sizeof(payload), digest);
364 if (!RSA_sign(NID_sha256, digest, SHA256_DIGEST_LENGTH,
365 sigstruct->signature, &siglen, key))
368 if (!calc_q1q2(sigstruct->signature, sigstruct->modulus, sigstruct->q1,
/* Sigstruct stores signature/modulus little-endian; RSA_sign and
 * BN_bn2bin produced big-endian, so reverse in place. */
373 reverse_bytes(sigstruct->signature, SGX_MODULUS_SIZE);
374 reverse_bytes(sigstruct->modulus, SGX_MODULUS_SIZE);
376 EVP_MD_CTX_destroy(ctx);
/* Error-path cleanup (label elided between these lines). */
381 EVP_MD_CTX_destroy(ctx);