1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2016-20 Intel Corporation. */
13 #include <sys/types.h>
15 #include <openssl/err.h>
16 #include <openssl/pem.h>
/*
 * Release the OpenSSL resources owned by a Q1/Q2 calculation context.
 * NOTE(review): interior lines are elided in this view; presumably the
 * context's BIGNUMs (s, m, q1, q2, qr) are freed as well — confirm
 * against the full file.
 */
29 static void free_q1q2_ctx(struct q1q2_ctx *ctx)
31 BN_CTX_free(ctx->bn_ctx);
/*
 * Populate a q1q2_ctx from the raw big-endian signature (s) and modulus
 * (m) buffers, each SGX_MODULUS_SIZE bytes.  Returns false if any
 * allocation/conversion fails (lines elided here presumably free the
 * partial context on failure — confirm against the full file).
 */
39 static bool alloc_q1q2_ctx(const uint8_t *s, const uint8_t *m,
42 ctx->bn_ctx = BN_CTX_new();
/* BN_bin2bn() interprets the buffers as big-endian integers. */
43 ctx->s = BN_bin2bn(s, SGX_MODULUS_SIZE, NULL);
44 ctx->m = BN_bin2bn(m, SGX_MODULUS_SIZE, NULL);
/* Any NULL here means an OpenSSL allocation failed. */
49 if (!ctx->bn_ctx || !ctx->s || !ctx->m || !ctx->q1 || !ctx->qr ||
/*
 * Compute the SIGSTRUCT helper values Q1 and Q2 from signature s and
 * modulus m:
 *   q1 = s^2 / m          (qr = s^2 mod m)
 *   q2 = (s * qr) / m
 * Results are written big-endian into the q1/q2 output buffers.
 *
 * NOTE(review): BN_bn2bin() emits only BN_num_bytes() bytes and drops
 * leading zeros, so a value shorter than SGX_MODULUS_SIZE would leave
 * the tail of q1/q2 unwritten unless the caller zeroes the buffers or
 * elided lines handle padding — verify against the full file.
 */
58 static bool calc_q1q2(const uint8_t *s, const uint8_t *m, uint8_t *q1,
63 if (!alloc_q1q2_ctx(s, m, &ctx)) {
64 fprintf(stderr, "Not enough memory for Q1Q2 calculation\n");
/* q1 = s^2; then divide by m, keeping the remainder in qr for q2. */
68 if (!BN_mul(ctx.q1, ctx.s, ctx.s, ctx.bn_ctx))
71 if (!BN_div(ctx.q1, ctx.qr, ctx.q1, ctx.m, ctx.bn_ctx))
/* Q1 must fit in the fixed-size SIGSTRUCT field. */
74 if (BN_num_bytes(ctx.q1) > SGX_MODULUS_SIZE) {
75 fprintf(stderr, "Too large Q1 %d bytes\n",
76 BN_num_bytes(ctx.q1));
/* q2 = (s * (s^2 mod m)) / m; the remainder is discarded. */
80 if (!BN_mul(ctx.q2, ctx.s, ctx.qr, ctx.bn_ctx))
83 if (!BN_div(ctx.q2, NULL, ctx.q2, ctx.m, ctx.bn_ctx))
86 if (BN_num_bytes(ctx.q2) > SGX_MODULUS_SIZE) {
87 fprintf(stderr, "Too large Q2 %d bytes\n",
88 BN_num_bytes(ctx.q2));
/* Serialize back to big-endian bytes for the sigstruct. */
92 BN_bn2bin(ctx.q1, q1);
93 BN_bn2bin(ctx.q2, q2);
/*
 * The portion of the SIGSTRUCT that is covered by the RSA signature:
 * header followed by body (see encl_measure(), which hashes exactly
 * these two pieces with SHA-256 before signing).
 */
102 struct sgx_sigstruct_payload {
103 struct sgx_sigstruct_header header;
104 struct sgx_sigstruct_body body;
/*
 * Drain the OpenSSL error queue, printing each entry to stderr.
 * Returns true if any error was queued (presumably via had_errors;
 * the return statement is elided in this view — confirm).
 */
107 static bool check_crypto_errors(void)
110 bool had_errors = false;
111 const char *filename;
/* Fast path: nothing queued, nothing to report. */
116 if (ERR_peek_error() == 0)
/* Pop one error with its source location and render it human-readable. */
120 err = ERR_get_error_line(&filename, &line);
121 ERR_error_string_n(err, str, sizeof(str));
122 fprintf(stderr, "crypto: %s: %s:%d\n", str, filename, line);
/*
 * Return the public modulus (n) of an RSA key via the OpenSSL 1.1+
 * accessor; exponent and private exponent are not requested.
 */
128 static inline const BIGNUM *get_modulus(RSA *key)
132 RSA_get0_key(key, &n, NULL, NULL);
/*
 * Generate a 3072-bit RSA signing key with public exponent 3, as
 * required by the SGX SIGSTRUCT format (sigstruct->exponent = 3).
 * Error-handling lines are elided in this view.
 */
136 static RSA *gen_sign_key(void)
148 ret = BN_set_word(e, RSA_3);
152 ret = RSA_generate_key_ex(key, 3072, e, NULL);
/*
 * Reverse a byte buffer in place (body elided in this view).  Used to
 * convert the big-endian OpenSSL output to the little-endian layout
 * the SIGSTRUCT fields expect.
 */
167 static void reverse_bytes(void *data, int length)
/*
 * 64-bit ASCII tags hashed at the start of each measurement record;
 * the hex values spell "ECREATE\0", "EADD\0\0\0\0" and "EEXTEND\0"
 * when read as little-endian bytes.
 */
184 MRECREATE = 0x0045544145524345,
185 MREADD = 0x0000000044444145,
186 MREEXTEND = 0x00444E4554584545,
/*
 * Feed one 64-byte measurement record into the running SHA-256.
 * All ENCLS measurement records are exactly 64 bytes, hence the
 * hard-coded length.  Returns false (elided) on digest failure.
 */
189 static bool mrenclave_update(EVP_MD_CTX *ctx, const void *data)
191 if (!EVP_DigestUpdate(ctx, data, 64)) {
192 fprintf(stderr, "digest update failed\n");
/*
 * Finalize the running SHA-256 into mrenclave (32 bytes).  The size
 * check guards against an unexpected digest length from OpenSSL.
 */
199 static bool mrenclave_commit(EVP_MD_CTX *ctx, uint8_t *mrenclave)
203 if (!EVP_DigestFinal_ex(ctx, (unsigned char *)mrenclave, &size)) {
204 fprintf(stderr, "digest commit failed\n");
209 fprintf(stderr, "invalid digest size = %u\n", size);
/*
 * Tail of the 64-byte ECREATE measurement record (struct opening is
 * outside this view); packed so the layout matches the hardware's
 * hashed format exactly, with reserved bytes zero-padding to 64.
 */
218 uint32_t ssaframesize;
220 uint8_t reserved[44];
221 } __attribute__((__packed__));
/*
 * Initialize the SHA-256 context and hash the ECREATE record.
 * encl_size is the smallest power-of-two-style size covering the blob
 * (the loop body that grows encl_size is elided in this view —
 * presumably it doubles; confirm against the full file).
 */
224 static bool mrenclave_ecreate(EVP_MD_CTX *ctx, uint64_t blob_size)
226 struct mrecreate mrecreate;
/* Start at one page (0x1000) and grow until it covers blob_size. */
229 for (encl_size = 0x1000; encl_size < blob_size; )
/* Zero first so the reserved[] padding hashes as zeros. */
232 memset(&mrecreate, 0, sizeof(mrecreate));
233 mrecreate.tag = MRECREATE;
234 mrecreate.ssaframesize = 1;
235 mrecreate.size = encl_size;
237 if (!EVP_DigestInit_ex(ctx, EVP_sha256(), NULL))
240 return mrenclave_update(ctx, &mrecreate);
/*
 * Tail of the 64-byte EADD measurement record (struct opening is
 * outside this view); packed, with reserved bytes padding to 64.
 */
246 uint64_t flags; /* SECINFO flags */
247 uint8_t reserved[40];
248 } __attribute__((__packed__));
/*
 * Hash one EADD record: the page's enclave offset plus its SECINFO
 * flags.  Zeroed first so reserved[] contributes zeros to the digest.
 */
250 static bool mrenclave_eadd(EVP_MD_CTX *ctx, uint64_t offset, uint64_t flags)
252 struct mreadd mreadd;
254 memset(&mreadd, 0, sizeof(mreadd));
256 mreadd.offset = offset;
257 mreadd.flags = flags;
259 return mrenclave_update(ctx, &mreadd);
/*
 * Tail of the 64-byte EEXTEND measurement record (struct opening is
 * outside this view); packed, reserved bytes pad to 64.
 */
265 uint8_t reserved[48];
266 } __attribute__((__packed__));
/*
 * Measure one 4 KiB page as the hardware's EEXTEND does: for each
 * 256-byte chunk, hash the EEXTEND record (tag + chunk offset) and
 * then the chunk's data as four 64-byte updates.
 * NOTE(review): only updates at +0x00, +0x40, +0x80, +0xC0 are visible
 * here; that covers 256 bytes per iteration, consistent with the
 * 0x100 stride.
 */
268 static bool mrenclave_eextend(EVP_MD_CTX *ctx, uint64_t offset,
271 struct mreextend mreextend;
274 for (i = 0; i < 0x1000; i += 0x100) {
275 memset(&mreextend, 0, sizeof(mreextend));
276 mreextend.tag = MREEXTEND;
277 mreextend.offset = offset + i;
279 if (!mrenclave_update(ctx, &mreextend))
282 if (!mrenclave_update(ctx, &data[i + 0x00]))
285 if (!mrenclave_update(ctx, &data[i + 0x40]))
288 if (!mrenclave_update(ctx, &data[i + 0x80]))
291 if (!mrenclave_update(ctx, &data[i + 0xC0]))
/*
 * Measure one enclave segment page by page: EADD record for the page's
 * offset and flags, then EEXTEND over the page's backing data taken
 * from encl->src at the same offset.
 */
298 static bool mrenclave_segment(EVP_MD_CTX *ctx, struct encl *encl,
299 struct encl_segment *seg)
301 uint64_t end = seg->offset + seg->size;
304 for (offset = seg->offset; offset < end; offset += PAGE_SIZE) {
305 if (!mrenclave_eadd(ctx, offset, seg->flags))
308 if (!mrenclave_eextend(ctx, offset, encl->src + offset))
/*
 * Build and sign the enclave's SIGSTRUCT:
 *  1. Fill the fixed header/body fields (magic headers, exponent 3,
 *     64-bit mode attribute, XFRM).
 *  2. Generate a 3072-bit RSA-3 key and record its modulus.
 *  3. Replay the ECREATE/EADD/EEXTEND measurement to produce MRENCLAVE.
 *  4. SHA-256 the header+body payload, RSA-sign it, derive Q1/Q2.
 *  5. Byte-reverse signature, modulus, Q1 and Q2 into the
 *     little-endian layout SIGSTRUCT requires.
 * Error/cleanup paths are elided in this view; the trailing
 * EVP_MD_CTX_destroy at line 388 is presumably the error path —
 * confirm against the full file.
 */
315 bool encl_measure(struct encl *encl)
/* Fixed SIGSTRUCT header magic values from the SDM. */
317 uint64_t header1[2] = {0x000000E100000006, 0x0000000000010000};
318 uint64_t header2[2] = {0x0000006000000101, 0x0000000100000060};
319 struct sgx_sigstruct *sigstruct = &encl->sigstruct;
320 struct sgx_sigstruct_payload payload;
321 uint8_t digest[SHA256_DIGEST_LENGTH];
327 memset(sigstruct, 0, sizeof(*sigstruct));
329 sigstruct->header.header1[0] = header1[0];
330 sigstruct->header.header1[1] = header1[1];
331 sigstruct->header.header2[0] = header2[0];
332 sigstruct->header.header2[1] = header2[1];
/* Exponent 3 matches the RSA_3 key generated by gen_sign_key(). */
333 sigstruct->exponent = 3;
334 sigstruct->body.attributes = SGX_ATTR_MODE64BIT;
/* XFRM = 3: x87 + SSE state, the architectural minimum. */
335 sigstruct->body.xfrm = 3;
/* Bail out early if OpenSSL already has queued errors. */
338 if (check_crypto_errors())
341 key = gen_sign_key();
345 BN_bn2bin(get_modulus(key), sigstruct->modulus);
347 ctx = EVP_MD_CTX_create();
/* Replay the measurement flow: ECREATE, then per-segment EADD/EEXTEND. */
351 if (!mrenclave_ecreate(ctx, encl->src_size))
354 for (i = 0; i < encl->nr_segments; i++) {
355 struct encl_segment *seg = &encl->segment_tbl[i];
357 if (!mrenclave_segment(ctx, encl, seg))
361 if (!mrenclave_commit(ctx, sigstruct->body.mrenclave))
/* Sign only header+body — exactly the sgx_sigstruct_payload layout. */
364 memcpy(&payload.header, &sigstruct->header, sizeof(sigstruct->header));
365 memcpy(&payload.body, &sigstruct->body, sizeof(sigstruct->body));
367 SHA256((unsigned char *)&payload, sizeof(payload), digest);
369 if (!RSA_sign(NID_sha256, digest, SHA256_DIGEST_LENGTH,
370 sigstruct->signature, &siglen, key))
373 if (!calc_q1q2(sigstruct->signature, sigstruct->modulus, sigstruct->q1,
/* Convert the big-endian OpenSSL outputs to SIGSTRUCT little-endian. */
378 reverse_bytes(sigstruct->signature, SGX_MODULUS_SIZE);
379 reverse_bytes(sigstruct->modulus, SGX_MODULUS_SIZE);
380 reverse_bytes(sigstruct->q1, SGX_MODULUS_SIZE);
381 reverse_bytes(sigstruct->q2, SGX_MODULUS_SIZE);
383 EVP_MD_CTX_destroy(ctx);
388 EVP_MD_CTX_destroy(ctx);