// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */

#include <linux/freezer.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include "driver.h"
#include "encl.h"
#include "encls.h"

struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
static int sgx_nr_epc_sections;
static struct task_struct *ksgxd_tsk;
static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq);

/*
 * These variables are part of the state of the reclaimer, and must be accessed
 * with sgx_reclaimer_lock acquired.
 */
static LIST_HEAD(sgx_active_page_list);

static DEFINE_SPINLOCK(sgx_reclaimer_lock);

/*
 * Reset dirty EPC pages to uninitialized state. Laundry can be left with SECS
 * pages whose child pages blocked EREMOVE.
 */
static void sgx_sanitize_section(struct sgx_epc_section *section)
{
	struct sgx_epc_page *page;
	LIST_HEAD(dirty);
	int ret;

	/* init_laundry_list is thread-local, no need for a lock: */
	while (!list_empty(&section->init_laundry_list)) {
		if (kthread_should_stop())
			return;

		/* needed for access to ->page_list: */
		spin_lock(&section->lock);

		page = list_first_entry(&section->init_laundry_list,
					struct sgx_epc_page, list);

		ret = __eremove(sgx_get_epc_virt_addr(page));
		if (!ret)
			list_move(&page->list, &section->page_list);
		else
			list_move_tail(&page->list, &dirty);

		spin_unlock(&section->lock);

		cond_resched();
	}

	list_splice(&dirty, &section->init_laundry_list);
}

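/*
 * Check whether a reclaim candidate was accessed since the last scan by
 * testing and clearing the young bit in every mm that maps the page. Returns
 * true when no mm has accessed the page, i.e. it has aged out and can be
 * reclaimed.
 */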
static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page)
{
	struct sgx_encl_page *page = epc_page->owner;
	struct sgx_encl *encl = page->encl;
	struct sgx_encl_mm *encl_mm;
	bool ret = true;
	int idx;

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
		if (!mmget_not_zero(encl_mm->mm))
			continue;

		mmap_read_lock(encl_mm->mm);
		ret = !sgx_encl_test_and_clear_young(encl_mm->mm, page);
		mmap_read_unlock(encl_mm->mm);

		mmput_async(encl_mm->mm);

		if (!ret)
			break;
	}

	srcu_read_unlock(&encl->srcu, idx);

	return ret;
}

static void sgx_reclaimer_block(struct sgx_epc_page *epc_page)
{
	struct sgx_encl_page *page = epc_page->owner;
	unsigned long addr = page->desc & PAGE_MASK;
	struct sgx_encl *encl = page->encl;
	unsigned long mm_list_version;
	struct sgx_encl_mm *encl_mm;
	struct vm_area_struct *vma;
	int idx, ret;

	do {
		mm_list_version = encl->mm_list_version;

		/* Pairs with smp_rmb() in sgx_encl_mm_add(). */
		smp_rmb();

		idx = srcu_read_lock(&encl->srcu);

		list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
			if (!mmget_not_zero(encl_mm->mm))
				continue;

			mmap_read_lock(encl_mm->mm);

			ret = sgx_encl_find(encl_mm->mm, addr, &vma);
			if (!ret && encl == vma->vm_private_data)
				zap_vma_ptes(vma, addr, PAGE_SIZE);

			mmap_read_unlock(encl_mm->mm);

			mmput_async(encl_mm->mm);
		}

		srcu_read_unlock(&encl->srcu, idx);
	} while (unlikely(encl->mm_list_version != mm_list_version));

	mutex_lock(&encl->lock);

	ret = __eblock(sgx_get_epc_virt_addr(epc_page));
	if (encls_failed(ret))
		ENCLS_WARN(ret, "EBLOCK");

	mutex_unlock(&encl->lock);
}

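/*
 * Write a single page out with ENCLS[EWB]: the encrypted page contents go to
 * the shmem backing page, the page's crypto metadata (PCMD) to the backing
 * PCMD page, and an anti-rollback version counter to the given VA slot.
 */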
static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot,
			  struct sgx_backing *backing)
{
	struct sgx_pageinfo pginfo;
	int ret;

	pginfo.addr = 0;
	pginfo.secs = 0;

	pginfo.contents = (unsigned long)kmap_atomic(backing->contents);
	pginfo.metadata = (unsigned long)kmap_atomic(backing->pcmd) +
			  backing->pcmd_offset;

	ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot);

	kunmap_atomic((void *)(unsigned long)(pginfo.metadata -
					      backing->pcmd_offset));
	kunmap_atomic((void *)(unsigned long)pginfo.contents);

	return ret;
}

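/*
 * The callback is deliberately empty: the IPI itself forces each target CPU
 * to exit the enclave (an asynchronous exit), which is all that EWB needs.
 */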
static void sgx_ipi_cb(void *info)
{
}

static const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl)
{
	cpumask_t *cpumask = &encl->cpumask;
	struct sgx_encl_mm *encl_mm;
	int idx;

	/*
	 * Can race with sgx_encl_mm_add(), but ETRACK has already been
	 * executed, which means that the CPUs running in the new mm will enter
	 * into the enclave with a fresh epoch.
	 */
	cpumask_clear(cpumask);

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
		if (!mmget_not_zero(encl_mm->mm))
			continue;

		cpumask_or(cpumask, cpumask, mm_cpumask(encl_mm->mm));

		mmput_async(encl_mm->mm);
	}

	srcu_read_unlock(&encl->srcu, idx);

	return cpumask;
}

/*
 * Swap a page out to regular memory after it has been transformed to the
 * blocked state with EBLOCK, which means that it can no longer be referenced
 * (no new TLB entries can be created for it).
 *
 * The first attempt just tries to write the page, assuming that some other
 * thread has already reset the tracking count for threads inside the enclave
 * with ETRACK and the previous thread count has drained to zero. The second
 * attempt issues ETRACK before EWB. If that also fails, we kick all the HW
 * threads out of the enclave and then do the final EWB, which is then
 * guaranteed to succeed.
 */
static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
			 struct sgx_backing *backing)
{
	struct sgx_encl_page *encl_page = epc_page->owner;
	struct sgx_encl *encl = encl_page->encl;
	struct sgx_va_page *va_page;
	unsigned int va_offset;
	void *va_slot;
	int ret;

	encl_page->desc &= ~SGX_ENCL_PAGE_BEING_RECLAIMED;

	va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
				   list);
	va_offset = sgx_alloc_va_slot(va_page);
	va_slot = sgx_get_epc_virt_addr(va_page->epc_page) + va_offset;
	if (sgx_va_page_full(va_page))
		list_move_tail(&va_page->list, &encl->va_pages);

	ret = __sgx_encl_ewb(epc_page, va_slot, backing);
	if (ret == SGX_NOT_TRACKED) {
		ret = __etrack(sgx_get_epc_virt_addr(encl->secs.epc_page));
		if (ret) {
			if (encls_failed(ret))
				ENCLS_WARN(ret, "ETRACK");
		}

		ret = __sgx_encl_ewb(epc_page, va_slot, backing);
		if (ret == SGX_NOT_TRACKED) {
			/*
			 * Slow path, send IPIs to kick cpus out of the
			 * enclave. Note, it's imperative that the cpu
			 * mask is generated *after* ETRACK, else we'll
			 * miss cpus that entered the enclave between
			 * generating the mask and incrementing epoch.
			 */
			on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
					 sgx_ipi_cb, NULL, 1);
			ret = __sgx_encl_ewb(epc_page, va_slot, backing);
		}
	}

	if (ret) {
		if (encls_failed(ret))
			ENCLS_WARN(ret, "EWB");

		sgx_free_va_slot(va_page, va_offset);
	} else {
		encl_page->desc |= va_offset;
		encl_page->va_page = va_page;
	}
}

static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
				struct sgx_backing *backing)
{
	struct sgx_encl_page *encl_page = epc_page->owner;
	struct sgx_encl *encl = encl_page->encl;
	struct sgx_backing secs_backing;
	int ret;

	mutex_lock(&encl->lock);

	sgx_encl_ewb(epc_page, backing);
	encl_page->epc_page = NULL;
	encl->secs_child_cnt--;

	if (!encl->secs_child_cnt && test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) {
		ret = sgx_encl_get_backing(encl, PFN_DOWN(encl->size),
					   &secs_backing);
		if (ret)
			goto out;

		sgx_encl_ewb(encl->secs.epc_page, &secs_backing);

		sgx_free_epc_page(encl->secs.epc_page);
		encl->secs.epc_page = NULL;

		sgx_encl_put_backing(&secs_backing, true);
	}

out:
	mutex_unlock(&encl->lock);
}

/*
 * Take a fixed number of pages from the head of the active page pool and
 * reclaim them to the enclave's private shmem files. Skip pages that have
 * been accessed since the last scan and move them to the tail of the active
 * page pool, so that the pool is scanned in an LRU-like fashion.
 *
 * Pages are processed in batches (currently 16) in order to reduce the number
 * of IPIs and ETRACKs potentially required. sgx_encl_ewb() spreads that cost
 * somewhat with its three-stage EWB pipeline (EWB, ETRACK + EWB and IPI +
 * EWB), but not sufficiently. Reclaiming one page at a time would also be
 * problematic, as it would increase lock contention too much and halt forward
 * progress.
 */
static void sgx_reclaim_pages(void)
{
	struct sgx_epc_page *chunk[SGX_NR_TO_SCAN];
	struct sgx_backing backing[SGX_NR_TO_SCAN];
	struct sgx_epc_section *section;
	struct sgx_encl_page *encl_page;
	struct sgx_epc_page *epc_page;
	pgoff_t page_index;
	int cnt = 0;
	int ret;
	int i;

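	/*
	 * Take up to SGX_NR_TO_SCAN pages off the head of the active list,
	 * pinning each owning enclave so that it cannot be released while
	 * its page is being reclaimed.
	 */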
	spin_lock(&sgx_reclaimer_lock);
	for (i = 0; i < SGX_NR_TO_SCAN; i++) {
		if (list_empty(&sgx_active_page_list))
			break;

		epc_page = list_first_entry(&sgx_active_page_list,
					    struct sgx_epc_page, list);
		list_del_init(&epc_page->list);
		encl_page = epc_page->owner;

		if (kref_get_unless_zero(&encl_page->encl->refcount) != 0)
			chunk[cnt++] = epc_page;
		else
			/*
			 * The owner is freeing the page. No need to add the
			 * page back to the list of reclaimable pages.
			 */
			epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
	}
	spin_unlock(&sgx_reclaimer_lock);

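	/*
	 * Skip pages that were accessed since the last scan or whose backing
	 * storage cannot be set up; everything else is marked as being
	 * reclaimed.
	 */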
	for (i = 0; i < cnt; i++) {
		epc_page = chunk[i];
		encl_page = epc_page->owner;

		if (!sgx_reclaimer_age(epc_page))
			goto skip;

		page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
		ret = sgx_encl_get_backing(encl_page->encl, page_index, &backing[i]);
		if (ret)
			goto skip;

		mutex_lock(&encl_page->encl->lock);
		encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED;
		mutex_unlock(&encl_page->encl->lock);
		continue;

skip:
		spin_lock(&sgx_reclaimer_lock);
		list_add_tail(&epc_page->list, &sgx_active_page_list);
		spin_unlock(&sgx_reclaimer_lock);

		kref_put(&encl_page->encl->refcount, sgx_encl_release);

		chunk[i] = NULL;
	}

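	/* EBLOCK the surviving pages so that no new TLB entries can be created. */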
	for (i = 0; i < cnt; i++) {
		epc_page = chunk[i];
		if (epc_page)
			sgx_reclaimer_block(epc_page);
	}

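	/* Write the pages back and return them to their sections' free lists. */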
	for (i = 0; i < cnt; i++) {
		epc_page = chunk[i];
		if (!epc_page)
			continue;

		encl_page = epc_page->owner;
		sgx_reclaimer_write(epc_page, &backing[i]);
		sgx_encl_put_backing(&backing[i], true);

		kref_put(&encl_page->encl->refcount, sgx_encl_release);
		epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;

		section = &sgx_epc_sections[epc_page->section];
		spin_lock(&section->lock);
		list_add_tail(&epc_page->list, &section->page_list);
		section->free_cnt++;
		spin_unlock(&section->lock);
	}
}

static unsigned long sgx_nr_free_pages(void)
{
	unsigned long cnt = 0;
	int i;

	for (i = 0; i < sgx_nr_epc_sections; i++)
		cnt += sgx_epc_sections[i].free_cnt;

	return cnt;
}

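/*
 * Callers pass SGX_NR_LOW_PAGES (the allocator, to decide whether to wake up
 * ksgxd) or SGX_NR_HIGH_PAGES (ksgxd itself) as the watermark.
 */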
static bool sgx_should_reclaim(unsigned long watermark)
{
	return sgx_nr_free_pages() < watermark &&
	       !list_empty(&sgx_active_page_list);
}

static int ksgxd(void *p)
{
	int i;

	set_freezable();

	/*
	 * Sanitize pages in order to recover from kexec(). The 2nd pass is
	 * required for SECS pages, whose child pages blocked EREMOVE.
	 */
	for (i = 0; i < sgx_nr_epc_sections; i++)
		sgx_sanitize_section(&sgx_epc_sections[i]);

	for (i = 0; i < sgx_nr_epc_sections; i++) {
		sgx_sanitize_section(&sgx_epc_sections[i]);

		/* Should never happen. */
		if (!list_empty(&sgx_epc_sections[i].init_laundry_list))
			WARN(1, "EPC section %d has unsanitized pages.\n", i);
	}

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;

		wait_event_freezable(ksgxd_waitq,
				     kthread_should_stop() ||
				     sgx_should_reclaim(SGX_NR_HIGH_PAGES));

		if (sgx_should_reclaim(SGX_NR_HIGH_PAGES))
			sgx_reclaim_pages();

		cond_resched();
	}

	return 0;
}

static bool __init sgx_page_reclaimer_init(void)
{
	struct task_struct *tsk;

	tsk = kthread_run(ksgxd, NULL, "ksgxd");
	if (IS_ERR(tsk))
		return false;

	ksgxd_tsk = tsk;

	return true;
}

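/* Pop the first free page of the section, if there is one. */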
static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_section *section)
{
	struct sgx_epc_page *page;

	spin_lock(&section->lock);

	if (list_empty(&section->page_list)) {
		spin_unlock(&section->lock);
		return NULL;
	}

	page = list_first_entry(&section->page_list, struct sgx_epc_page, list);
	list_del_init(&page->list);
	section->free_cnt--;

	spin_unlock(&section->lock);
	return page;
}

/**
 * __sgx_alloc_epc_page() - Allocate an EPC page
 *
 * Iterate through the EPC sections and hand a free EPC page to the caller.
 * When a page is no longer needed it must be released with
 * sgx_free_epc_page().
 *
 * Return:
 *   an EPC page,
 *   -errno on error
 */
struct sgx_epc_page *__sgx_alloc_epc_page(void)
{
	struct sgx_epc_section *section;
	struct sgx_epc_page *page;
	int i;

	for (i = 0; i < sgx_nr_epc_sections; i++) {
		section = &sgx_epc_sections[i];

		page = __sgx_alloc_epc_page_from_section(section);
		if (page)
			return page;
	}

	return ERR_PTR(-ENOMEM);
}

/**
 * sgx_mark_page_reclaimable() - Mark a page as reclaimable
 * @page: EPC page
 *
 * Mark a page as reclaimable and add it to the active page list. Pages
 * are automatically removed from the active list when freed.
 */
void sgx_mark_page_reclaimable(struct sgx_epc_page *page)
{
	spin_lock(&sgx_reclaimer_lock);
	page->flags |= SGX_EPC_PAGE_RECLAIMER_TRACKED;
	list_add_tail(&page->list, &sgx_active_page_list);
	spin_unlock(&sgx_reclaimer_lock);
}

/**
 * sgx_unmark_page_reclaimable() - Remove a page from the reclaim list
 * @page: EPC page
 *
 * Clear the reclaimable flag and remove the page from the active page list.
 *
 * Return:
 *   0 on success,
 *   -EBUSY if the page is in the process of being reclaimed
 */
int sgx_unmark_page_reclaimable(struct sgx_epc_page *page)
{
	spin_lock(&sgx_reclaimer_lock);
	if (page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED) {
		/* The page is being reclaimed. */
		if (list_empty(&page->list)) {
			spin_unlock(&sgx_reclaimer_lock);
			return -EBUSY;
		}

		list_del(&page->list);
		page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
	}
	spin_unlock(&sgx_reclaimer_lock);

	return 0;
}

/**
 * sgx_alloc_epc_page() - Allocate an EPC page
 * @owner: the owner of the EPC page
 * @reclaim: reclaim pages if necessary
 *
 * Iterate through the EPC sections and hand a free EPC page to the caller.
 * When a page is no longer needed it must be released with
 * sgx_free_epc_page(). If @reclaim is set to true, pages are reclaimed
 * directly when none are free. No mm must be locked when @reclaim is set to
 * true.
 *
 * Finally, wake up ksgxd when the number of free pages goes below the low
 * watermark before returning to the caller.
 *
 * Return:
 *   an EPC page,
 *   -errno on error
 */
struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
{
	struct sgx_epc_page *page;

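	/*
	 * Retry until a free page shows up: when every section is empty,
	 * reclaim directly (when @reclaim allows) and try again; bail out
	 * when there is nothing left to reclaim, reclaim is not allowed, or
	 * a signal is pending.
	 */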
	for ( ; ; ) {
		page = __sgx_alloc_epc_page();
		if (!IS_ERR(page)) {
			page->owner = owner;
			break;
		}

		if (list_empty(&sgx_active_page_list))
			return ERR_PTR(-ENOMEM);

		if (!reclaim) {
			page = ERR_PTR(-EBUSY);
			break;
		}

		if (signal_pending(current)) {
			page = ERR_PTR(-ERESTARTSYS);
			break;
		}

		sgx_reclaim_pages();
		cond_resched();
	}

	if (sgx_should_reclaim(SGX_NR_LOW_PAGES))
		wake_up(&ksgxd_waitq);

	return page;
}

/**
 * sgx_free_epc_page() - Free an EPC page
 * @page: an EPC page
 *
 * Call EREMOVE for an EPC page and insert it back to the list of free pages.
 */
void sgx_free_epc_page(struct sgx_epc_page *page)
{
	struct sgx_epc_section *section = &sgx_epc_sections[page->section];
	int ret;

	WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED);

	ret = __eremove(sgx_get_epc_virt_addr(page));
	if (WARN_ONCE(ret, "EREMOVE returned %d (0x%x)", ret, ret))
		return;

	spin_lock(&section->lock);
	list_add_tail(&page->list, &section->page_list);
	section->free_cnt++;
	spin_unlock(&section->lock);
}

static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
					 unsigned long index,
					 struct sgx_epc_section *section)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned long i;

	section->virt_addr = memremap(phys_addr, size, MEMREMAP_WB);
	if (!section->virt_addr)
		return false;

	section->pages = vmalloc(nr_pages * sizeof(struct sgx_epc_page));
	if (!section->pages) {
		memunmap(section->virt_addr);
		return false;
	}

	section->phys_addr = phys_addr;
	spin_lock_init(&section->lock);
	INIT_LIST_HEAD(&section->page_list);
	INIT_LIST_HEAD(&section->init_laundry_list);

	for (i = 0; i < nr_pages; i++) {
		section->pages[i].section = index;
		section->pages[i].flags = 0;
		section->pages[i].owner = NULL;
		list_add_tail(&section->pages[i].list, &section->init_laundry_list);
	}

	section->free_cnt = nr_pages;
	return true;
}

/**
 * A section metric is concatenated from two CPUID words: bits 12-31 of @low
 * define bits 12-31 of the metric, and bits 0-19 of @high define bits 32-51
 * of the metric.
 */
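/*
 * For example, low = 0x80001234 and high = 0x00000002 yield
 * (0x80001234 & 0xfffff000) + (0x2ULL << 32) = 0x280001000.
 */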
static inline u64 __init sgx_calc_section_metric(u64 low, u64 high)
{
	return (low & GENMASK_ULL(31, 12)) +
	       ((high & GENMASK_ULL(19, 0)) << 32);
}

static bool __init sgx_page_cache_init(void)
{
	u32 eax, ebx, ecx, edx, type;
	u64 pa, size;
	int i;

	for (i = 0; i < ARRAY_SIZE(sgx_epc_sections); i++) {
		cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC, &eax, &ebx, &ecx, &edx);

		type = eax & SGX_CPUID_EPC_MASK;
		if (type == SGX_CPUID_EPC_INVALID)
			break;

		if (type != SGX_CPUID_EPC_SECTION) {
			pr_err_once("Unknown EPC section type: %u\n", type);
			break;
		}

		pa = sgx_calc_section_metric(eax, ebx);
		size = sgx_calc_section_metric(ecx, edx);

		pr_info("EPC section 0x%llx-0x%llx\n", pa, pa + size - 1);

		if (!sgx_setup_epc_section(pa, size, i, &sgx_epc_sections[i])) {
			pr_err("No free memory for an EPC section\n");
			break;
		}

		sgx_nr_epc_sections++;
	}

	if (!sgx_nr_epc_sections) {
		pr_err("There are zero EPC sections.\n");
		return false;
	}

	return true;
}

static int __init sgx_init(void)
{
	int ret;
	int i;

	if (!cpu_feature_enabled(X86_FEATURE_SGX))
		return -ENODEV;

	if (!sgx_page_cache_init())
		return -ENOMEM;

	if (!sgx_page_reclaimer_init()) {
		ret = -ENOMEM;
		goto err_page_cache;
	}

	ret = sgx_drv_init();
	if (ret)
		goto err_kthread;

	return 0;

err_kthread:
	kthread_stop(ksgxd_tsk);

err_page_cache:
	for (i = 0; i < sgx_nr_epc_sections; i++) {
		vfree(sgx_epc_sections[i].pages);
		memunmap(sgx_epc_sections[i].virt_addr);
	}

	return ret;
}

device_initcall(sgx_init);