Commit | Line | Data |
---|---|---|
ae4c51a5 OA |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | ||
3 | /****************************************************************************** | |
4 | * Xen memory reservation utilities. | |
5 | * | |
6 | * Copyright (c) 2003, B Dragovic | |
7 | * Copyright (c) 2003-2004, M Williamson, K Fraser | |
8 | * Copyright (c) 2005 Dan M. Smith, IBM Corporation | |
9 | * Copyright (c) 2010 Daniel Kiper | |
10 | * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc. | |
11 | */ | |
12 | ||
13 | #include <asm/xen/hypercall.h> | |
14 | ||
15 | #include <xen/interface/memory.h> | |
16 | #include <xen/mem-reservation.h> | |
17 | ||
/*
 * Use one extent per PAGE_SIZE to avoid breaking the page down into
 * multiple frames (i.e. one hypercall extent covers a full Linux page
 * even when Xen's page granularity is smaller).
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
23 | ||
24 | #ifdef CONFIG_XEN_HAVE_PVMMU | |
25 | void __xenmem_reservation_va_mapping_update(unsigned long count, | |
26 | struct page **pages, | |
27 | xen_pfn_t *frames) | |
28 | { | |
29 | int i; | |
30 | ||
31 | for (i = 0; i < count; i++) { | |
32 | struct page *page = pages[i]; | |
33 | unsigned long pfn = page_to_pfn(page); | |
34 | ||
35 | BUG_ON(!page); | |
36 | ||
37 | /* | |
38 | * We don't support PV MMU when Linux and Xen is using | |
39 | * different page granularity. | |
40 | */ | |
41 | BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE); | |
42 | ||
43 | set_phys_to_machine(pfn, frames[i]); | |
44 | ||
45 | /* Link back into the page tables if not highmem. */ | |
46 | if (!PageHighMem(page)) { | |
47 | int ret; | |
48 | ||
49 | ret = HYPERVISOR_update_va_mapping( | |
50 | (unsigned long)__va(pfn << PAGE_SHIFT), | |
51 | mfn_pte(frames[i], PAGE_KERNEL), | |
52 | 0); | |
53 | BUG_ON(ret); | |
54 | } | |
55 | } | |
56 | } | |
57 | EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update); | |
58 | ||
59 | void __xenmem_reservation_va_mapping_reset(unsigned long count, | |
60 | struct page **pages) | |
61 | { | |
62 | int i; | |
63 | ||
64 | for (i = 0; i < count; i++) { | |
65 | struct page *page = pages[i]; | |
66 | unsigned long pfn = page_to_pfn(page); | |
67 | ||
68 | /* | |
69 | * We don't support PV MMU when Linux and Xen are using | |
70 | * different page granularity. | |
71 | */ | |
72 | BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE); | |
73 | ||
74 | if (!PageHighMem(page)) { | |
75 | int ret; | |
76 | ||
77 | ret = HYPERVISOR_update_va_mapping( | |
78 | (unsigned long)__va(pfn << PAGE_SHIFT), | |
79 | __pte_ma(0), 0); | |
80 | BUG_ON(ret); | |
81 | } | |
82 | __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); | |
83 | } | |
84 | } | |
85 | EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset); | |
86 | #endif /* CONFIG_XEN_HAVE_PVMMU */ | |
87 | ||
88 | /* @frames is an array of PFNs */ | |
89 | int xenmem_reservation_increase(int count, xen_pfn_t *frames) | |
90 | { | |
91 | struct xen_memory_reservation reservation = { | |
92 | .address_bits = 0, | |
93 | .extent_order = EXTENT_ORDER, | |
94 | .domid = DOMID_SELF | |
95 | }; | |
96 | ||
97 | /* XENMEM_populate_physmap requires a PFN based on Xen granularity. */ | |
98 | set_xen_guest_handle(reservation.extent_start, frames); | |
99 | reservation.nr_extents = count; | |
100 | return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); | |
101 | } | |
102 | EXPORT_SYMBOL_GPL(xenmem_reservation_increase); | |
103 | ||
104 | /* @frames is an array of GFNs */ | |
105 | int xenmem_reservation_decrease(int count, xen_pfn_t *frames) | |
106 | { | |
107 | struct xen_memory_reservation reservation = { | |
108 | .address_bits = 0, | |
109 | .extent_order = EXTENT_ORDER, | |
110 | .domid = DOMID_SELF | |
111 | }; | |
112 | ||
113 | /* XENMEM_decrease_reservation requires a GFN */ | |
114 | set_xen_guest_handle(reservation.extent_start, frames); | |
115 | reservation.nr_extents = count; | |
116 | return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); | |
117 | } | |
118 | EXPORT_SYMBOL_GPL(xenmem_reservation_decrease); |