// SPDX-License-Identifier: GPL-2.0
/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/init.h>
#include <linux/iscsi_ibft.h>
#include <linux/sched.h>
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/memory_hotplug.h>
#include <linux/acpi.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/numa.h>
#include <asm/idtentry.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"

#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)

/* Memory map would allow PCI passthrough. */
bool xen_pv_pci_possible;

/* E820 map used during setting up memory. */
static struct e820_table xen_e820_table __initdata;

/* Number of initially usable memory pages. */
static unsigned long ini_nr_pages __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
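/*
 * The buffer below fills exactly one page: the three bookkeeping fields
 * plus REMAP_SIZE mfn entries add up to P2M_PER_PAGE unsigned longs.
 */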
static struct {
	unsigned long	next_area_mfn;
	unsigned long	target_pfn;
	unsigned long	size;
	unsigned long	mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);

static void __init xen_parse_512gb(void)
{
	bool val = false;
	char *arg;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
	if (!arg)
		return;

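	/* A bare "xen_512gb_limit" (without '=') enables the limit. */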
	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
	if (!arg)
		val = true;
	else if (kstrtobool(arg + strlen("xen_512gb_limit="), &val))
		return;

	xen_512gb_limit = val;
}

static void __init xen_del_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;
	unsigned long start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start_pfn;
		size_r = xen_extra_mem[i].n_pfns;

		/* Start of region. */
		if (start_r == start_pfn) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].start_pfn += n_pfns;
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start_pfn + n_pfns) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* Mid of region. */
		if (start_pfn > start_r && start_pfn < start_r + size_r) {
			BUG_ON(start_pfn + n_pfns > start_r + size_r);
			xen_extra_mem[i].n_pfns = start_pfn - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
					  (start_pfn + n_pfns));
			break;
		}
	}
	memblock_phys_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (pfn >= xen_extra_mem[i].start_pfn &&
		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].n_pfns)
			continue;
		pfn_s = xen_extra_mem[i].start_pfn;
		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_TYPE_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn <= *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

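/* Hand a single page frame back to the hypervisor; returns 1 on success. */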
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
						      unsigned long end_pfn)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, ini_nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

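	/* Update the kernel's linear-address mapping of the page as well. */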
	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	mfn_save = virt_to_mfn((void *)buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 * 1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 * 2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	if (remap_pfn == 0)
		remap_pfn = ini_nr_pages;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= ini_nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > ini_nr_pages)
			size = ini_nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warn("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
							   cur_pfn + left);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, their VA mappings need to be
	 * zapped.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			native_make_pte(0), 0);

	return remap_pfn;
}

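/*
 * Callback for xen_foreach_remap_area(): count how many pfns of the non-RAM
 * area [start_pfn, end_pfn) are backed by the initial allocation and will
 * therefore need remapping; the running total is carried in remap_pages.
 */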
static unsigned long __init xen_count_remap_pages(
	unsigned long start_pfn, unsigned long end_pfn,
	unsigned long remap_pages)
{
	if (start_pfn >= ini_nr_pages)
		return remap_pages;

	return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn;
}

static unsigned long __init xen_foreach_remap_area(
	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long last_val))
{
	phys_addr_t start = 0;
	unsigned long ret_val = 0;
	const struct e820_entry *entry = xen_e820_table.entries;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then call the provided function
	 * to perform its duty on the non-RAM region.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping. This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_TYPE_RAM || i == xen_e820_table.nr_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_TYPE_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				ret_val = func(start_pfn, end_pfn, ret_val);
			start = end;
		}
	}

	return ret_val;
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn remap to which pfn) is contained in the
 * to be remapped memory itself in a linked list anchored at xen_remap_mfn.
 * This scheme allows remapping the different chunks in arbitrary order while
 * the resulting mapping will be independent of the order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn((void *)buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			xen_update_mem_tables(pfn, xen_remap_buf.mfns[i]);
			remapped++;
			pfn++;
		}
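		/* Coalesce contiguous target ranges before removing them from extra mem. */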
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(pfn_s, len);
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(pfn_s, len);

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);

	xen_do_remap_nonram();
}

static unsigned long __init xen_get_pages_limit(void)
{
	unsigned long limit;

	limit = MAXMEM / PAGE_SIZE;
	if (!xen_initial_domain() && xen_512gb_limit)
		limit = GB(512) / PAGE_SIZE;

	return limit;
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages, limit;
	domid_t domid = DOMID_SELF;
	long ret;

	limit = xen_get_pages_limit();
	max_pages = limit;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, limit);
}

static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_TYPE_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
#ifdef CONFIG_MEMORY_HOTPLUG
		/*
		 * Don't allow adding memory not in E820 map while booting the
		 * system. Once the balloon driver is up it will remove that
		 * restriction again.
		 */
		max_mem_size = end;
#endif
	}

	e820__range_add(start, end - start, type);
}

static void __init xen_ignore_unusable(void)
{
	struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		if (entry->type == E820_TYPE_UNUSABLE)
			entry->type = E820_TYPE_RAM;
	}
}

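/*
 * Return true unless the area [start, start + size) is fully covered by a
 * single RAM entry of the E820 map.
 */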
static bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820_entry *entry;
	unsigned mapcnt;
	phys_addr_t end;

	if (!size)
		return false;

	end = start + size;
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		if (entry->type == E820_TYPE_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;

		entry++;
	}

	return true;
}

/*
 * Find a free area in physical memory not yet reserved and compliant with
 * E820 map.
 * Used to relocate pre-allocated areas like initrd or p2m list which are in
 * conflict with the to be used E820 map.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area which is already reserved for convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned mapcnt;
	phys_addr_t addr, start;
	struct e820_entry *entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++, entry++) {
		if (entry->type != E820_TYPE_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}

/*
 * Swap a non-RAM E820 map entry with RAM above ini_nr_pages.
 * Note that the E820 map is modified accordingly, but the P2M map isn't yet.
 * The adaptation of the P2M must be deferred until page allocation is possible.
 */
static void __init xen_e820_swap_entry_with_ram(struct e820_entry *swap_entry)
{
	struct e820_entry *entry;
	unsigned int mapcnt;
	phys_addr_t mem_end = PFN_PHYS(ini_nr_pages);
	phys_addr_t swap_addr, swap_size, entry_end;

	swap_addr = PAGE_ALIGN_DOWN(swap_entry->addr);
	swap_size = PAGE_ALIGN(swap_entry->addr - swap_addr + swap_entry->size);
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		entry_end = entry->addr + entry->size;
		if (entry->type == E820_TYPE_RAM && entry->size >= swap_size &&
		    entry_end - swap_size >= mem_end) {
			/* Reduce RAM entry by needed space (whole pages). */
			entry->size -= swap_size;

			/* Add new entry at the end of E820 map. */
			entry = xen_e820_table.entries +
				xen_e820_table.nr_entries;
			xen_e820_table.nr_entries++;

			/* Fill new entry (keep size and page offset). */
			entry->type = swap_entry->type;
			entry->addr = entry_end - swap_size +
				      swap_addr - swap_entry->addr;
			entry->size = swap_entry->size;

			/* Convert old entry to RAM, align to pages. */
			swap_entry->type = E820_TYPE_RAM;
			swap_entry->addr = swap_addr;
			swap_entry->size = swap_size;

			/* Remember PFN<->MFN relation for P2M update. */
			xen_add_remap_nonram(swap_addr, entry_end - swap_size,
					     swap_size);

			/* Order E820 table and merge entries. */
			e820__update_table(&xen_e820_table);

			return;
		}

		entry++;
	}

	xen_raw_console_write("No suitable area found for required E820 entry remapping action\n");
	BUG();
}

/*
 * Look for non-RAM memory types in a specific guest physical area and move
 * those away if possible (ACPI NVS only for now).
 */
static void __init xen_e820_resolve_conflicts(phys_addr_t start,
					      phys_addr_t size)
{
	struct e820_entry *entry;
	unsigned int mapcnt;
	phys_addr_t end;

	if (!size)
		return;

	end = start + size;
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		if (entry->addr >= end)
			return;

		if (entry->addr + entry->size > start &&
		    entry->type == E820_TYPE_NVS)
			xen_e820_swap_entry_with_ram(entry);

		entry++;
	}
}

/*
 * Check for an area in physical memory to be usable for non-movable purposes.
 * An area is considered usable if the used E820 map lists it as RAM or some
 * other type which can be moved to higher PFNs while keeping the MFNs.
 * In case the area is not usable, crash the system with an error message.
 */
void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
				   const char *component)
{
	xen_e820_resolve_conflicts(start, size);

	if (!xen_is_e820_reserved(start, size))
		return;

	xen_raw_console_write("Xen hypervisor allocated ");
	xen_raw_console_write(component);
	xen_raw_console_write(" memory conflicts with E820 map\n");
	BUG();
}

/*
 * Like memcpy, but with physical addresses for dest and src.
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

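	/*
	 * Copy in chunks, as early_memremap() can only map a limited number
	 * of pages (NR_FIX_BTMAPS) at a time.
	 */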
	while (n) {
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);
		memcpy(to, from, len);
		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);
		n -= len;
		dest += len;
		src += len;
	}
}

/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		start = __pa(xen_start_info->mfn_list);
		size = PFN_ALIGN(xen_start_info->nr_pages *
				 sizeof(unsigned long));
	} else {
		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
	}

	memblock_reserve(start, size);
	if (!xen_is_e820_reserved(start, size))
		return;

	xen_relocate_p2m();
	memblock_phys_free(start, size);
}

/**
 * xen_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long pfn_s, n_pfns;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	unsigned long maxmem_pages;
	int i;
	int op;

	xen_parse_512gb();
	ini_nr_pages = min(xen_get_pages_limit(), xen_start_info->nr_pages);
	mem_end = PFN_PHYS(ini_nr_pages);

	memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
	set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);

#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
	xen_saved_max_mem_size = max_mem_size;
#endif

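	/* Dom0 gets the host's machine memory map, domUs their pseudo-physical map. */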
	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_table.entries[0].addr = 0ULL;
		xen_e820_table.entries[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_table.entries[0].size += 8ULL << 20;
		xen_e820_table.entries[0].type = E820_TYPE_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_table.nr_entries = memmap.nr_entries;

	if (xen_initial_domain()) {
		/*
		 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
		 * regions, so if we're using the machine memory map leave the
		 * region as RAM as it is in the pseudo-physical map.
		 *
		 * UNUSABLE regions in domUs are not handled and will need
		 * a patch in the future.
		 */
		xen_ignore_unusable();

#ifdef CONFIG_ISCSI_IBFT_FIND
		/* Reserve 0.5 MiB to 1 MiB region so iBFT can be found */
		xen_e820_table.entries[xen_e820_table.nr_entries].addr = IBFT_START;
		xen_e820_table.entries[xen_e820_table.nr_entries].size = IBFT_END - IBFT_START;
		xen_e820_table.entries[xen_e820_table.nr_entries].type = E820_TYPE_RESERVED;
		xen_e820_table.nr_entries++;
#endif
	}

	/* Make sure the Xen-supplied memory map is well-ordered. */
	e820__update_table(&xen_e820_table);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	xen_chk_is_e820_usable(__pa_symbol(_text),
			       __pa_symbol(_end) - __pa_symbol(_text),
			       "kernel");

	/*
	 * Check for a conflict of the xen_start_info memory with the target
	 * E820 map.
	 */
	xen_chk_is_e820_usable(__pa(xen_start_info), sizeof(*xen_start_info),
			       "xen_start_info");

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_foreach_remap_area(xen_count_remap_pages);

	if (max_pages > ini_nr_pages)
		extra_pages += max_pages - ini_nr_pages;

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO factor of
	 * the base size.
	 *
	 * Make sure we have no memory above max_pages, as this area
	 * isn't handled by the p2m management.
	 */
	maxmem_pages = EXTRA_MEM_RATIO * min(ini_nr_pages, PFN_DOWN(MAXMEM));
	extra_pages = min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages);
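
	/*
	 * Walk the E820 map: RAM below mem_end is kept, up to extra_pages of
	 * RAM above it becomes extra memory, any remaining RAM is discarded.
	 */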
	i = 0;
	addr = xen_e820_table.entries[0].addr;
	size = xen_e820_table.entries[0].size;
	while (i < xen_e820_table.nr_entries) {
		bool discard = false;

		chunk_size = size;
		type = xen_e820_table.entries[i].type;

		if (type == E820_TYPE_RESERVED)
			xen_pv_pci_possible = true;

		if (type == E820_TYPE_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				pfn_s = PFN_UP(addr);
				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
				extra_pages -= n_pfns;
				xen_add_extra_mem(pfn_s, n_pfns);
				xen_max_p2m_pfn = pfn_s + n_pfns;
			} else
				discard = true;
		}

		if (!discard)
			xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_table.nr_entries) {
				addr = xen_e820_table.entries[i].addr;
				size = xen_e820_table.entries[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_TYPE_RESERVED);

	e820__update_table(e820_table);

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_phys_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_foreach_remap_area(xen_set_identity_and_remap_chunk);

	pr_info("Released %ld page(s)\n", xen_released_pages);

	return "Xen";
}

static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

void xen_enable_sysenter(void)
{
	if (cpu_feature_enabled(X86_FEATURE_SYSENTER32) &&
	    register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat))
		setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
}

void xen_enable_syscall(void)
{
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_entry_SYSCALL_64);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (cpu_feature_enabled(X86_FEATURE_SYSCALL32) &&
	    register_callback(CALLBACKTYPE_syscall32, xen_entry_SYSCALL_compat))
		setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
}

static void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (register_callback(CALLBACKTYPE_event,
			      xen_asm_exc_xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}