/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "vdso.h"
#include "mmu.h"

#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* E820 map used during setting up memory. */
static struct e820entry xen_e820_map[E820MAX] __initdata;
static u32 xen_e820_map_entries __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
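/* REMAP_SIZE leaves room in the page for the three header fields below. */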
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long next_area_mfn;
	unsigned long target_pfn;
	unsigned long size;
	unsigned long mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

/*
 * The maximum amount of extra memory compared to the base size. The
 * main scaling factor is the size of struct page. At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)

static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);

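/*
 * Parse "xen_512gb_limit" from the Xen-supplied command line: a bare
 * "xen_512gb_limit" enables the limit, while e.g. "xen_512gb_limit=0"
 * disables it.
 */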
static void __init xen_parse_512gb(void)
{
	bool val = false;
	char *arg;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
	if (!arg)
		return;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
	if (!arg)
		val = true;
	else if (strtobool(arg + strlen("xen_512gb_limit="), &val))
		return;

	xen_512gb_limit = val;
}

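/*
 * Record an extra memory region, merging it with an adjacent region if
 * possible, and keep the range reserved in memblock so it is not used
 * before it is actually populated.
 */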
static void __init xen_add_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;

	/*
	 * No need to check for zero size: that should happen rarely and
	 * would only add a new entry which is regarded as unused due to
	 * its zero size.
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].n_pfns == 0) {
			xen_extra_mem[i].start_pfn = start_pfn;
			xen_extra_mem[i].n_pfns = n_pfns;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
		    start_pfn) {
			xen_extra_mem[i].n_pfns += n_pfns;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

static void __init xen_del_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;
	unsigned long start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start_pfn;
		size_r = xen_extra_mem[i].n_pfns;

		/* Start of region. */
		if (start_r == start_pfn) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].start_pfn += n_pfns;
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start_pfn + n_pfns) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* Middle of region. */
		if (start_pfn > start_r && start_pfn < start_r + size_r) {
			BUG_ON(start_pfn + n_pfns > start_r + size_r);
			xen_extra_mem[i].n_pfns = start_pfn - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
					  (start_pfn + n_pfns));
			break;
		}
	}
	memblock_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (pfn >= xen_extra_mem[i].start_pfn &&
		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].n_pfns)
			continue;
		pfn_s = xen_extra_mem[i].start_pfn;
		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820entry *entry = xen_e820_map;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn <= *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

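/* Hand a single page frame back to Xen via decrease_reservation. */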
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
			unsigned long end_pfn, unsigned long nr_pages)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	/* Update kernel mapping, but not for highmem. */
	if (pfn >= PFN_UP(__pa(high_memory - 1)))
		return;

	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
1f3ac86b 332static void __init xen_do_set_identity_and_remap_chunk(
4fbb67e3 333 unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
83d51ab4 334{
1f3ac86b
JG
335 unsigned long buf = (unsigned long)&xen_remap_buf;
336 unsigned long mfn_save, mfn;
4fbb67e3 337 unsigned long ident_pfn_iter, remap_pfn_iter;
1f3ac86b 338 unsigned long ident_end_pfn = start_pfn + size;
4fbb67e3 339 unsigned long left = size;
1f3ac86b 340 unsigned int i, chunk;
4fbb67e3
MR
341
342 WARN_ON(size == 0);
343
344 BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
83d51ab4 345
1f3ac86b 346 mfn_save = virt_to_mfn(buf);
e201bfcc 347
1f3ac86b
JG
	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 * 1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 * 2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
						cur_pfn + left, nr_pages);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, the VA mapping also needs
	 * to be updated to be 1:1.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			mfn_pte(pfn, PAGE_KERNEL_IO), 0);

	return remap_pfn;
}

static void __init xen_set_identity_and_remap(unsigned long nr_pages)
{
	phys_addr_t start = 0;
	unsigned long last_pfn = nr_pages;
	const struct e820entry *entry = xen_e820_map;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then set the 1:1 map and
	 * remap the memory in those non-RAM regions.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping. This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_RAM || i == xen_e820_map_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				last_pfn = xen_set_identity_and_remap_chunk(
					start_pfn, end_pfn, nr_pages,
					last_pfn);
			start = end;
		}
	}

	pr_info("Released %ld page(s)\n", xen_released_pages);
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn to remap to which pfn) is contained in
 * the memory to be remapped itself, in a linked list anchored at
 * xen_remap_mfn. This scheme allows the chunks to be remapped in arbitrary
 * order while keeping the resulting mapping independent of that order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			mfn = xen_remap_buf.mfns[i];
			xen_update_mem_tables(pfn, mfn);
			remapped++;
			pfn++;
		}
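		/*
		 * Coalesce contiguous target ranges so xen_del_extra_mem()
		 * is called once per contiguous range rather than per chunk.
		 */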
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(pfn_s, len);
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}

		mfn = xen_remap_mfn;
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(pfn_s, len);

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}

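/*
 * Highest page number this domain can handle: 64GB worth of pages on
 * 32 bit, otherwise MAXMEM, optionally capped to 512GB for domUs when
 * xen_512gb_limit is set.
 */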
static unsigned long __init xen_get_pages_limit(void)
{
	unsigned long limit;

#ifdef CONFIG_X86_32
	limit = GB(64) / PAGE_SIZE;
#else
	limit = MAXMEM / PAGE_SIZE;
	if (!xen_initial_domain() && xen_512gb_limit)
		limit = GB(512) / PAGE_SIZE;
#endif
	return limit;
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages, limit;
	domid_t domid = DOMID_SELF;
	long ret;

	limit = xen_get_pages_limit();
	max_pages = limit;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, limit);
}

static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
	}

	e820_add_region(start, end - start, type);
}

static void __init xen_ignore_unusable(void)
{
	struct e820entry *entry = xen_e820_map;
	unsigned int i;

	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		if (entry->type == E820_UNUSABLE)
			entry->type = E820_RAM;
	}
}

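/*
 * Count the pages needed for remapping: holes between E820 entries
 * plus all non-RAM regions below max_pfn.
 */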
static unsigned long __init xen_count_remap_pages(unsigned long max_pfn)
{
	unsigned long extra = 0;
	unsigned long start_pfn, end_pfn;
	const struct e820entry *entry = xen_e820_map;
	int i;

	end_pfn = 0;
	for (i = 0; i < xen_e820_map_entries; i++, entry++) {
		start_pfn = PFN_DOWN(entry->addr);
		/* Handle adjacent regions meeting on a non-page boundary. */
		end_pfn = min(end_pfn, start_pfn);

		if (start_pfn >= max_pfn)
			return extra + max_pfn - end_pfn;

		/* Add any holes in map to result. */
		extra += start_pfn - end_pfn;

		end_pfn = PFN_UP(entry->addr + entry->size);
		end_pfn = min(end_pfn, max_pfn);

		if (entry->type != E820_RAM)
			extra += end_pfn - start_pfn;
	}

	return extra;
}

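/*
 * Return true if the range is not completely covered by a single RAM
 * region of the E820 map and hence must be treated as reserved.
 */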
bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820entry *entry;
	unsigned mapcnt;
	phys_addr_t end;

	if (!size)
		return false;

	end = start + size;
	entry = xen_e820_map;

	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++) {
		if (entry->type == E820_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;

		entry++;
	}

	return true;
}

/*
 * Find a free area in physical memory which is not yet reserved and is
 * compliant with the E820 map.
 * Used to relocate pre-allocated areas like the initrd or the p2m list
 * which conflict with the E820 map that is to be used.
 * In case no area is found, return 0. Otherwise return the physical
 * address of the area, which is already reserved for convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned mapcnt;
	phys_addr_t addr, start;
	struct e820entry *entry = xen_e820_map;

	for (mapcnt = 0; mapcnt < xen_e820_map_entries; mapcnt++, entry++) {
		if (entry->type != E820_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}

/*
 * Like memcpy, but with physical addresses for dest and src.
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

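	/*
	 * Copy in chunks no larger than the fixmap window early_memremap()
	 * can map at once (NR_FIX_BTMAPS pages), taking the page offsets
	 * of source and destination into account.
	 */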
	while (n) {
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);
		memcpy(to, from, len);
		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);
		n -= len;
		dest += len;
		src += len;
	}
}

/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

8f5b0c63 722 if (xen_start_info->mfn_list >= __START_KERNEL_map) {
70e61199
JG
723 start = __pa(xen_start_info->mfn_list);
724 size = PFN_ALIGN(xen_start_info->nr_pages *
725 sizeof(unsigned long));
726 } else {
727 start = PFN_PHYS(xen_start_info->first_p2m_pfn);
728 size = PFN_PHYS(xen_start_info->nr_p2m_frames);
729 }
730
731 if (!xen_is_e820_reserved(start, size)) {
732 memblock_reserve(start, size);
8f5b0c63
JG
733 return;
734 }
735
70e61199
JG
736#ifdef CONFIG_X86_32
737 /*
738 * Relocating the p2m on 32 bit system to an arbitrary virtual address
739 * is not supported, so just give up.
740 */
741 xen_raw_console_write("Xen hypervisor allocated p2m list conflicts with E820 map\n");
742 BUG();
743#else
744 xen_relocate_p2m();
745#endif
8f5b0c63
JG
746}
747
/**
 * machine_specific_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn, pfn_s, n_pfns;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	xen_parse_512gb();
	max_pfn = xen_get_pages_limit();
	max_pfn = min(max_pfn, xen_start_info->nr_pages);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_map[0].addr = 0ULL;
		xen_e820_map[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_map[0].size += 8ULL << 20;
		xen_e820_map[0].type = E820_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_map_entries = memmap.nr_entries;

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable();

	/* Make sure the Xen-supplied memory map is well-ordered. */
	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
			  &xen_e820_map_entries);

	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_count_remap_pages(max_pfn);

	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Clamp the amount of extra memory to EXTRA_MEM_RATIO times the
	 * base size. On non-highmem systems, the base size is the full
	 * initial memory allocation; on highmem it is limited to the max
	 * size of lowmem, so that it doesn't get completely filled.
	 *
	 * Make sure we have no memory above max_pages, as this area
	 * isn't handled by the p2m management.
	 *
	 * In principle there could be a problem in lowmem systems if
	 * the initial memory is also very large with respect to
	 * lowmem, but we won't try to deal with that here.
	 */
	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			   extra_pages, max_pages - max_pfn);
dc91c728 828 i = 0;
5097cdf6
JG
829 addr = xen_e820_map[0].addr;
830 size = xen_e820_map[0].size;
69632ecf 831 while (i < xen_e820_map_entries) {
f5775e0b
DV
832 bool discard = false;
833
5097cdf6
JG
834 chunk_size = size;
835 type = xen_e820_map[i].type;
dc91c728
DV
836
837 if (type == E820_RAM) {
838 if (addr < mem_end) {
5097cdf6 839 chunk_size = min(size, mem_end - addr);
dc91c728 840 } else if (extra_pages) {
5097cdf6 841 chunk_size = min(size, PFN_PHYS(extra_pages));
626d7508
JG
842 pfn_s = PFN_UP(addr);
843 n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
844 extra_pages -= n_pfns;
845 xen_add_extra_mem(pfn_s, n_pfns);
846 xen_max_p2m_pfn = pfn_s + n_pfns;
dc91c728 847 } else
f5775e0b 848 discard = true;
3654581e
JF
849 }
850
f5775e0b
DV
851 if (!discard)
852 xen_align_and_add_e820_region(addr, chunk_size, type);
b5b43ced 853
5097cdf6
JG
854 addr += chunk_size;
855 size -= chunk_size;
856 if (size == 0) {
dc91c728 857 i++;
5097cdf6
JG
858 if (i < xen_e820_map_entries) {
859 addr = xen_e820_map[i].addr;
860 size = xen_e820_map[i].size;
861 }
862 }
35ae11fd 863 }
b792c755 864
	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
			E820_RESERVED);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	if (xen_is_e820_reserved(__pa_symbol(_text),
				 __pa_symbol(__bss_stop) - __pa_symbol(_text))) {
		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
		BUG();
	}

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_set_identity_and_remap(max_pfn);

	return "Xen";
}

/*
 * Machine specific memory setup for auto-translated guests.
 */
char * __init xen_auto_xlated_memory_setup(void)
{
	struct xen_memory_map memmap;
	int i;
	int rc;

	memmap.nr_entries = E820MAX;
	set_xen_guest_handle(memmap.buffer, xen_e820_map);

	rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
	if (rc < 0)
		panic("No memory map (%d)\n", rc);

	xen_e820_map_entries = memmap.nr_entries;

	sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map),
			  &xen_e820_map_entries);

	for (i = 0; i < xen_e820_map_entries; i++)
		e820_add_region(xen_e820_map[i].addr, xen_e820_map[i].size,
				xen_e820_map[i].type);

	/* Remove p2m info, it is not needed. */
	xen_start_info->mfn_list = 0;
	xen_start_info->first_p2m_pfn = 0;
	xen_start_info->nr_p2m_frames = 0;

	return "Xen";
}

/*
 * Set the bit indicating "nosegneg" library variants should be used.
 * We only need to bother in pure 32-bit mode; compat 32-bit processes
 * can have un-truncated segments, so wrapping around is allowed.
 */
static void __init fiddle_vdso(void)
{
#ifdef CONFIG_X86_32
	u32 *mask = vdso_image_32.data +
		vdso_image_32.sym_VDSO32_NOTE_MASK;
	*mask |= 1 << VDSO_NOTE_NONEGSEG_BIT;
#endif
}

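/* Register a single callback with Xen, with events masked on entry. */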
static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

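/* Register the sysenter entry point with Xen if the CPU supports it. */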
void xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}

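/* Register the 64-bit (and 32-bit compat) syscall entry points with Xen. */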
void xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}

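/* PV MMU setup: enable VM assists and register the core Xen callbacks. */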
void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
	fiddle_vdso();
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}