/******************************************************************************
 * Xen balloon driver - enables returning/claiming memory to/from Xen.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 *
 * Memory hotplug support was written by Daniel Kiper. Work on
 * it was sponsored by Google under Google Summer of Code 2010
 * program. Jeremy Fitzhardinge from Citrix was the mentor for
 * this project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/moduleparam.h>
#include <linux/jiffies.h>

#include <asm/page.h>
#include <asm/tlb.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/mem-reservation.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

static uint __read_mostly balloon_boot_timeout = 180;
module_param(balloon_boot_timeout, uint, 0444);

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static int xen_hotplug_unpopulated;

static struct ctl_table balloon_table[] = {
        {
                .procname       = "hotplug_unpopulated",
                .data           = &xen_hotplug_unpopulated,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = SYSCTL_ZERO,
                .extra2         = SYSCTL_ONE,
        },
};

#else
#define xen_hotplug_unpopulated 0
#endif

/*
 * Use one extent per PAGE_SIZE to avoid breaking down the page into
 * multiple frames.
 */
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
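/*
 * For example: with Xen's fixed 4 KiB page granularity, x86 has
 * XEN_PFN_PER_PAGE == 1 and hence EXTENT_ORDER == 0 (single-page extents),
 * while a 64 KiB kernel page size (e.g. arm64) gives XEN_PFN_PER_PAGE == 16
 * and EXTENT_ORDER == 4.
 */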

/*
 * balloon_thread() state:
 *
 * BP_DONE: done or nothing to do,
 * BP_WAIT: wait to be rescheduled,
 * BP_EAGAIN: error, go to sleep,
 * BP_ECANCELED: error, balloon operation canceled.
 */

static enum bp_state {
        BP_DONE,
        BP_WAIT,
        BP_EAGAIN,
        BP_ECANCELED
} balloon_state = BP_DONE;

/* Main waiting point for xen-balloon thread. */
static DECLARE_WAIT_QUEUE_HEAD(balloon_thread_wq);

static DEFINE_MUTEX(balloon_mutex);

struct balloon_stats balloon_stats;
EXPORT_SYMBOL_GPL(balloon_stats);

/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(xen_pfn_t)];


/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);
static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);

/* When ballooning out (allocating memory to return to Xen) we don't really
   want the kernel to try too hard since that can trigger the oom killer. */
#define GFP_BALLOON \
        (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)

/* balloon_append: add the given page to the balloon. */
static void balloon_append(struct page *page)
{
        __SetPageOffline(page);

        /* Lowmem is re-populated first, so highmem pages go at list tail. */
        if (PageHighMem(page)) {
                list_add_tail(&page->lru, &ballooned_pages);
                balloon_stats.balloon_high++;
        } else {
                list_add(&page->lru, &ballooned_pages);
                balloon_stats.balloon_low++;
        }
        wake_up(&balloon_wq);
}

/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
static struct page *balloon_retrieve(bool require_lowmem)
{
        struct page *page;

        if (list_empty(&ballooned_pages))
                return NULL;

        page = list_entry(ballooned_pages.next, struct page, lru);
        if (require_lowmem && PageHighMem(page))
                return NULL;
        list_del(&page->lru);

        if (PageHighMem(page))
                balloon_stats.balloon_high--;
        else
                balloon_stats.balloon_low--;

        __ClearPageOffline(page);
        return page;
}

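/* Return the page after @page on the ballooned list, or NULL at the end. */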
static struct page *balloon_next_page(struct page *page)
{
        struct list_head *next = page->lru.next;
        if (next == &ballooned_pages)
                return NULL;
        return list_entry(next, struct page, lru);
}

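/*
 * Adjust the retry bookkeeping after a ballooning attempt: reset the delay
 * on BP_DONE, double it (capped at max_schedule_delay) and switch to
 * BP_EAGAIN on failure, and give up with BP_ECANCELED once retry_count
 * exceeds max_retry_count (unless retries are unlimited).
 */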
static void update_schedule(void)
{
        if (balloon_state == BP_WAIT || balloon_state == BP_ECANCELED)
                return;

        if (balloon_state == BP_DONE) {
                balloon_stats.schedule_delay = 1;
                balloon_stats.retry_count = 1;
                return;
        }

        ++balloon_stats.retry_count;

        if (balloon_stats.max_retry_count != RETRY_UNLIMITED &&
                        balloon_stats.retry_count > balloon_stats.max_retry_count) {
                balloon_stats.schedule_delay = 1;
                balloon_stats.retry_count = 1;
                balloon_state = BP_ECANCELED;
                return;
        }

        balloon_stats.schedule_delay <<= 1;

        if (balloon_stats.schedule_delay > balloon_stats.max_schedule_delay)
                balloon_stats.schedule_delay = balloon_stats.max_schedule_delay;

        balloon_state = BP_EAGAIN;
}

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
static void release_memory_resource(struct resource *resource)
{
        if (!resource)
                return;

        /*
         * No need to reset region to identity mapped since we now
         * know that no I/O can be in this region
         */
        release_resource(resource);
        kfree(resource);
}

static struct resource *additional_memory_resource(phys_addr_t size)
{
        struct resource *res;
        int ret;

        res = kzalloc(sizeof(*res), GFP_KERNEL);
        if (!res)
                return NULL;

        res->name = "System RAM";
        res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

        ret = allocate_resource(&iomem_resource, res,
                                size, 0, -1,
                                PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
        if (ret < 0) {
                pr_err("Cannot allocate new System RAM resource\n");
                kfree(res);
                return NULL;
        }

        return res;
}

static enum bp_state reserve_additional_memory(void)
{
        long credit;
        struct resource *resource;
        int nid, rc;
        unsigned long balloon_hotplug;

        credit = balloon_stats.target_pages + balloon_stats.target_unpopulated
                - balloon_stats.total_pages;

        /*
         * Already hotplugged enough pages?  Wait for them to be
         * onlined.
         */
        if (credit <= 0)
                return BP_WAIT;

        balloon_hotplug = round_up(credit, PAGES_PER_SECTION);

        resource = additional_memory_resource(balloon_hotplug * PAGE_SIZE);
        if (!resource)
                goto err;

        nid = memory_add_physaddr_to_nid(resource->start);

#ifdef CONFIG_XEN_HAVE_PVMMU
        /*
         * We don't support PV MMU when Linux and Xen are using
         * different page granularities.
         */
        BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

        /*
         * add_memory() will build page tables for the new memory, so
         * the p2m must contain invalid entries to ensure the correct
         * non-present PTEs will be written.
         *
         * If a failure occurs, the original (identity) p2m entries
         * are not restored since this region is now known not to
         * conflict with any devices.
         */
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                unsigned long pfn, i;

                pfn = PFN_DOWN(resource->start);
                for (i = 0; i < balloon_hotplug; i++) {
                        if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
                                pr_warn("set_phys_to_machine() failed, no memory added\n");
                                goto err;
                        }
                }
        }
#endif

        /*
         * add_memory_resource() will call online_pages(), which in turn
         * will call the xen_online_page() callback, causing a deadlock
         * if we don't release balloon_mutex here. Unlocking here is safe
         * because the callers drop the mutex before trying again.
         */
        mutex_unlock(&balloon_mutex);
        /* add_memory_resource() requires the device_hotplug lock */
        lock_device_hotplug();
        rc = add_memory_resource(nid, resource, MHP_MERGE_RESOURCE);
        unlock_device_hotplug();
        mutex_lock(&balloon_mutex);

        if (rc) {
                pr_warn("Cannot add additional memory (%i)\n", rc);
                goto err;
        }

        balloon_stats.total_pages += balloon_hotplug;

        return BP_WAIT;
  err:
        release_memory_resource(resource);
        return BP_ECANCELED;
}

static void xen_online_page(struct page *page, unsigned int order)
{
        unsigned long i, size = (1 << order);
        unsigned long start_pfn = page_to_pfn(page);
        struct page *p;

        pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
        mutex_lock(&balloon_mutex);
        for (i = 0; i < size; i++) {
                p = pfn_to_page(start_pfn + i);
                balloon_append(p);
        }
        mutex_unlock(&balloon_mutex);
}

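/* Wake the balloon thread once hotplugged memory has come online. */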
static int xen_memory_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
        if (val == MEM_ONLINE)
                wake_up(&balloon_thread_wq);

        return NOTIFY_OK;
}

static struct notifier_block xen_memory_nb = {
        .notifier_call = xen_memory_notifier,
        .priority = 0
};
#else
static enum bp_state reserve_additional_memory(void)
{
        balloon_stats.target_pages = balloon_stats.current_pages +
                                     balloon_stats.target_unpopulated;
        return BP_ECANCELED;
}
#endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */

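/*
 * Pages still to be transferred: positive means the domain should grow
 * (reclaim pages from Xen), negative means it should shrink (balloon
 * pages out to Xen).
 */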
static long current_credit(void)
{
        return balloon_stats.target_pages - balloon_stats.current_pages;
}

static bool balloon_is_inflated(void)
{
        return balloon_stats.balloon_low || balloon_stats.balloon_high;
}

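/*
 * Give up to @nr_pages ballooned pages back to the domain: batch their
 * frame numbers into frame_list, ask Xen to populate them, and release
 * each successfully populated page to the page allocator.
 */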
static enum bp_state increase_reservation(unsigned long nr_pages)
{
        int rc;
        unsigned long i;
        struct page   *page;

        if (nr_pages > ARRAY_SIZE(frame_list))
                nr_pages = ARRAY_SIZE(frame_list);

        page = list_first_entry_or_null(&ballooned_pages, struct page, lru);
        for (i = 0; i < nr_pages; i++) {
                if (!page) {
                        nr_pages = i;
                        break;
                }

                frame_list[i] = page_to_xen_pfn(page);
                page = balloon_next_page(page);
        }

        rc = xenmem_reservation_increase(nr_pages, frame_list);
        if (rc <= 0)
                return BP_EAGAIN;

        for (i = 0; i < rc; i++) {
                page = balloon_retrieve(false);
                BUG_ON(page == NULL);

                xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);

                /* Relinquish the page back to the allocator. */
                free_reserved_page(page);
        }

        balloon_stats.current_pages += rc;

        return BP_DONE;
}

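/*
 * Balloon out up to @nr_pages pages: allocate them from the page
 * allocator, scrub them, tear down their kernel mappings and p2m
 * entries where needed, hand the frames back to Xen, and queue the
 * struct pages on the ballooned list.
 */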
static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
{
        enum bp_state state = BP_DONE;
        unsigned long i;
        struct page *page, *tmp;
        int ret;
        LIST_HEAD(pages);

        if (nr_pages > ARRAY_SIZE(frame_list))
                nr_pages = ARRAY_SIZE(frame_list);

        for (i = 0; i < nr_pages; i++) {
                page = alloc_page(gfp);
                if (page == NULL) {
                        nr_pages = i;
                        state = BP_EAGAIN;
                        break;
                }
                adjust_managed_page_count(page, -1);
                xenmem_reservation_scrub_page(page);
                list_add(&page->lru, &pages);
        }

        /*
         * Ensure that ballooned highmem pages don't have kmaps.
         *
         * Do this before changing the p2m as kmap_flush_unused()
         * reads PTEs to obtain pages (and hence needs the original
         * p2m entry).
         */
        kmap_flush_unused();

        /*
         * Setup the frame, update direct mapping, invalidate P2M,
         * and add to balloon.
         */
        i = 0;
        list_for_each_entry_safe(page, tmp, &pages, lru) {
                frame_list[i++] = xen_page_to_gfn(page);

                xenmem_reservation_va_mapping_reset(1, &page);

                list_del(&page->lru);

                balloon_append(page);
        }

        flush_tlb_all();

        ret = xenmem_reservation_decrease(nr_pages, frame_list);
        BUG_ON(ret != nr_pages);

        balloon_stats.current_pages -= nr_pages;

        return state;
}

/*
 * Stop waiting if either state is BP_DONE and ballooning action is
 * needed, or if the credit has changed while state is not BP_DONE.
 */
static bool balloon_thread_cond(long credit)
{
        if (balloon_state == BP_DONE)
                credit = 0;

        return current_credit() != credit || kthread_should_stop();
}

/*
 * As this is a kthread it is guaranteed to run as a single instance only.
 * We may of course race with updates of the target counts (which are
 * protected by the balloon lock), or with changes to the Xen hard limit,
 * but we will recover from these in time.
 */
static int balloon_thread(void *unused)
{
        long credit;
        unsigned long timeout;

        set_freezable();
        for (;;) {
                switch (balloon_state) {
                case BP_DONE:
                case BP_ECANCELED:
                        timeout = 3600 * HZ;
                        break;
                case BP_EAGAIN:
                        timeout = balloon_stats.schedule_delay * HZ;
                        break;
                case BP_WAIT:
                        timeout = HZ;
                        break;
                }

                credit = current_credit();

                wait_event_freezable_timeout(balloon_thread_wq,
                        balloon_thread_cond(credit), timeout);

                if (kthread_should_stop())
                        return 0;

                mutex_lock(&balloon_mutex);

                credit = current_credit();

                if (credit > 0) {
                        if (balloon_is_inflated())
                                balloon_state = increase_reservation(credit);
                        else
                                balloon_state = reserve_additional_memory();
                }

                if (credit < 0) {
                        long n_pages;

                        n_pages = min(-credit, si_mem_available());
                        balloon_state = decrease_reservation(n_pages,
                                                             GFP_BALLOON);
                        if (balloon_state == BP_DONE && n_pages != -credit &&
                            n_pages < totalreserve_pages)
                                balloon_state = BP_EAGAIN;
                }

                update_schedule();

                mutex_unlock(&balloon_mutex);

                cond_resched();
        }
}

/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
        /* No need for a lock; this is not a read-modify-write update. */
        balloon_stats.target_pages = target;
        wake_up(&balloon_thread_wq);
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);

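/*
 * Grow the pool of ballooned pages by @nr_pages. With hotplug_unpopulated
 * enabled this first tries to hotplug unpopulated memory and waits for
 * pages to appear on the ballooned list; otherwise (or on cancel) it
 * balloons out freshly allocated pages. Called with balloon_mutex held.
 */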
static int add_ballooned_pages(unsigned int nr_pages)
{
        enum bp_state st;

        if (xen_hotplug_unpopulated) {
                st = reserve_additional_memory();
                if (st != BP_ECANCELED) {
                        int rc;

                        mutex_unlock(&balloon_mutex);
                        rc = wait_event_interruptible(balloon_wq,
                                   !list_empty(&ballooned_pages));
                        mutex_lock(&balloon_mutex);
                        return rc ? -ENOMEM : 0;
                }
        }

        if (si_mem_available() < nr_pages)
                return -ENOMEM;

        st = decrease_reservation(nr_pages, GFP_USER);
        if (st != BP_DONE)
                return -ENOMEM;

        return 0;
}

/**
 * xen_alloc_ballooned_pages - get pages that have been ballooned out
 * @nr_pages: Number of pages to get
 * @pages: pages returned
 *
 * Return: 0 on success, error otherwise.
 */
int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
        unsigned int pgno = 0;
        struct page *page;
        int ret;

        mutex_lock(&balloon_mutex);

        balloon_stats.target_unpopulated += nr_pages;

        while (pgno < nr_pages) {
                page = balloon_retrieve(true);
                if (page) {
                        pages[pgno++] = page;
#ifdef CONFIG_XEN_HAVE_PVMMU
                        /*
                         * We don't support PV MMU when Linux and Xen are
                         * using different page granularities.
                         */
                        BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

                        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                                ret = xen_alloc_p2m_entry(page_to_pfn(page));
                                if (ret < 0)
                                        goto out_undo;
                        }
#endif
                } else {
                        ret = add_ballooned_pages(nr_pages - pgno);
                        if (ret < 0)
                                goto out_undo;
                }
        }
        mutex_unlock(&balloon_mutex);
        return 0;
 out_undo:
        mutex_unlock(&balloon_mutex);
        xen_free_ballooned_pages(pgno, pages);
        /*
         * NB: xen_free_ballooned_pages() will only subtract pgno pages, but
         * since target_unpopulated was incremented by nr_pages at the start
         * we need to remove the remaining ones as well, or the accounting
         * will be wrong.
         */
        balloon_stats.target_unpopulated -= nr_pages - pgno;
        return ret;
}
EXPORT_SYMBOL(xen_alloc_ballooned_pages);

/**
 * xen_free_ballooned_pages - return pages retrieved with xen_alloc_ballooned_pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
        unsigned int i;

        mutex_lock(&balloon_mutex);

        for (i = 0; i < nr_pages; i++) {
                if (pages[i])
                        balloon_append(pages[i]);
        }

        balloon_stats.target_unpopulated -= nr_pages;

        /* The balloon may be too large now. Shrink it if needed. */
        if (current_credit())
                wake_up(&balloon_thread_wq);

        mutex_unlock(&balloon_mutex);
}
EXPORT_SYMBOL(xen_free_ballooned_pages);

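/*
 * Seed the balloon with the unpopulated "extra" memory regions the
 * hypervisor set aside at boot, clamped to max_pfn so a 'mem=' limit is
 * honoured.
 */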
static void __init balloon_add_regions(void)
{
        unsigned long start_pfn, pages;
        unsigned long pfn, extra_pfn_end;
        unsigned int i;

        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
                pages = xen_extra_mem[i].n_pfns;
                if (!pages)
                        continue;

                start_pfn = xen_extra_mem[i].start_pfn;

                /*
                 * If the amount of usable memory has been limited (e.g., with
                 * the 'mem' command line parameter), don't add pages beyond
                 * this limit.
                 */
                extra_pfn_end = min(max_pfn, start_pfn + pages);

                for (pfn = start_pfn; pfn < extra_pfn_end; pfn++)
                        balloon_append(pfn_to_page(pfn));

                balloon_stats.total_pages += extra_pfn_end - start_pfn;
        }
}

static int __init balloon_init(void)
{
        struct task_struct *task;

        if (!xen_domain())
                return -ENODEV;

        pr_info("Initialising balloon driver\n");

#ifdef CONFIG_XEN_PV
        balloon_stats.current_pages = xen_pv_domain()
                ? min(xen_start_info->nr_pages - xen_released_pages, max_pfn)
                : get_num_physpages();
#else
        balloon_stats.current_pages = get_num_physpages();
#endif
        balloon_stats.target_pages  = balloon_stats.current_pages;
        balloon_stats.balloon_low   = 0;
        balloon_stats.balloon_high  = 0;
        balloon_stats.total_pages   = balloon_stats.current_pages;

        balloon_stats.schedule_delay = 1;
        balloon_stats.max_schedule_delay = 32;
        balloon_stats.retry_count = 1;
        balloon_stats.max_retry_count = 4;

#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
        set_online_page_callback(&xen_online_page);
        register_memory_notifier(&xen_memory_nb);
        register_sysctl_init("xen/balloon", balloon_table);
#endif

        balloon_add_regions();

        task = kthread_run(balloon_thread, NULL, "xen-balloon");
        if (IS_ERR(task)) {
                pr_err("xen-balloon thread could not be started, ballooning will not work!\n");
                return PTR_ERR(task);
        }

        /* Init the xen-balloon driver. */
        xen_balloon_init();

        return 0;
}
subsys_initcall(balloon_init);

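/*
 * Wait for the initial balloon-down to reach its target before boot
 * completes (PV guests don't need this). If ballooning was canceled and
 * no progress has been made for balloon_boot_timeout seconds, the boot
 * is considered failed and we panic.
 */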
static int __init balloon_wait_finish(void)
{
        long credit, last_credit = 0;
        unsigned long last_changed = 0;

        if (!xen_domain())
                return -ENODEV;

        /* PV guests don't need to wait. */
        if (xen_pv_domain() || !current_credit())
                return 0;

        pr_notice("Waiting for initial ballooning down to finish.\n");

        while ((credit = current_credit()) < 0) {
                if (credit != last_credit) {
                        last_changed = jiffies;
                        last_credit = credit;
                }
                if (balloon_state == BP_ECANCELED) {
                        pr_warn_once("Initial ballooning failed, %ld pages need to be freed.\n",
                                     -credit);
                        if (time_is_before_eq_jiffies(last_changed + HZ * balloon_boot_timeout))
                                panic("Initial ballooning failed!\n");
                }

                schedule_timeout_interruptible(HZ / 10);
        }

        pr_notice("Initial ballooning down finished.\n");

        return 0;
}
late_initcall_sync(balloon_wait_finish);