select ARCH_SUPPORTS_NUMA_BALANCING
select ARCH_SUPPORTS_PAGE_TABLE_CHECK
select ARCH_SUPPORTS_PER_VMA_LOCK
+ select ARCH_SUPPORTS_HUGE_PFNMAP if TRANSPARENT_HUGEPAGE
+ select ARCH_SUPPORTS_RT
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
select ARCH_WANT_DEFAULT_BPF_JIT
generic-y += parport.h
generic-y += early_ioremap.h
generic-y += qrwlock.h
-generic-y += qspinlock.h
generic-y += user.h
generic-y += ioctl.h
+ generic-y += mmzone.h
generic-y += statfs.h
generic-y += param.h
unsigned long len, unsigned long pgoff, unsigned long flags,
vm_flags_t vm_flags);
- bool can_split_folio(struct folio *folio, int *pextra_pins);
+ bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order);
+int min_order_for_split(struct folio *folio);
+int split_folio_to_list(struct folio *folio, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
- return split_huge_page_to_list_to_order(page, NULL, 0);
+ struct folio *folio = page_folio(page);
+ int ret = min_order_for_split(folio);
+
+ if (ret < 0)
+ return ret;
+
+ /*
+ * split_huge_page() locks the page before splitting and
+ * expects the same page that has been split to be locked when
+ * returned. split_folio(page_folio(page)) cannot be used here
+ * because it converts the page to folio and passes the head
+ * page to be split.
+ */
+ return split_huge_page_to_list_to_order(page, NULL, ret);
}
- void deferred_split_folio(struct folio *folio);
+ void deferred_split_folio(struct folio *folio, bool partially_mapped);
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long address, bool freeze, struct folio *folio);
{
return 0;
}
- static inline void deferred_split_folio(struct folio *folio) {}
+
+static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
+{
+ return 0;
+}
+
+ static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
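
Illustrative sketch, not part of the patch: the new min_order_for_split() and split_folio_to_list() declarations are meant to be combined with split_huge_page_to_list_to_order() the same way the rewritten split_huge_page() inline above does. A hypothetical caller that splits an already-locked large folio onto a list while honoring the minimum order its mapping supports might look like this (the helper name and locking assumptions are not from the patch):

static int split_locked_folio(struct folio *folio, struct list_head *list)
{
        int min_order;

        /* Small folios have nothing to split. */
        if (!folio_test_large(folio))
                return 0;

        /* Lowest order the split may produce (0 for anonymous folios). */
        min_order = min_order_for_split(folio);
        if (min_order < 0)
                return min_order;

        return split_huge_page_to_list_to_order(&folio->page, list, min_order);
}
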
extern int mmap_rnd_compat_bits __read_mostly;
#endif
+#ifndef PHYSMEM_END
+# ifdef MAX_PHYSMEM_BITS
+# define PHYSMEM_END ((1ULL << MAX_PHYSMEM_BITS) - 1)
+# else
+# define PHYSMEM_END (-1ULL)
+# endif
+#endif
+
#include <asm/page.h>
#include <asm/processor.h>
int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
+#ifdef CONFIG_64BIT
+int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
+#else
+static inline int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
+{
+ /* noop on 32 bit */
+ return 0;
+}
+#endif
+
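
Purely illustrative, hypothetical caller (not from the patch): with the declaration above, 64-bit in-kernel users can seal an existing mapping directly, while 32-bit builds compile against the no-op stub.

static int seal_fixed_mapping(unsigned long start, size_t len)
{
        /*
         * The flags argument is reserved by the mseal(2) interface and
         * must currently be zero. On !CONFIG_64BIT kernels the inline
         * stub above makes this call a successful no-op.
         */
        return do_mseal(start, len, 0);
}
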
+ #ifdef CONFIG_MEM_ALLOC_PROFILING
+ static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
+ {
+ int i;
+ struct alloc_tag *tag;
+ unsigned int nr_pages = 1 << new_order;
+
+ if (!mem_alloc_profiling_enabled())
+ return;
+
+ tag = pgalloc_tag_get(&folio->page);
+ if (!tag)
+ return;
+
+ for (i = nr_pages; i < (1 << old_order); i += nr_pages) {
+ union codetag_ref *ref = get_page_tag_ref(folio_page(folio, i));
+
+ if (ref) {
+ /* Set new reference to point to the original tag */
+ alloc_tag_ref_set(ref, tag);
+ put_page_tag_ref(ref);
+ }
+ }
+ }
+
+ static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
+ {
+ struct alloc_tag *tag;
+ union codetag_ref *ref;
+
+ tag = pgalloc_tag_get(&old->page);
+ if (!tag)
+ return;
+
+ ref = get_page_tag_ref(&new->page);
+ if (!ref)
+ return;
+
+ /* Clear the old ref to the original allocation tag. */
+ clear_page_tag_ref(&old->page);
+ /* Decrement the counters of the tag on get_new_folio. */
+ alloc_tag_sub(ref, folio_nr_pages(new));
+
+ __alloc_tag_ref_set(ref, tag);
+
+ put_page_tag_ref(ref);
+ }
+ #else /* !CONFIG_MEM_ALLOC_PROFILING */
+ static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
+ {
+ }
+
+ static inline void pgalloc_tag_copy(struct folio *new, struct folio *old)
+ {
+ }
+ #endif /* CONFIG_MEM_ALLOC_PROFILING */
+
#endif /* _LINUX_MM_H */
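
Illustration only, hypothetical call sites (not from the patch): the two helpers above keep CONFIG_MEM_ALLOC_PROFILING data attached to the original allocation site, pgalloc_tag_split() after a large folio of old_order has been carved into pieces of new_order, and pgalloc_tag_copy() when a folio's contents move to a replacement folio.

/*
 * After a split, every new head page (at page offsets that are
 * multiples of 1 << new_order inside the old folio) gains a reference
 * to the original allocation tag, so profiling keeps charging the
 * original caller rather than the code doing the split.
 */
static void retag_after_split(struct folio *folio, int old_order, int new_order)
{
        pgalloc_tag_split(folio, old_order, new_order);
}

/*
 * When old's contents have been copied into new, move the tag
 * reference as well so the accounted bytes follow the data.
 */
static void retag_after_replace(struct folio *new, struct folio *old)
{
        pgalloc_tag_copy(new, old);
}
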
__def_pageflag_names \
) : "none"
- #define DEF_PAGETYPE_NAME(_name) { PG_##_name, __stringify(_name) }
-
- #define __def_pagetype_names \
- DEF_PAGETYPE_NAME(slab), \
- DEF_PAGETYPE_NAME(hugetlb), \
- DEF_PAGETYPE_NAME(offline), \
- DEF_PAGETYPE_NAME(guard), \
- DEF_PAGETYPE_NAME(table), \
- DEF_PAGETYPE_NAME(buddy)
-
#if defined(CONFIG_X86)
#define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat" }
-#elif defined(CONFIG_PPC)
+#elif defined(CONFIG_PPC64)
#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao" }
#elif defined(CONFIG_PARISC)
#define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup" }
#ifdef CONFIG_GET_FREE_REGION
#define GFR_DESCENDING (1UL << 0)
#define GFR_REQUEST_REGION (1UL << 1)
- #define GFR_DEFAULT_ALIGN (1UL << PA_SECTION_SHIFT)
+ #ifdef PA_SECTION_SHIFT
+ #define GFR_DEFAULT_ALIGN (1UL << PA_SECTION_SHIFT)
+ #else
+ #define GFR_DEFAULT_ALIGN PAGE_SIZE
+ #endif
-#ifdef MAX_PHYSMEM_BITS
-#define MAX_PHYS_ADDR ((1ULL << MAX_PHYSMEM_BITS) - 1)
-#else
-#define MAX_PHYS_ADDR (-1ULL)
-#endif
-
static resource_size_t gfr_start(struct resource *base, resource_size_t size,
resource_size_t align, unsigned long flags)
{
*/
for (addr = vaddr_start; addr < vaddr_end; addr += PAGE_SIZE) {
struct vm_area_struct *vma = vma_lookup(mm, addr);
- struct page *page;
+ struct folio_walk fw;
struct folio *folio;
+ struct address_space *mapping;
+ unsigned int target_order = new_order;
if (!vma)
break;
if (!folio_trylock(folio))
goto next;
+ folio_get(folio);
+ folio_walk_end(&fw, vma);
- if (!split_folio_to_order(folio, new_order))
+ if (!folio_test_anon(folio) && folio->mapping != mapping)
+ goto unlock;
+
+ if (!split_folio_to_order(folio, target_order))
split++;
+unlock:
+
folio_unlock(folio);
- next:
folio_put(folio);
+
+ cond_resched();
+ continue;
+ next:
+ folio_walk_end(&fw, vma);
cond_resched();
}
mmap_read_unlock(mm);