/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MEMREMAP_H_
#define _LINUX_MEMREMAP_H_
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/percpu-refcount.h>

#include <asm/pgtable.h>

struct resource;
struct device;

/**
 * struct vmem_altmap - pre-allocated storage for vmemmap_populate
 * @base_pfn: base of the entire dev_pagemap mapping
 * @reserve: pages mapped, but reserved for driver use (relative to @base)
 * @free: free pages set aside in the mapping for memmap storage
 * @align: pages reserved to meet allocation alignments
 * @alloc: track pages consumed, private to vmemmap_populate()
 */
struct vmem_altmap {
	const unsigned long base_pfn;
	const unsigned long reserve;
	unsigned long free;
	unsigned long align;
	unsigned long alloc;
};
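/*
 * Illustrative sketch (not part of this header; the numbers are made-up
 * assumptions): a driver that wants the struct page array (memmap) for its
 * range stored in device memory rather than system RAM can describe the
 * layout with a vmem_altmap and pass it to devm_memremap_pages() below.
 */
static struct vmem_altmap example_altmap = {
	.base_pfn = 0x100000UL,	/* device range starts at 4GiB with 4KiB pages */
	.reserve  = 64,		/* pages kept aside for driver metadata */
	.free	  = 16384,	/* pages offered to vmemmap_populate() for memmap storage */
};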
#ifdef CONFIG_ZONE_DEVICE
struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start);
#else
static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
{
	return NULL;
}
#endif
/*
 * Specialize ZONE_DEVICE memory into multiple types, each having a different
 * usage.
 *
 * MEMORY_DEVICE_HOST:
 * Persistent device memory (pmem): struct page might be allocated in different
 * memory and the architecture might want to perform special actions. It is
 * similar to regular memory, in that the CPU can access it transparently.
 * However, it is likely to have different bandwidth and latency than regular
 * memory. See Documentation/nvdimm/nvdimm.txt for more information.
 *
 * MEMORY_DEVICE_PRIVATE:
 * Device memory that is not directly addressable by the CPU: the CPU can
 * neither read nor write private memory. In this case, we do still have
 * struct pages backing the device memory. Doing so simplifies the
 * implementation, but it is important to remember that there are certain
 * points at which the struct page must be treated as an opaque object, rather
 * than a "normal" struct page.
 *
 * A more complete discussion of unaddressable memory may be found in
 * include/linux/hmm.h and Documentation/vm/hmm.txt.
 *
 * MEMORY_DEVICE_PUBLIC:
 * Device memory that is cache coherent from the device and CPU point of view.
 * This is used on platforms that have an advanced system bus (like CAPI or
 * CCIX). A driver can hotplug the device memory using ZONE_DEVICE with that
 * memory type. Any page of a process can be migrated to such memory. However,
 * no one should be allowed to pin such memory so that it can always be
 * evicted.
 */
enum memory_type {
	MEMORY_DEVICE_HOST = 0,
	MEMORY_DEVICE_PRIVATE,
	MEMORY_DEVICE_PUBLIC,
};
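/*
 * Illustrative sketch (not part of this header): a helper that captures the
 * key distinction described above, i.e. which memory types the CPU can
 * access directly.
 */
static inline bool example_type_is_cpu_addressable(enum memory_type type)
{
	switch (type) {
	case MEMORY_DEVICE_PRIVATE:
		/* the CPU can neither read nor write this memory */
		return false;
	case MEMORY_DEVICE_HOST:
	case MEMORY_DEVICE_PUBLIC:
		/* directly addressable (and, for PUBLIC, cache coherent) */
		return true;
	}
	return true;
}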
/*
 * For MEMORY_DEVICE_PRIVATE we use ZONE_DEVICE and extend it with two
 * callbacks:
 *   page_fault()
 *   page_free()
 *
 * Additional notes about MEMORY_DEVICE_PRIVATE may be found in
 * include/linux/hmm.h and Documentation/vm/hmm.txt. There is also a brief
 * explanation in include/linux/memory_hotplug.h.
 *
 * The page_fault() callback must migrate the page back, from device memory to
 * system memory, so that the CPU can access it. This might fail for various
 * reasons (device issues, device has been unplugged, ...). When such an error
 * condition happens, the page_fault() callback must return VM_FAULT_SIGBUS and
 * set the CPU page table entry to "poisoned".
 *
 * Note that because memory cgroup charges are transferred to the device
 * memory, this should never fail due to memory restrictions. However,
 * allocation of a regular system page might still fail because we are out of
 * memory. If that happens, the page_fault() callback must return VM_FAULT_OOM.
 *
 * The page_fault() callback can also try to migrate back multiple pages in one
 * chunk, as an optimization. It must, however, prioritize the faulting address
 * over all the others.
 *
 * The page_free() callback is called once the page refcount reaches 1
 * (ZONE_DEVICE pages never reach a refcount of 0 unless there is a refcount
 * bug; this allows the device driver to implement its own memory management).
 *
 * For MEMORY_DEVICE_PUBLIC only the page_free() callback matters.
 */
typedef int (*dev_page_fault_t)(struct vm_area_struct *vma,
				unsigned long addr,
				const struct page *page,
				unsigned int flags,
				pmd_t *pmdp);
typedef void (*dev_page_free_t)(struct page *page, void *data);
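/*
 * Illustrative sketch (not part of this header): skeleton callbacks that
 * follow the contract above.  example_migrate_to_ram() and
 * example_free_device_page() are hypothetical driver helpers, not existing
 * kernel functions.
 */
int example_migrate_to_ram(struct vm_area_struct *vma, unsigned long addr,
			   const struct page *page, pmd_t *pmdp);
void example_free_device_page(void *data, struct page *page);

static int example_page_fault(struct vm_area_struct *vma, unsigned long addr,
			      const struct page *page, unsigned int flags,
			      pmd_t *pmdp)
{
	/* Migrate the faulting device page back to system memory. */
	if (example_migrate_to_ram(vma, addr, page, pmdp))
		return VM_FAULT_SIGBUS;	/* migration failed: poison the entry */
	return 0;
}

static void example_page_free(struct page *page, void *data)
{
	/* Refcount dropped to 1: hand the page back to the driver allocator. */
	example_free_device_page(data, page);
}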
/**
 * struct dev_pagemap - metadata for ZONE_DEVICE mappings
 * @page_fault: callback when the CPU faults on an unaddressable device page
 * @page_free: free page callback when page refcount reaches 1
 * @altmap: pre-allocated/reserved memory for vmemmap allocations
 * @res: physical address range covered by @ref
 * @ref: reference count that pins the devm_memremap_pages() mapping
 * @dev: host device of the mapping for debug
 * @data: private data pointer for page_free()
 * @type: memory type: see MEMORY_* in memory_hotplug.h
 */
struct dev_pagemap {
	dev_page_fault_t page_fault;
	dev_page_free_t page_free;
	struct vmem_altmap *altmap;
	const struct resource *res;
	struct percpu_ref *ref;
	struct device *dev;
	void *data;
	enum memory_type type;
};
#ifdef CONFIG_ZONE_DEVICE
void *devm_memremap_pages(struct device *dev, struct resource *res,
		struct percpu_ref *ref, struct vmem_altmap *altmap);
struct dev_pagemap *find_dev_pagemap(resource_size_t phys);

unsigned long vmem_altmap_offset(struct vmem_altmap *altmap);
void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);

static inline bool is_zone_device_page(const struct page *page);
#else
static inline void *devm_memremap_pages(struct device *dev,
		struct resource *res, struct percpu_ref *ref,
		struct vmem_altmap *altmap)
{
	/*
	 * Fail attempts to call devm_memremap_pages() without
	 * ZONE_DEVICE support enabled; this requires callers to fall
	 * back to plain devm_memremap() based on config.
	 */
	WARN_ON_ONCE(1);
	return ERR_PTR(-ENXIO);
}

static inline struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
{
	return NULL;
}

static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
{
	return 0;
}

static inline void vmem_altmap_free(struct vmem_altmap *altmap,
		unsigned long nr_pfns)
{
}
#endif /* CONFIG_ZONE_DEVICE */
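/*
 * Illustrative sketch (not part of this header): a hypothetical driver path
 * that maps its device range with struct pages when ZONE_DEVICE is configured
 * and otherwise falls back to plain devm_memremap() (declared in linux/io.h),
 * as the stub above expects.  @dev, @res and @ref are assumed to have been
 * set up by the caller.
 */
static inline void *example_map_device_range(struct device *dev,
		struct resource *res, struct percpu_ref *ref)
{
	if (IS_ENABLED(CONFIG_ZONE_DEVICE))
		return devm_memremap_pages(dev, res, ref, NULL);

	/* no ZONE_DEVICE: map the range without allocating struct pages */
	return devm_memremap(dev, res->start, resource_size(res), MEMREMAP_WB);
}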
#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
static inline bool is_device_private_page(const struct page *page)
{
	return is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PRIVATE;
}

static inline bool is_device_public_page(const struct page *page)
{
	return is_zone_device_page(page) &&
		page->pgmap->type == MEMORY_DEVICE_PUBLIC;
}
#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
/**
 * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn
 * @pfn: page frame number to look up a page_map for
 * @pgmap: optional known pgmap that already has a reference
 *
 * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the
 * same mapping.
 */
static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
		struct dev_pagemap *pgmap)
{
	const struct resource *res = pgmap ? pgmap->res : NULL;
	resource_size_t phys = PFN_PHYS(pfn);

	/*
	 * In the cached case we're already holding a live reference so
	 * we can simply do a blind increment.
	 */
	if (res && phys >= res->start && phys <= res->end) {
		percpu_ref_get(pgmap->ref);
		return pgmap;
	}

	/* fall back to slow path lookup */
	rcu_read_lock();
	pgmap = find_dev_pagemap(phys);
	if (pgmap && !percpu_ref_tryget_live(pgmap->ref))
		pgmap = NULL;
	rcu_read_unlock();

	return pgmap;
}

static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
	if (pgmap)
		percpu_ref_put(pgmap->ref);
}
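/*
 * Illustrative sketch (not part of this header): walking a pfn range while
 * reusing the previously returned pgmap as the lookup hint, so that only the
 * first pfn pays the find_dev_pagemap() cost.  example_handle_device_pfn() is
 * a hypothetical per-pfn handler.
 */
void example_handle_device_pfn(unsigned long pfn, struct dev_pagemap *pgmap);

static inline void example_walk_device_pfns(unsigned long start_pfn,
		unsigned long nr_pages)
{
	struct dev_pagemap *pgmap = NULL;
	unsigned long pfn;

	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		struct dev_pagemap *found = get_dev_pagemap(pfn, pgmap);

		if (!found)
			break;		/* pfn is not device memory */
		put_dev_pagemap(pgmap);	/* drop the previous hint's reference */
		pgmap = found;
		example_handle_device_pfn(pfn, pgmap);
	}
	put_dev_pagemap(pgmap);		/* put_dev_pagemap(NULL) is a no-op */
}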
#endif /* _LINUX_MEMREMAP_H_ */