Commit | Line | Data |
---|---|---|
ab68f262 DW |
1 | /* |
2 | * Copyright(c) 2016 Intel Corporation. All rights reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of version 2 of the GNU General Public License as | |
6 | * published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, but | |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
11 | * General Public License for more details. | |
12 | */ | |
13 | #include <linux/pagemap.h> | |
14 | #include <linux/module.h> | |
15 | #include <linux/device.h> | |
50d34394 | 16 | #include <linux/magic.h> |
3bc52c45 | 17 | #include <linux/mount.h> |
ab68f262 | 18 | #include <linux/pfn_t.h> |
3bc52c45 | 19 | #include <linux/hash.h> |
ba09c01d | 20 | #include <linux/cdev.h> |
ab68f262 DW |
21 | #include <linux/slab.h> |
22 | #include <linux/dax.h> | |
23 | #include <linux/fs.h> | |
24 | #include <linux/mm.h> | |
ccdb07f6 | 25 | #include "dax.h" |
ab68f262 | 26 | |
ba09c01d | 27 | static dev_t dax_devt; |
ab68f262 DW |
28 | static struct class *dax_class; |
29 | static DEFINE_IDA(dax_minor_ida); | |
ba09c01d DW |
30 | static int nr_dax = CONFIG_NR_DEV_DAX; |
31 | module_param(nr_dax, int, S_IRUGO); | |
3bc52c45 DW |
32 | static struct vfsmount *dax_mnt; |
33 | static struct kmem_cache *dax_cache __read_mostly; | |
34 | static struct super_block *dax_superblock __read_mostly; | |
ba09c01d | 35 | MODULE_PARM_DESC(nr_dax, "max number of device-dax instances"); |
ab68f262 DW |
36 | |
37 | /** | |
38 | * struct dax_region - mapping infrastructure for dax devices | |
39 | * @id: kernel-wide unique region for a memory range | |
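| * @ida: instance id allocator for child dax devices | |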
40 | * @base: linear address corresponding to @res | |
41 | * @kref: to pin while other agents have a need to do lookups | |
42 | * @dev: parent device backing this region | |
43 | * @align: allocation and mapping alignment for child dax devices | |
44 | * @res: physical address range of the region | |
45 | * @pfn_flags: identify whether the pfns are paged back or not | |
46 | */ | |
47 | struct dax_region { | |
48 | int id; | |
49 | struct ida ida; | |
50 | void *base; | |
51 | struct kref kref; | |
52 | struct device *dev; | |
53 | unsigned int align; | |
54 | struct resource res; | |
55 | unsigned long pfn_flags; | |
56 | }; | |
57 | ||
58 | /** | |
59 | * struct dax_dev - subdivision of a dax region | |
60 | * @region - parent region | |
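| * @inode - core vfs inode pinned while the device is registered, used to unmap all mappings on unregister | |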
61 | * @dev - device backing the character device | |
ba09c01d | 62 | * @cdev - core chardev data |
dee41079 | 63 | * @alive - !alive + rcu grace period == no new mappings can be established |
ab68f262 DW |
64 | * @id - child id in the region |
65 | * @num_resources - number of physical address extents in this device | |
66 | * @res - array of physical address ranges | |
67 | */ | |
68 | struct dax_dev { | |
69 | struct dax_region *region; | |
3bc52c45 | 70 | struct inode *inode; |
ebd84d72 | 71 | struct device dev; |
ba09c01d | 72 | struct cdev cdev; |
dee41079 | 73 | bool alive; |
ab68f262 DW |
74 | int id; |
75 | int num_resources; | |
76 | struct resource res[0]; | |
77 | }; | |
78 | ||
d7fe1a67 DW |
79 | static ssize_t id_show(struct device *dev, |
80 | struct device_attribute *attr, char *buf) | |
81 | { | |
82 | struct dax_region *dax_region; | |
83 | ssize_t rc = -ENXIO; | |
84 | ||
85 | device_lock(dev); | |
86 | dax_region = dev_get_drvdata(dev); | |
87 | if (dax_region) | |
88 | rc = sprintf(buf, "%d\n", dax_region->id); | |
89 | device_unlock(dev); | |
90 | ||
91 | return rc; | |
92 | } | |
93 | static DEVICE_ATTR_RO(id); | |
94 | ||
95 | static ssize_t region_size_show(struct device *dev, | |
96 | struct device_attribute *attr, char *buf) | |
97 | { | |
98 | struct dax_region *dax_region; | |
99 | ssize_t rc = -ENXIO; | |
100 | ||
101 | device_lock(dev); | |
102 | dax_region = dev_get_drvdata(dev); | |
103 | if (dax_region) | |
104 | rc = sprintf(buf, "%llu\n", (unsigned long long) | |
105 | resource_size(&dax_region->res)); | |
106 | device_unlock(dev); | |
107 | ||
108 | return rc; | |
109 | } | |
110 | static struct device_attribute dev_attr_region_size = __ATTR(size, 0444, | |
111 | region_size_show, NULL); | |
112 | ||
113 | static ssize_t align_show(struct device *dev, | |
114 | struct device_attribute *attr, char *buf) | |
115 | { | |
116 | struct dax_region *dax_region; | |
117 | ssize_t rc = -ENXIO; | |
118 | ||
119 | device_lock(dev); | |
120 | dax_region = dev_get_drvdata(dev); | |
121 | if (dax_region) | |
122 | rc = sprintf(buf, "%u\n", dax_region->align); | |
123 | device_unlock(dev); | |
124 | ||
125 | return rc; | |
126 | } | |
127 | static DEVICE_ATTR_RO(align); | |
128 | ||
129 | static struct attribute *dax_region_attributes[] = { | |
130 | &dev_attr_region_size.attr, | |
131 | &dev_attr_align.attr, | |
132 | &dev_attr_id.attr, | |
133 | NULL, | |
134 | }; | |
135 | ||
136 | static const struct attribute_group dax_region_attribute_group = { | |
137 | .name = "dax_region", | |
138 | .attrs = dax_region_attributes, | |
139 | }; | |
140 | ||
141 | static const struct attribute_group *dax_region_attribute_groups[] = { | |
142 | &dax_region_attribute_group, | |
143 | NULL, | |
144 | }; | |
145 | ||
3bc52c45 | 146 | static struct inode *dax_alloc_inode(struct super_block *sb) |
ab68f262 | 147 | { |
3bc52c45 DW |
148 | return kmem_cache_alloc(dax_cache, GFP_KERNEL); |
149 | } | |
ab68f262 | 150 | |
3bc52c45 DW |
151 | static void dax_i_callback(struct rcu_head *head) |
152 | { | |
153 | struct inode *inode = container_of(head, struct inode, i_rcu); | |
154 | ||
155 | kmem_cache_free(dax_cache, inode); | |
ab68f262 DW |
156 | } |
157 | ||
3bc52c45 | 158 | static void dax_destroy_inode(struct inode *inode) |
ab68f262 | 159 | { |
3bc52c45 | 160 | call_rcu(&inode->i_rcu, dax_i_callback); |
ab68f262 | 161 | } |
ab68f262 | 162 | |
3bc52c45 DW |
163 | static const struct super_operations dax_sops = { |
164 | .statfs = simple_statfs, | |
165 | .alloc_inode = dax_alloc_inode, | |
166 | .destroy_inode = dax_destroy_inode, | |
167 | .drop_inode = generic_delete_inode, | |
168 | }; | |
169 | ||
170 | static struct dentry *dax_mount(struct file_system_type *fs_type, | |
171 | int flags, const char *dev_name, void *data) | |
ab68f262 | 172 | { |
3bc52c45 DW |
173 | return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC); |
174 | } | |
ab68f262 | 175 | |
3bc52c45 DW |
176 | static struct file_system_type dax_type = { |
177 | .name = "dax", | |
178 | .mount = dax_mount, | |
179 | .kill_sb = kill_anon_super, | |
180 | }; | |
181 | ||
182 | static int dax_test(struct inode *inode, void *data) | |
183 | { | |
184 | return inode->i_cdev == data; | |
185 | } | |
186 | ||
187 | static int dax_set(struct inode *inode, void *data) | |
188 | { | |
189 | inode->i_cdev = data; | |
190 | return 0; | |
191 | } | |
192 | ||
193 | static struct inode *dax_inode_get(struct cdev *cdev, dev_t devt) | |
194 | { | |
195 | struct inode *inode; | |
196 | ||
197 | inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31), | |
198 | dax_test, dax_set, cdev); | |
199 | ||
200 | if (!inode) | |
201 | return NULL; | |
202 | ||
203 | if (inode->i_state & I_NEW) { | |
204 | inode->i_mode = S_IFCHR; | |
205 | inode->i_flags = S_DAX; | |
206 | inode->i_rdev = devt; | |
207 | mapping_set_gfp_mask(&inode->i_data, GFP_USER); | |
208 | unlock_new_inode(inode); | |
209 | } | |
210 | return inode; | |
211 | } | |
212 | ||
213 | static void init_once(void *inode) | |
214 | { | |
215 | inode_init_once(inode); | |
216 | } | |
217 | ||
218 | static int dax_inode_init(void) | |
219 | { | |
220 | int rc; | |
221 | ||
222 | dax_cache = kmem_cache_create("dax_cache", sizeof(struct inode), 0, | |
223 | (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| | |
224 | SLAB_MEM_SPREAD|SLAB_ACCOUNT), | |
225 | init_once); | |
226 | if (!dax_cache) | |
227 | return -ENOMEM; | |
228 | ||
229 | rc = register_filesystem(&dax_type); | |
230 | if (rc) | |
231 | goto err_register_fs; | |
232 | ||
233 | dax_mnt = kern_mount(&dax_type); | |
234 | if (IS_ERR(dax_mnt)) { | |
235 | rc = PTR_ERR(dax_mnt); | |
236 | goto err_mount; | |
237 | } | |
238 | dax_superblock = dax_mnt->mnt_sb; | |
239 | ||
240 | return 0; | |
241 | ||
242 | err_mount: | |
243 | unregister_filesystem(&dax_type); | |
244 | err_register_fs: | |
245 | kmem_cache_destroy(dax_cache); | |
246 | ||
247 | return rc; | |
ab68f262 DW |
248 | } |
249 | ||
3bc52c45 DW |
250 | static void dax_inode_exit(void) |
251 | { | |
252 | kern_unmount(dax_mnt); | |
253 | unregister_filesystem(&dax_type); | |
254 | kmem_cache_destroy(dax_cache); | |
255 | } | |
256 | ||
ab68f262 DW |
257 | static void dax_region_free(struct kref *kref) |
258 | { | |
259 | struct dax_region *dax_region; | |
260 | ||
261 | dax_region = container_of(kref, struct dax_region, kref); | |
262 | kfree(dax_region); | |
263 | } | |
264 | ||
265 | void dax_region_put(struct dax_region *dax_region) | |
ab68f262 | 266 | { |
ab68f262 | 267 | kref_put(&dax_region->kref, dax_region_free); |
ab68f262 | 268 | } |
ab68f262 | 269 | EXPORT_SYMBOL_GPL(dax_region_put); |
ab68f262 | 270 | |
d7fe1a67 DW |
271 | static void dax_region_unregister(void *region) |
272 | { | |
273 | struct dax_region *dax_region = region; | |
274 | ||
275 | sysfs_remove_groups(&dax_region->dev->kobj, | |
276 | dax_region_attribute_groups); | |
277 | dax_region_put(dax_region); | |
278 | } | |
279 | ||
ab68f262 DW |
280 | struct dax_region *alloc_dax_region(struct device *parent, int region_id, |
281 | struct resource *res, unsigned int align, void *addr, | |
282 | unsigned long pfn_flags) | |
283 | { | |
284 | struct dax_region *dax_region; | |
285 | ||
d7fe1a67 DW |
286 | /* |
287 | * The DAX core assumes that it can store its private data in | |
288 | * parent->driver_data. This WARN is a reminder / safeguard for | |
289 | * developers of device-dax drivers. | |
290 | */ | |
291 | if (dev_get_drvdata(parent)) { | |
292 | dev_WARN(parent, "dax core failed to setup private data\n"); | |
293 | return NULL; | |
294 | } | |
295 | ||
9d2d01a0 DW |
296 | if (!IS_ALIGNED(res->start, align) |
297 | || !IS_ALIGNED(resource_size(res), align)) | |
298 | return NULL; | |
ab68f262 | 299 | |
9d2d01a0 | 300 | dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL); |
ab68f262 DW |
301 | if (!dax_region) |
302 | return NULL; | |
303 | ||
d7fe1a67 | 304 | dev_set_drvdata(parent, dax_region); |
ab68f262 DW |
305 | memcpy(&dax_region->res, res, sizeof(*res)); |
306 | dax_region->pfn_flags = pfn_flags; | |
307 | kref_init(&dax_region->kref); | |
308 | dax_region->id = region_id; | |
309 | ida_init(&dax_region->ida); | |
310 | dax_region->align = align; | |
311 | dax_region->dev = parent; | |
312 | dax_region->base = addr; | |
d7fe1a67 DW |
313 | if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) { |
314 | kfree(dax_region); | |
315 | return NULL; | |
316 | } | |
ab68f262 | 317 | |
d7fe1a67 DW |
318 | kref_get(&dax_region->kref); |
319 | if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region)) | |
320 | return NULL; | |
ab68f262 DW |
321 | return dax_region; |
322 | } | |
323 | EXPORT_SYMBOL_GPL(alloc_dax_region); | |
324 | ||
ebd84d72 DW |
325 | static struct dax_dev *to_dax_dev(struct device *dev) |
326 | { | |
327 | return container_of(dev, struct dax_dev, dev); | |
328 | } | |
329 | ||
ab68f262 DW |
330 | static ssize_t size_show(struct device *dev, |
331 | struct device_attribute *attr, char *buf) | |
332 | { | |
ebd84d72 | 333 | struct dax_dev *dax_dev = to_dax_dev(dev); |
ab68f262 DW |
334 | unsigned long long size = 0; |
335 | int i; | |
336 | ||
337 | for (i = 0; i < dax_dev->num_resources; i++) | |
338 | size += resource_size(&dax_dev->res[i]); | |
339 | ||
340 | return sprintf(buf, "%llu\n", size); | |
341 | } | |
342 | static DEVICE_ATTR_RO(size); | |
343 | ||
344 | static struct attribute *dax_device_attributes[] = { | |
345 | &dev_attr_size.attr, | |
346 | NULL, | |
347 | }; | |
348 | ||
349 | static const struct attribute_group dax_device_attribute_group = { | |
350 | .attrs = dax_device_attributes, | |
351 | }; | |
352 | ||
353 | static const struct attribute_group *dax_attribute_groups[] = { | |
354 | &dax_device_attribute_group, | |
355 | NULL, | |
356 | }; | |
357 | ||
dee41079 DW |
358 | static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma, |
359 | const char *func) | |
360 | { | |
361 | struct dax_region *dax_region = dax_dev->region; | |
ebd84d72 | 362 | struct device *dev = &dax_dev->dev; |
dee41079 DW |
363 | unsigned long mask; |
364 | ||
365 | if (!dax_dev->alive) | |
366 | return -ENXIO; | |
367 | ||
4cb19355 | 368 | /* prevent private mappings from being established */ |
325896ff | 369 | if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) { |
dee41079 DW |
370 | dev_info(dev, "%s: %s: fail, attempted private mapping\n", |
371 | current->comm, func); | |
372 | return -EINVAL; | |
373 | } | |
374 | ||
375 | mask = dax_region->align - 1; | |
376 | if (vma->vm_start & mask || vma->vm_end & mask) { | |
377 | dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n", | |
378 | current->comm, func, vma->vm_start, vma->vm_end, | |
379 | mask); | |
380 | return -EINVAL; | |
381 | } | |
382 | ||
383 | if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV | |
384 | && (vma->vm_flags & VM_DONTCOPY) == 0) { | |
385 | dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n", | |
386 | current->comm, func); | |
387 | return -EINVAL; | |
388 | } | |
389 | ||
390 | if (!vma_is_dax(vma)) { | |
391 | dev_info(dev, "%s: %s: fail, vma is not DAX capable\n", | |
392 | current->comm, func); | |
393 | return -EINVAL; | |
394 | } | |
395 | ||
396 | return 0; | |
397 | } | |
398 | ||
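For reference, a minimal userspace sketch of a mapping that satisfies the constraints check_vma() enforces: the mapping must be shared, the vma must start and end on the region alignment, and MADV_DONTFORK is needed when the region is PFN_DEV without PFN_MAP. The device path and the 2 MiB alignment are assumptions for illustration, not values taken from this file.

```c
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 2UL << 20;			/* multiple of the assumed region alignment */
	void *addr;
	int fd = open("/dev/dax0.0", O_RDWR);	/* hypothetical device node */

	if (fd < 0)
		return 1;

	/* MAP_SHARED is mandatory: check_vma() rejects private mappings */
	addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED)
		return 1;

	/* required when the region lacks PFN_MAP (no struct pages to copy on fork) */
	madvise(addr, len, MADV_DONTFORK);

	((char *)addr)[0] = 1;			/* first access faults through the handlers above */
	munmap(addr, len);
	close(fd);
	return 0;
}
```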
399 | static phys_addr_t pgoff_to_phys(struct dax_dev *dax_dev, pgoff_t pgoff, | |
400 | unsigned long size) | |
401 | { | |
402 | struct resource *res; | |
403 | phys_addr_t phys; | |
404 | int i; | |
405 | ||
406 | for (i = 0; i < dax_dev->num_resources; i++) { | |
407 | res = &dax_dev->res[i]; | |
408 | phys = pgoff * PAGE_SIZE + res->start; | |
409 | if (phys >= res->start && phys <= res->end) | |
410 | break; | |
411 | pgoff -= PHYS_PFN(resource_size(res)); | |
412 | } | |
413 | ||
414 | if (i < dax_dev->num_resources) { | |
415 | res = &dax_dev->res[i]; | |
416 | if (phys + size - 1 <= res->end) | |
417 | return phys; | |
418 | } | |
419 | ||
420 | return -1; | |
421 | } | |
422 | ||
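The walk above consumes the page offset one extent at a time until it lands inside one of the device's physical ranges. A standalone mirror of the same arithmetic, using made-up extents and a hard-coded 4 KiB page size, may make it easier to follow:

```c
#include <assert.h>
#include <stdint.h>

struct range { uint64_t start, end; };	/* inclusive bounds, like struct resource */

static uint64_t pgoff_to_phys_demo(const struct range *res, int nr,
		uint64_t pgoff, uint64_t size)
{
	uint64_t phys = 0;
	int i;

	for (i = 0; i < nr; i++) {
		phys = pgoff * 4096 + res[i].start;
		if (phys >= res[i].start && phys <= res[i].end)
			break;
		/* not in this extent: consume its pages and try the next one */
		pgoff -= (res[i].end - res[i].start + 1) / 4096;
	}

	if (i < nr && phys + size - 1 <= res[i].end)
		return phys;
	return (uint64_t)-1;
}

int main(void)
{
	/* two discontiguous 2 MiB extents (512 pages each) */
	struct range res[] = {
		{ 0x100000000ULL, 0x1001fffffULL },
		{ 0x200000000ULL, 0x2001fffffULL },
	};

	/* page offset 512 falls on the first page of the second extent */
	assert(pgoff_to_phys_demo(res, 2, 512, 4096) == 0x200000000ULL);
	return 0;
}
```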
a2d58167 | 423 | static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) |
dee41079 | 424 | { |
ebd84d72 | 425 | struct device *dev = &dax_dev->dev; |
dee41079 DW |
426 | struct dax_region *dax_region; |
427 | int rc = VM_FAULT_SIGBUS; | |
428 | phys_addr_t phys; | |
429 | pfn_t pfn; | |
0134ed4f | 430 | unsigned int fault_size = PAGE_SIZE; |
dee41079 | 431 | |
11bac800 | 432 | if (check_vma(dax_dev, vmf->vma, __func__)) |
dee41079 DW |
433 | return VM_FAULT_SIGBUS; |
434 | ||
435 | dax_region = dax_dev->region; | |
436 | if (dax_region->align > PAGE_SIZE) { | |
437 | dev_dbg(dev, "%s: alignment > fault size\n", __func__); | |
438 | return VM_FAULT_SIGBUS; | |
439 | } | |
440 | ||
0134ed4f DJ |
441 | if (fault_size != dax_region->align) |
442 | return VM_FAULT_SIGBUS; | |
443 | ||
dee41079 DW |
444 | phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE); |
445 | if (phys == -1) { | |
52084f89 | 446 | dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, |
dee41079 DW |
447 | vmf->pgoff); |
448 | return VM_FAULT_SIGBUS; | |
449 | } | |
450 | ||
451 | pfn = phys_to_pfn_t(phys, dax_region->pfn_flags); | |
452 | ||
11bac800 | 453 | rc = vm_insert_mixed(vmf->vma, vmf->address, pfn); |
dee41079 DW |
454 | |
455 | if (rc == -ENOMEM) | |
456 | return VM_FAULT_OOM; | |
457 | if (rc < 0 && rc != -EBUSY) | |
458 | return VM_FAULT_SIGBUS; | |
459 | ||
460 | return VM_FAULT_NOPAGE; | |
461 | } | |
462 | ||
f4200391 | 463 | static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) |
dee41079 | 464 | { |
d8a849e1 | 465 | unsigned long pmd_addr = vmf->address & PMD_MASK; |
ebd84d72 | 466 | struct device *dev = &dax_dev->dev; |
dee41079 DW |
467 | struct dax_region *dax_region; |
468 | phys_addr_t phys; | |
469 | pgoff_t pgoff; | |
470 | pfn_t pfn; | |
0134ed4f | 471 | unsigned int fault_size = PMD_SIZE; |
dee41079 | 472 | |
f4200391 | 473 | if (check_vma(dax_dev, vmf->vma, __func__)) |
dee41079 DW |
474 | return VM_FAULT_SIGBUS; |
475 | ||
476 | dax_region = dax_dev->region; | |
477 | if (dax_region->align > PMD_SIZE) { | |
478 | dev_dbg(dev, "%s: alignment > fault size\n", __func__); | |
479 | return VM_FAULT_SIGBUS; | |
480 | } | |
481 | ||
482 | /* dax pmd mappings require pfn_t_devmap() */ | |
483 | if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) { | |
484 | dev_dbg(dev, "%s: region lacks devmap pfn flags\n", __func__); | |
485 | return VM_FAULT_SIGBUS; | |
486 | } | |
487 | ||
0134ed4f DJ |
488 | if (fault_size < dax_region->align) |
489 | return VM_FAULT_SIGBUS; | |
490 | else if (fault_size > dax_region->align) | |
491 | return VM_FAULT_FALLBACK; | |
492 | ||
493 | /* if we are outside of the VMA */ | |
494 | if (pmd_addr < vmf->vma->vm_start || | |
495 | (pmd_addr + PMD_SIZE) > vmf->vma->vm_end) | |
496 | return VM_FAULT_SIGBUS; | |
497 | ||
f4200391 | 498 | pgoff = linear_page_index(vmf->vma, pmd_addr); |
4c3cb6e9 | 499 | phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE); |
dee41079 | 500 | if (phys == -1) { |
52084f89 | 501 | dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, |
dee41079 DW |
502 | pgoff); |
503 | return VM_FAULT_SIGBUS; | |
504 | } | |
505 | ||
506 | pfn = phys_to_pfn_t(phys, dax_region->pfn_flags); | |
507 | ||
f4200391 | 508 | return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd, pfn, |
d8a849e1 | 509 | vmf->flags & FAULT_FLAG_WRITE); |
dee41079 DW |
510 | } |
511 | ||
9557feee DJ |
512 | #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD |
513 | static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) | |
514 | { | |
515 | unsigned long pud_addr = vmf->address & PUD_MASK; | |
516 | struct device *dev = &dax_dev->dev; | |
517 | struct dax_region *dax_region; | |
518 | phys_addr_t phys; | |
519 | pgoff_t pgoff; | |
520 | pfn_t pfn; | |
70b085b0 DJ |
521 | unsigned int fault_size = PUD_SIZE; |
522 | ||
9557feee DJ |
523 | |
524 | if (check_vma(dax_dev, vmf->vma, __func__)) | |
525 | return VM_FAULT_SIGBUS; | |
526 | ||
527 | dax_region = dax_dev->region; | |
528 | if (dax_region->align > PUD_SIZE) { | |
529 | dev_dbg(dev, "%s: alignment > fault size\n", __func__); | |
530 | return VM_FAULT_SIGBUS; | |
531 | } | |
532 | ||
533 | /* dax pud mappings require pfn_t_devmap() */ | |
534 | if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) != (PFN_DEV|PFN_MAP)) { | |
535 | dev_dbg(dev, "%s: region lacks devmap pfn flags\n", __func__); | |
536 | return VM_FAULT_SIGBUS; | |
537 | } | |
538 | ||
70b085b0 DJ |
539 | if (fault_size < dax_region->align) |
540 | return VM_FAULT_SIGBUS; | |
541 | else if (fault_size > dax_region->align) | |
542 | return VM_FAULT_FALLBACK; | |
543 | ||
544 | /* if we are outside of the VMA */ | |
545 | if (pud_addr < vmf->vma->vm_start || | |
546 | (pud_addr + PUD_SIZE) > vmf->vma->vm_end) | |
547 | return VM_FAULT_SIGBUS; | |
548 | ||
9557feee DJ |
549 | pgoff = linear_page_index(vmf->vma, pud_addr); |
550 | phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE); | |
551 | if (phys == -1) { | |
52084f89 | 552 | dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, |
9557feee DJ |
553 | pgoff); |
554 | return VM_FAULT_SIGBUS; | |
555 | } | |
556 | ||
557 | pfn = phys_to_pfn_t(phys, dax_region->pfn_flags); | |
558 | ||
559 | return vmf_insert_pfn_pud(vmf->vma, vmf->address, vmf->pud, pfn, | |
560 | vmf->flags & FAULT_FLAG_WRITE); | |
561 | } | |
562 | #else | |
563 | static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) | |
564 | { | |
565 | return VM_FAULT_FALLBACK; | |
566 | } | |
567 | #endif /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ | |
568 | ||
c791ace1 DJ |
569 | static int dax_dev_huge_fault(struct vm_fault *vmf, |
570 | enum page_entry_size pe_size) | |
dee41079 DW |
571 | { |
572 | int rc; | |
f4200391 | 573 | struct file *filp = vmf->vma->vm_file; |
dee41079 DW |
574 | struct dax_dev *dax_dev = filp->private_data; |
575 | ||
ebd84d72 | 576 | dev_dbg(&dax_dev->dev, "%s: %s: %s (%#lx - %#lx)\n", __func__, |
d8a849e1 | 577 | current->comm, (vmf->flags & FAULT_FLAG_WRITE) |
f4200391 DJ |
578 | ? "write" : "read", |
579 | vmf->vma->vm_start, vmf->vma->vm_end); | |
dee41079 DW |
580 | |
581 | rcu_read_lock(); | |
c791ace1 DJ |
582 | switch (pe_size) { |
583 | case PE_SIZE_PTE: | |
a2d58167 DJ |
584 | rc = __dax_dev_pte_fault(dax_dev, vmf); |
585 | break; | |
c791ace1 | 586 | case PE_SIZE_PMD: |
a2d58167 | 587 | rc = __dax_dev_pmd_fault(dax_dev, vmf); |
9557feee | 588 | break; |
c791ace1 | 589 | case PE_SIZE_PUD: |
9557feee | 590 | rc = __dax_dev_pud_fault(dax_dev, vmf); |
a2d58167 DJ |
591 | break; |
592 | default: | |
593 | return VM_FAULT_FALLBACK; | |
594 | } | |
dee41079 DW |
595 | rcu_read_unlock(); |
596 | ||
597 | return rc; | |
598 | } | |
599 | ||
c791ace1 DJ |
600 | static int dax_dev_fault(struct vm_fault *vmf) |
601 | { | |
602 | return dax_dev_huge_fault(vmf, PE_SIZE_PTE); | |
603 | } | |
604 | ||
dee41079 DW |
605 | static const struct vm_operations_struct dax_dev_vm_ops = { |
606 | .fault = dax_dev_fault, | |
c791ace1 | 607 | .huge_fault = dax_dev_huge_fault, |
dee41079 DW |
608 | }; |
609 | ||
af69f51e | 610 | static int dax_mmap(struct file *filp, struct vm_area_struct *vma) |
dee41079 DW |
611 | { |
612 | struct dax_dev *dax_dev = filp->private_data; | |
613 | int rc; | |
614 | ||
ebd84d72 | 615 | dev_dbg(&dax_dev->dev, "%s\n", __func__); |
dee41079 DW |
616 | |
617 | rc = check_vma(dax_dev, vma, __func__); | |
618 | if (rc) | |
619 | return rc; | |
620 | ||
dee41079 DW |
621 | vma->vm_ops = &dax_dev_vm_ops; |
622 | vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE; | |
623 | return 0; | |
043a9255 DW |
624 | } |
625 | ||
626 | /* return an unmapped area aligned to the dax region's specified alignment */ | |
af69f51e | 627 | static unsigned long dax_get_unmapped_area(struct file *filp, |
043a9255 DW |
628 | unsigned long addr, unsigned long len, unsigned long pgoff, |
629 | unsigned long flags) | |
630 | { | |
631 | unsigned long off, off_end, off_align, len_align, addr_align, align; | |
632 | struct dax_dev *dax_dev = filp ? filp->private_data : NULL; | |
633 | struct dax_region *dax_region; | |
634 | ||
635 | if (!dax_dev || addr) | |
636 | goto out; | |
637 | ||
638 | dax_region = dax_dev->region; | |
639 | align = dax_region->align; | |
640 | off = pgoff << PAGE_SHIFT; | |
641 | off_end = off + len; | |
642 | off_align = round_up(off, align); | |
643 | ||
644 | if ((off_end <= off_align) || ((off_end - off_align) < align)) | |
645 | goto out; | |
646 | ||
647 | len_align = len + align; | |
648 | if ((off + len_align) < off) | |
649 | goto out; | |
dee41079 | 650 | |
043a9255 DW |
651 | addr_align = current->mm->get_unmapped_area(filp, addr, len_align, |
652 | pgoff, flags); | |
653 | if (!IS_ERR_VALUE(addr_align)) { | |
654 | addr_align += (off - addr_align) & (align - 1); | |
655 | return addr_align; | |
656 | } | |
657 | out: | |
658 | return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); | |
659 | } | |
660 | ||
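dax_get_unmapped_area() over-allocates by one alignment unit and then slides the start of the mapping forward so that its position within the alignment unit matches the file offset. A self-contained sketch of that arithmetic, with an invented address and an assumed 2 MiB alignment:

```c
#include <assert.h>
#include <stdint.h>

/* advance addr to the next position whose offset within 'align' equals 'off' */
static uintptr_t align_to_offset(uintptr_t addr, uintptr_t off, uintptr_t align)
{
	return addr + ((off - addr) & (align - 1));
}

int main(void)
{
	uintptr_t align = 2UL << 20;		/* assumed 2 MiB region alignment */
	uintptr_t off   = 0;			/* file offset of the mapping */
	uintptr_t raw   = 0x7f1234567000UL;	/* hypothetical result of the padded request */
	uintptr_t addr  = align_to_offset(raw, off, align);

	/* the adjusted start is aligned and still inside the padded area */
	assert((addr & (align - 1)) == (off & (align - 1)));
	assert(addr >= raw && addr < raw + align);
	return 0;
}
```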
af69f51e | 661 | static int dax_open(struct inode *inode, struct file *filp) |
043a9255 | 662 | { |
ba09c01d | 663 | struct dax_dev *dax_dev; |
043a9255 | 664 | |
ba09c01d DW |
665 | dax_dev = container_of(inode->i_cdev, struct dax_dev, cdev); |
666 | dev_dbg(&dax_dev->dev, "%s\n", __func__); | |
3bc52c45 DW |
667 | inode->i_mapping = dax_dev->inode->i_mapping; |
668 | inode->i_mapping->host = dax_dev->inode; | |
669 | filp->f_mapping = inode->i_mapping; | |
ebd84d72 DW |
670 | filp->private_data = dax_dev; |
671 | inode->i_flags = S_DAX; | |
043a9255 | 672 | |
043a9255 DW |
673 | return 0; |
674 | } | |
dee41079 | 675 | |
af69f51e | 676 | static int dax_release(struct inode *inode, struct file *filp) |
043a9255 DW |
677 | { |
678 | struct dax_dev *dax_dev = filp->private_data; | |
043a9255 | 679 | |
ba09c01d | 680 | dev_dbg(&dax_dev->dev, "%s\n", __func__); |
043a9255 | 681 | return 0; |
dee41079 DW |
682 | } |
683 | ||
ab68f262 DW |
684 | static const struct file_operations dax_fops = { |
685 | .llseek = noop_llseek, | |
686 | .owner = THIS_MODULE, | |
af69f51e DW |
687 | .open = dax_open, |
688 | .release = dax_release, | |
689 | .get_unmapped_area = dax_get_unmapped_area, | |
690 | .mmap = dax_mmap, | |
ab68f262 DW |
691 | }; |
692 | ||
ebd84d72 | 693 | static void dax_dev_release(struct device *dev) |
043a9255 | 694 | { |
ebd84d72 | 695 | struct dax_dev *dax_dev = to_dax_dev(dev); |
043a9255 DW |
696 | struct dax_region *dax_region = dax_dev->region; |
697 | ||
ebd84d72 DW |
698 | ida_simple_remove(&dax_region->ida, dax_dev->id); |
699 | ida_simple_remove(&dax_minor_ida, MINOR(dev->devt)); | |
700 | dax_region_put(dax_region); | |
3bc52c45 | 701 | iput(dax_dev->inode); |
ebd84d72 DW |
702 | kfree(dax_dev); |
703 | } | |
704 | ||
705 | static void unregister_dax_dev(void *dev) | |
706 | { | |
707 | struct dax_dev *dax_dev = to_dax_dev(dev); | |
ba09c01d | 708 | struct cdev *cdev = &dax_dev->cdev; |
ebd84d72 | 709 | |
043a9255 DW |
710 | dev_dbg(dev, "%s\n", __func__); |
711 | ||
712 | /* | |
713 | * Note, rcu is not protecting the liveness of dax_dev, rcu is | |
714 | * ensuring that any fault handlers that might have seen | |
715 | * dax_dev->alive == true, have completed. Any fault handlers | |
716 | * that start after synchronize_rcu() has started will abort | |
717 | * upon seeing dax_dev->alive == false. | |
718 | */ | |
719 | dax_dev->alive = false; | |
720 | synchronize_rcu(); | |
9dc1e492 | 721 | unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1); |
ba09c01d | 722 | cdev_del(cdev); |
043a9255 | 723 | device_unregister(dev); |
043a9255 DW |
724 | } |
725 | ||
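The comment above describes a liveness protocol rather than object lifetime. A generic sketch of that pattern, using invented example_* names rather than code from this driver:

```c
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

struct example_dev {
	bool alive;
};

static int example_fault(struct example_dev *d)
{
	int rc = -ENXIO;

	rcu_read_lock();	/* pins the grace period, not the object */
	if (d->alive)
		rc = 0;		/* safe to go ahead and service the fault */
	rcu_read_unlock();

	return rc;
}

static void example_teardown(struct example_dev *d)
{
	d->alive = false;	/* new faults will now bail out with -ENXIO */
	synchronize_rcu();	/* wait out faults that saw alive == true */
	/* now it is safe to unmap everything and unregister the device */
}
```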
d76911ee DW |
726 | struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region, |
727 | struct resource *res, int count) | |
043a9255 DW |
728 | { |
729 | struct device *parent = dax_region->dev; | |
730 | struct dax_dev *dax_dev; | |
9d2d01a0 | 731 | int rc = 0, minor, i; |
043a9255 | 732 | struct device *dev; |
ba09c01d | 733 | struct cdev *cdev; |
043a9255 DW |
734 | dev_t dev_t; |
735 | ||
736 | dax_dev = kzalloc(sizeof(*dax_dev) + sizeof(*res) * count, GFP_KERNEL); | |
737 | if (!dax_dev) | |
d76911ee | 738 | return ERR_PTR(-ENOMEM); |
043a9255 | 739 | |
9d2d01a0 DW |
740 | for (i = 0; i < count; i++) { |
741 | if (!IS_ALIGNED(res[i].start, dax_region->align) | |
742 | || !IS_ALIGNED(resource_size(&res[i]), | |
743 | dax_region->align)) { | |
744 | rc = -EINVAL; | |
745 | break; | |
746 | } | |
747 | dax_dev->res[i].start = res[i].start; | |
748 | dax_dev->res[i].end = res[i].end; | |
749 | } | |
750 | ||
751 | if (i < count) | |
752 | goto err_id; | |
753 | ||
043a9255 DW |
754 | dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL); |
755 | if (dax_dev->id < 0) { | |
756 | rc = dax_dev->id; | |
757 | goto err_id; | |
758 | } | |
759 | ||
760 | minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL); | |
761 | if (minor < 0) { | |
762 | rc = minor; | |
763 | goto err_minor; | |
764 | } | |
765 | ||
bc0a0fe9 AB |
766 | dev_t = MKDEV(MAJOR(dax_devt), minor); |
767 | dev = &dax_dev->dev; | |
3bc52c45 DW |
768 | dax_dev->inode = dax_inode_get(&dax_dev->cdev, dev_t); |
769 | if (!dax_dev->inode) { | |
770 | rc = -ENOMEM; | |
771 | goto err_inode; | |
772 | } | |
773 | ||
ba09c01d | 774 | /* device_initialize() so cdev can reference kobj parent */ |
ebd84d72 | 775 | device_initialize(dev); |
ba09c01d DW |
776 | |
777 | cdev = &dax_dev->cdev; | |
778 | cdev_init(cdev, &dax_fops); | |
779 | cdev->owner = parent->driver->owner; | |
780 | cdev->kobj.parent = &dev->kobj; | |
781 | rc = cdev_add(&dax_dev->cdev, dev_t, 1); | |
782 | if (rc) | |
783 | goto err_cdev; | |
784 | ||
785 | /* from here on we're committed to teardown via dax_dev_release() */ | |
ba09c01d DW |
786 | dax_dev->num_resources = count; |
787 | dax_dev->alive = true; | |
788 | dax_dev->region = dax_region; | |
789 | kref_get(&dax_region->kref); | |
790 | ||
ebd84d72 DW |
791 | dev->devt = dev_t; |
792 | dev->class = dax_class; | |
793 | dev->parent = parent; | |
794 | dev->groups = dax_attribute_groups; | |
795 | dev->release = dax_dev_release; | |
796 | dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id); | |
797 | rc = device_add(dev); | |
798 | if (rc) { | |
799 | put_device(dev); | |
d76911ee | 800 | return ERR_PTR(rc); |
ebd84d72 | 801 | } |
043a9255 | 802 | |
d76911ee DW |
803 | rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_dev, dev); |
804 | if (rc) | |
805 | return ERR_PTR(rc); | |
806 | ||
807 | return dax_dev; | |
043a9255 | 808 | |
ba09c01d | 809 | err_cdev: |
3bc52c45 DW |
810 | iput(dax_dev->inode); |
811 | err_inode: | |
ba09c01d | 812 | ida_simple_remove(&dax_minor_ida, minor); |
043a9255 DW |
813 | err_minor: |
814 | ida_simple_remove(&dax_region->ida, dax_dev->id); | |
815 | err_id: | |
ebd84d72 | 816 | kfree(dax_dev); |
043a9255 | 817 | |
d76911ee | 818 | return ERR_PTR(rc); |
043a9255 DW |
819 | } |
820 | EXPORT_SYMBOL_GPL(devm_create_dax_dev); | |
821 | ||
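alloc_dax_region() and devm_create_dax_dev() are the two entry points this file exports to device-dax providers. Below is a hypothetical probe routine sketching how a provider, in the spirit of the dax_pmem driver, might wire them together; the example_* names, the platform-device framing, and the SZ_2M/PFN_DEV choices are assumptions for illustration only.

```c
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include "dax.h"

static int example_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct dax_region *dax_region;
	struct dax_dev *dax_dev;

	if (!res)
		return -ENXIO;

	/* one region covering the whole resource, 2 MiB child alignment */
	dax_region = alloc_dax_region(&pdev->dev, pdev->id, res, SZ_2M,
			NULL, PFN_DEV);
	if (!dax_region)
		return -ENOMEM;

	/* one child device spanning the single resource */
	dax_dev = devm_create_dax_dev(dax_region, res, 1);

	/* drop probe's reference; the region stays pinned by its children / devm action */
	dax_region_put(dax_region);

	/* teardown is devm-managed against &pdev->dev, so no remove() is needed */
	return PTR_ERR_OR_ZERO(dax_dev);
}

static struct platform_driver example_dax_driver = {
	.driver = { .name = "example_dax" },
	.probe = example_probe,
};
module_platform_driver(example_dax_driver);
MODULE_LICENSE("GPL v2");
```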
ab68f262 DW |
822 | static int __init dax_init(void) |
823 | { | |
824 | int rc; | |
825 | ||
3bc52c45 DW |
826 | rc = dax_inode_init(); |
827 | if (rc) | |
ab68f262 | 828 | return rc; |
3bc52c45 | 829 | |
ba09c01d DW |
830 | nr_dax = max(nr_dax, 256); |
831 | rc = alloc_chrdev_region(&dax_devt, 0, nr_dax, "dax"); | |
832 | if (rc) | |
3bc52c45 | 833 | goto err_chrdev; |
ab68f262 DW |
834 | |
835 | dax_class = class_create(THIS_MODULE, "dax"); | |
836 | if (IS_ERR(dax_class)) { | |
3bc52c45 DW |
837 | rc = PTR_ERR(dax_class); |
838 | goto err_class; | |
ab68f262 DW |
839 | } |
840 | ||
841 | return 0; | |
3bc52c45 DW |
842 | |
843 | err_class: | |
844 | unregister_chrdev_region(dax_devt, nr_dax); | |
845 | err_chrdev: | |
846 | dax_inode_exit(); | |
847 | return rc; | |
ab68f262 DW |
848 | } |
849 | ||
850 | static void __exit dax_exit(void) | |
851 | { | |
852 | class_destroy(dax_class); | |
ba09c01d | 853 | unregister_chrdev_region(dax_devt, nr_dax); |
ab68f262 | 854 | ida_destroy(&dax_minor_ida); |
3bc52c45 | 855 | dax_inode_exit(); |
ab68f262 DW |
856 | } |
857 | ||
858 | MODULE_AUTHOR("Intel Corporation"); | |
859 | MODULE_LICENSE("GPL v2"); | |
860 | subsys_initcall(dax_init); | |
861 | module_exit(dax_exit); |