/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/fs.h>

static dev_t dax_devt;
DEFINE_STATIC_SRCU(dax_srcu);
static struct vfsmount *dax_mnt;
static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;

#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
static struct hlist_head dax_host_list[DAX_HASH_SIZE];
static DEFINE_SPINLOCK(dax_host_lock);

int dax_read_lock(void)
{
        return srcu_read_lock(&dax_srcu);
}
EXPORT_SYMBOL_GPL(dax_read_lock);

void dax_read_unlock(int id)
{
        srcu_read_unlock(&dax_srcu, id);
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
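
/*
 * Example (illustrative sketch, not part of the original file): every dax
 * operation is expected to run inside a dax_read_lock()/dax_read_unlock()
 * pair so that kill_dax() can fence in-flight users with synchronize_srcu().
 * The helper name below is hypothetical.
 */
static bool __maybe_unused example_dax_device_usable(struct dax_device *dax_dev)
{
        bool alive;
        int id;

        id = dax_read_lock();
        alive = dax_alive(dax_dev); /* only meaningful inside the srcu section */
        dax_read_unlock(id);

        return alive;
}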

#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>

int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
                pgoff_t *pgoff)
{
        phys_addr_t phys_off = (get_start_sect(bdev) + sector) * 512;

        if (pgoff)
                *pgoff = PHYS_PFN(phys_off);
        if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
                return -EINVAL;
        return 0;
}
EXPORT_SYMBOL(bdev_dax_pgoff);
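
/*
 * Example (illustrative sketch, not part of the original file): a filesystem
 * or block driver converts a partition-relative sector into a page offset
 * from the start of the whole disk before calling dax_direct_access(). The
 * helper name is hypothetical.
 */
static int __maybe_unused example_sector_to_pgoff(struct block_device *bdev,
                sector_t sector, pgoff_t *pgoff)
{
        /* ask for one page; fails with -EINVAL if the start is unaligned */
        return bdev_dax_pgoff(bdev, sector, PAGE_SIZE, pgoff);
}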

#if IS_ENABLED(CONFIG_FS_DAX)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
        if (!blk_queue_dax(bdev->bd_queue))
                return NULL;
        return fs_dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif

/**
 * __bdev_dax_supported() - Check if the device supports dax for filesystem
 * @sb: The superblock of the device
 * @blocksize: The block size of the device
 *
 * This is a library function for filesystems to check if the block device
 * can be mounted with dax option.
 *
 * Return: negative errno if unsupported, 0 if supported.
 */
int __bdev_dax_supported(struct super_block *sb, int blocksize)
{
        struct block_device *bdev = sb->s_bdev;
        struct dax_device *dax_dev;
        pgoff_t pgoff;
        int err, id;
        void *kaddr;
        pfn_t pfn;
        long len;

        if (blocksize != PAGE_SIZE) {
                pr_debug("VFS (%s): error: unsupported blocksize for dax\n",
                                sb->s_id);
                return -EINVAL;
        }

        err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
        if (err) {
                pr_debug("VFS (%s): error: unaligned partition for dax\n",
                                sb->s_id);
                return err;
        }

        dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
        if (!dax_dev) {
                pr_debug("VFS (%s): error: device does not support dax\n",
                                sb->s_id);
                return -EOPNOTSUPP;
        }

        id = dax_read_lock();
        len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
        dax_read_unlock(id);

        put_dax(dax_dev);

        if (len < 1) {
                pr_debug("VFS (%s): error: dax access failed (%ld)\n",
                                sb->s_id, len);
                return len < 0 ? len : -EIO;
        }

        if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
                /*
                 * An arch that has enabled the pmem api should also
                 * have its drivers support pfn_t_devmap()
                 *
                 * This is a developer warning and should not trigger in
                 * production. dax_flush() will crash since it depends
                 * on being able to do (page_address(pfn_to_page())).
                 */
                WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
        } else if (pfn_t_devmap(pfn)) {
                /* pass */;
        } else {
                pr_debug("VFS (%s): error: dax support not enabled\n",
                                sb->s_id);
                return -EOPNOTSUPP;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(__bdev_dax_supported);
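
/*
 * Example (illustrative sketch, not part of the original file): how a
 * filesystem might validate a "-o dax" mount request against its backing
 * device at mount time. The helper name is hypothetical; a real filesystem
 * would record the result in its own mount state.
 */
static int __maybe_unused example_fs_enable_dax(struct super_block *sb)
{
        int rc = __bdev_dax_supported(sb, PAGE_SIZE);

        if (rc) {
                pr_debug("VFS (%s): falling back to the page cache\n", sb->s_id);
                return rc;
        }
        return 0;
}
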
#endif

enum dax_device_flags {
        /* !alive + rcu grace period == no new operations / mappings */
        DAXDEV_ALIVE,
        /* gate whether dax_flush() calls the low level flush routine */
        DAXDEV_WRITE_CACHE,
};

/**
 * struct dax_device - anchor object for dax services
 * @inode: core vfs
 * @cdev: optional character interface for "device dax"
 * @host: optional name for lookups where the device path is not available
 * @private: dax driver private data
 * @flags: state and boolean properties
 */
struct dax_device {
        struct hlist_node list;
        struct inode inode;
        struct cdev cdev;
        const char *host;
        void *private;
        unsigned long flags;
        const struct dax_operations *ops;
};

static ssize_t write_cache_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));
        ssize_t rc;

        WARN_ON_ONCE(!dax_dev);
        if (!dax_dev)
                return -ENXIO;

        rc = sprintf(buf, "%d\n", !!test_bit(DAXDEV_WRITE_CACHE,
                                &dax_dev->flags));
        put_dax(dax_dev);
        return rc;
}

static ssize_t write_cache_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        bool write_cache;
        int rc = strtobool(buf, &write_cache);
        struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

        WARN_ON_ONCE(!dax_dev);
        if (!dax_dev)
                return -ENXIO;

        if (rc)
                len = rc;
        else if (write_cache)
                set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
        else
                clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);

        put_dax(dax_dev);
        return len;
}
static DEVICE_ATTR_RW(write_cache);

static umode_t dax_visible(struct kobject *kobj, struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, typeof(*dev), kobj);
        struct dax_device *dax_dev = dax_get_by_host(dev_name(dev));

        WARN_ON_ONCE(!dax_dev);
        if (!dax_dev)
                return 0;

#ifndef CONFIG_ARCH_HAS_PMEM_API
        if (a == &dev_attr_write_cache.attr)
                return 0;
#endif
        return a->mode;
}

static struct attribute *dax_attributes[] = {
        &dev_attr_write_cache.attr,
        NULL,
};

struct attribute_group dax_attribute_group = {
        .name = "dax",
        .attrs = dax_attributes,
        .is_visible = dax_visible,
};
EXPORT_SYMBOL_GPL(dax_attribute_group);

/**
 * dax_direct_access() - translate a device pgoff to an absolute pfn
 * @dax_dev: a dax_device instance representing the logical memory range
 * @pgoff: offset in pages from the start of the device to translate
 * @nr_pages: number of consecutive pages caller can handle relative to @pfn
 * @kaddr: output parameter that returns a virtual address mapping of pfn
 * @pfn: output parameter that returns an absolute pfn translation of @pgoff
 *
 * Return: negative errno if an error occurs, otherwise the number of
 * pages accessible at the device relative @pgoff.
 */
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
                void **kaddr, pfn_t *pfn)
{
        long avail;

        if (!dax_dev)
                return -EOPNOTSUPP;

        if (!dax_alive(dax_dev))
                return -ENXIO;

        if (nr_pages < 0)
                return nr_pages;

        avail = dax_dev->ops->direct_access(dax_dev, pgoff, nr_pages,
                        kaddr, pfn);
        if (!avail)
                return -ERANGE;
        return min(avail, nr_pages);
}
EXPORT_SYMBOL_GPL(dax_direct_access);
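
/*
 * Example (illustrative sketch, not part of the original file): map a single
 * page at @pgoff and copy out its leading bytes through the returned kernel
 * address, bracketed by dax_read_lock(). The helper name is hypothetical.
 */
static int __maybe_unused example_dax_peek(struct dax_device *dax_dev,
                pgoff_t pgoff, void *dst, size_t len)
{
        void *kaddr;
        pfn_t pfn;
        long nr;
        int id;

        if (len > PAGE_SIZE)
                return -EINVAL;

        id = dax_read_lock();
        nr = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
        if (nr < 1) {
                dax_read_unlock(id);
                return nr < 0 ? nr : -EIO;
        }
        memcpy(dst, kaddr, len);
        dax_read_unlock(id);

        return 0;
}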

size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
                size_t bytes, struct iov_iter *i)
{
        if (!dax_alive(dax_dev))
                return 0;

        return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}
EXPORT_SYMBOL_GPL(dax_copy_from_iter);

#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
        if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)))
                return;

        arch_wb_cache_pmem(addr, size);
}
#else
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
{
}
#endif
EXPORT_SYMBOL_GPL(dax_flush);

void dax_write_cache(struct dax_device *dax_dev, bool wc)
{
        if (wc)
                set_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
        else
                clear_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache);

bool dax_write_cache_enabled(struct dax_device *dax_dev)
{
        return test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
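
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * whose media does not require explicit cache flushes can clear the
 * write-cache flag so that dax_flush() becomes a nop. Names are hypothetical.
 */
static void __maybe_unused example_configure_write_cache(struct dax_device *dax_dev,
                bool media_needs_flush)
{
        dax_write_cache(dax_dev, media_needs_flush);

        if (!dax_write_cache_enabled(dax_dev))
                pr_debug("dax: write-back flushing disabled for this device\n");
}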

bool dax_alive(struct dax_device *dax_dev)
{
        lockdep_assert_held(&dax_srcu);
        return test_bit(DAXDEV_ALIVE, &dax_dev->flags);
}
EXPORT_SYMBOL_GPL(dax_alive);

static int dax_host_hash(const char *host)
{
        return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
}

/*
 * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
 * that any fault handlers or operations that might have seen
 * dax_alive(), have completed. Any operations that start after
 * synchronize_srcu() has run will abort upon seeing !dax_alive().
 */
void kill_dax(struct dax_device *dax_dev)
{
        if (!dax_dev)
                return;

        clear_bit(DAXDEV_ALIVE, &dax_dev->flags);

        synchronize_srcu(&dax_srcu);

        spin_lock(&dax_host_lock);
        hlist_del_init(&dax_dev->list);
        spin_unlock(&dax_host_lock);

        dax_dev->private = NULL;
}
EXPORT_SYMBOL_GPL(kill_dax);
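
/*
 * Example (illustrative sketch, not part of the original file): driver
 * teardown order. kill_dax() fences in-flight operations before the final
 * reference is dropped with put_dax(). The helper name is hypothetical.
 */
static void __maybe_unused example_unregister_dax(struct dax_device *dax_dev)
{
        kill_dax(dax_dev);      /* new operations now fail the dax_alive() check */
        put_dax(dax_dev);       /* drop the reference taken at alloc_dax() */
}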

static struct inode *dax_alloc_inode(struct super_block *sb)
{
        struct dax_device *dax_dev;
        struct inode *inode;

        dax_dev = kmem_cache_alloc(dax_cache, GFP_KERNEL);
        if (!dax_dev)
                return NULL;

        inode = &dax_dev->inode;
        inode->i_rdev = 0;
        return inode;
}

static struct dax_device *to_dax_dev(struct inode *inode)
{
        return container_of(inode, struct dax_device, inode);
}

static void dax_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct dax_device *dax_dev = to_dax_dev(inode);

        kfree(dax_dev->host);
        dax_dev->host = NULL;
        if (inode->i_rdev)
                ida_simple_remove(&dax_minor_ida, MINOR(inode->i_rdev));
        kmem_cache_free(dax_cache, dax_dev);
}

static void dax_destroy_inode(struct inode *inode)
{
        struct dax_device *dax_dev = to_dax_dev(inode);

        WARN_ONCE(test_bit(DAXDEV_ALIVE, &dax_dev->flags),
                        "kill_dax() must be called before final iput()\n");
        call_rcu(&inode->i_rcu, dax_i_callback);
}

static const struct super_operations dax_sops = {
        .statfs = simple_statfs,
        .alloc_inode = dax_alloc_inode,
        .destroy_inode = dax_destroy_inode,
        .drop_inode = generic_delete_inode,
};

static struct dentry *dax_mount(struct file_system_type *fs_type,
                int flags, const char *dev_name, void *data)
{
        return mount_pseudo(fs_type, "dax:", &dax_sops, NULL, DAXFS_MAGIC);
}

static struct file_system_type dax_fs_type = {
        .name = "dax",
        .mount = dax_mount,
        .kill_sb = kill_anon_super,
};

static int dax_test(struct inode *inode, void *data)
{
        dev_t devt = *(dev_t *) data;

        return inode->i_rdev == devt;
}

static int dax_set(struct inode *inode, void *data)
{
        dev_t devt = *(dev_t *) data;

        inode->i_rdev = devt;
        return 0;
}

static struct dax_device *dax_dev_get(dev_t devt)
{
        struct dax_device *dax_dev;
        struct inode *inode;

        inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
                        dax_test, dax_set, &devt);

        if (!inode)
                return NULL;

        dax_dev = to_dax_dev(inode);
        if (inode->i_state & I_NEW) {
                set_bit(DAXDEV_ALIVE, &dax_dev->flags);
                inode->i_cdev = &dax_dev->cdev;
                inode->i_mode = S_IFCHR;
                inode->i_flags = S_DAX;
                mapping_set_gfp_mask(&inode->i_data, GFP_USER);
                unlock_new_inode(inode);
        }

        return dax_dev;
}

static void dax_add_host(struct dax_device *dax_dev, const char *host)
{
        int hash;

        /*
         * Unconditionally init dax_dev since it's coming from a
         * non-zeroed slab cache
         */
        INIT_HLIST_NODE(&dax_dev->list);
        dax_dev->host = host;
        if (!host)
                return;

        hash = dax_host_hash(host);
        spin_lock(&dax_host_lock);
        hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
        spin_unlock(&dax_host_lock);
}

struct dax_device *alloc_dax(void *private, const char *__host,
                const struct dax_operations *ops)
{
        struct dax_device *dax_dev;
        const char *host;
        dev_t devt;
        int minor;

        host = kstrdup(__host, GFP_KERNEL);
        if (__host && !host)
                return NULL;

        minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
        if (minor < 0)
                goto err_minor;

        devt = MKDEV(MAJOR(dax_devt), minor);
        dax_dev = dax_dev_get(devt);
        if (!dax_dev)
                goto err_dev;

        dax_add_host(dax_dev, host);
        dax_dev->ops = ops;
        dax_dev->private = private;
        return dax_dev;

 err_dev:
        ida_simple_remove(&dax_minor_ida, minor);
 err_minor:
        kfree(host);
        return NULL;
}
EXPORT_SYMBOL_GPL(alloc_dax);
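
/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * provider wiring its dax_operations into alloc_dax(). All names below are
 * hypothetical, and the direct_access stub is a placeholder; a real driver
 * translates @pgoff into its own memory and honors @nr_pages.
 */
static long example_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                long nr_pages, void **kaddr, pfn_t *pfn)
{
        return -ENXIO;  /* placeholder: no backing memory in this sketch */
}

static size_t example_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i)
{
        return copy_from_iter(addr, bytes, i);
}

static const struct dax_operations example_dax_ops = {
        .direct_access = example_direct_access,
        .copy_from_iter = example_copy_from_iter,
};

static struct dax_device * __maybe_unused example_register_dax(void *drvdata)
{
        /* "example0" is a hypothetical host name for dax_get_by_host() lookups */
        return alloc_dax(drvdata, "example0", &example_dax_ops);
}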

void put_dax(struct dax_device *dax_dev)
{
        if (!dax_dev)
                return;
        iput(&dax_dev->inode);
}
EXPORT_SYMBOL_GPL(put_dax);

/**
 * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
 * @host: alternate name for the device registered by a dax driver
 */
struct dax_device *dax_get_by_host(const char *host)
{
        struct dax_device *dax_dev, *found = NULL;
        int hash, id;

        if (!host)
                return NULL;

        hash = dax_host_hash(host);

        id = dax_read_lock();
        spin_lock(&dax_host_lock);
        hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
                if (!dax_alive(dax_dev)
                                || strcmp(host, dax_dev->host) != 0)
                        continue;

                if (igrab(&dax_dev->inode))
                        found = dax_dev;
                break;
        }
        spin_unlock(&dax_host_lock);
        dax_read_unlock(id);

        return found;
}
EXPORT_SYMBOL_GPL(dax_get_by_host);

/**
 * inode_dax: convert a public inode into its dax_dev
 * @inode: An inode with i_cdev pointing to a dax_dev
 *
 * Note this is not equivalent to to_dax_dev() which is for private
 * internal use where we know the inode filesystem type == dax_fs_type.
 */
struct dax_device *inode_dax(struct inode *inode)
{
        struct cdev *cdev = inode->i_cdev;

        return container_of(cdev, struct dax_device, cdev);
}
EXPORT_SYMBOL_GPL(inode_dax);

struct inode *dax_inode(struct dax_device *dax_dev)
{
        return &dax_dev->inode;
}
EXPORT_SYMBOL_GPL(dax_inode);

void *dax_get_private(struct dax_device *dax_dev)
{
        return dax_dev->private;
}
EXPORT_SYMBOL_GPL(dax_get_private);
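
/*
 * Example (illustrative sketch, not part of the original file): how a
 * "device dax" style character device open() handler can recover its
 * dax_device, and the driver data stored by alloc_dax(), from the vfs
 * inode. The function name is hypothetical.
 */
static int __maybe_unused example_dax_open(struct inode *inode, struct file *filp)
{
        struct dax_device *dax_dev = inode_dax(inode);

        filp->private_data = dax_get_private(dax_dev);
        return 0;
}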

static void init_once(void *_dax_dev)
{
        struct dax_device *dax_dev = _dax_dev;
        struct inode *inode = &dax_dev->inode;

        memset(dax_dev, 0, sizeof(*dax_dev));
        inode_init_once(inode);
}

static int __dax_fs_init(void)
{
        int rc;

        dax_cache = kmem_cache_create("dax_cache", sizeof(struct dax_device), 0,
                        (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
                         SLAB_MEM_SPREAD|SLAB_ACCOUNT),
                        init_once);
        if (!dax_cache)
                return -ENOMEM;

        rc = register_filesystem(&dax_fs_type);
        if (rc)
                goto err_register_fs;

        dax_mnt = kern_mount(&dax_fs_type);
        if (IS_ERR(dax_mnt)) {
                rc = PTR_ERR(dax_mnt);
                goto err_mount;
        }
        dax_superblock = dax_mnt->mnt_sb;

        return 0;

 err_mount:
        unregister_filesystem(&dax_fs_type);
 err_register_fs:
        kmem_cache_destroy(dax_cache);

        return rc;
}

static void __dax_fs_exit(void)
{
        kern_unmount(dax_mnt);
        unregister_filesystem(&dax_fs_type);
        kmem_cache_destroy(dax_cache);
}

static int __init dax_fs_init(void)
{
        int rc;

        rc = __dax_fs_init();
        if (rc)
                return rc;

        rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
        if (rc)
                __dax_fs_exit();
        return rc;
}

static void __exit dax_fs_exit(void)
{
        unregister_chrdev_region(dax_devt, MINORMASK+1);
        ida_destroy(&dax_minor_ida);
        __dax_fs_exit();
}

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
subsys_initcall(dax_fs_init);
module_exit(dax_fs_exit);