/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
#include "nd-core.h"

static struct device *to_dev(struct pmem_device *pmem)
{
        /*
         * nvdimm bus services need a 'dev' parameter, and we record the device
         * at init in bb.dev.
         */
        return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
        return to_nd_region(to_dev(pmem)->parent);
}

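/*
 * Undo the CPU machine-check bookkeeping for pages whose media errors
 * have just been cleared: drop the per-page HWPoison flag and restore
 * each page's linear-map entry so the range is addressable again.
 */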
static void hwpoison_clear(struct pmem_device *pmem,
                phys_addr_t phys, unsigned int len)
{
        unsigned long pfn_start, pfn_end, pfn;

        /* only pmem in the linear map supports HWPoison */
        if (is_vmalloc_addr(pmem->virt_addr))
                return;

        pfn_start = PHYS_PFN(phys);
        pfn_end = pfn_start + PHYS_PFN(len);
        for (pfn = pfn_start; pfn < pfn_end; pfn++) {
                struct page *page = pfn_to_page(pfn);

                /*
                 * Note, no need to hold a get_dev_pagemap() reference
                 * here since we're in the driver I/O path and
                 * outstanding I/O requests pin the dev_pagemap.
                 */
                if (test_and_clear_pmem_poison(page))
                        clear_mce_nospec(pfn);
        }
}

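/*
 * Ask the bus provider to clear media errors in the given range, then
 * reconcile driver state: clear HWPoison on the affected pages, trim
 * the badblocks list, notify 'badblocks' sysfs watchers, and invalidate
 * stale CPU cache lines over the range. Returns BLK_STS_IOERR when the
 * full range could not be cleared.
 */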
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
                phys_addr_t offset, unsigned int len)
{
        struct device *dev = to_dev(pmem);
        sector_t sector;
        long cleared;
        blk_status_t rc = BLK_STS_OK;

        sector = (offset - pmem->data_offset) / 512;

        cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
        if (cleared < len)
                rc = BLK_STS_IOERR;
        if (cleared > 0 && cleared / 512) {
                hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
                cleared /= 512;
                dev_dbg(dev, "%#llx clear %ld sector%s\n",
                                (unsigned long long) sector, cleared,
                                cleared > 1 ? "s" : "");
                badblocks_clear(&pmem->bb, sector, cleared);
                if (pmem->bb_state)
                        sysfs_notify_dirent(pmem->bb_state);
        }

        arch_invalidate_pmem(pmem->virt_addr + offset, len);

        return rc;
}

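/*
 * Page <-> pmem copy helpers. Writes land via memcpy_flushcache() so
 * the data is pushed out of the CPU cache toward the media; reads use
 * memcpy_mcsafe() so consuming poison surfaces as an I/O error instead
 * of a machine check. kmap_atomic() is taken one page at a time to
 * cope with highmem and compound pages.
 */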
static void write_pmem(void *pmem_addr, struct page *page,
                unsigned int off, unsigned int len)
{
        unsigned int chunk;
        void *mem;

        while (len) {
                mem = kmap_atomic(page);
                chunk = min_t(unsigned int, len, PAGE_SIZE - off);
                memcpy_flushcache(pmem_addr, mem + off, chunk);
                kunmap_atomic(mem);
                len -= chunk;
                off = 0;
                page++;
                pmem_addr += chunk;
        }
}

static blk_status_t read_pmem(struct page *page, unsigned int off,
                void *pmem_addr, unsigned int len)
{
        unsigned int chunk;
        unsigned long rem;
        void *mem;

        while (len) {
                mem = kmap_atomic(page);
                chunk = min_t(unsigned int, len, PAGE_SIZE - off);
                rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
                kunmap_atomic(mem);
                if (rem)
                        return BLK_STS_IOERR;
                len -= chunk;
                off = 0;
                page++;
                pmem_addr += chunk;
        }
        return BLK_STS_OK;
}

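/*
 * Service one segment of a bio directly against the pmem mapping.
 * Reads from known-bad ranges fail immediately; writes hit the media
 * both before and after any poison clearing (see the comment in the
 * write path for the ordering rationale).
 */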
static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
                        unsigned int len, unsigned int off, unsigned int op,
                        sector_t sector)
{
        blk_status_t rc = BLK_STS_OK;
        bool bad_pmem = false;
        phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
        void *pmem_addr = pmem->virt_addr + pmem_off;

        if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
                bad_pmem = true;

        if (!op_is_write(op)) {
                if (unlikely(bad_pmem))
                        rc = BLK_STS_IOERR;
                else {
                        rc = read_pmem(page, off, pmem_addr, len);
                        flush_dcache_page(page);
                }
        } else {
                /*
                 * Note that we write the data both before and after
                 * clearing poison.  The write before clear poison
                 * handles situations where the latest written data is
                 * preserved and the clear poison operation simply marks
                 * the address range as valid without changing the data.
                 * In this case application software can assume that an
                 * interrupted write will either return the new good
                 * data or an error.
                 *
                 * However, if pmem_clear_poison() leaves the data in an
                 * indeterminate state we need to perform the write
                 * after clear poison.
                 */
                flush_dcache_page(page);
                write_pmem(pmem_addr, page, off, len);
                if (unlikely(bad_pmem)) {
                        rc = pmem_clear_poison(pmem, pmem_off, len);
                        write_pmem(pmem_addr, page, off, len);
                }
        }

        return rc;
}

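/*
 * bio submission entry point. REQ_PREFLUSH and REQ_FUA are honored by
 * flushing the nvdimm region before and/or after the data transfer;
 * the transfer itself is synchronous, so the bio is completed before
 * returning.
 */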
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
        blk_status_t rc = 0;
        bool do_acct;
        unsigned long start;
        struct bio_vec bvec;
        struct bvec_iter iter;
        struct pmem_device *pmem = q->queuedata;
        struct nd_region *nd_region = to_region(pmem);

        if (bio->bi_opf & REQ_PREFLUSH)
                nvdimm_flush(nd_region);

        do_acct = nd_iostat_start(bio, &start);
        bio_for_each_segment(bvec, bio, iter) {
                rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
                                bvec.bv_offset, bio_op(bio), iter.bi_sector);
                if (rc) {
                        bio->bi_status = rc;
                        break;
                }
        }
        if (do_acct)
                nd_iostat_end(bio, start);

        if (bio->bi_opf & REQ_FUA)
                nvdimm_flush(nd_region);

        bio_endio(bio);
        return BLK_QC_T_NONE;
}

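/* ->rw_page(): synchronously transfer a single (possibly huge) page */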
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
                       struct page *page, unsigned int op)
{
        struct pmem_device *pmem = bdev->bd_queue->queuedata;
        blk_status_t rc;

        rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
                          0, op, sector);

        /*
         * The ->rw_page interface is subtle and tricky.  The core
         * retries on any error, so we can only invoke page_endio() in
         * the successful completion case.  Otherwise, we'll see crashes
         * caused by double completion.
         */
        if (rc == 0)
                page_endio(page, op_is_write(op), 0);

        return blk_status_to_errno(rc);
}

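/*
 * Translate a page offset into the device to a kernel virtual address
 * and pfn, refusing ranges that overlap known media errors. Returns
 * the number of contiguous good pages reachable at *kaddr.
 */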
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
                long nr_pages, void **kaddr, pfn_t *pfn)
{
        resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

        if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
                                        PFN_PHYS(nr_pages))))
                return -EIO;

        if (kaddr)
                *kaddr = pmem->virt_addr + offset;
        if (pfn)
                *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

        /*
         * If badblocks are present, limit known good range to the
         * requested range.
         */
        if (unlikely(pmem->bb.count))
                return nr_pages;
        return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
        .owner = THIS_MODULE,
        .rw_page = pmem_rw_page,
        .revalidate_disk = nvdimm_revalidate_disk,
};

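/*
 * DAX operations: direct_access() for mapping, plus the iov_iter copy
 * routines. copy_from_iter_flushcache() keeps writes durable and
 * copy_to_iter_mcsafe() tolerates consumed poison on reads.
 */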
static long pmem_dax_direct_access(struct dax_device *dax_dev,
                pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
        struct pmem_device *pmem = dax_get_private(dax_dev);

        return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i)
{
        return copy_from_iter_flushcache(addr, bytes, i);
}

static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i)
{
        return copy_to_iter_mcsafe(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
        .direct_access = pmem_dax_direct_access,
        .copy_from_iter = pmem_copy_from_iter,
        .copy_to_iter = pmem_copy_to_iter,
};

static const struct attribute_group *pmem_attribute_groups[] = {
        &dax_attribute_group,
        NULL,
};

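/*
 * Teardown helpers. pmem_release_queue() and pmem_release_disk() run
 * as devm actions; pmem_freeze_queue() is the dev_pagemap kill
 * callback, which starts draining in-flight I/O (and page pins) via
 * the queue's percpu usage counter.
 */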
static void pmem_release_queue(void *q)
{
        blk_cleanup_queue(q);
}

static void pmem_freeze_queue(struct percpu_ref *ref)
{
        struct request_queue *q;

        q = container_of(ref, typeof(*q), q_usage_counter);
        blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
        struct pmem_device *pmem = __pmem;

        kill_dax(pmem->dax_dev);
        put_dax(pmem->dax_dev);
        del_gendisk(pmem->disk);
        put_disk(pmem->disk);
}

static void pmem_release_pgmap_ops(void *__pgmap)
{
        dev_pagemap_put_ops();
}

static void fsdax_pagefree(struct page *page, void *data)
{
        wake_up_var(&page->_refcount);
}

static int setup_pagemap_fsdax(struct device *dev, struct dev_pagemap *pgmap)
{
        dev_pagemap_get_ops();
        if (devm_add_action_or_reset(dev, pmem_release_pgmap_ops, pgmap))
                return -ENOMEM;
        pgmap->type = MEMORY_DEVICE_FS_DAX;
        pgmap->page_free = fsdax_pagefree;

        return 0;
}

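/*
 * Set up the block device and dax_device for a pmem namespace: map the
 * media (with struct pages when a pfn info block or page mapping is
 * in use), size the disk around any metadata padding, populate the
 * badblocks list, and register the gendisk.
 */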
static int pmem_attach_disk(struct device *dev,
                struct nd_namespace_common *ndns)
{
        struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);
        int nid = dev_to_node(dev), fua;
        struct resource *res = &nsio->res;
        struct resource bb_res;
        struct nd_pfn *nd_pfn = NULL;
        struct dax_device *dax_dev;
        struct nd_pfn_sb *pfn_sb;
        struct pmem_device *pmem;
        struct request_queue *q;
        struct device *gendev;
        struct gendisk *disk;
        void *addr;
        int rc;

        pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
        if (!pmem)
                return -ENOMEM;

        /* while nsio_rw_bytes is active, parse a pfn info block if present */
        if (is_nd_pfn(dev)) {
                nd_pfn = to_nd_pfn(dev);
                rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
                if (rc)
                        return rc;
        }

        /* we're attaching a block device, disable raw namespace access */
        devm_nsio_disable(dev, nsio);

        dev_set_drvdata(dev, pmem);
        pmem->phys_addr = res->start;
        pmem->size = resource_size(res);
        fua = nvdimm_has_flush(nd_region);
        if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
                dev_warn(dev, "unable to guarantee persistence of writes\n");
                fua = 0;
        }

        if (!devm_request_mem_region(dev, res->start, resource_size(res),
                                dev_name(&ndns->dev))) {
                dev_warn(dev, "could not reserve region %pR\n", res);
                return -EBUSY;
        }

        q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
        if (!q)
                return -ENOMEM;

        if (devm_add_action_or_reset(dev, pmem_release_queue, q))
                return -ENOMEM;

        pmem->pfn_flags = PFN_DEV;
        pmem->pgmap.ref = &q->q_usage_counter;
        pmem->pgmap.kill = pmem_freeze_queue;
        if (is_nd_pfn(dev)) {
                if (setup_pagemap_fsdax(dev, &pmem->pgmap))
                        return -ENOMEM;
                addr = devm_memremap_pages(dev, &pmem->pgmap);
                pfn_sb = nd_pfn->pfn_sb;
                pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
                pmem->pfn_pad = resource_size(res) -
                        resource_size(&pmem->pgmap.res);
                pmem->pfn_flags |= PFN_MAP;
                memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
                bb_res.start += pmem->data_offset;
        } else if (pmem_should_map_pages(dev)) {
                memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
                pmem->pgmap.altmap_valid = false;
                if (setup_pagemap_fsdax(dev, &pmem->pgmap))
                        return -ENOMEM;
                addr = devm_memremap_pages(dev, &pmem->pgmap);
                pmem->pfn_flags |= PFN_MAP;
                memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
        } else {
                addr = devm_memremap(dev, pmem->phys_addr,
                                pmem->size, ARCH_MEMREMAP_PMEM);
                memcpy(&bb_res, &nsio->res, sizeof(bb_res));
        }

        if (IS_ERR(addr))
                return PTR_ERR(addr);
        pmem->virt_addr = addr;

        blk_queue_write_cache(q, true, fua);
        blk_queue_make_request(q, pmem_make_request);
        blk_queue_physical_block_size(q, PAGE_SIZE);
        blk_queue_logical_block_size(q, pmem_sector_size(ndns));
        blk_queue_max_hw_sectors(q, UINT_MAX);
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
        if (pmem->pfn_flags & PFN_MAP)
                blk_queue_flag_set(QUEUE_FLAG_DAX, q);
        q->queuedata = pmem;

        disk = alloc_disk_node(0, nid);
        if (!disk)
                return -ENOMEM;
        pmem->disk = disk;

        disk->fops = &pmem_fops;
        disk->queue = q;
        disk->flags = GENHD_FL_EXT_DEVT;
        disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
        nvdimm_namespace_disk_name(ndns, disk->disk_name);
        set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
                        / 512);
        if (devm_init_badblocks(dev, &pmem->bb))
                return -ENOMEM;
        nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
        disk->bb = &pmem->bb;

        dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
        if (!dax_dev) {
                put_disk(disk);
                return -ENOMEM;
        }
        dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
        pmem->dax_dev = dax_dev;

        gendev = disk_to_dev(disk);
        gendev->groups = pmem_attribute_groups;

        device_add_disk(dev, disk, NULL);
        if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
                return -ENOMEM;

        revalidate_disk(disk);

        pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
                                          "badblocks");
        if (!pmem->bb_state)
                dev_warn(dev, "'badblocks' notification disabled\n");

        return 0;
}

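/*
 * Probe picks a personality for the namespace: btt and pfn/dax devices
 * attach directly; otherwise a scan for valid info-blocks decides
 * whether to re-probe as btt/pfn/dax or to attach as raw pmem.
 */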
static int nd_pmem_probe(struct device *dev)
{
        struct nd_namespace_common *ndns;

        ndns = nvdimm_namespace_common_probe(dev);
        if (IS_ERR(ndns))
                return PTR_ERR(ndns);

        if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
                return -ENXIO;

        if (is_nd_btt(dev))
                return nvdimm_namespace_attach_btt(ndns);

        if (is_nd_pfn(dev))
                return pmem_attach_disk(dev, ndns);

        /* if we find a valid info-block we'll come back as that personality */
        if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
                        || nd_dax_probe(dev, ndns) == 0)
                return -ENXIO;

        /* ...otherwise we're just a raw pmem device */
        return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
        struct pmem_device *pmem = dev_get_drvdata(dev);

        if (is_nd_btt(dev))
                nvdimm_namespace_detach_btt(to_nd_btt(dev));
        else {
                /*
                 * Note, this assumes device_lock() context to not race
                 * nd_pmem_notify()
                 */
                sysfs_put(pmem->bb_state);
                pmem->bb_state = NULL;
        }
        nvdimm_flush(to_nd_region(dev->parent));

        return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
        nvdimm_flush(to_nd_region(dev->parent));
}

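/*
 * NVDIMM_REVALIDATE_POISON: the bus found new media errors, so
 * re-derive the resource span for this personality (accounting for btt
 * indirection or pfn start_pad/end_trunc) and refresh the badblocks
 * list.
 */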
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
        struct nd_region *nd_region;
        resource_size_t offset = 0, end_trunc = 0;
        struct nd_namespace_common *ndns;
        struct nd_namespace_io *nsio;
        struct resource res;
        struct badblocks *bb;
        struct kernfs_node *bb_state;

        if (event != NVDIMM_REVALIDATE_POISON)
                return;

        if (is_nd_btt(dev)) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                ndns = nd_btt->ndns;
                nd_region = to_nd_region(ndns->dev.parent);
                nsio = to_nd_namespace_io(&ndns->dev);
                bb = &nsio->bb;
                bb_state = NULL;
        } else {
                struct pmem_device *pmem = dev_get_drvdata(dev);

                nd_region = to_region(pmem);
                bb = &pmem->bb;
                bb_state = pmem->bb_state;

                if (is_nd_pfn(dev)) {
                        struct nd_pfn *nd_pfn = to_nd_pfn(dev);
                        struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

                        ndns = nd_pfn->ndns;
                        offset = pmem->data_offset +
                                        __le32_to_cpu(pfn_sb->start_pad);
                        end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
                } else {
                        ndns = to_ndns(dev);
                }

                nsio = to_nd_namespace_io(&ndns->dev);
        }

        res.start = nsio->res.start + offset;
        res.end = nsio->res.end - end_trunc;
        nvdimm_badblocks_populate(nd_region, bb, &res);
        if (bb_state)
                sysfs_notify_dirent(bb_state);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
        .probe = nd_pmem_probe,
        .remove = nd_pmem_remove,
        .notify = nd_pmem_notify,
        .shutdown = nd_pmem_shutdown,
        .drv = {
                .name = "nd_pmem",
        },
        .type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");