// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 */

#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include "pmem.h"
#include "btt.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

static void hwpoison_clear(struct pmem_device *pmem,
		phys_addr_t phys, unsigned int len)
{
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests pin the dev_pagemap.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}

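/*
 * Poison clearing (summary of the code below): the bus provider is asked
 * to clear poison for the physical range, and a short reply is reported
 * as BLK_STS_IOERR.  The cleared range also drops its HWPoison page flags
 * (for linearly mapped pmem), is removed from the badblocks list in
 * 512-byte sector units with a sysfs 'badblocks' notification, and the
 * CPU cache over the range is invalidated so stale poisoned lines are not
 * consumed later.
 */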
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	if (cleared > 0 && cleared / 512) {
		hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
		cleared /= 512;
		dev_dbg(dev, "%#llx clear %ld sector%s\n",
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

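/*
 * Low-level copy helpers.  Bio pages may be highmem, so each chunk is
 * mapped with kmap_atomic() one PAGE_SIZE at a time.  Writes go through
 * memcpy_flushcache() so the data is pushed out of the CPU cache toward
 * the persistence domain; reads use the machine-check-safe
 * copy_mc_to_kernel() so consuming poison surfaces as BLK_STS_IOERR
 * instead of taking down the machine.
 */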
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
}

static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	unsigned long rem;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		rem = copy_mc_to_kernel(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rem)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
	return BLK_STS_OK;
}

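/*
 * Sector numbers from the block layer are 512-byte units relative to the
 * start of the data area; the byte offset into the mapping is
 * sector * 512 + data_offset, where data_offset accounts for any pfn
 * metadata reservation.  For example (illustrative numbers only), with a
 * 2 MiB data_offset, sector 8 lands at 8 * 512 + 2097152 = 2101248.
 */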
static blk_status_t pmem_do_read(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		return BLK_STS_IOERR;

	rc = read_pmem(page, page_off, pmem_addr, len);
	flush_dcache_page(page);
	return rc;
}

static blk_status_t pmem_do_write(struct pmem_device *pmem,
			struct page *page, unsigned int page_off,
			sector_t sector, unsigned int len)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	/*
	 * Note that we write the data both before and after
	 * clearing poison.  The write before clear poison
	 * handles situations where the latest written data is
	 * preserved and the clear poison operation simply marks
	 * the address range as valid without changing the data.
	 * In this case application software can assume that an
	 * interrupted write will either return the new good
	 * data or an error.
	 *
	 * However, if pmem_clear_poison() leaves the data in an
	 * indeterminate state we need to perform the write
	 * after clear poison.
	 */
	flush_dcache_page(page);
	write_pmem(pmem_addr, page, page_off, len);
	if (unlikely(bad_pmem)) {
		rc = pmem_clear_poison(pmem, pmem_off, len);
		write_pmem(pmem_addr, page, page_off, len);
	}

	return rc;
}

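/*
 * Bio submission path: I/O is completed synchronously in the submitting
 * context, one segment at a time, via pmem_do_read()/pmem_do_write().
 * REQ_PREFLUSH triggers an nvdimm_flush() before the data is copied and
 * REQ_FUA triggers one afterwards; a flush failure is folded into
 * bi_status before the bio is completed.
 */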
static blk_qc_t pmem_submit_bio(struct bio *bio)
{
	int ret = 0;
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = bio->bi_bdev->bd_disk->private_data;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);

	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		if (op_is_write(bio_op(bio)))
			rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		else
			rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
				iter.bi_sector, bvec.bv_len);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	if (bio->bi_opf & REQ_FUA)
		ret = nvdimm_flush(nd_region, bio);

	if (ret)
		bio->bi_status = errno_to_blk_status(ret);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	struct pmem_device *pmem = bdev->bd_disk->private_data;
	blk_status_t rc;

	if (op_is_write(op))
		rc = pmem_do_write(pmem, page, 0, sector, thp_size(page));
	else
		rc = pmem_do_read(pmem, page, 0, sector, thp_size(page));
	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, op_is_write(op), 0);

	return blk_status_to_errno(rc);
}

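/*
 * DAX direct_access implementation: reports how many pages starting at
 * @pgoff can be accessed directly and optionally returns the kernel
 * virtual address and pfn for that offset.  Known-poisoned ranges fail
 * with -EIO, and when any badblocks exist the answer is capped at the
 * requested nr_pages so callers revalidate as they advance.
 */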
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.submit_bio =		pmem_submit_bio,
	.rw_page =		pmem_rw_page,
};

static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
				    size_t nr_pages)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0,
				   PFN_PHYS(pgoff) >> SECTOR_SHIFT,
				   PAGE_SIZE));
}

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

/*
 * Use the 'no check' versions of copy_from_iter_flushcache() and
 * copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
 * checking, both file offset and device offset, is handled by
 * dax_iomap_actor()
 */
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_from_iter_flushcache(addr, bytes, i);
}

static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_mc_to_iter(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.dax_supported = generic_fsdax_supported,
	.copy_from_iter = pmem_copy_from_iter,
	.copy_to_iter = pmem_copy_to_iter,
	.zero_page_range = pmem_dax_zero_page_range,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	struct pmem_device *pmem = pgmap->owner;

	blk_cleanup_disk(pmem->disk);
}

static void pmem_release_queue(void *pgmap)
{
	pmem_pagemap_cleanup(pgmap);
}

static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
}

static const struct dev_pagemap_ops fsdax_pagemap_ops = {
	.kill			= pmem_pagemap_kill,
	.cleanup		= pmem_pagemap_cleanup,
};

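/*
 * Device setup.  The namespace is mapped in one of three ways: an nd_pfn
 * claimed namespace gets struct pages via devm_memremap_pages() with the
 * data offset and padding taken from the pfn superblock; a namespace that
 * should be mapped with struct pages but has no pfn superblock gets
 * devm_memremap_pages() over the whole resource; otherwise the range is
 * plainly memremap()ed and, without PFN_MAP, QUEUE_FLAG_DAX is never
 * advertised.
 */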
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua;
	struct resource *res = &nsio->res;
	struct range bb_range;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct gendisk *disk;
	void *addr;
	int rc;
	unsigned long flags = 0UL;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	rc = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (rc)
		return rc;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_namespace_disable(dev, ndns);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	disk = blk_alloc_disk(nid);
	if (!disk)
		return -ENOMEM;
	q = disk->queue;

	pmem->disk = disk;
	pmem->pgmap.owner = pmem;
	pmem->pfn_flags = PFN_DEV;
	pmem->pgmap.ref = &q->q_usage_counter;
	if (is_nd_pfn(dev)) {
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			range_len(&pmem->pgmap.range);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
		bb_range.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		pmem->pgmap.range.start = res->start;
		pmem->pgmap.range.end = res->end;
		pmem->pgmap.nr_range = 1;
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		bb_range = pmem->pgmap.range;
	} else {
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
		if (devm_add_action_or_reset(dev, pmem_release_queue,
					&pmem->pgmap))
			return -ENOMEM;
		bb_range.start = res->start;
		bb_range.end = res->end;
	}

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, fua);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	if (pmem->pfn_flags & PFN_MAP)
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);

	disk->fops = &pmem_fops;
	disk->private_data = pmem;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
	disk->bb = &pmem->bb;

	if (is_nvdimm_sync(nd_region))
		flags = DAXDEV_F_SYNC;
	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
	if (IS_ERR(dax_dev))
		return PTR_ERR(dax_dev);
	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	pmem->dax_dev = dax_dev;

	device_add_disk(dev, disk, pmem_attribute_groups);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	nvdimm_check_and_set_ro(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");

	return 0;
}

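/*
 * Probe dispatch: namespaces already claimed by a BTT or PFN personality
 * attach their disk directly.  An unclaimed namespace is offered to the
 * BTT, PFN, and DAX personalities in turn; a 0 return means the claim
 * succeeded and this probe bows out with -ENXIO so the claiming device
 * can take over, while -EOPNOTSUPP is passed straight up.  If nothing
 * claims it, the namespace is attached as a raw pmem disk.
 */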
static int nd_pmem_probe(struct device *dev)
{
	int ret;
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	ret = devm_namespace_enable(dev, ndns, nd_info_block_reserve());
	if (ret)
		return ret;

	ret = nd_btt_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;

	/*
	 * We have two failure conditions here: either there is no info
	 * reserve block, or we found a valid info reserve block but
	 * failed to initialize the pfn superblock.
	 *
	 * For the first case, consider the namespace a raw pmem namespace
	 * and attach a disk.
	 *
	 * For the latter, consider this a success and advance the
	 * namespace seed.
	 */
	ret = nd_pfn_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	ret = nd_dax_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	/* probe complete, attach handles namespace enabling */
	devm_namespace_disable(dev, ndns);

	return pmem_attach_disk(dev, ndns);
}

static void nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes nd_device_lock() context to not
		 * race nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

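/*
 * NVDIMM_REVALIDATE_POISON handling: recompute the badblocks list when the
 * bus reports new poison.  For a BTT device the backing namespace's
 * badblocks are used; for a pfn-backed device the metadata start_pad and
 * end_trunc are folded into the range so only user data is scanned.  A
 * registered 'badblocks' sysfs dirent, if any, is notified afterwards.
 */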
static void pmem_revalidate_poison(struct device *dev)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct badblocks *bb;
	struct range range;
	struct kernfs_node *bb_state;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	range.start = nsio->res.start + offset;
	range.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &range);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

static void pmem_revalidate_region(struct device *dev)
{
	struct pmem_device *pmem;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);
		struct btt *btt = nd_btt->btt;

		nvdimm_check_and_set_ro(btt->btt_disk);
		return;
	}

	pmem = dev_get_drvdata(dev);
	nvdimm_check_and_set_ro(pmem->disk);
}

static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	switch (event) {
	case NVDIMM_REVALIDATE_POISON:
		pmem_revalidate_poison(dev);
		break;
	case NVDIMM_REVALIDATE_REGION:
		pmem_revalidate_region(dev);
		break;
	default:
		dev_WARN_ONCE(dev, 1, "notify: unknown event: %d\n", event);
		break;
	}
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");