libnvdimm/altmap: Track namespace boundaries in altmap
[linux-2.6-block.git] / drivers/nvdimm/pmem.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 */

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/set_memory.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"
#include "nd-core.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}

static void hwpoison_clear(struct pmem_device *pmem,
		phys_addr_t phys, unsigned int len)
{
	unsigned long pfn_start, pfn_end, pfn;

	/* only pmem in the linear map supports HWPoison */
	if (is_vmalloc_addr(pmem->virt_addr))
		return;

	pfn_start = PHYS_PFN(phys);
	pfn_end = pfn_start + PHYS_PFN(len);
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/*
		 * Note, no need to hold a get_dev_pagemap() reference
		 * here since we're in the driver I/O path and
		 * outstanding I/O requests pin the dev_pagemap.
		 */
		if (test_and_clear_pmem_poison(page))
			clear_mce_nospec(pfn);
	}
}

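/*
 * Ask the nvdimm bus to clear poison in @len bytes at @offset, then
 * update the badblocks accounting and invalidate any cached copies of
 * the range. Returns BLK_STS_IOERR if fewer than @len bytes were
 * cleared.
 */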
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
		phys_addr_t offset, unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	blk_status_t rc = BLK_STS_OK;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = BLK_STS_IOERR;
	if (cleared > 0 && cleared / 512) {
		hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
		cleared /= 512;
		dev_dbg(dev, "%#llx clear %ld sector%s\n",
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
		if (pmem->bb_state)
			sysfs_notify_dirent(pmem->bb_state);
	}

	arch_invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}

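/*
 * Copy @len bytes from @page (starting at @off) to pmem using
 * cache-flushing stores, mapping one page at a time so highmem pages
 * are handled correctly.
 */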
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	unsigned int chunk;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		memcpy_flushcache(pmem_addr, mem + off, chunk);
		kunmap_atomic(mem);
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
}

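/*
 * Copy @len bytes from pmem to @page with the machine-check-safe
 * memcpy so that consumed poison surfaces as BLK_STS_IOERR instead of
 * a fatal machine check.
 */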
static blk_status_t read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	unsigned int chunk;
	unsigned long rem;
	void *mem;

	while (len) {
		mem = kmap_atomic(page);
		chunk = min_t(unsigned int, len, PAGE_SIZE - off);
		rem = memcpy_mcsafe(mem + off, pmem_addr, chunk);
		kunmap_atomic(mem);
		if (rem)
			return BLK_STS_IOERR;
		len -= chunk;
		off = 0;
		page++;
		pmem_addr += chunk;
	}
	return BLK_STS_OK;
}

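/*
 * Service a single bio_vec segment: read or write @len bytes at
 * @sector, consulting the badblocks list so reads of known-poisoned
 * ranges fail fast and writes get a chance to clear the poison.
 */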
static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
			unsigned int len, unsigned int off, unsigned int op,
			sector_t sector)
{
	blk_status_t rc = BLK_STS_OK;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (!op_is_write(op)) {
		if (unlikely(bad_pmem))
			rc = BLK_STS_IOERR;
		else {
			rc = read_pmem(page, off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		write_pmem(pmem_addr, page, off, len);
		if (unlikely(bad_pmem)) {
			rc = pmem_clear_poison(pmem, pmem_off, len);
			write_pmem(pmem_addr, page, off, len);
		}
	}

	return rc;
}

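/*
 * Bio submission entry point: walk the bio segment by segment,
 * honoring REQ_PREFLUSH before and REQ_FUA after the data transfer
 * via nvdimm_flush().
 */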
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int ret = 0;
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_PREFLUSH)
		ret = nvdimm_flush(nd_region, bio);

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_op(bio), iter.bi_sector);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio->bi_opf & REQ_FUA)
		ret = nvdimm_flush(nd_region, bio);

	if (ret)
		bio->bi_status = errno_to_blk_status(ret);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

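/*
 * Synchronous single-page I/O path, reached via bdev_read_page() and
 * bdev_write_page() (e.g. the swap code).
 */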
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		       struct page *page, unsigned int op)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	blk_status_t rc;

	rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
			  0, op, sector);

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, op_is_write(op), 0);

	return blk_status_to_errno(rc);
}

/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;

	if (kaddr)
		*kaddr = pmem->virt_addr + offset;
	if (pfn)
		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		pmem_rw_page,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

/*
 * Use the 'no check' versions of copy_from_iter_flushcache() and
 * copy_to_iter_mcsafe() to bypass HARDENED_USERCOPY overhead. Bounds
 * checking, both file offset and device offset, is handled by
 * dax_iomap_actor()
 */
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_from_iter_flushcache(addr, bytes, i);
}

static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i)
{
	return _copy_to_iter_mcsafe(addr, bytes, i);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
	.dax_supported = generic_fsdax_supported,
	.copy_from_iter = pmem_copy_from_iter,
	.copy_to_iter = pmem_copy_to_iter,
};

static const struct attribute_group *pmem_attribute_groups[] = {
	&dax_attribute_group,
	NULL,
};

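/*
 * The dev_pagemap for this device borrows the request_queue's
 * q_usage_counter as its reference count (see pmem_attach_disk()), so
 * killing or cleaning up the pagemap translates into freezing or
 * tearing down the queue.
 */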
static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_cleanup_queue(q);
}

static void pmem_release_queue(void *pgmap)
{
	pmem_pagemap_cleanup(pgmap);
}

static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
{
	struct request_queue *q =
		container_of(pgmap->ref, struct request_queue, q_usage_counter);

	blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}

static void pmem_pagemap_page_free(struct page *page)
{
	wake_up_var(&page->_refcount);
}

static const struct dev_pagemap_ops fsdax_pagemap_ops = {
	.page_free		= pmem_pagemap_page_free,
	.kill			= pmem_pagemap_kill,
	.cleanup		= pmem_pagemap_cleanup,
};

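/*
 * Map the namespace, via devm_memremap_pages() when struct page
 * support was requested, then wire up the request queue, gendisk,
 * badblocks list, and dax_device, and finally register the disk.
 */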
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int nid = dev_to_node(dev), fua;
	struct resource *res = &nsio->res;
	struct resource bb_res;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct request_queue *q;
	struct device *gendev;
	struct gendisk *disk;
	void *addr;
	int rc;
	unsigned long flags = 0UL;

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
		if (rc)
			return rc;
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	fua = nvdimm_has_flush(nd_region);
	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
		dev_warn(dev, "unable to guarantee persistence of writes\n");
		fua = 0;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	pmem->pgmap.ref = &q->q_usage_counter;
	if (is_nd_pfn(dev)) {
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) -
			resource_size(&pmem->pgmap.res);
		pmem->pfn_flags |= PFN_MAP;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
		bb_res.start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
		pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
		pmem->pgmap.ops = &fsdax_pagemap_ops;
		addr = devm_memremap_pages(dev, &pmem->pgmap);
		pmem->pfn_flags |= PFN_MAP;
		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
	} else {
		if (devm_add_action_or_reset(dev, pmem_release_queue,
					&pmem->pgmap))
			return -ENOMEM;
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);
		memcpy(&bb_res, &nsio->res, sizeof(bb_res));
	}

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, fua);
	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	if (pmem->pfn_flags & PFN_MAP)
		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
	disk->bb = &pmem->bb;

	if (is_nvdimm_sync(nd_region))
		flags = DAXDEV_F_SYNC;
	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
	if (!dax_dev) {
		put_disk(disk);
		return -ENOMEM;
	}
	dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
	pmem->dax_dev = dax_dev;
	gendev = disk_to_dev(disk);
	gendev->groups = pmem_attribute_groups;

	device_add_disk(dev, disk, NULL);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	revalidate_disk(disk);

	pmem->bb_state = sysfs_get_dirent(disk_to_dev(disk)->kobj.sd,
					  "badblocks");
	if (!pmem->bb_state)
		dev_warn(dev, "'badblocks' notification disabled\n");

	return 0;
}

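/*
 * Probe order: hand the namespace to the BTT, pfn, or dax personality
 * if one claims it; otherwise attach it as a raw pmem disk.
 */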
static int nd_pmem_probe(struct device *dev)
{
	int ret;
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	ret = nd_btt_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;

	/*
	 * We have two failure conditions here: there is no
	 * info reserve block, or we found a valid info reserve block
	 * but failed to initialize the pfn superblock.
	 *
	 * For the first case consider namespace as a raw pmem namespace
	 * and attach a disk.
	 *
	 * For the latter, consider this a success and advance the namespace
	 * seed.
	 */
	ret = nd_pfn_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;

	ret = nd_dax_probe(dev, ndns);
	if (ret == 0)
		return -ENXIO;
	else if (ret == -EOPNOTSUPP)
		return ret;
	return pmem_attach_disk(dev, ndns);
}

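/*
 * Flushing on remove (and shutdown, below) pushes any outstanding
 * writes in the platform write queues out to media before the device
 * goes away.
 */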
static int nd_pmem_remove(struct device *dev)
{
	struct pmem_device *pmem = dev_get_drvdata(dev);

	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	else {
		/*
		 * Note, this assumes nd_device_lock() context to not
		 * race nd_pmem_notify()
		 */
		sysfs_put(pmem->bb_state);
		pmem->bb_state = NULL;
	}
	nvdimm_flush(to_nd_region(dev->parent), NULL);

	return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent), NULL);
}

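/*
 * NVDIMM_REVALIDATE_POISON handler: re-populate the badblocks list
 * from the region, adjusting for any pfn info block data offset and
 * padding, and notify the sysfs 'badblocks' attribute when one is
 * registered.
 */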
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;
	struct badblocks *bb;
	struct kernfs_node *bb_state;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
		bb_state = NULL;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;
		bb_state = pmem->bb_state;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &res);
	if (bb_state)
		sysfs_notify_dirent(bb_state);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

module_nd_driver(nd_pmem_driver);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");