/*
 * Persistent Memory Driver
 *
 * Copyright (c) 2014-2015, Intel Corporation.
 * Copyright (c) 2015, Christoph Hellwig <hch@lst.de>.
 * Copyright (c) 2015, Boaz Harrosh <boaz@plexistor.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
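
/*
 * Overview: pmem presents a persistent memory namespace as a conventional
 * block device.  I/O is serviced synchronously by memcpy (no DMA engine
 * involved), and the same mapping is exported as a dax_device so
 * filesystems can map persistent memory directly into userspace.
 */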

#include <asm/cacheflush.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/badblocks.h>
#include <linux/memremap.h>
#include <linux/vmalloc.h>
#include <linux/blk-mq.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/dax.h>
#include <linux/nd.h>
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static struct device *to_dev(struct pmem_device *pmem)
{
	/*
	 * nvdimm bus services need a 'dev' parameter, and we record the device
	 * at init in bb.dev.
	 */
	return pmem->bb.dev;
}

static struct nd_region *to_region(struct pmem_device *pmem)
{
	return to_nd_region(to_dev(pmem)->parent);
}
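
/*
 * Ask the nvdimm bus provider to clear media errors ("poison") in the
 * given range.  Sectors that were actually cleared are dropped from the
 * badblocks list, and the cachelines covering the range are invalidated
 * so stale data is not read back.
 */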
static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
		unsigned int len)
{
	struct device *dev = to_dev(pmem);
	sector_t sector;
	long cleared;
	int rc = 0;

	sector = (offset - pmem->data_offset) / 512;

	cleared = nvdimm_clear_poison(dev, pmem->phys_addr + offset, len);
	if (cleared < len)
		rc = -EIO;
	if (cleared > 0 && cleared / 512) {
		cleared /= 512;
		dev_dbg(dev, "%s: %#llx clear %ld sector%s\n", __func__,
				(unsigned long long) sector, cleared,
				cleared > 1 ? "s" : "");
		badblocks_clear(&pmem->bb, sector, cleared);
	}

	invalidate_pmem(pmem->virt_addr + offset, len);

	return rc;
}
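
/*
 * Copy helpers: a bvec page may live in highmem, so it is mapped with
 * kmap_atomic() around the copy.  Writes use memcpy_to_pmem() to push
 * the data toward the persistence domain (typically via non-temporal
 * stores on x86); reads use memcpy_mcsafe(), which returns an error
 * instead of taking a fatal machine check if poisoned media is
 * consumed mid-copy.
 */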
static void write_pmem(void *pmem_addr, struct page *page,
		unsigned int off, unsigned int len)
{
	void *mem = kmap_atomic(page);

	memcpy_to_pmem(pmem_addr, mem + off, len);
	kunmap_atomic(mem);
}

static int read_pmem(struct page *page, unsigned int off,
		void *pmem_addr, unsigned int len)
{
	int rc;
	void *mem = kmap_atomic(page);

	rc = memcpy_mcsafe(mem + off, pmem_addr, len);
	kunmap_atomic(mem);
	if (rc)
		return -EIO;
	return 0;
}
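
/*
 * Service one bio_vec segment.  Reads that intersect a known bad range
 * fail fast with -EIO; writes always proceed, since rewriting the range
 * is what allows the poison to be cleared.
 */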
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
		unsigned int len, unsigned int off, bool is_write,
		sector_t sector)
{
	int rc = 0;
	bool bad_pmem = false;
	phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
	void *pmem_addr = pmem->virt_addr + pmem_off;

	if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
		bad_pmem = true;

	if (!is_write) {
		if (unlikely(bad_pmem))
			rc = -EIO;
		else {
			rc = read_pmem(page, off, pmem_addr, len);
			flush_dcache_page(page);
		}
	} else {
		/*
		 * Note that we write the data both before and after
		 * clearing poison.  The write before clear poison
		 * handles situations where the latest written data is
		 * preserved and the clear poison operation simply marks
		 * the address range as valid without changing the data.
		 * In this case application software can assume that an
		 * interrupted write will either return the new good
		 * data or an error.
		 *
		 * However, if pmem_clear_poison() leaves the data in an
		 * indeterminate state we need to perform the write
		 * after clear poison.
		 */
		flush_dcache_page(page);
		write_pmem(pmem_addr, page, off, len);
		if (unlikely(bad_pmem)) {
			rc = pmem_clear_poison(pmem, pmem_off, len);
			write_pmem(pmem_addr, page, off, len);
		}
	}

	return rc;
}

/* account for REQ_FLUSH rename, replace with REQ_PREFLUSH after v4.8-rc1 */
#ifndef REQ_FLUSH
#define REQ_FLUSH REQ_PREFLUSH
#endif
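
/*
 * pmem is a bio-based driver: each bio is serviced synchronously by
 * memcpy, with no request queue or I/O scheduler in the path.  REQ_FLUSH
 * (REQ_PREFLUSH) and REQ_FUA are honored via nvdimm_flush(), which
 * typically writes to the region's flush-hint addresses to drain any
 * write-pending queues in the memory controller.  A failed segment sets
 * bio->bi_error and the remaining segments are skipped.
 */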
static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	int rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;
	struct nd_region *nd_region = to_region(pmem);

	if (bio->bi_opf & REQ_FLUSH)
		nvdimm_flush(nd_region);

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, op_is_write(bio_op(bio)),
				iter.bi_sector);
		if (rc) {
			bio->bi_error = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	if (bio->bi_opf & REQ_FUA)
		nvdimm_flush(nd_region);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int pmem_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, bool is_write)
{
	struct pmem_device *pmem = bdev->bd_queue->queuedata;
	int rc;

	rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, is_write, sector);

	/*
	 * The ->rw_page interface is subtle and tricky.  The core
	 * retries on any error, so we can only invoke page_endio() in
	 * the successful completion case.  Otherwise, we'll see crashes
	 * caused by double completion.
	 */
	if (rc == 0)
		page_endio(page, is_write, 0);

	return rc;
}
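
/*
 * DAX entry point: translate a page offset within the device into a
 * kernel virtual address and pfn.  Returns the number of contiguous
 * pages available at *kaddr; when any badblocks are registered the
 * result is clamped to the requested count, so a caller never assumes
 * more media is good than was actually checked.
 */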
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
		long nr_pages, void **kaddr, pfn_t *pfn)
{
	resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;

	if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
					PFN_PHYS(nr_pages))))
		return -EIO;
	*kaddr = pmem->virt_addr + offset;
	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);

	/*
	 * If badblocks are present, limit known good range to the
	 * requested range.
	 */
	if (unlikely(pmem->bb.count))
		return nr_pages;
	return PHYS_PFN(pmem->size - pmem->pfn_pad - offset);
}

static const struct block_device_operations pmem_fops = {
	.owner = THIS_MODULE,
	.rw_page = pmem_rw_page,
	.revalidate_disk = nvdimm_revalidate_disk,
};

static long pmem_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct pmem_device *pmem = dax_get_private(dax_dev);

	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}

static const struct dax_operations pmem_dax_ops = {
	.direct_access = pmem_dax_direct_access,
};
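
/*
 * Teardown helpers, registered with devm_add_action_or_reset() so they
 * run in reverse order of registration on release: the disk and
 * dax_device are torn down first, then the queue is frozen so no new
 * I/O can reference the mapping while devm_memremap_pages() is
 * unwound, and finally the queue itself is cleaned up.
 */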
static void pmem_release_queue(void *q)
{
	blk_cleanup_queue(q);
}

static void pmem_freeze_queue(void *q)
{
	blk_freeze_queue_start(q);
}

static void pmem_release_disk(void *__pmem)
{
	struct pmem_device *pmem = __pmem;

	kill_dax(pmem->dax_dev);
	put_dax(pmem->dax_dev);
	del_gendisk(pmem->disk);
	put_disk(pmem->disk);
}
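
/*
 * Common setup path for raw, pfn, and dax-backed pmem namespaces: map
 * the namespace (via devm_memremap_pages() when a struct page memmap is
 * needed, plain devm_memremap() otherwise), then register the gendisk
 * and its dax_device on top of the mapping.
 */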
static int pmem_attach_disk(struct device *dev,
		struct nd_namespace_common *ndns)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct vmem_altmap __altmap, *altmap = NULL;
	struct resource *res = &nsio->res;
	struct nd_pfn *nd_pfn = NULL;
	struct dax_device *dax_dev;
	int nid = dev_to_node(dev);
	struct nd_pfn_sb *pfn_sb;
	struct pmem_device *pmem;
	struct resource pfn_res;
	struct request_queue *q;
	struct gendisk *disk;
	void *addr;

	/* while nsio_rw_bytes is active, parse a pfn info block if present */
	if (is_nd_pfn(dev)) {
		nd_pfn = to_nd_pfn(dev);
		altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
		if (IS_ERR(altmap))
			return PTR_ERR(altmap);
	}

	/* we're attaching a block device, disable raw namespace access */
	devm_nsio_disable(dev, nsio);

	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
	if (!pmem)
		return -ENOMEM;

	dev_set_drvdata(dev, pmem);
	pmem->phys_addr = res->start;
	pmem->size = resource_size(res);
	if (nvdimm_has_flush(nd_region) < 0)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
	if (!q)
		return -ENOMEM;

	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
		return -ENOMEM;

	pmem->pfn_flags = PFN_DEV;
	if (is_nd_pfn(dev)) {
		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
				altmap);
		pfn_sb = nd_pfn->pfn_sb;
		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
		pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
		pmem->pfn_flags |= PFN_MAP;
		res = &pfn_res; /* for badblocks populate */
		res->start += pmem->data_offset;
	} else if (pmem_should_map_pages(dev)) {
		addr = devm_memremap_pages(dev, &nsio->res,
				&q->q_usage_counter, NULL);
		pmem->pfn_flags |= PFN_MAP;
	} else
		addr = devm_memremap(dev, pmem->phys_addr,
				pmem->size, ARCH_MEMREMAP_PMEM);

	/*
	 * At release time the queue must be frozen before
	 * devm_memremap_pages is unwound
	 */
	if (devm_add_action_or_reset(dev, pmem_freeze_queue, q))
		return -ENOMEM;

	if (IS_ERR(addr))
		return PTR_ERR(addr);
	pmem->virt_addr = addr;

	blk_queue_write_cache(q, true, true);
	blk_queue_make_request(q, pmem_make_request);
	blk_queue_physical_block_size(q, PAGE_SIZE);
	blk_queue_max_hw_sectors(q, UINT_MAX);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
	q->queuedata = pmem;

	disk = alloc_disk_node(0, nid);
	if (!disk)
		return -ENOMEM;
	pmem->disk = disk;

	disk->fops = &pmem_fops;
	disk->queue = q;
	disk->flags = GENHD_FL_EXT_DEVT;
	nvdimm_namespace_disk_name(ndns, disk->disk_name);
	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
			/ 512);
	if (devm_init_badblocks(dev, &pmem->bb))
		return -ENOMEM;
	nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
	disk->bb = &pmem->bb;

	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
	if (!dax_dev) {
		put_disk(disk);
		return -ENOMEM;
	}
	pmem->dax_dev = dax_dev;

	device_add_disk(dev, disk);
	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
		return -ENOMEM;

	revalidate_disk(disk);

	return 0;
}
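
/*
 * Probe resolves the device personality: btt and pfn devices attach
 * directly; otherwise the raw namespace is scanned for a btt, pfn, or
 * dax info block (returning -ENXIO so the matching personality can
 * re-probe), and only then attached as a plain pmem disk.
 */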
static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);

	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
		return -ENXIO;

	if (is_nd_btt(dev))
		return nvdimm_namespace_attach_btt(ndns);

	if (is_nd_pfn(dev))
		return pmem_attach_disk(dev, ndns);

	/* if we find a valid info-block we'll come back as that personality */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
			|| nd_dax_probe(dev, ndns) == 0)
		return -ENXIO;

	/* ...otherwise we're just a raw pmem device */
	return pmem_attach_disk(dev, ndns);
}

static int nd_pmem_remove(struct device *dev)
{
	if (is_nd_btt(dev))
		nvdimm_namespace_detach_btt(to_nd_btt(dev));
	nvdimm_flush(to_nd_region(dev->parent));

	return 0;
}

static void nd_pmem_shutdown(struct device *dev)
{
	nvdimm_flush(to_nd_region(dev->parent));
}
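
/*
 * Bus notification that the poison list may have changed (e.g. after an
 * address range scrub): recompute the namespace's data extent,
 * accounting for any btt backing device or pfn start_pad/end_trunc, and
 * re-populate the badblocks list from the region's poison records.
 */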
static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
{
	struct nd_region *nd_region;
	resource_size_t offset = 0, end_trunc = 0;
	struct nd_namespace_common *ndns;
	struct nd_namespace_io *nsio;
	struct resource res;
	struct badblocks *bb;

	if (event != NVDIMM_REVALIDATE_POISON)
		return;

	if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		ndns = nd_btt->ndns;
		nd_region = to_nd_region(ndns->dev.parent);
		nsio = to_nd_namespace_io(&ndns->dev);
		bb = &nsio->bb;
	} else {
		struct pmem_device *pmem = dev_get_drvdata(dev);

		nd_region = to_region(pmem);
		bb = &pmem->bb;

		if (is_nd_pfn(dev)) {
			struct nd_pfn *nd_pfn = to_nd_pfn(dev);
			struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;

			ndns = nd_pfn->ndns;
			offset = pmem->data_offset +
					__le32_to_cpu(pfn_sb->start_pad);
			end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
		} else {
			ndns = to_ndns(dev);
		}

		nsio = to_nd_namespace_io(&ndns->dev);
	}

	res.start = nsio->res.start + offset;
	res.end = nsio->res.end - end_trunc;
	nvdimm_badblocks_populate(nd_region, bb, &res);
}

MODULE_ALIAS("pmem");
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_IO);
MODULE_ALIAS_ND_DEVICE(ND_DEVICE_NAMESPACE_PMEM);
static struct nd_device_driver nd_pmem_driver = {
	.probe = nd_pmem_probe,
	.remove = nd_pmem_remove,
	.notify = nd_pmem_notify,
	.shutdown = nd_pmem_shutdown,
	.drv = {
		.name = "nd_pmem",
	},
	.type = ND_DRIVER_NAMESPACE_IO | ND_DRIVER_NAMESPACE_PMEM,
};

static int __init pmem_init(void)
{
	return nd_driver_register(&nd_pmem_driver);
}
module_init(pmem_init);

static void pmem_exit(void)
{
	driver_unregister(&nd_pmem_driver.drv);
}
module_exit(pmem_exit);

MODULE_AUTHOR("Ross Zwisler <ross.zwisler@linux.intel.com>");
MODULE_LICENSE("GPL v2");