/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#ifndef __ND_H__
#define __ND_H__
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/types.h>
#include <linux/nd.h>
#include "label.h"
enum {
	/*
	 * Limits the maximum number of block apertures a dimm can
	 * support and is an input to the geometry/on-disk-format of a
	 * BTT instance
	 */
	ND_MAX_LANES = 256,
	SECTOR_SHIFT = 9,
	INT_LBASIZE_ALIGNMENT = 64,
};

struct nd_poison {
	u64 start;
	u64 length;
	struct list_head list;
};
struct nvdimm_drvdata {
	struct device *dev;
	int nsindex_size;
	struct nd_cmd_get_config_size nsarea;
	void *data;
	int ns_current, ns_next;
	struct resource dpa;
	struct kref kref;
};

struct nd_region_data {
	int ns_count;
	int ns_active;
	unsigned int hints_shift;
	void __iomem *flush_wpq[0];
};
static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
		int dimm, int hint)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	return ndrd->flush_wpq[dimm * num + (hint & mask)];
}

static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
		int hint, void __iomem *flush)
{
	unsigned int num = 1 << ndrd->hints_shift;
	unsigned int mask = num - 1;

	ndrd->flush_wpq[dimm * num + (hint & mask)] = flush;
}
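
/*
 * Worked example (illustrative only): flush_wpq[] is a flat table with
 * one row of (1 << hints_shift) write-pump-queue hint mappings per dimm.
 * With hints_shift == 2 (four hints per dimm), dimm 1 / hint 5 lands in
 * slot 1 * 4 + (5 & 3) == 6, i.e. out-of-range hints wrap within that
 * dimm's row rather than spilling into a neighbor's.
 */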
static inline struct nd_namespace_index *to_namespace_index(
		struct nvdimm_drvdata *ndd, int i)
{
	if (i < 0)
		return NULL;

	return ndd->data + sizeof_namespace_index(ndd) * i;
}

static inline struct nd_namespace_index *to_current_namespace_index(
		struct nvdimm_drvdata *ndd)
{
	return to_namespace_index(ndd, ndd->ns_current);
}

static inline struct nd_namespace_index *to_next_namespace_index(
		struct nvdimm_drvdata *ndd)
{
	return to_namespace_index(ndd, ndd->ns_next);
}
#define nd_dbg_dpa(r, d, res, fmt, arg...) \
	dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \
		(r) ? dev_name((d)->dev) : "", res ? res->name : "null", \
		(unsigned long long) (res ? resource_size(res) : 0), \
		(unsigned long long) (res ? res->start : 0), ##arg)

#define for_each_dpa_resource(ndd, res) \
	for (res = (ndd)->dpa.child; res; res = res->sibling)

#define for_each_dpa_resource_safe(ndd, res, next) \
	for (res = (ndd)->dpa.child, next = res ? res->sibling : NULL; \
			res; res = next, next = next ? next->sibling : NULL)
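
/*
 * Illustrative use of the DPA walkers above (sketch, not from this file):
 * the _safe variant caches ->sibling up front so the current resource may
 * be freed while iterating, e.g.
 *
 *	struct resource *res, *next;
 *
 *	for_each_dpa_resource_safe(ndd, res, next)
 *		if (strcmp(res->name, label_id.id) == 0)
 *			nvdimm_free_dpa(ndd, res);
 */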
struct nd_percpu_lane {
	int count;
	spinlock_t lock;
};

struct nd_label_ent {
	struct list_head list;
	struct nd_namespace_label *label;
};

enum nd_mapping_lock_class {
	ND_MAPPING_CLASS0,
	ND_MAPPING_UUID_SCAN,
};

struct nd_mapping {
	struct nvdimm *nvdimm;
	u64 start;
	u64 size;
	struct list_head labels;
	struct mutex lock;
	/*
	 * @ndd is for private use at region enable / disable time for
	 * get_ndd() + put_ndd(), all other nd_mapping to ndd
	 * conversions use to_ndd() which respects enabled state of the
	 * nvdimm.
	 */
	struct nvdimm_drvdata *ndd;
};
struct nd_region {
	struct device dev;
	struct device *ns_seed;
	struct device *btt_seed;
	struct device *pfn_seed;
	struct device *dax_seed;
	u16 ndr_mappings;
	u64 ndr_size;
	u64 ndr_start;
	int id, num_lanes, ro, numa_node;
	void *provider_data;
	struct nd_interleave_set *nd_set;
	struct nd_percpu_lane __percpu *lane;
	struct nd_mapping mapping[0];
};
struct nd_blk_region {
	int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
	int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
			void *iobuf, u64 len, int rw);
	void *blk_provider_data;
	struct nd_region nd_region;
};

/*
 * Lookup next in the repeating sequence of 01, 10, and 11.
 */
static inline unsigned nd_inc_seq(unsigned seq)
{
	static const unsigned next[] = { 0, 2, 3, 1 };

	return next[seq & 3];
}
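
/*
 * Worked example: starting from sequence number 1, successive calls
 * return 2, 3, 1, 2, ... (i.e. 01 -> 10 -> 11 -> 01), while 0 maps back
 * to 0, so an uninitialized sequence number stays uninitialized.
 */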
struct nd_btt {
	struct device dev;
	struct nd_namespace_common *ndns;
	unsigned long lbasize;
	u8 *uuid;
	int id;
};

enum nd_pfn_mode {
	PFN_MODE_NONE,
	PFN_MODE_RAM,
	PFN_MODE_PMEM,
};

struct nd_pfn {
	struct device dev;
	u8 *uuid;
	enum nd_pfn_mode mode;
	struct nd_pfn_sb *pfn_sb;
	struct nd_namespace_common *ndns;
};

struct nd_dax {
	struct nd_pfn nd_pfn;
};

enum nd_async_mode {
	ND_SYNC,
	ND_ASYNC,
};
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size);
void wait_nvdimm_bus_probe_idle(struct device *dev);
void nd_device_register(struct device *dev);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
void nd_device_notify(struct device *dev, enum nvdimm_event event);
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
		size_t len);
ssize_t nd_sector_size_show(unsigned long current_lbasize,
		const unsigned long *supported, char *buf);
ssize_t nd_sector_size_store(struct device *dev, const char *buf,
		unsigned long *current_lbasize, const unsigned long *supported);
int __init nvdimm_init(void);
int __init nd_region_init(void);
void nvdimm_exit(void);
void nd_region_exit(void);
struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
int nvdimm_check_config_data(struct device *dev);
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
		unsigned int len);
void nvdimm_set_aliasing(struct device *dev);
void nvdimm_set_locked(struct device *dev);
struct nd_btt *to_nd_btt(struct device *dev);
struct nd_gen_sb {
	char reserved[SZ_4K - 8];
	__le64 checksum;
};

u64 nd_sb_checksum(struct nd_gen_sb *sb);
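
/*
 * Descriptive note (summarizing behavior, not part of this header):
 * nd_sb_checksum() computes a Fletcher64 sum over the 4K superblock,
 * temporarily zeroing the checksum field so the stored value does not
 * influence the result.
 */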
#if IS_ENABLED(CONFIG_BTT)
int nd_btt_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_btt(struct device *dev);
struct device *nd_btt_create(struct nd_region *nd_region);
#else
static inline int nd_btt_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_btt(struct device *dev)
{
	return false;
}

static inline struct device *nd_btt_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif
struct nd_pfn *to_nd_pfn(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_PFN)
int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_pfn(struct device *dev);
struct device *nd_pfn_create(struct nd_region *nd_region);
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
		struct nd_namespace_common *ndns);
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig);
extern struct attribute_group nd_pfn_attribute_group;
#else
static inline int nd_pfn_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_pfn(struct device *dev)
{
	return false;
}

static inline struct device *nd_pfn_create(struct nd_region *nd_region)
{
	return NULL;
}

static inline int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
	return -ENODEV;
}
#endif
struct nd_dax *to_nd_dax(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_DAX)
int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_dax(struct device *dev);
struct device *nd_dax_create(struct nd_region *nd_region);
#else
static inline int nd_dax_probe(struct device *dev,
		struct nd_namespace_common *ndns)
{
	return -ENODEV;
}

static inline bool is_nd_dax(struct device *dev)
{
	return false;
}

static inline struct device *nd_dax_create(struct nd_region *nd_region)
{
	return NULL;
}
#endif
struct nd_region *to_nd_region(struct device *dev);
int nd_region_to_nstype(struct nd_region *nd_region);
int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region);
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev);
bool is_nvdimm_bus_locked(struct device *dev);
int nvdimm_revalidate_disk(struct gendisk *disk);
void nvdimm_drvdata_release(struct kref *kref);
void put_ndd(struct nvdimm_drvdata *ndd);
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd);
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res);
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n);
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name);
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct resource *res);
#if IS_ENABLED(CONFIG_ND_CLAIM)
struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
		struct resource *res, struct vmem_altmap *altmap);
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
#else
static inline struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
		struct resource *res, struct vmem_altmap *altmap)
{
	return ERR_PTR(-ENXIO);
}

static inline int devm_nsio_enable(struct device *dev,
		struct nd_namespace_io *nsio)
{
	return -ENXIO;
}

static inline void devm_nsio_disable(struct device *dev,
		struct nd_namespace_io *nsio)
{
}
#endif
int nd_blk_region_init(struct nd_region *nd_region);
int nd_region_activate(struct nd_region *nd_region);
void __nd_iostat_start(struct bio *bio, unsigned long *start);
static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	if (!blk_queue_io_stat(disk->queue))
		return false;

	*start = jiffies;
	generic_start_io_acct(bio_data_dir(bio),
			bio_sectors(bio), &disk->part0);

	return true;
}

static inline void nd_iostat_end(struct bio *bio, unsigned long start)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;

	generic_end_io_acct(bio_data_dir(bio), &disk->part0, start);
}

static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
		unsigned int len)
{
	if (bb->count) {
		sector_t first_bad;
		int num_bad;

		return !!badblocks_check(bb, sector, len / 512, &first_bad,
				&num_bad);
	}

	return false;
}
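
/*
 * Worked example: a 4K access at sector 8 checks 4096 / 512 == 8 sectors
 * starting at 8, so is_bad_pmem(bb, 8, SZ_4K) is true if any of sectors
 * 8-15 appear in the region's badblocks list.
 */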
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
const u8 *nd_dev_to_uuid(struct device *dev);
bool pmem_should_map_pages(struct device *dev);
#endif /* __ND_H__ */