// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt)	"papr-scm: " fmt

#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/libnvdimm.h>
#include <linux/platform_device.h>
#include <linux/delay.h>

#include <asm/plpar_wrappers.h>

/* Bind at any address: let the hypervisor pick where the volume is mapped */
#define BIND_ANY_ADDR (~0ul)

#define PAPR_SCM_DIMM_CMD_MASK \
	((1ul << ND_CMD_GET_CONFIG_SIZE) | \
	 (1ul << ND_CMD_GET_CONFIG_DATA) | \
	 (1ul << ND_CMD_SET_CONFIG_DATA))
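
/*
 * Per-volume driver private data, populated from the device tree by
 * papr_scm_probe() and passed to libnvdimm as provider data.
 */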
struct papr_scm_priv {
	struct platform_device *pdev;
	struct device_node *dn;
	uint32_t drc_index;
	uint64_t blocks;
	uint64_t block_size;
	int metadata_size;
	bool is_volatile;

	uint64_t bound_addr;

	struct nvdimm_bus_descriptor bus_desc;
	struct nvdimm_bus *bus;
	struct nvdimm *nvdimm;
	struct resource res;
	struct nd_region *region;
	struct nd_interleave_set nd_set;
};
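
/*
 * Ask the hypervisor (via H_SCM_BIND_MEM) to map the SCM volume identified
 * by p->drc_index into the guest physical address space, and remember the
 * address it was bound to.
 */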
static int drc_pmem_bind(struct papr_scm_priv *p)
{
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	uint64_t saved = 0;
	uint64_t token;
	int64_t rc;

	/*
	 * When the hypervisor cannot map all the requested memory in a single
	 * hcall it returns H_BUSY and we call again with the token until
	 * we get H_SUCCESS. Aborting the retry loop before getting H_SUCCESS
	 * leaves the system in an undefined state, so we wait.
	 */
	token = 0;

	do {
		rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
				p->blocks, BIND_ANY_ADDR, token);
		token = ret[0];
		if (!saved)
			saved = ret[1];
		cond_resched();
	} while (rc == H_BUSY);

	if (rc) {
		/* H_OVERLAP needs a separate error path */
		if (rc == H_OVERLAP)
			return -EBUSY;

		dev_err(&p->pdev->dev, "bind err: %lld\n", rc);
		return -ENXIO;
	}

	p->bound_addr = saved;
	dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);

	return 0;
}

static int drc_pmem_unbind(struct papr_scm_priv *p)
{
	unsigned long ret[PLPAR_HCALL_BUFSIZE];
	uint64_t token = 0;
	int64_t rc;

	dev_dbg(&p->pdev->dev, "unbind drc %x\n", p->drc_index);

	/* NB: unbind has the same retry requirements as drc_pmem_bind() */
	do {
		/* Unbind of all SCM resources associated with drcIndex */
		rc = plpar_hcall(H_SCM_UNBIND_ALL, ret, H_UNBIND_SCOPE_DRC,
				 p->drc_index, token);
		token = ret[0];

		/* Check if we are stalled for some time */
		if (H_IS_LONG_BUSY(rc)) {
			msleep(get_longbusy_msecs(rc));
			rc = H_BUSY;
		} else if (rc == H_BUSY) {
			cond_resched();
		}
	} while (rc == H_BUSY);

	if (rc != H_SUCCESS)
		dev_err(&p->pdev->dev, "unbind error: %lld\n", rc);
	else
		dev_dbg(&p->pdev->dev, "unbind drc %x complete\n",
			p->drc_index);

	return rc == H_SUCCESS ? 0 : -ENXIO;
}
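
/*
 * Read a chunk of the DIMM's config/label area. H_SCM_READ_METADATA moves
 * at most 8 bytes per call, so the request is split into the largest
 * 8/4/2/1 byte reads the remaining length allows.
 */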
static int papr_scm_meta_get(struct papr_scm_priv *p,
		struct nd_cmd_get_config_data_hdr *hdr)
{
	unsigned long data[PLPAR_HCALL_BUFSIZE];
	unsigned long offset, data_offset;
	int len, read;
	int64_t ret;

	/* reads must stay within the label area (up to and including its end) */
	if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
		return -EINVAL;

	for (len = hdr->in_length; len; len -= read) {
		data_offset = hdr->in_length - len;
		offset = hdr->in_offset + data_offset;

		/* largest transfer size the remaining length allows */
		if (len >= 8)
			read = 8;
		else if (len >= 4)
			read = 4;
		else if (len >= 2)
			read = 2;
		else
			read = 1;

		ret = plpar_hcall(H_SCM_READ_METADATA, data, p->drc_index,
				  offset, read);
		if (ret == H_PARAMETER) /* bad DRC index */
			return -ENODEV;
		if (ret)
			return -EINVAL; /* other invalid parameter */

		switch (read) {
		case 8:
			*(uint64_t *)(hdr->out_buf + data_offset) = be64_to_cpu(data[0]);
			break;
		case 4:
			*(uint32_t *)(hdr->out_buf + data_offset) = be32_to_cpu(data[0] & 0xffffffff);
			break;
		case 2:
			*(uint16_t *)(hdr->out_buf + data_offset) = be16_to_cpu(data[0] & 0xffff);
			break;
		case 1:
			*(uint8_t *)(hdr->out_buf + data_offset) = (data[0] & 0xff);
			break;
		}
	}
	return 0;
}
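
/*
 * Write a chunk of the DIMM's config/label area via H_SCM_WRITE_METADATA,
 * mirroring papr_scm_meta_get(): data is written big-endian in 8/4/2/1
 * byte chunks.
 */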
static int papr_scm_meta_set(struct papr_scm_priv *p,
		struct nd_cmd_set_config_hdr *hdr)
{
	unsigned long offset, data_offset;
	int len, wrote;
	unsigned long data;
	__be64 data_be;
	int64_t ret;

	/* writes must stay within the label area (up to and including its end) */
	if ((hdr->in_offset + hdr->in_length) > p->metadata_size)
		return -EINVAL;

	for (len = hdr->in_length; len; len -= wrote) {
		data_offset = hdr->in_length - len;
		offset = hdr->in_offset + data_offset;

		if (len >= 8) {
			data = *(uint64_t *)(hdr->in_buf + data_offset);
			data_be = cpu_to_be64(data);
			wrote = 8;
		} else if (len >= 4) {
			data = *(uint32_t *)(hdr->in_buf + data_offset);
			data &= 0xffffffff;
			data_be = cpu_to_be32(data);
			wrote = 4;
		} else if (len >= 2) {
			data = *(uint16_t *)(hdr->in_buf + data_offset);
			data &= 0xffff;
			data_be = cpu_to_be16(data);
			wrote = 2;
		} else {
			data_be = *(uint8_t *)(hdr->in_buf + data_offset);
			data_be &= 0xff;
			wrote = 1;
		}

		ret = plpar_hcall_norets(H_SCM_WRITE_METADATA, p->drc_index,
					 offset, data_be, wrote);
		if (ret == H_PARAMETER) /* bad DRC index */
			return -ENODEV;
		if (ret)
			return -EINVAL; /* other invalid parameter */
	}

	return 0;
}
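
/*
 * libnvdimm bus "ndctl" handler. Only the three label-area commands
 * advertised in PAPR_SCM_DIMM_CMD_MASK are implemented; everything else
 * is rejected with -EINVAL.
 */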
int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct nd_cmd_get_config_size *get_size_hdr;
	struct papr_scm_priv *p;

	/* Only dimm-specific calls are supported atm */
	if (!nvdimm)
		return -EINVAL;

	p = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		get_size_hdr = buf;

		get_size_hdr->status = 0;
		get_size_hdr->max_xfer = 8;
		get_size_hdr->config_size = p->metadata_size;
		*cmd_rc = 0;
		break;

	case ND_CMD_GET_CONFIG_DATA:
		*cmd_rc = papr_scm_meta_get(p, buf);
		break;

	case ND_CMD_SET_CONFIG_DATA:
		*cmd_rc = papr_scm_meta_set(p, buf);
		break;

	default:
		return -EINVAL;
	}

	dev_dbg(&p->pdev->dev, "returned with cmd_rc = %d\n", *cmd_rc);

	return 0;
}
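
/*
 * Default sysfs attribute groups handed to libnvdimm for the region,
 * bus and DIMM objects registered below.
 */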
static const struct attribute_group *region_attr_groups[] = {
	&nd_region_attribute_group,
	&nd_device_attribute_group,
	&nd_mapping_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

static const struct attribute_group *bus_attr_groups[] = {
	&nvdimm_bus_attribute_group,
	NULL,
};

static const struct attribute_group *papr_scm_dimm_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	NULL,
};
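
/*
 * The region should sit on the device's own NUMA node, but that node may
 * be offline or memoryless. Fall back to the closest online node so that
 * libnvdimm always gets a usable numa_node.
 */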
static inline int papr_scm_node(int node)
{
	int min_dist = INT_MAX, dist;
	int nid, min_node;

	if ((node == NUMA_NO_NODE) || node_online(node))
		return node;

	min_node = first_online_node;
	for_each_online_node(nid) {
		dist = node_distance(node, nid);
		if (dist < min_dist) {
			min_dist = dist;
			min_node = nid;
		}
	}
	return min_node;
}
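
/*
 * Register the nvdimm bus, the DIMM object and a single pmem (or volatile)
 * region with libnvdimm for this SCM volume.
 */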
static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
{
	struct device *dev = &p->pdev->dev;
	struct nd_mapping_desc mapping;
	struct nd_region_desc ndr_desc;
	unsigned long dimm_flags;
	int target_nid, online_nid;

	p->bus_desc.ndctl = papr_scm_ndctl;
	p->bus_desc.module = THIS_MODULE;
	p->bus_desc.of_node = p->pdev->dev.of_node;
	p->bus_desc.attr_groups = bus_attr_groups;
	p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);

	if (!p->bus_desc.provider_name)
		return -ENOMEM;

	p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
	if (!p->bus) {
		dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
		kfree(p->bus_desc.provider_name);
		return -ENXIO;
	}

	dimm_flags = 0;
	set_bit(NDD_ALIASING, &dimm_flags);

	p->nvdimm = nvdimm_create(p->bus, p, papr_scm_dimm_groups,
				  dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
	if (!p->nvdimm) {
		dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
		goto err;
	}

	if (nvdimm_bus_check_dimm_count(p->bus, 1))
		goto err;

	/* now add the region */
	memset(&mapping, 0, sizeof(mapping));
	mapping.nvdimm = p->nvdimm;
	mapping.start = 0;
	mapping.size = p->blocks * p->block_size; // XXX: potential overflow?

	memset(&ndr_desc, 0, sizeof(ndr_desc));
	ndr_desc.attr_groups = region_attr_groups;
	target_nid = dev_to_node(&p->pdev->dev);
	online_nid = papr_scm_node(target_nid);
	ndr_desc.numa_node = online_nid;
	ndr_desc.target_node = target_nid;
	ndr_desc.res = &p->res;
	ndr_desc.of_node = p->dn;
	ndr_desc.provider_data = p;
	ndr_desc.mapping = &mapping;
	ndr_desc.num_mappings = 1;
	ndr_desc.nd_set = &p->nd_set;
	set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);

	if (p->is_volatile)
		p->region = nvdimm_volatile_region_create(p->bus, &ndr_desc);
	else
		p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc);
	if (!p->region) {
		dev_err(dev, "Error registering region %pR from %pOF\n",
				ndr_desc.res, p->dn);
		goto err;
	}

	if (target_nid != online_nid)
		dev_info(dev, "Region registered with target node %d and online node %d\n",
			 target_nid, online_nid);

	return 0;

err:	nvdimm_bus_unregister(p->bus);
	kfree(p->bus_desc.provider_name);
	return -ENXIO;
}
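
/*
 * probe() consumes an "ibm,pmemory" node from the device tree. A sketch of
 * such a node is shown below for reference only -- the node name and all
 * property values here are made up:
 *
 *	ibm,pmemory@44100003 {
 *		compatible = "ibm,pmemory";
 *		ibm,my-drc-index = <0x44100003>;
 *		ibm,block-size = <0x0 0x1000000>;
 *		ibm,number-of-blocks = <0x0 0x80>;
 *		ibm,unit-guid = "5fcc6b50-2f3a-42fd-8d1d-5be3cae8bcaa";
 *		ibm,metadata-size = <0x20000>;
 *		ibm,cache-flush-required;
 *	};
 */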
static int papr_scm_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	u32 drc_index, metadata_size = 0;
	u64 blocks, block_size;
	struct papr_scm_priv *p;
	const char *uuid_str;
	u64 uuid[2];
	int rc;

	/* check we have all the required DT properties */
	if (of_property_read_u32(dn, "ibm,my-drc-index", &drc_index)) {
		dev_err(&pdev->dev, "%pOF: missing drc-index!\n", dn);
		return -ENODEV;
	}

	if (of_property_read_u64(dn, "ibm,block-size", &block_size)) {
		dev_err(&pdev->dev, "%pOF: missing block-size!\n", dn);
		return -ENODEV;
	}

	if (of_property_read_u64(dn, "ibm,number-of-blocks", &blocks)) {
		dev_err(&pdev->dev, "%pOF: missing number-of-blocks!\n", dn);
		return -ENODEV;
	}

	if (of_property_read_string(dn, "ibm,unit-guid", &uuid_str)) {
		dev_err(&pdev->dev, "%pOF: missing unit-guid!\n", dn);
		return -ENODEV;
	}

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	/* optional DT properties */
	of_property_read_u32(dn, "ibm,metadata-size", &metadata_size);

	p->dn = dn;
	p->drc_index = drc_index;
	p->block_size = block_size;
	p->blocks = blocks;
	p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required");

	/* We just need to ensure that set cookies are unique across DIMMs */
	uuid_parse(uuid_str, (uuid_t *) uuid);
	/*
	 * cookie1 and cookie2 are not really little endian;
	 * we store a little endian representation of the
	 * uuid str so that we can compare this with the label
	 * area cookie irrespective of the endian config with which
	 * the kernel is built.
	 */
	p->nd_set.cookie1 = cpu_to_le64(uuid[0]);
	p->nd_set.cookie2 = cpu_to_le64(uuid[1]);

	/* might be zero */
	p->metadata_size = metadata_size;
	p->pdev = pdev;

	/* request the hypervisor to bind this region to somewhere in memory */
	rc = drc_pmem_bind(p);

	/* If phyp says drc memory still bound then force unbound and retry */
	if (rc == -EBUSY) {
		dev_warn(&pdev->dev, "Retrying bind after unbinding\n");
		drc_pmem_unbind(p);
		rc = drc_pmem_bind(p);
	}

	if (rc)
		goto err;

	/* setup the resource for the newly bound range */
	p->res.start = p->bound_addr;
	p->res.end = p->bound_addr + p->blocks * p->block_size - 1;
	p->res.name = pdev->name;
	p->res.flags = IORESOURCE_MEM;

	rc = papr_scm_nvdimm_init(p);
	if (rc)
		goto err2;

	platform_set_drvdata(pdev, p);

	return 0;

err2:	drc_pmem_unbind(p);
err:	kfree(p);
	return rc;
}
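
/* Tear down the libnvdimm objects and release the hypervisor binding */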
static int papr_scm_remove(struct platform_device *pdev)
{
	struct papr_scm_priv *p = platform_get_drvdata(pdev);

	nvdimm_bus_unregister(p->bus);
	drc_pmem_unbind(p);
	kfree(p->bus_desc.provider_name);
	kfree(p);

	return 0;
}

static const struct of_device_id papr_scm_match[] = {
	{ .compatible = "ibm,pmemory" },
	{ },
};

static struct platform_driver papr_scm_driver = {
	.probe = papr_scm_probe,
	.remove = papr_scm_remove,
	.driver = {
		.name = "papr_scm",
		.owner = THIS_MODULE,
		.of_match_table = papr_scm_match,
	},
};

module_platform_driver(papr_scm_driver);
MODULE_DEVICE_TABLE(of, papr_scm_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");