powerpc/pseries/scm: Mark the region volatile if cache flush not required
arch/powerpc/platforms/pseries/papr_scm.c

// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "papr-scm: " fmt

#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/ndctl.h>
#include <linux/sched.h>
#include <linux/libnvdimm.h>
#include <linux/platform_device.h>

#include <asm/plpar_wrappers.h>

#define BIND_ANY_ADDR (~0ul)

#define PAPR_SCM_DIMM_CMD_MASK \
        ((1ul << ND_CMD_GET_CONFIG_SIZE) | \
         (1ul << ND_CMD_GET_CONFIG_DATA) | \
         (1ul << ND_CMD_SET_CONFIG_DATA))

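/*
 * Per-device driver state: the device tree properties describing the
 * SCM region, the guest address the hypervisor bound it to, and the
 * libnvdimm objects (bus, dimm, region) built on top of it.
 */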
struct papr_scm_priv {
        struct platform_device *pdev;
        struct device_node *dn;
        uint32_t drc_index;
        uint64_t blocks;
        uint64_t block_size;
        int metadata_size;
        bool is_volatile;

        uint64_t bound_addr;

        struct nvdimm_bus_descriptor bus_desc;
        struct nvdimm_bus *bus;
        struct nvdimm *nvdimm;
        struct resource res;
        struct nd_region *region;
        struct nd_interleave_set nd_set;
};

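/*
 * Ask the hypervisor to map the region's blocks into the guest physical
 * address space; on success the base address is saved in p->bound_addr.
 */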
static int drc_pmem_bind(struct papr_scm_priv *p)
{
        unsigned long ret[PLPAR_HCALL_BUFSIZE];
        uint64_t rc, token;
        uint64_t saved = 0;

        /*
         * When the hypervisor cannot map all the requested memory in a single
         * hcall it returns H_BUSY and we call again with the token until
         * we get H_SUCCESS. Aborting the retry loop before getting H_SUCCESS
         * leaves the system in an undefined state, so we wait.
         */
        token = 0;

        do {
                rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
                                p->blocks, BIND_ANY_ADDR, token);
                token = ret[0];
                if (!saved)
                        saved = ret[1];
                cond_resched();
        } while (rc == H_BUSY);

        if (rc) {
                dev_err(&p->pdev->dev, "bind err: %lld\n", rc);
                return -ENXIO;
        }

        p->bound_addr = saved;

        dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);

        return 0;
}

static int drc_pmem_unbind(struct papr_scm_priv *p)
{
        unsigned long ret[PLPAR_HCALL_BUFSIZE];
        uint64_t rc, token;

        token = 0;

        /* NB: unbind has the same retry requirements mentioned above */
        do {
                rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index,
                                p->bound_addr, p->blocks, token);
                token = ret[0];
                cond_resched();
        } while (rc == H_BUSY);

        if (rc)
                dev_err(&p->pdev->dev, "unbind error: %lld\n", rc);

        return !!rc;
}

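/*
 * Read one byte of the DIMM's config area via H_SCM_READ_METADATA.
 * Transfers are a single byte at a time, matching the max_xfer of 1
 * advertised in ND_CMD_GET_CONFIG_SIZE below.
 */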
static int papr_scm_meta_get(struct papr_scm_priv *p,
                        struct nd_cmd_get_config_data_hdr *hdr)
{
        unsigned long data[PLPAR_HCALL_BUFSIZE];
        int64_t ret;

        if (hdr->in_offset >= p->metadata_size || hdr->in_length != 1)
                return -EINVAL;

        ret = plpar_hcall(H_SCM_READ_METADATA, data, p->drc_index,
                        hdr->in_offset, 1);

        if (ret == H_PARAMETER) /* bad DRC index */
                return -ENODEV;
        if (ret)
                return -EINVAL; /* other invalid parameter */

        hdr->out_buf[0] = data[0] & 0xff;

        return 0;
}

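/*
 * Write one byte of the DIMM's config area via H_SCM_WRITE_METADATA,
 * mirroring the single-byte read path above.
 */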
static int papr_scm_meta_set(struct papr_scm_priv *p,
                        struct nd_cmd_set_config_hdr *hdr)
{
        int64_t ret;

        if (hdr->in_offset >= p->metadata_size || hdr->in_length != 1)
                return -EINVAL;

        ret = plpar_hcall_norets(H_SCM_WRITE_METADATA,
                        p->drc_index, hdr->in_offset, hdr->in_buf[0], 1);

        if (ret == H_PARAMETER) /* bad DRC index */
                return -ENODEV;
        if (ret)
                return -EINVAL; /* other invalid parameter */

        return 0;
}

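/*
 * Command handler the libnvdimm core invokes for the ND_CMD_* ioctls;
 * only the commands advertised in PAPR_SCM_DIMM_CMD_MASK are handled.
 */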
int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
                unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
        struct nd_cmd_get_config_size *get_size_hdr;
        struct papr_scm_priv *p;

        /* Only dimm-specific calls are supported atm */
        if (!nvdimm)
                return -EINVAL;

        p = nvdimm_provider_data(nvdimm);

        switch (cmd) {
        case ND_CMD_GET_CONFIG_SIZE:
                get_size_hdr = buf;

                get_size_hdr->status = 0;
                get_size_hdr->max_xfer = 1;
                get_size_hdr->config_size = p->metadata_size;
                *cmd_rc = 0;
                break;

        case ND_CMD_GET_CONFIG_DATA:
                *cmd_rc = papr_scm_meta_get(p, buf);
                break;

        case ND_CMD_SET_CONFIG_DATA:
                *cmd_rc = papr_scm_meta_set(p, buf);
                break;

        default:
                return -EINVAL;
        }

        dev_dbg(&p->pdev->dev, "returned with cmd_rc = %d\n", *cmd_rc);

        return 0;
}

static const struct attribute_group *region_attr_groups[] = {
        &nd_region_attribute_group,
        &nd_device_attribute_group,
        &nd_mapping_attribute_group,
        &nd_numa_attribute_group,
        NULL,
};

static const struct attribute_group *bus_attr_groups[] = {
        &nvdimm_bus_attribute_group,
        NULL,
};

static const struct attribute_group *papr_scm_dimm_groups[] = {
        &nvdimm_attribute_group,
        &nd_device_attribute_group,
        NULL,
};

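/*
 * Register the libnvdimm objects in order: the bus, then a single DIMM
 * on that bus, then one region covering the DIMM's entire mapping.
 */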
static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
{
        struct device *dev = &p->pdev->dev;
        struct nd_mapping_desc mapping;
        struct nd_region_desc ndr_desc;
        unsigned long dimm_flags;

        p->bus_desc.ndctl = papr_scm_ndctl;
        p->bus_desc.module = THIS_MODULE;
        p->bus_desc.of_node = p->pdev->dev.of_node;
        p->bus_desc.attr_groups = bus_attr_groups;
        p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);

        if (!p->bus_desc.provider_name)
                return -ENOMEM;

        p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
        if (!p->bus) {
                dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
                return -ENXIO;
        }

        dimm_flags = 0;
        set_bit(NDD_ALIASING, &dimm_flags);

        p->nvdimm = nvdimm_create(p->bus, p, papr_scm_dimm_groups,
                                dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
        if (!p->nvdimm) {
                dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
                goto err;
        }

        if (nvdimm_bus_check_dimm_count(p->bus, 1))
                goto err;

        /* now add the region */

        memset(&mapping, 0, sizeof(mapping));
        mapping.nvdimm = p->nvdimm;
        mapping.start = 0;
        mapping.size = p->blocks * p->block_size; // XXX: potential overflow?

        memset(&ndr_desc, 0, sizeof(ndr_desc));
        ndr_desc.attr_groups = region_attr_groups;
        ndr_desc.numa_node = dev_to_node(&p->pdev->dev);
        ndr_desc.target_node = ndr_desc.numa_node;
        ndr_desc.res = &p->res;
        ndr_desc.of_node = p->dn;
        ndr_desc.provider_data = p;
        ndr_desc.mapping = &mapping;
        ndr_desc.num_mappings = 1;
        ndr_desc.nd_set = &p->nd_set;
        set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);

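        /*
         * If the firmware said no cache flush is required, the backing
         * memory is treated as volatile and registered as a volatile
         * region instead of a persistent one.
         */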
        if (p->is_volatile)
                p->region = nvdimm_volatile_region_create(p->bus, &ndr_desc);
        else
                p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc);
        if (!p->region) {
                dev_err(dev, "Error registering region %pR from %pOF\n",
                                ndr_desc.res, p->dn);
                goto err;
        }

        return 0;

err:    nvdimm_bus_unregister(p->bus);
        kfree(p->bus_desc.provider_name);
        return -ENXIO;
}

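/*
 * Probe: read the region description from the device tree, bind the
 * region through the hypervisor, then hand it to the libnvdimm core.
 */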
static int papr_scm_probe(struct platform_device *pdev)
{
        struct device_node *dn = pdev->dev.of_node;
        u32 drc_index, metadata_size;
        u64 blocks, block_size;
        struct papr_scm_priv *p;
        const char *uuid_str;
        u64 uuid[2];
        int rc;

        /* check we have all the required DT properties */
        if (of_property_read_u32(dn, "ibm,my-drc-index", &drc_index)) {
                dev_err(&pdev->dev, "%pOF: missing drc-index!\n", dn);
                return -ENODEV;
        }

        if (of_property_read_u64(dn, "ibm,block-size", &block_size)) {
                dev_err(&pdev->dev, "%pOF: missing block-size!\n", dn);
                return -ENODEV;
        }

        if (of_property_read_u64(dn, "ibm,number-of-blocks", &blocks)) {
                dev_err(&pdev->dev, "%pOF: missing number-of-blocks!\n", dn);
                return -ENODEV;
        }

        if (of_property_read_string(dn, "ibm,unit-guid", &uuid_str)) {
                dev_err(&pdev->dev, "%pOF: missing unit-guid!\n", dn);
                return -ENODEV;
        }

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        /* optional DT properties */
        of_property_read_u32(dn, "ibm,metadata-size", &metadata_size);

        p->dn = dn;
        p->drc_index = drc_index;
        p->block_size = block_size;
        p->blocks = blocks;
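        /*
         * No "ibm,cache-flush-required" property means the memory behind
         * this region needs no explicit flushing, so treat it as volatile.
         */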
        p->is_volatile = !of_property_read_bool(dn, "ibm,cache-flush-required");

        /* We just need to ensure that set cookies are unique across SCM regions */
        uuid_parse(uuid_str, (uuid_t *) uuid);
        p->nd_set.cookie1 = uuid[0];
        p->nd_set.cookie2 = uuid[1];

        /* might be zero */
        p->metadata_size = metadata_size;
        p->pdev = pdev;

        /* request the hypervisor to bind this region to somewhere in memory */
        rc = drc_pmem_bind(p);
        if (rc)
                goto err;

        /* setup the resource for the newly bound range */
        p->res.start = p->bound_addr;
        p->res.end = p->bound_addr + p->blocks * p->block_size - 1;
        p->res.name = pdev->name;
        p->res.flags = IORESOURCE_MEM;

        rc = papr_scm_nvdimm_init(p);
        if (rc)
                goto err2;

        platform_set_drvdata(pdev, p);

        return 0;

err2:   drc_pmem_unbind(p);
err:    kfree(p);
        return rc;
}

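/*
 * Remove: tear down in reverse order, unregistering the nvdimm bus
 * before asking the hypervisor to unbind the region.
 */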
static int papr_scm_remove(struct platform_device *pdev)
{
        struct papr_scm_priv *p = platform_get_drvdata(pdev);

        nvdimm_bus_unregister(p->bus);
        drc_pmem_unbind(p);
        kfree(p);

        return 0;
}

static const struct of_device_id papr_scm_match[] = {
        { .compatible = "ibm,pmemory" },
        { },
};

static struct platform_driver papr_scm_driver = {
        .probe = papr_scm_probe,
        .remove = papr_scm_remove,
        .driver = {
                .name = "papr_scm",
                .owner = THIS_MODULE,
                .of_match_table = papr_scm_match,
        },
};

module_platform_driver(papr_scm_driver);
MODULE_DEVICE_TABLE(of, papr_scm_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");