// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>
#include "nvmem.h"

struct nvmem_cell {
        const char              *name;
        int                     offset;
        int                     bytes;
        int                     bit_offset;
        int                     nbits;
        struct device_node      *np;
        struct nvmem_device     *nvmem;
        struct list_head        node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
                          void *val, size_t bytes)
{
        if (nvmem->reg_read)
                return nvmem->reg_read(nvmem->priv, offset, val, bytes);

        return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
                           void *val, size_t bytes)
{
        if (nvmem->reg_write)
                return nvmem->reg_write(nvmem->priv, offset, val, bytes);

        return -EINVAL;
}

static void nvmem_release(struct device *dev)
{
        struct nvmem_device *nvmem = to_nvmem_device(dev);

        ida_simple_remove(&nvmem_ida, nvmem->id);
        kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
        .release        = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
        .name           = "nvmem",
};

static int of_nvmem_match(struct device *dev, const void *nvmem_np)
{
        return dev->of_node == nvmem_np;
}

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
        struct device *d;

        if (!nvmem_np)
                return NULL;

        d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);

        if (!d)
                return NULL;

        return to_nvmem_device(d);
}

static struct nvmem_device *nvmem_find(const char *name)
{
        struct device *d;

        d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);

        if (!d)
                return NULL;

        return to_nvmem_device(d);
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
        blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
        mutex_lock(&nvmem_mutex);
        list_del(&cell->node);
        mutex_unlock(&nvmem_mutex);
        of_node_put(cell->np);
        kfree(cell->name);
        kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
        struct nvmem_cell *cell, *p;

        list_for_each_entry_safe(cell, p, &nvmem->cells, node)
                nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
        mutex_lock(&nvmem_mutex);
        list_add_tail(&cell->node, &cell->nvmem->cells);
        mutex_unlock(&nvmem_mutex);
        blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
                                         const struct nvmem_cell_info *info,
                                         struct nvmem_cell *cell)
{
        cell->nvmem = nvmem;
        cell->offset = info->offset;
        cell->bytes = info->bytes;
        cell->name = info->name;

        cell->bit_offset = info->bit_offset;
        cell->nbits = info->nbits;

        if (cell->nbits)
                cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
                                           BITS_PER_BYTE);

        if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
                dev_err(&nvmem->dev,
                        "cell %s unaligned to nvmem stride %d\n",
                        cell->name, nvmem->stride);
                return -EINVAL;
        }

        return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
                           const struct nvmem_cell_info *info,
                           int ncells)
{
        struct nvmem_cell **cells;
        int i, rval;

        cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
        if (!cells)
                return -ENOMEM;

        for (i = 0; i < ncells; i++) {
                cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
                if (!cells[i]) {
                        rval = -ENOMEM;
                        goto err;
                }

                rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
                if (rval) {
                        kfree(cells[i]);
                        goto err;
                }

                nvmem_cell_add(cells[i]);
        }

        /* remove tmp array */
        kfree(cells);

        return 0;
err:
        while (i--)
                nvmem_cell_drop(cells[i]);

        kfree(cells);

        return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

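/*
 * Usage sketch (illustration only, not part of the original file): a consumer
 * that wants to react to nvmem devices and cells appearing or disappearing
 * could hook the notifier chain like this. The foo_* names are assumptions;
 * the event codes and callback signature come from <linux/nvmem-consumer.h>
 * and <linux/notifier.h>.
 *
 *      static int foo_nvmem_event(struct notifier_block *nb,
 *                                 unsigned long event, void *data)
 *      {
 *              switch (event) {
 *              case NVMEM_ADD:         // data is a struct nvmem_device *
 *              case NVMEM_CELL_ADD:    // data is a struct nvmem_cell *
 *                      return NOTIFY_OK;
 *              default:
 *                      return NOTIFY_DONE;
 *              }
 *      }
 *
 *      static struct notifier_block foo_nvmem_nb = {
 *              .notifier_call = foo_nvmem_event,
 *      };
 *
 *      // probe:  nvmem_register_notifier(&foo_nvmem_nb);
 *      // remove: nvmem_unregister_notifier(&foo_nvmem_nb);
 */
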
static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
        const struct nvmem_cell_info *info;
        struct nvmem_cell_table *table;
        struct nvmem_cell *cell;
        int rval = 0, i;

        mutex_lock(&nvmem_cell_mutex);
        list_for_each_entry(table, &nvmem_cell_tables, node) {
                if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
                        for (i = 0; i < table->ncells; i++) {
                                info = &table->cells[i];

                                cell = kzalloc(sizeof(*cell), GFP_KERNEL);
                                if (!cell) {
                                        rval = -ENOMEM;
                                        goto out;
                                }

                                rval = nvmem_cell_info_to_nvmem_cell(nvmem,
                                                                     info,
                                                                     cell);
                                if (rval) {
                                        kfree(cell);
                                        goto out;
                                }

                                nvmem_cell_add(cell);
                        }
                }
        }

out:
        mutex_unlock(&nvmem_cell_mutex);
        return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
        struct nvmem_cell *iter, *cell = NULL;

        mutex_lock(&nvmem_mutex);
        list_for_each_entry(iter, &nvmem->cells, node) {
                if (strcmp(cell_id, iter->name) == 0) {
                        cell = iter;
                        break;
                }
        }
        mutex_unlock(&nvmem_mutex);

        return cell;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
        struct device_node *parent, *child;
        struct device *dev = &nvmem->dev;
        struct nvmem_cell *cell;
        const __be32 *addr;
        int len;

        parent = dev->of_node;

        for_each_child_of_node(parent, child) {
                addr = of_get_property(child, "reg", &len);
                if (!addr || (len < 2 * sizeof(u32))) {
                        dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
                        return -EINVAL;
                }

                cell = kzalloc(sizeof(*cell), GFP_KERNEL);
                if (!cell)
                        return -ENOMEM;

                cell->nvmem = nvmem;
                cell->np = of_node_get(child);
                cell->offset = be32_to_cpup(addr++);
                cell->bytes = be32_to_cpup(addr);
                cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

                addr = of_get_property(child, "bits", &len);
                if (addr && len == (2 * sizeof(u32))) {
                        cell->bit_offset = be32_to_cpup(addr++);
                        cell->nbits = be32_to_cpup(addr);
                }

                if (cell->nbits)
                        cell->bytes = DIV_ROUND_UP(
                                        cell->nbits + cell->bit_offset,
                                        BITS_PER_BYTE);

                if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
                        dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
                                cell->name, nvmem->stride);
                        /* Cells already added will be freed later. */
                        kfree(cell->name);
                        kfree(cell);
                        return -EINVAL;
                }

                nvmem_cell_add(cell);
        }

        return 0;
}

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
        struct nvmem_device *nvmem;
        int rval;

        if (!config->dev)
                return ERR_PTR(-EINVAL);

        nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
        if (!nvmem)
                return ERR_PTR(-ENOMEM);

        rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
        if (rval < 0) {
                kfree(nvmem);
                return ERR_PTR(rval);
        }

        kref_init(&nvmem->refcnt);
        INIT_LIST_HEAD(&nvmem->cells);

        nvmem->id = rval;
        nvmem->owner = config->owner;
        if (!nvmem->owner && config->dev->driver)
                nvmem->owner = config->dev->driver->owner;
        nvmem->stride = config->stride ?: 1;
        nvmem->word_size = config->word_size ?: 1;
        nvmem->size = config->size;
        nvmem->dev.type = &nvmem_provider_type;
        nvmem->dev.bus = &nvmem_bus_type;
        nvmem->dev.parent = config->dev;
        nvmem->priv = config->priv;
        nvmem->type = config->type;
        nvmem->reg_read = config->reg_read;
        nvmem->reg_write = config->reg_write;
        if (!config->no_of_node)
                nvmem->dev.of_node = config->dev->of_node;

        if (config->id == -1 && config->name) {
                dev_set_name(&nvmem->dev, "%s", config->name);
        } else {
                dev_set_name(&nvmem->dev, "%s%d",
                             config->name ? : "nvmem",
                             config->name ? config->id : nvmem->id);
        }

        nvmem->read_only = device_property_present(config->dev, "read-only") ||
                           config->read_only || !nvmem->reg_write;

        nvmem->dev.groups = nvmem_sysfs_get_groups(nvmem, config);

        device_initialize(&nvmem->dev);

        dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

        rval = device_add(&nvmem->dev);
        if (rval)
                goto err_put_device;

        if (config->compat) {
                rval = nvmem_sysfs_setup_compat(nvmem, config);
                if (rval)
                        goto err_device_del;
        }

        if (config->cells) {
                rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
                if (rval)
                        goto err_teardown_compat;
        }

        rval = nvmem_add_cells_from_table(nvmem);
        if (rval)
                goto err_remove_cells;

        rval = nvmem_add_cells_from_of(nvmem);
        if (rval)
                goto err_remove_cells;

        blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

        return nvmem;

err_remove_cells:
        nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
        if (config->compat)
                nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
        device_del(&nvmem->dev);
err_put_device:
        put_device(&nvmem->dev);

        return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);

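/*
 * Usage sketch (illustration only, not part of the original file): a minimal
 * read-only provider built on nvmem_register()/devm_nvmem_register(). The
 * eeprom_* names, the priv structure and the 256-byte size are assumptions;
 * reg_read() receives the priv pointer stored in the config.
 *
 *      struct eeprom_priv { void __iomem *base; };
 *
 *      static int eeprom_reg_read(void *priv, unsigned int offset,
 *                                 void *val, size_t bytes)
 *      {
 *              struct eeprom_priv *p = priv;
 *
 *              memcpy_fromio(val, p->base + offset, bytes);
 *              return 0;
 *      }
 *
 *      // in the driver's probe(), with p already set up:
 *      struct nvmem_config cfg = {
 *              .dev = &pdev->dev,
 *              .name = "foo-eeprom",
 *              .id = -1,
 *              .read_only = true,
 *              .size = 256,
 *              .word_size = 1,
 *              .stride = 1,
 *              .priv = p,
 *              .reg_read = eeprom_reg_read,
 *      };
 *      struct nvmem_device *nvmem = devm_nvmem_register(&pdev->dev, &cfg);
 *
 *      if (IS_ERR(nvmem))
 *              return PTR_ERR(nvmem);
 */
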
static void nvmem_device_release(struct kref *kref)
{
        struct nvmem_device *nvmem;

        nvmem = container_of(kref, struct nvmem_device, refcnt);

        blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

        if (nvmem->flags & FLAG_COMPAT)
                device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

        nvmem_device_remove_all_cells(nvmem);
        device_del(&nvmem->dev);
        put_device(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
        kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
        nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for the given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
                                         const struct nvmem_config *config)
{
        struct nvmem_device **ptr, *nvmem;

        ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        nvmem = nvmem_register(config);

        if (!IS_ERR(nvmem)) {
                *ptr = nvmem;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
        struct nvmem_device **r = res;

        return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister a previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
        return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(struct device_node *np,
                                               const char *nvmem_name)
{
        struct nvmem_device *nvmem = NULL;

        mutex_lock(&nvmem_mutex);
        nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
        mutex_unlock(&nvmem_mutex);
        if (!nvmem)
                return ERR_PTR(-EPROBE_DEFER);

        if (!try_module_get(nvmem->owner)) {
                dev_err(&nvmem->dev,
                        "could not increase module refcount for cell %s\n",
                        nvmem_dev_name(nvmem));

                put_device(&nvmem->dev);
                return ERR_PTR(-EINVAL);
        }

        kref_get(&nvmem->refcnt);

        return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
        put_device(&nvmem->dev);
        module_put(nvmem->owner);
        kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
        struct device_node *nvmem_np;
        int index = 0;

        if (id)
                index = of_property_match_string(np, "nvmem-names", id);

        nvmem_np = of_parse_phandle(np, "nvmem", index);
        if (!nvmem_np)
                return ERR_PTR(-ENOENT);

        return __nvmem_device_get(nvmem_np, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
        if (dev->of_node) { /* try dt first */
                struct nvmem_device *nvmem;

                nvmem = of_nvmem_device_get(dev->of_node, dev_name);

                if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
                        return nvmem;
        }

        return __nvmem_device_get(NULL, dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
        struct nvmem_device **nvmem = res;

        if (WARN_ON(!nvmem || !*nvmem))
                return 0;

        return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
        nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put a previously acquired nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
        int ret;

        ret = devres_release(dev, devm_nvmem_device_release,
                             devm_nvmem_device_match, nvmem);

        WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put a previously acquired nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
        __nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
        struct nvmem_device **ptr, *nvmem;

        ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        nvmem = nvmem_device_get(dev, id);
        if (!IS_ERR(nvmem)) {
                *ptr = nvmem;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
        struct nvmem_cell *cell = ERR_PTR(-ENOENT);
        struct nvmem_cell_lookup *lookup;
        struct nvmem_device *nvmem;
        const char *dev_id;

        if (!dev)
                return ERR_PTR(-EINVAL);

        dev_id = dev_name(dev);

        mutex_lock(&nvmem_lookup_mutex);

        list_for_each_entry(lookup, &nvmem_lookup_list, node) {
                if ((strcmp(lookup->dev_id, dev_id) == 0) &&
                    (strcmp(lookup->con_id, con_id) == 0)) {
                        /* This is the right entry. */
                        nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
                        if (IS_ERR(nvmem)) {
                                /* Provider may not be registered yet. */
                                cell = ERR_CAST(nvmem);
                                break;
                        }

                        cell = nvmem_find_cell_by_name(nvmem,
                                                       lookup->cell_name);
                        if (!cell) {
                                __nvmem_device_put(nvmem);
                                cell = ERR_PTR(-ENOENT);
                        }
                        break;
                }
        }

        mutex_unlock(&nvmem_lookup_mutex);
        return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
        struct nvmem_cell *iter, *cell = NULL;

        mutex_lock(&nvmem_mutex);
        list_for_each_entry(iter, &nvmem->cells, node) {
                if (np == iter->np) {
                        cell = iter;
                        break;
                }
        }
        mutex_unlock(&nvmem_mutex);

        return cell;
}

/**
 * of_nvmem_cell_get() - Get an nvmem cell from a given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
        struct device_node *cell_np, *nvmem_np;
        struct nvmem_device *nvmem;
        struct nvmem_cell *cell;
        int index = 0;

        /* if cell name exists, find index to the name */
        if (id)
                index = of_property_match_string(np, "nvmem-cell-names", id);

        cell_np = of_parse_phandle(np, "nvmem-cells", index);
        if (!cell_np)
                return ERR_PTR(-ENOENT);

        nvmem_np = of_get_next_parent(cell_np);
        if (!nvmem_np)
                return ERR_PTR(-EINVAL);

        nvmem = __nvmem_device_get(nvmem_np, NULL);
        of_node_put(nvmem_np);
        if (IS_ERR(nvmem))
                return ERR_CAST(nvmem);

        cell = nvmem_find_cell_by_node(nvmem, cell_np);
        if (!cell) {
                __nvmem_device_put(nvmem);
                return ERR_PTR(-ENOENT);
        }

        return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get an nvmem cell of a device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
        struct nvmem_cell *cell;

        if (dev->of_node) { /* try dt first */
                cell = of_nvmem_cell_get(dev->of_node, id);
                if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
                        return cell;
        }

        /* NULL cell id only allowed for device tree; invalid otherwise */
        if (!id)
                return ERR_PTR(-EINVAL);

        return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

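/*
 * Usage sketch (illustration only, not part of the original file): a consumer
 * reading a cell by name and freeing the returned buffer. The "mac-address"
 * cell name and the calling context are assumptions; on DT systems the name
 * comes from nvmem-cell-names, otherwise from a registered lookup entry.
 *
 *      struct nvmem_cell *cell;
 *      size_t len;
 *      u8 *mac;
 *
 *      cell = nvmem_cell_get(dev, "mac-address");
 *      if (IS_ERR(cell))
 *              return PTR_ERR(cell);   // may be -EPROBE_DEFER
 *
 *      mac = nvmem_cell_read(cell, &len);
 *      nvmem_cell_put(cell);
 *      if (IS_ERR(mac))
 *              return PTR_ERR(mac);
 *
 *      // use mac[0..len - 1], then:
 *      kfree(mac);
 */
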
static void devm_nvmem_cell_release(struct device *dev, void *res)
{
        nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
        struct nvmem_cell **ptr, *cell;

        ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        cell = nvmem_cell_get(dev, id);
        if (!IS_ERR(cell)) {
                *ptr = cell;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
        struct nvmem_cell **c = res;

        if (WARN_ON(!c || !*c))
                return 0;

        return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
        int ret;

        ret = devres_release(dev, devm_nvmem_cell_release,
                             devm_nvmem_cell_match, cell);

        WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
        struct nvmem_device *nvmem = cell->nvmem;

        __nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

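/*
 * Worked example for nvmem_shift_read_buffer_in_place() below (illustration
 * only, not part of the original file): for a bit-packed cell with
 * bit_offset = 2 and nbits = 10, cell->bytes is DIV_ROUND_UP(12, 8) = 2 and
 * the two raw bytes become
 *
 *      buf[0] = (raw[0] >> 2) | (raw[1] << 6);
 *      buf[1] = (raw[1] >> 2) & GENMASK(1, 0);
 *
 * i.e. the value is shifted down to bit 0 and the padding bits above bit 9
 * are cleared; no whole trailing bytes need zeroing in this case.
 */
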
static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
        u8 *p, *b;
        int i, extra, bit_offset = cell->bit_offset;

        p = b = buf;
        if (bit_offset) {
                /* First shift */
                *b++ >>= bit_offset;

                /* setup rest of the bytes if any */
                for (i = 1; i < cell->bytes; i++) {
                        /* Get bits from next byte and shift them towards msb */
                        *p |= *b << (BITS_PER_BYTE - bit_offset);

                        p = b;
                        *b++ >>= bit_offset;
                }
        } else {
                /* point to the msb */
                p += cell->bytes - 1;
        }

        /* result fits in fewer bytes */
        extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
        while (--extra >= 0)
                *p-- = 0;

        /* clear msb bits if any leftover in the last byte */
        *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
                             struct nvmem_cell *cell,
                             void *buf, size_t *len)
{
        int rc;

        rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

        if (rc)
                return rc;

        /* shift bits in-place */
        if (cell->bit_offset || cell->nbits)
                nvmem_shift_read_buffer_in_place(cell, buf);

        if (len)
                *len = cell->bytes;

        return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *       can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
        struct nvmem_device *nvmem = cell->nvmem;
        u8 *buf;
        int rc;

        if (!nvmem)
                return ERR_PTR(-EINVAL);

        buf = kzalloc(cell->bytes, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        rc = __nvmem_cell_read(nvmem, cell, buf, len);
        if (rc) {
                kfree(buf);
                return ERR_PTR(rc);
        }

        return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
                                             u8 *_buf, int len)
{
        struct nvmem_device *nvmem = cell->nvmem;
        int i, rc, nbits, bit_offset = cell->bit_offset;
        u8 v, *p, *buf, *b, pbyte, pbits;

        nbits = cell->nbits;
        buf = kzalloc(cell->bytes, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        memcpy(buf, _buf, len);
        p = b = buf;

        if (bit_offset) {
                pbyte = *b;
                *b <<= bit_offset;

                /* setup the first byte with lsb bits from nvmem */
                rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
                if (rc)
                        goto err;
                *b++ |= GENMASK(bit_offset - 1, 0) & v;

                /* setup rest of the bytes if any */
                for (i = 1; i < cell->bytes; i++) {
                        /* Get last byte bits and shift them towards lsb */
                        pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
                        pbyte = *b;
                        p = b;
                        *b <<= bit_offset;
                        *b++ |= pbits;
                }
        }

        /* if it does not end on a byte boundary */
        if ((nbits + bit_offset) % BITS_PER_BYTE) {
                /* setup the last byte with msb bits from nvmem */
                rc = nvmem_reg_read(nvmem,
                                    cell->offset + cell->bytes - 1, &v, 1);
                if (rc)
                        goto err;
                *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
        }

        return buf;
err:
        kfree(buf);
        return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
        struct nvmem_device *nvmem = cell->nvmem;
        int rc;

        if (!nvmem || nvmem->read_only ||
            (cell->bit_offset == 0 && len != cell->bytes))
                return -EINVAL;

        if (cell->bit_offset || cell->nbits) {
                buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
                if (IS_ERR(buf))
                        return PTR_ERR(buf);
        }

        rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

        /* free the tmp buffer */
        if (cell->bit_offset || cell->nbits)
                kfree(buf);

        if (rc)
                return rc;

        return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

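/*
 * Usage sketch (illustration only, not part of the original file): writing a
 * whole cell back. The cell handle and the 6-byte payload are assumptions;
 * for a cell that is not bit-packed, len must match the cell size exactly.
 *
 *      u8 new_mac[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };
 *      int ret;
 *
 *      ret = nvmem_cell_write(cell, new_mac, sizeof(new_mac));
 *      if (ret != sizeof(new_mac))
 *              return ret < 0 ? ret : -EIO;
 */
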
/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
        struct nvmem_cell *cell;
        void *buf;
        size_t len;

        cell = nvmem_cell_get(dev, cell_id);
        if (IS_ERR(cell))
                return PTR_ERR(cell);

        buf = nvmem_cell_read(cell, &len);
        if (IS_ERR(buf)) {
                nvmem_cell_put(cell);
                return PTR_ERR(buf);
        }
        if (len != sizeof(*val)) {
                kfree(buf);
                nvmem_cell_put(cell);
                return -EINVAL;
        }
        memcpy(val, buf, sizeof(*val));
        kfree(buf);
        nvmem_cell_put(cell);

        return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
        struct nvmem_cell *cell;
        void *buf;
        size_t len;

        cell = nvmem_cell_get(dev, cell_id);
        if (IS_ERR(cell))
                return PTR_ERR(cell);

        buf = nvmem_cell_read(cell, &len);
        if (IS_ERR(buf)) {
                nvmem_cell_put(cell);
                return PTR_ERR(buf);
        }
        if (len != sizeof(*val)) {
                kfree(buf);
                nvmem_cell_put(cell);
                return -EINVAL;
        }
        memcpy(val, buf, sizeof(*val));

        kfree(buf);
        nvmem_cell_put(cell);
        return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

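/*
 * Usage sketch (illustration only, not part of the original file): the
 * u16/u32 helpers bundle get, read, size check, copy and put into one call.
 * The "calibration" cell name is an assumption.
 *
 *      u32 cal;
 *      int ret;
 *
 *      ret = nvmem_cell_read_u32(dev, "calibration", &cal);
 *      if (ret)
 *              return ret;
 */
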
/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
                               struct nvmem_cell_info *info, void *buf)
{
        struct nvmem_cell cell;
        int rc;
        ssize_t len;

        if (!nvmem)
                return -EINVAL;

        rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
        if (rc)
                return rc;

        rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
        if (rc)
                return rc;

        return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
                            struct nvmem_cell_info *info, void *buf)
{
        struct nvmem_cell cell;
        int rc;

        if (!nvmem)
                return -EINVAL;

        rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
        if (rc)
                return rc;

        return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
                      unsigned int offset,
                      size_t bytes, void *buf)
{
        int rc;

        if (!nvmem)
                return -EINVAL;

        rc = nvmem_reg_read(nvmem, offset, buf, bytes);

        if (rc)
                return rc;

        return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
                       unsigned int offset,
                       size_t bytes, void *buf)
{
        int rc;

        if (!nvmem)
                return -EINVAL;

        rc = nvmem_reg_write(nvmem, offset, buf, bytes);

        if (rc)
                return rc;

        return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

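/*
 * Usage sketch (illustration only, not part of the original file): raw access
 * through a managed nvmem device handle instead of a cell. The "eeprom" id,
 * the 0x40 offset and the 16-byte serial number are assumptions.
 *
 *      struct nvmem_device *ndev;
 *      u8 serial[16];
 *      int ret;
 *
 *      ndev = devm_nvmem_device_get(dev, "eeprom");
 *      if (IS_ERR(ndev))
 *              return PTR_ERR(ndev);
 *
 *      ret = nvmem_device_read(ndev, 0x40, sizeof(serial), serial);
 *      if (ret < 0)
 *              return ret;
 */
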
/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
        mutex_lock(&nvmem_cell_mutex);
        list_add_tail(&table->node, &nvmem_cell_tables);
        mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
        mutex_lock(&nvmem_cell_mutex);
        list_del(&table->node);
        mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
        int i;

        mutex_lock(&nvmem_lookup_mutex);
        for (i = 0; i < nentries; i++)
                list_add_tail(&entries[i].node, &nvmem_lookup_list);
        mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
        int i;

        mutex_lock(&nvmem_lookup_mutex);
        for (i = 0; i < nentries; i++)
                list_del(&entries[i].node);
        mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

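/*
 * Usage sketch (illustration only, not part of the original file): non-DT
 * board code describing cells on a provider and wiring one of them to a
 * consumer device. All names below are assumptions.
 *
 *      static struct nvmem_cell_info board_cells[] = {
 *              { .name = "mac-address", .offset = 0x10, .bytes = 6 },
 *      };
 *
 *      static struct nvmem_cell_table board_cell_table = {
 *              .nvmem_name = "foo-eeprom",
 *              .cells = board_cells,
 *              .ncells = ARRAY_SIZE(board_cells),
 *      };
 *
 *      static struct nvmem_cell_lookup board_lookups[] = {
 *              {
 *                      .nvmem_name = "foo-eeprom",
 *                      .cell_name = "mac-address",
 *                      .dev_id = "foo-eth.0",
 *                      .con_id = "mac-address",
 *              },
 *      };
 *
 *      // early board init:
 *      nvmem_add_cell_table(&board_cell_table);
 *      nvmem_add_cell_lookups(board_lookups, ARRAY_SIZE(board_lookups));
 */
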
/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
        return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
        return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
        bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");