// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>
#include "nvmem.h"

struct nvmem_cell {
	const char		*name;
	int			offset;
	int			bytes;
	int			bit_offset;
	int			nbits;
	struct device_node	*np;
	struct nvmem_device	*nvmem;
	struct list_head	node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (nvmem->reg_write)
		return nvmem->reg_write(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_simple_remove(&nvmem_ida, nvmem->id);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release	= nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name		= "nvmem",
};

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
					 const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name, nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
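
/*
 * Example (illustrative sketch only, not part of this file): a subsystem
 * that wants to react to providers and cells coming and going can register
 * a notifier block.  The foo_nvmem_notify() handler and its behaviour are
 * assumptions made up for the example; the event codes (NVMEM_ADD,
 * NVMEM_REMOVE, NVMEM_CELL_ADD, NVMEM_CELL_REMOVE) are the ones raised
 * elsewhere in this file.
 *
 *	static int foo_nvmem_notify(struct notifier_block *nb,
 *				    unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_debug("nvmem provider added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nvmem_nb = {
 *		.notifier_call = foo_nvmem_notify,
 *	};
 *
 *	nvmem_register_notifier(&foo_nvmem_nb);
 *	...
 *	nvmem_unregister_notifier(&foo_nvmem_nb);
 */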

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr || (len < 2 * sizeof(u32))) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell)
			return -ENOMEM;

		cell->nvmem = nvmem;
		cell->np = of_node_get(child);
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(
					cell->nbits + cell->bit_offset,
					BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree(cell->name);
			kfree(cell);
			return -EINVAL;
		}

		nvmem_cell_add(cell);
	}

	return 0;
}

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->id = rval;
	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	if (config->id == -1 && config->name) {
		dev_set_name(&nvmem->dev, "%s", config->name);
	} else {
		dev_set_name(&nvmem->dev, "%s%d",
			     config->name ? : "nvmem",
			     config->name ? config->id : nvmem->id);
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

	nvmem->dev.groups = nvmem_sysfs_get_groups(nvmem, config);

	device_initialize(&nvmem->dev);

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_add(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_teardown_compat;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
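
/*
 * Example (illustrative sketch only, not part of this file): a minimal
 * provider registering itself.  The "foo" device, its regmap and the
 * foo_reg_read() helper are assumptions made up for the example; only the
 * nvmem_config fields and the registration calls come from this API.
 *
 *	static int foo_reg_read(void *priv, unsigned int offset,
 *				void *val, size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		return regmap_bulk_read(foo->regmap, offset, val, bytes);
 *	}
 *
 *	static int foo_nvmem_probe(struct platform_device *pdev)
 *	{
 *		struct foo *foo = platform_get_drvdata(pdev);
 *		struct nvmem_config config = {
 *			.dev		= &pdev->dev,
 *			.name		= "foo-eeprom",
 *			.id		= -1,
 *			.owner		= THIS_MODULE,
 *			.stride		= 1,
 *			.word_size	= 1,
 *			.size		= 256,
 *			.reg_read	= foo_reg_read,
 *			.priv		= foo,
 *		};
 *		struct nvmem_device *nvmem;
 *
 *		nvmem = devm_nvmem_register(&pdev->dev, &config);
 *
 *		return PTR_ERR_OR_ZERO(nvmem);
 *	}
 *
 * Leaving .reg_write NULL makes the device read-only (see the read_only
 * computation in nvmem_register() above).  devm_nvmem_register(), defined
 * below, is the managed wrapper around nvmem_register().
 */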

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_del(&nvmem->dev);
	put_device(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for the given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	return __nvmem_device_get(nvmem_np, device_match_of_node);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);
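
/*
 * Example (illustrative sketch only): a consumer grabbing a whole nvmem
 * device by name.  The "mac-eeprom" name and the surrounding driver are
 * assumptions for the example; error handling is trimmed to the essentials.
 *
 *	struct nvmem_device *nvmem;
 *	u8 mac[6];
 *	int ret;
 *
 *	nvmem = nvmem_device_get(dev, "mac-eeprom");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0, sizeof(mac), mac);
 *	nvmem_device_put(nvmem);
 *	if (ret < 0)
 *		return ret;
 *
 * devm_nvmem_device_get() below provides the managed variant that drops
 * the reference automatically when the consumer device goes away.
 */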

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 *	   that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.  The nvmem_device will be released automatically once the
 * consumer device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get an nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *	for the cell at index 0 (the lone cell with no accompanying
 *	nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *	nvmem-cell-names property for DT systems and with the con_id from
 *	the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed by
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
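
/*
 * Example (illustrative sketch only): a consumer looking up a cell by name
 * and reading it.  The "calibration" cell name and the surrounding driver
 * are assumptions for the example; on DT systems the name would come from
 * the nvmem-cell-names property, on non-DT systems from a registered
 * struct nvmem_cell_lookup entry (see nvmem_add_cell_lookups() near the
 * end of this file).
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *data;
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	data = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *
 *	... use data[0..len-1] ...
 *	kfree(data);
 */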

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be freed
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success.  The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
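
/*
 * Example (illustrative sketch only): reading a cell into a heap buffer.
 * nvmem_cell_read() allocates the buffer with kzalloc(), so the consumer
 * owns it and must kfree() it; the cell handle is assumed to have been
 * obtained earlier with nvmem_cell_get() or devm_nvmem_cell_get().
 *
 *	size_t len;
 *	u8 *serial;
 *
 *	serial = nvmem_cell_read(cell, &len);
 *	if (IS_ERR(serial))
 *		return PTR_ERR(serial);
 *
 *	dev_info(dev, "serial is %*phN\n", (int)len, serial);
 *	kfree(serial);
 */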

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
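
/*
 * Example (illustrative sketch only): updating a cell in place.  For
 * byte-aligned cells the write length must match the cell size (see the
 * check at the top of nvmem_cell_write()); the "board-rev" cell name is
 * an assumption for the example.
 *
 *	struct nvmem_cell *cell;
 *	u8 rev = 0x02;
 *	int ret;
 *
 *	cell = nvmem_cell_get(dev, "board-rev");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	ret = nvmem_cell_write(cell, &rev, sizeof(rev));
 *	nvmem_cell_put(cell);
 *	if (ret != sizeof(rev))
 *		return ret < 0 ? ret : -EIO;
 */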

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != sizeof(*val)) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, sizeof(*val));

	kfree(buf);
	nvmem_cell_put(cell);
	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
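
/*
 * Example (illustrative sketch only): the u16/u32 helpers above wrap the
 * whole get/read/put sequence for the common case of a small integer
 * stored in a cell.  The "chip-id" cell name is an assumption for the
 * example.
 *
 *	u32 chip_id;
 *	int ret;
 *
 *	ret = nvmem_cell_read_u32(dev, "chip-id", &chip_id);
 *	if (ret)
 *		return ret;
 *
 * Note that the helpers fail with -EINVAL if the cell size does not match
 * the requested integer width.
 */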

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
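
/*
 * Example (illustrative sketch only): raw offset/length access to a whole
 * nvmem device from a consumer that obtained the handle with
 * nvmem_device_get() or devm_nvmem_device_get().  The 0x40 offset and the
 * blob layout are assumptions for the example.  Reads and writes are
 * forwarded directly to the provider's reg_read/reg_write callbacks.
 *
 *	u8 blob[16];
 *	int ret;
 *
 *	ret = nvmem_device_read(nvmem, 0x40, sizeof(blob), blob);
 *	if (ret < 0)
 *		return ret;
 *
 *	blob[0] |= BIT(0);
 *	ret = nvmem_device_write(nvmem, 0x40, sizeof(blob), blob);
 *	if (ret < 0)
 *		return ret;
 */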

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
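
/*
 * Example (illustrative sketch only): a board file or machine driver
 * describing cells for a provider without device tree.  The "foo-eeprom0"
 * provider name and the cell layout are assumptions; the table is matched
 * against nvmem_dev_name() in nvmem_add_cells_from_table() at registration
 * time.
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name	= "mac-address",
 *			.offset	= 0x0,
 *			.bytes	= 6,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name	= "foo-eeprom0",
 *		.cells		= foo_cells,
 *		.ncells		= ARRAY_SIZE(foo_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_cell_table);
 */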

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
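
/*
 * Example (illustrative sketch only): connecting a named cell to a consumer
 * device on a non-DT platform, so that nvmem_cell_get(dev, "mac-address")
 * issued by the "foo-eth.0" device resolves through
 * nvmem_cell_get_from_lookup().  All names below are assumptions for the
 * example.
 *
 *	static struct nvmem_cell_lookup foo_lookups[] = {
 *		{
 *			.nvmem_name	= "foo-eeprom0",
 *			.cell_name	= "mac-address",
 *			.dev_id		= "foo-eth.0",
 *			.con_id		= "mac-address",
 *		},
 *	};
 *
 *	nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 */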

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");