// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH		8

/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
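
/*
 * Illustrative sketch (not part of this file): a controller driver's
 * ->exec_op() hook could use the two helpers above around its DMA transfer.
 * All foo_* names are hypothetical; a real driver would program its own
 * hardware where the placeholder comment sits.
 */
#if 0
static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct sg_table sgt;
	int ret;

	ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
	if (ret)
		return ret;

	/* ... program the controller and run the DMA transfer here ... */

	spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);

	return 0;
}
#endif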

static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
	u32 mode = mem->spi->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx &&
		     (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx &&
		     (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

static bool spi_mem_check_buswidth(struct spi_mem *mem,
				   const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(mem, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}

bool spi_mem_dtr_supports_op(struct spi_mem *mem,
			     const struct spi_mem_op *op)
{
	if (op->cmd.nbytes != 2)
		return false;

	return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_dtr_supports_op);

bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
		return false;

	if (op->cmd.nbytes != 1)
		return false;

	return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
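
/*
 * Illustrative sketch (hypothetical foo_* names): a controller with DTR
 * support could build its ->supports_op() hook on the two helpers above,
 * delegating DTR operations to spi_mem_dtr_supports_op() and everything
 * else to spi_mem_default_supports_op().
 */
#if 0
static bool foo_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
		return spi_mem_dtr_supports_op(mem, op);

	return spi_mem_default_supports_op(mem, op);
}
#endif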

static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
	if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
		return false;

	return true;
}

static int spi_mem_check_op(const struct spi_mem_op *op)
{
	if (!op->cmd.buswidth || !op->cmd.nbytes)
		return -EINVAL;

	if ((op->addr.nbytes && !op->addr.buswidth) ||
	    (op->dummy.nbytes && !op->dummy.buswidth) ||
	    (op->data.nbytes && !op->data.buswidth))
		return -EINVAL;

	if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->data.buswidth))
		return -EINVAL;

	return 0;
}

static bool spi_mem_internal_supports_op(struct spi_mem *mem,
					 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
		return ctlr->mem_ops->supports_op(mem, op);

	return spi_mem_default_supports_op(mem, op);
}

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual I/O, others might only support
 * specific opcodes, or it can even be that the controller and device both
 * support Quad I/O but the hardware prevents you from using it because only
 * 2 I/O lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (spi_mem_check_op(op))
		return false;

	return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
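
/*
 * Illustrative sketch: a flash driver could probe for a faster read path
 * before using it. The SPI_MEM_OP() helpers come from
 * <linux/spi/spi-mem.h>; opcode 0x6b (Quad Output Fast Read) and the
 * foo_* name are used here purely as an example.
 */
#if 0
static bool foo_can_quad_read(struct spi_mem *mem, void *buf, size_t len)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
			   SPI_MEM_OP_ADDR(3, 0, 1),
			   SPI_MEM_OP_DUMMY(1, 1),
			   SPI_MEM_OP_DATA_IN(len, buf, 4));

	return spi_mem_supports_op(mem, &op);
}
#endif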

static int spi_mem_access_start(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/*
	 * Flush the message queue before executing our SPI memory
	 * operation to prevent preemption of regular SPI transfers.
	 */
	spi_flush_queue(ctlr);

	if (ctlr->auto_runtime_pm) {
		int ret;

		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);

	return 0;
}

static void spi_mem_access_end(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	mutex_unlock(&ctlr->io_mutex);
	mutex_unlock(&ctlr->bus_lock_mutex);

	if (ctlr->auto_runtime_pm)
		pm_runtime_put(ctlr->dev.parent);
}

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;
	int ret;

	ret = spi_mem_check_op(op);
	if (ret)
		return ret;

	if (!spi_mem_internal_supports_op(mem, op))
		return -ENOTSUPP;

	if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->exec_op(mem, op);

		spi_mem_access_end(mem);

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP)
			return ret;
	}

	tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = op->cmd.nbytes;
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		xfers[xferpos].dummy_data = 1;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(mem->spi, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
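
/*
 * Illustrative sketch: reading a JEDEC ID (opcode 0x9f) through
 * spi_mem_exec_op(). The foo_* name is hypothetical; @id must be DMA-able,
 * as required for op->data.buf.{in,out}.
 */
#if 0
static int foo_read_id(struct spi_mem *mem, u8 *id, size_t len)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
			   SPI_MEM_OP_NO_ADDR,
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(len, id, 1));

	return spi_mem_exec_op(mem, &op);
}
#endif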

/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	size_t len;

	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
		return ctlr->mem_ops->adjust_op_size(mem, op);

	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

		if (len > spi_max_transfer_size(mem->spi))
			return -EINVAL;

		op->data.nbytes = min3((size_t)op->data.nbytes,
				       spi_max_transfer_size(mem->spi),
				       spi_max_message_size(mem->spi) -
				       len);
		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
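
/*
 * Illustrative sketch: callers are expected to call spi_mem_adjust_op_size()
 * before each spi_mem_exec_op() and loop until all data has been
 * transferred, since the adjusted op may cover only part of the request.
 * Opcode 0x03 (single-wire read) and the foo_* name are hypothetical.
 */
#if 0
static int foo_read(struct spi_mem *mem, u64 from, size_t len, void *buf)
{
	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
			   SPI_MEM_OP_ADDR(3, from, 1),
			   SPI_MEM_OP_NO_DUMMY,
			   SPI_MEM_OP_DATA_IN(len, buf, 1));
	int ret;

	while (len) {
		op.data.nbytes = len;
		ret = spi_mem_adjust_op_size(mem, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(mem, &op);
		if (ret)
			return ret;

		op.addr.val += op.data.nbytes;
		op.data.buf.in += op.data.nbytes;
		len -= op.data.nbytes;
	}

	return 0;
}
#endif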

static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on their own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
		      const struct spi_mem_dirmap_info *info)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_mem_dirmap_desc *desc;
	int ret = -ENOTSUPP;

	/* Make sure the number of address bytes is between 1 and 8. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->mem = mem;
	desc->info = *info;
	if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
		ret = ctlr->mem_ops->dirmap_create(desc);

	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
			ret = -ENOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
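
/*
 * Illustrative sketch: creating a read direct mapping covering @size bytes
 * of the device. The template op leaves the data length and buffer empty;
 * they are filled in by each spi_mem_dirmap_read() call. The foo_* name and
 * opcode 0x03 are hypothetical.
 */
#if 0
static struct spi_mem_dirmap_desc *
foo_create_read_dirmap(struct spi_mem *mem, u64 size)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
				      SPI_MEM_OP_ADDR(3, 0, 1),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
		.offset = 0,
		.length = size,
	};

	return spi_mem_dirmap_create(mem, &info);
}
#endif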

/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;

	if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
		ctlr->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);

static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
	struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

	spi_mem_dirmap_destroy(desc);
}

/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *				  it to a device
 * @dev: device the dirmap desc will be attached to
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
			   const struct spi_mem_dirmap_info *info)
{
	struct spi_mem_dirmap_desc **ptr, *desc;

	ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	desc = spi_mem_dirmap_create(mem, info);
	if (IS_ERR(desc)) {
		devres_free(ptr);
	} else {
		*ptr = desc;
		devres_add(dev, ptr);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);

static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **ptr = res;

	if (WARN_ON(!ptr || !*ptr))
		return 0;

	return *ptr == data;
}

/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);

/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
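
/*
 * Illustrative sketch: since spi_mem_dirmap_read() may return less than
 * @len, callers typically loop until the whole range has been read (the
 * write side below is symmetric). The foo_* name is hypothetical.
 */
#if 0
static int foo_dirmap_read_all(struct spi_mem_dirmap_desc *desc,
			       u64 offs, size_t len, void *buf)
{
	while (len) {
		ssize_t ret = spi_mem_dirmap_read(desc, offs, len, buf);

		if (ret < 0)
			return ret;
		if (!ret)
			return -EIO;

		offs += ret;
		buf += ret;
		len -= ret;
	}

	return 0;
}
#endif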

/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);

static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_controller *ctlr = spi->controller;
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;

	if (ctlr->mem_ops && ctlr->mem_ops->get_name)
		mem->name = ctlr->mem_ops->get_name(mem);
	else
		mem->name = dev_name(&spi->dev);

	if (IS_ERR_OR_NULL(mem->name))
		return PTR_ERR_OR_ZERO(mem->name);

	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static int spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		return memdrv->remove(mem);

	return 0;
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
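
/*
 * Illustrative sketch: a minimal SPI mem driver registration. The
 * module_spi_mem_driver() helper from <linux/spi/spi-mem.h> expands to
 * spi_mem_driver_register()/spi_mem_driver_unregister() calls in
 * module_init()/module_exit(). All foo_* names are hypothetical.
 */
#if 0
static int foo_probe(struct spi_mem *mem)
{
	/* Detect the memory device and register it with the upper layer. */
	return 0;
}

static const struct spi_device_id foo_ids[] = {
	{ .name = "foo-nor" },
	{ /* sentinel */ }
};

static struct spi_mem_driver foo_driver = {
	.spidrv = {
		.driver = { .name = "foo" },
		.id_table = foo_ids,
	},
	.probe = foo_probe,
};
module_spi_mem_driver(foo_driver);
#endif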

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);