// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH	8

/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
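
/*
 * Illustrative usage sketch (an assumption, not part of the original file):
 * a controller driver's exec_op() path pairing the two helpers above around
 * a DMA transfer. foo_exec_op() and foo_start_dma() are hypothetical names.
 *
 *	static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
 *	{
 *		struct spi_controller *ctlr = mem->spi->controller;
 *		struct sg_table sgt;
 *		int ret;
 *
 *		ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *		if (ret)
 *			return ret;
 *
 *		// start the transfer and wait for its completion
 *		ret = foo_start_dma(ctlr, op, &sgt);
 *
 *		spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *
 *		return ret;
 *	}
 */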

static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
	u32 mode = mem->spi->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & SPI_TX_QUAD)) ||
		    (!tx && (mode & SPI_RX_QUAD)))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(mem, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);

static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
	if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
		return false;

	return true;
}

static int spi_mem_check_op(const struct spi_mem_op *op)
{
	if (!op->cmd.buswidth)
		return -EINVAL;

	if ((op->addr.nbytes && !op->addr.buswidth) ||
	    (op->dummy.nbytes && !op->dummy.buswidth) ||
	    (op->data.nbytes && !op->data.buswidth))
		return -EINVAL;

	if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->data.buswidth))
		return -EINVAL;

	return 0;
}

static bool spi_mem_internal_supports_op(struct spi_mem *mem,
					 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
		return ctlr->mem_ops->supports_op(mem, op);

	return spi_mem_default_supports_op(mem, op);
}

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers only support single or dual I/O, others might only support
 * specific opcodes, or it can even be that the controller and device both
 * support quad I/O but the hardware prevents you from using it because only 2
 * I/O lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (spi_mem_check_op(op))
		return false;

	return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
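
/*
 * Illustrative usage sketch (an assumption, not part of the original file):
 * a SPI memory driver can describe an operation with the SPI_MEM_OP() helpers
 * from <linux/spi/spi-mem.h> and probe support before relying on it. The quad
 * output read opcode (0x6b) and the 1-1-4 layout are example values.
 *
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *			   SPI_MEM_OP_ADDR(3, 0, 1),
 *			   SPI_MEM_OP_DUMMY(1, 1),
 *			   SPI_MEM_OP_DATA_IN(len, buf, 4));
 *
 *	if (!spi_mem_supports_op(mem, &op))
 *		; // fall back to a plain single-bit read, for instance
 */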

static int spi_mem_access_start(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/*
	 * Flush the message queue before executing our SPI memory
	 * operation to prevent preemption of regular SPI transfers.
	 */
	spi_flush_queue(ctlr);

	if (ctlr->auto_runtime_pm) {
		int ret;

		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);

	return 0;
}

static void spi_mem_access_end(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	mutex_unlock(&ctlr->io_mutex);
	mutex_unlock(&ctlr->bus_lock_mutex);

	if (ctlr->auto_runtime_pm)
		pm_runtime_put(ctlr->dev.parent);
}

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;
	int ret;

	ret = spi_mem_check_op(op);
	if (ret)
		return ret;

	if (!spi_mem_internal_supports_op(mem, op))
		return -ENOTSUPP;

	if (ctlr->mem_ops) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->exec_op(mem, op);

		spi_mem_access_end(mem);

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP)
			return ret;
	}

	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
		     op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR and DUMMY cycles with
	 * kmalloc() so we're guaranteed that this buffer is DMA-able, as
	 * required by the SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = sizeof(op->cmd.opcode);
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(mem->spi, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
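
/*
 * Illustrative usage sketch (an assumption, not part of the original file):
 * reading a 3-byte JEDEC ID (opcode 0x9f, no address or dummy cycles) through
 * spi_mem_exec_op(). Error handling and the kfree() are omitted for brevity;
 * the buffer is kmalloc'ed so that it is DMA-able.
 *
 *	u8 *id = kmalloc(3, GFP_KERNEL | GFP_DMA);
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *			   SPI_MEM_OP_NO_ADDR,
 *			   SPI_MEM_OP_NO_DUMMY,
 *			   SPI_MEM_OP_DATA_IN(3, id, 1));
 *
 *	ret = spi_mem_exec_op(mem, &op);
 */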

/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	size_t len;

	len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;

	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
		return ctlr->mem_ops->adjust_op_size(mem, op);

	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		if (len > spi_max_transfer_size(mem->spi))
			return -EINVAL;

		op->data.nbytes = min3((size_t)op->data.nbytes,
				       spi_max_transfer_size(mem->spi),
				       spi_max_message_size(mem->spi) -
				       len);
		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
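
/*
 * Illustrative usage sketch (an assumption, not part of the original file):
 * callers typically invoke spi_mem_adjust_op_size() in a loop, re-issuing the
 * possibly shrunk operation until the whole buffer has been transferred.
 * addr, remaining and the u8 *buf cursor are maintained by the caller.
 *
 *	while (remaining) {
 *		op.addr.val = addr;
 *		op.data.buf.in = buf;
 *		op.data.nbytes = remaining;
 *
 *		ret = spi_mem_adjust_op_size(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		ret = spi_mem_exec_op(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		addr += op.data.nbytes;
 *		buf += op.data.nbytes;
 *		remaining -= op.data.nbytes;
 *	}
 */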

static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on their own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
		      const struct spi_mem_dirmap_info *info)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_mem_dirmap_desc *desc;
	int ret = -ENOTSUPP;

	/* Make sure the number of address bytes is between 1 and 8. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->mem = mem;
	desc->info = *info;
	if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
		ret = ctlr->mem_ops->dirmap_create(desc);

	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
			ret = -ENOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
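
/*
 * Illustrative usage sketch (an assumption, not part of the original file):
 * describing a read direct mapping covering the first 16M of a flash device.
 * The opcode and geometry are example values; data.nbytes and the buffer are
 * left empty in the template since they are filled in per access.
 *
 *	struct spi_mem_dirmap_info info = {
 *		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *				      SPI_MEM_OP_ADDR(3, 0, 1),
 *				      SPI_MEM_OP_DUMMY(1, 1),
 *				      SPI_MEM_OP_DATA_IN(0, NULL, 4)),
 *		.offset = 0,
 *		.length = SZ_16M,
 *	};
 *	struct spi_mem_dirmap_desc *desc;
 *
 *	desc = spi_mem_dirmap_create(mem, &info);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 */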

/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;

	if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
		ctlr->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);

static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
	struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

	spi_mem_dirmap_destroy(desc);
}

/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *				  it to a device
 * @dev: device the dirmap desc will be attached to
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
			   const struct spi_mem_dirmap_info *info)
{
	struct spi_mem_dirmap_desc **ptr, *desc;

	ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	desc = spi_mem_dirmap_create(mem, info);
	if (IS_ERR(desc)) {
		devres_free(ptr);
	} else {
		*ptr = desc;
		devres_add(dev, ptr);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);
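
/*
 * Illustrative usage sketch (an assumption, not part of the original file):
 * in a probe() path, the devm_ variant ties the descriptor's lifetime to
 * @dev, so no explicit spi_mem_dirmap_destroy() is needed in the error and
 * remove paths.
 *
 *	desc = devm_spi_mem_dirmap_create(&mem->spi->dev, mem, &info);
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 */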

static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **ptr = res;

	if (WARN_ON(!ptr || !*ptr))
		return 0;

	return *ptr == data;
}

/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);

/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
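
/*
 * Illustrative usage sketch (an assumption, not part of the original file):
 * since spi_mem_dirmap_read() may return less than @len, callers are expected
 * to loop until the requested amount has been read. buf is a u8 * cursor
 * maintained by the caller.
 *
 *	while (len) {
 *		ssize_t ret = spi_mem_dirmap_read(desc, offs, len, buf);
 *
 *		if (ret < 0)
 *			return ret;
 *		if (!ret)
 *			return -EIO;
 *
 *		offs += ret;
 *		buf += ret;
 *		len -= ret;
 *	}
 *
 * spi_mem_dirmap_write() below follows the same contract on the write side.
 */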

/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);

static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_controller *ctlr = spi->controller;
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;

	if (ctlr->mem_ops && ctlr->mem_ops->get_name)
		mem->name = ctlr->mem_ops->get_name(mem);
	else
		mem->name = dev_name(&spi->dev);

	if (IS_ERR_OR_NULL(mem->name))
		return PTR_ERR(mem->name);

	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static int spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		return memdrv->remove(mem);

	return 0;
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
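
/*
 * Illustrative usage sketch (an assumption, not part of the original file):
 * a minimal SPI memory driver built on this framework. my_probe(), my_remove()
 * and the "my-mem" name are hypothetical; module_spi_mem_driver() is the
 * registration helper declared in <linux/spi/spi-mem.h>.
 *
 *	static struct spi_mem_driver my_mem_driver = {
 *		.spidrv = {
 *			.driver = {
 *				.name = "my-mem",
 *			},
 *		},
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *	module_spi_mem_driver(my_mem_driver);
 */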