// SPDX-License-Identifier: GPL-2.0+
//
// drivers/dma/imx-sdma.c
//
// This file contains a driver for the Freescale Smart DMA engine
//
// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
//
// Based on code from Freescale:
//
// Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.

#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/workqueue.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx-sdma.h>
#include <linux/platform_data/dma-imx.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* SDMA registers */
#define SDMA_H_C0PTR		0x000
#define SDMA_H_INTR		0x004
#define SDMA_H_STATSTOP		0x008
#define SDMA_H_START		0x00c
#define SDMA_H_EVTOVR		0x010
#define SDMA_H_DSPOVR		0x014
#define SDMA_H_HOSTOVR		0x018
#define SDMA_H_EVTPEND		0x01c
#define SDMA_H_DSPENBL		0x020
#define SDMA_H_RESET		0x024
#define SDMA_H_EVTERR		0x028
#define SDMA_H_INTRMSK		0x02c
#define SDMA_H_PSW		0x030
#define SDMA_H_EVTERRDBG	0x034
#define SDMA_H_CONFIG		0x038
#define SDMA_ONCE_ENB		0x040
#define SDMA_ONCE_DATA		0x044
#define SDMA_ONCE_INSTR		0x048
#define SDMA_ONCE_STAT		0x04c
#define SDMA_ONCE_CMD		0x050
#define SDMA_EVT_MIRROR		0x054
#define SDMA_ILLINSTADDR	0x058
#define SDMA_CHN0ADDR		0x05c
#define SDMA_ONCE_RTB		0x060
#define SDMA_XTRIG_CONF1	0x070
#define SDMA_XTRIG_CONF2	0x074
#define SDMA_CHNENBL0_IMX35	0x200
#define SDMA_CHNENBL0_IMX31	0x080
#define SDMA_CHNPRI_0		0x100

/*
 * Buffer descriptor status values.
 */
#define BD_DONE  0x01
#define BD_WRAP  0x02
#define BD_CONT  0x04
#define BD_INTR  0x08
#define BD_RROR  0x10
#define BD_LAST  0x20
#define BD_EXTD  0x80

/*
 * Data Node descriptor status values.
 */
#define DND_END_OF_FRAME  0x80
#define DND_END_OF_XFER	  0x40
#define DND_DONE          0x20
#define DND_UNUSED	  0x01

/*
 * IPCV2 descriptor status values.
 */
#define BD_IPCV2_END_OF_FRAME	0x40

#define IPCV2_MAX_NODES		50
/*
 * Error bit set in the CCB status field by the SDMA,
 * in setbd routine, in case of a transfer error
 */
#define DATA_ERROR	0x10000000

/*
 * Buffer descriptor commands.
 */
#define C0_ADDR		0x01
#define C0_LOAD		0x02
#define C0_DUMP		0x03
#define C0_SETCTX	0x07
#define C0_GETCTX	0x03
#define C0_SETDM	0x01
#define C0_SETPM	0x04
#define C0_GETDM	0x02
#define C0_GETPM	0x08
/*
 * Change endianness indicator in the BD command field
 */
#define CHANGE_ENDIANNESS	0x80

/*
 *  p_2_p watermark_level description
 *	Bits		Name			Description
 *	0-7		Lower WML		Lower watermark level
 *	8		PS			1: Pad Swallowing
 *						0: No Pad Swallowing
 *	9		PA			1: Pad Adding
 *						0: No Pad Adding
 *	10		SPDIF			If this bit is set both source
 *						and destination are on SPBA
 *	11		Source Bit(SP)		1: Source on SPBA
 *						0: Source on AIPS
 *	12		Destination Bit(DP)	1: Destination on SPBA
 *						0: Destination on AIPS
 *	13-15		---------		MUST BE 0
 *	16-23		Higher WML		HWML
 *	24-27		N			Total number of samples after
 *						which Pad adding/Swallowing
 *						must be done. It must be odd.
 *	28		Lower WML Event(LWE)	SDMA events reg to check for
 *						LWML event mask
 *						0: LWE in EVENTS register
 *						1: LWE in EVENTS2 register
 *	29		Higher WML Event(HWE)	SDMA events reg to check for
 *						HWML event mask
 *						0: HWE in EVENTS register
 *						1: HWE in EVENTS2 register
 *	30		---------		MUST BE 0
 *	31		CONT			1: Amount of samples to be
 *						transferred is unknown and
 *						script will keep on
 *						transferring samples as long as
 *						both events are detected and
 *						script must be manually stopped
 *						by the application
 *						0: The amount of samples to be
 *						transferred is equal to the
 *						count field of mode word
 */
#define SDMA_WATERMARK_LEVEL_LWML	0xFF
#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
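
/*
 * A worked example (illustrative values, not taken from the reference
 * manual): for a p_2_p transfer with src_maxburst = 4, dst_maxburst = 8
 * and both peripherals on SPBA, sdma_config() and
 * sdma_set_watermarklevel_for_p2p() below compose
 * (4 & SDMA_WATERMARK_LEVEL_LWML) | ((8 << 16) & SDMA_WATERMARK_LEVEL_HWML) |
 * SDMA_WATERMARK_LEVEL_SP | SDMA_WATERMARK_LEVEL_DP |
 * SDMA_WATERMARK_LEVEL_CONT, i.e. 0x80081804.
 */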

#define SDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define SDMA_DMA_DIRECTIONS	(BIT(DMA_DEV_TO_MEM) | \
				 BIT(DMA_MEM_TO_DEV) | \
				 BIT(DMA_DEV_TO_DEV))

/*
 * Mode/Count of data node descriptors - IPCv2
 */
struct sdma_mode_count {
#define SDMA_BD_MAX_CNT	0xffff
	u32 count   : 16; /* size of the buffer pointed by this BD */
	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
	u32 command :  8; /* command mostly used for channel 0 */
};

/*
 * Buffer descriptor
 */
struct sdma_buffer_descriptor {
	struct sdma_mode_count  mode;
	u32 buffer_addr;	/* address of the buffer described */
	u32 ext_buffer_addr;	/* extended buffer address */
} __attribute__ ((packed));

/**
 * struct sdma_channel_control - Channel control Block
 *
 * @current_bd_ptr:	current buffer descriptor processed
 * @base_bd_ptr:	first element of buffer descriptor array
 * @unused:		padding. The SDMA engine expects an array of 128 byte
 *			control blocks
 */
struct sdma_channel_control {
	u32 current_bd_ptr;
	u32 base_bd_ptr;
	u32 unused[2];
} __attribute__ ((packed));

/**
 * struct sdma_state_registers - SDMA context for a channel
 *
 * @pc:		program counter
 * @unused1:	unused
 * @t:		test bit: status of arithmetic & test instruction
 * @rpc:	return program counter
 * @unused0:	unused
 * @sf:		source fault while loading data
 * @spc:	loop start program counter
 * @unused2:	unused
 * @df:		destination fault while storing data
 * @epc:	loop end program counter
 * @lm:		loop mode
 */
struct sdma_state_registers {
	u32 pc     :14;
	u32 unused1: 1;
	u32 t      : 1;
	u32 rpc    :14;
	u32 unused0: 1;
	u32 sf     : 1;
	u32 spc    :14;
	u32 unused2: 1;
	u32 df     : 1;
	u32 epc    :14;
	u32 lm     : 2;
} __attribute__ ((packed));

/**
 * struct sdma_context_data - sdma context specific to a channel
 *
 * @channel_state:	channel state bits
 * @gReg:		general registers
 * @mda:		burst dma destination address register
 * @msa:		burst dma source address register
 * @ms:			burst dma status register
 * @md:			burst dma data register
 * @pda:		peripheral dma destination address register
 * @psa:		peripheral dma source address register
 * @ps:			peripheral dma status register
 * @pd:			peripheral dma data register
 * @ca:			CRC polynomial register
 * @cs:			CRC accumulator register
 * @dda:		dedicated core destination address register
 * @dsa:		dedicated core source address register
 * @ds:			dedicated core status register
 * @dd:			dedicated core data register
 * @scratch0:		1st word of dedicated ram for context switch
 * @scratch1:		2nd word of dedicated ram for context switch
 * @scratch2:		3rd word of dedicated ram for context switch
 * @scratch3:		4th word of dedicated ram for context switch
 * @scratch4:		5th word of dedicated ram for context switch
 * @scratch5:		6th word of dedicated ram for context switch
 * @scratch6:		7th word of dedicated ram for context switch
 * @scratch7:		8th word of dedicated ram for context switch
 */
struct sdma_context_data {
	struct sdma_state_registers  channel_state;
	u32  gReg[8];
	u32  mda;
	u32  msa;
	u32  ms;
	u32  md;
	u32  pda;
	u32  psa;
	u32  ps;
	u32  pd;
	u32  ca;
	u32  cs;
	u32  dda;
	u32  dsa;
	u32  ds;
	u32  dd;
	u32  scratch0;
	u32  scratch1;
	u32  scratch2;
	u32  scratch3;
	u32  scratch4;
	u32  scratch5;
	u32  scratch6;
	u32  scratch7;
} __attribute__ ((packed));


struct sdma_engine;

/**
 * struct sdma_desc - descriptor structure for one transfer
 * @vd:			descriptor for virt dma
 * @num_bd:		number of buffer descriptors in this transfer
 * @bd_phys:		physical address of bd
 * @buf_tail:		ID of the buffer that was processed
 * @buf_ptail:		ID of the previous buffer that was processed
 * @period_len:		period length, used in cyclic.
 * @chn_real_count:	the real count updated from bd->mode.count
 * @chn_count:		the transfer count set
 * @sdmac:		sdma_channel pointer
 * @bd:			pointer to the allocated buffer descriptors
 */
struct sdma_desc {
	struct virt_dma_desc	vd;
	unsigned int		num_bd;
	dma_addr_t		bd_phys;
	unsigned int		buf_tail;
	unsigned int		buf_ptail;
	unsigned int		period_len;
	unsigned int		chn_real_count;
	unsigned int		chn_count;
	struct sdma_channel	*sdmac;
	struct sdma_buffer_descriptor *bd;
};

/**
 * struct sdma_channel - housekeeping for a SDMA channel
 *
 * @vc:			virt_dma base structure
 * @desc:		sdma description including vd and other special member
 * @sdma:		pointer to the SDMA engine for this channel
 * @channel:		the channel number, matches dmaengine chan_id + 1
 * @direction:		transfer type. Needed for setting SDMA script
 * @peripheral_type:	Peripheral type. Needed for setting SDMA script
 * @event_id0:		aka dma request line
 * @event_id1:		for channels that use 2 events
 * @word_size:		peripheral access size
 * @pc_from_device:	script address for those device_2_memory
 * @pc_to_device:	script address for those memory_2_device
 * @device_to_device:	script address for those device_2_device
 * @pc_to_pc:		script address for those memory_2_memory
 * @flags:		loop mode or not
 * @per_address:	peripheral source or destination address in common case
 *			destination address in p_2_p case
 * @per_address2:	peripheral source address in p_2_p case
 * @event_mask:		event mask used in p_2_p script
 * @watermark_level:	value for gReg[7], some script will extend it from
 *			basic watermark such as p_2_p
 * @shp_addr:		value for gReg[6]
 * @per_addr:		value for gReg[2]
 * @status:		status of dma channel
 * @data:		specific sdma interface structure
 * @terminate_worker:	used to call back into terminate work function
 */
struct sdma_channel {
	struct virt_dma_chan		vc;
	struct sdma_desc		*desc;
	struct sdma_engine		*sdma;
	unsigned int			channel;
	enum dma_transfer_direction	direction;
	enum sdma_peripheral_type	peripheral_type;
	unsigned int			event_id0;
	unsigned int			event_id1;
	enum dma_slave_buswidth		word_size;
	unsigned int			pc_from_device, pc_to_device;
	unsigned int			device_to_device;
	unsigned int			pc_to_pc;
	unsigned long			flags;
	dma_addr_t			per_address, per_address2;
	unsigned long			event_mask[2];
	unsigned long			watermark_level;
	u32				shp_addr, per_addr;
	enum dma_status			status;
	struct imx_dma_data		data;
	struct work_struct		terminate_worker;
};

#define IMX_DMA_SG_LOOP		BIT(0)

#define MAX_DMA_CHANNELS 32
#define MXC_SDMA_DEFAULT_PRIORITY 1
#define MXC_SDMA_MIN_PRIORITY 1
#define MXC_SDMA_MAX_PRIORITY 7

#define SDMA_FIRMWARE_MAGIC 0x414d4453

/**
 * struct sdma_firmware_header - Layout of the firmware image
 *
 * @magic:		"SDMA"
 * @version_major:	increased whenever layout of struct
 *			sdma_script_start_addrs changes.
 * @version_minor:	firmware minor version (for binary compatible changes)
 * @script_addrs_start:	offset of struct sdma_script_start_addrs in this image
 * @num_script_addrs:	Number of script addresses in this image
 * @ram_code_start:	offset of SDMA ram image in this firmware image
 * @ram_code_size:	size of SDMA ram image
 * @script_addrs:	Stores the start address of the SDMA scripts
 *			(in SDMA memory space)
 */
struct sdma_firmware_header {
	u32	magic;
	u32	version_major;
	u32	version_minor;
	u32	script_addrs_start;
	u32	num_script_addrs;
	u32	ram_code_start;
	u32	ram_code_size;
};
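
/*
 * A minimal sketch of the firmware image layout this header describes,
 * as inferred from sdma_load_firmware() below (offsets relative to the
 * start of the image):
 *
 *	0x00			struct sdma_firmware_header
 *	script_addrs_start	struct sdma_script_start_addrs
 *	ram_code_start		RAM code, ram_code_size bytes, downloaded
 *				to SDMA program memory via C0_SETPM
 */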

struct sdma_driver_data {
	int chnenbl0;
	int num_events;
	struct sdma_script_start_addrs	*script_addrs;
};

struct sdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct sdma_channel		channel[MAX_DMA_CHANNELS];
	struct sdma_channel_control	*channel_control;
	void __iomem			*regs;
	struct sdma_context_data	*context;
	dma_addr_t			context_phys;
	struct dma_device		dma_device;
	struct clk			*clk_ipg;
	struct clk			*clk_ahb;
	spinlock_t			channel_0_lock;
	u32				script_number;
	struct sdma_script_start_addrs	*script_addrs;
	const struct sdma_driver_data	*drvdata;
	u32				spba_start_addr;
	u32				spba_end_addr;
	unsigned int			irq;
	dma_addr_t			bd0_phys;
	struct sdma_buffer_descriptor	*bd0;
};

static struct sdma_driver_data sdma_imx31 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX31,
	.num_events = 32,
};

static struct sdma_script_start_addrs sdma_script_imx25 = {
	.ap_2_ap_addr = 729,
	.uart_2_mcu_addr = 904,
	.per_2_app_addr = 1255,
	.mcu_2_app_addr = 834,
	.uartsh_2_mcu_addr = 1120,
	.per_2_shp_addr = 1329,
	.mcu_2_shp_addr = 1048,
	.ata_2_mcu_addr = 1560,
	.mcu_2_ata_addr = 1479,
	.app_2_per_addr = 1189,
	.app_2_mcu_addr = 770,
	.shp_2_per_addr = 1407,
	.shp_2_mcu_addr = 979,
};

static struct sdma_driver_data sdma_imx25 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx25,
};

static struct sdma_driver_data sdma_imx35 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
};

static struct sdma_script_start_addrs sdma_script_imx51 = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.mcu_2_shp_addr = 961,
	.ata_2_mcu_addr = 1473,
	.mcu_2_ata_addr = 1392,
	.app_2_per_addr = 1033,
	.app_2_mcu_addr = 683,
	.shp_2_per_addr = 1251,
	.shp_2_mcu_addr = 892,
};

static struct sdma_driver_data sdma_imx51 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx51,
};

static struct sdma_script_start_addrs sdma_script_imx53 = {
	.ap_2_ap_addr = 642,
	.app_2_mcu_addr = 683,
	.mcu_2_app_addr = 747,
	.uart_2_mcu_addr = 817,
	.shp_2_mcu_addr = 891,
	.mcu_2_shp_addr = 960,
	.uartsh_2_mcu_addr = 1032,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
	.firi_2_mcu_addr = 1193,
	.mcu_2_firi_addr = 1290,
};

static struct sdma_driver_data sdma_imx53 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx53,
};

static struct sdma_script_start_addrs sdma_script_imx6q = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.per_2_per_addr = 6331,
	.uartsh_2_mcu_addr = 1032,
	.mcu_2_shp_addr = 960,
	.app_2_mcu_addr = 683,
	.shp_2_mcu_addr = 891,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
};

static struct sdma_driver_data sdma_imx6q = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx6q,
};

static struct sdma_script_start_addrs sdma_script_imx7d = {
	.ap_2_ap_addr = 644,
	.uart_2_mcu_addr = 819,
	.mcu_2_app_addr = 749,
	.uartsh_2_mcu_addr = 1034,
	.mcu_2_shp_addr = 962,
	.app_2_mcu_addr = 685,
	.shp_2_mcu_addr = 893,
	.spdif_2_mcu_addr = 1102,
	.mcu_2_spdif_addr = 1136,
};

static struct sdma_driver_data sdma_imx7d = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx7d,
};

static const struct platform_device_id sdma_devtypes[] = {
	{
		.name = "imx25-sdma",
		.driver_data = (unsigned long)&sdma_imx25,
	}, {
		.name = "imx31-sdma",
		.driver_data = (unsigned long)&sdma_imx31,
	}, {
		.name = "imx35-sdma",
		.driver_data = (unsigned long)&sdma_imx35,
	}, {
		.name = "imx51-sdma",
		.driver_data = (unsigned long)&sdma_imx51,
	}, {
		.name = "imx53-sdma",
		.driver_data = (unsigned long)&sdma_imx53,
	}, {
		.name = "imx6q-sdma",
		.driver_data = (unsigned long)&sdma_imx6q,
	}, {
		.name = "imx7d-sdma",
		.driver_data = (unsigned long)&sdma_imx7d,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, sdma_devtypes);

static const struct of_device_id sdma_dt_ids[] = {
	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);

#define SDMA_H_CONFIG_DSPDMA	BIT(12) /* indicates if the DSPDMA is used */
#define SDMA_H_CONFIG_RTD_PINS	BIT(11) /* indicates if Real-Time Debug pins are enabled */
#define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
#define SDMA_H_CONFIG_CSM	(3)     /* indicates which context switch mode is selected */

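/*
 * Each SDMA event has its own 32-bit channel-enable register. For
 * example, on a chip using the i.MX35 layout above, event 3 maps to
 * offset SDMA_CHNENBL0_IMX35 + 3 * 4 = 0x20c.
 */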
static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
	u32 chnenbl0 = sdma->drvdata->chnenbl0;

	return chnenbl0 + event * 4;
}

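/*
 * Channel ownership is spread over the EVTOVR/HOSTOVR/DSPOVR override
 * registers. As used below, an override is enabled by clearing the
 * channel's bit in the corresponding register and disabled by setting
 * it; requesting all three overrides at once is rejected.
 */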
static int sdma_config_ownership(struct sdma_channel *sdmac,
		bool event_override, bool mcu_override, bool dsp_override)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long evt, mcu, dsp;

	if (event_override && mcu_override && dsp_override)
		return -EINVAL;

	evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
	mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
	dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);

	if (dsp_override)
		__clear_bit(channel, &dsp);
	else
		__set_bit(channel, &dsp);

	if (event_override)
		__clear_bit(channel, &evt);
	else
		__set_bit(channel, &evt);

	if (mcu_override)
		__clear_bit(channel, &mcu);
	else
		__set_bit(channel, &mcu);

	writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
	writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
	writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);

	return 0;
}

static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
{
	writel(BIT(channel), sdma->regs + SDMA_H_START);
}

/*
 * sdma_run_channel0 - run channel 0 and wait until it has finished
 */
static int sdma_run_channel0(struct sdma_engine *sdma)
{
	int ret;
	u32 reg;

	sdma_enable_channel(sdma, 0);

	ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
						reg, !(reg & 1), 1, 500);
	if (ret)
		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");

	/* Set bits of CONFIG register with dynamic context switching */
	if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
		writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);

	return ret;
}

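/*
 * Scripts are downloaded into SDMA program memory through a C0_SETPM
 * buffer descriptor on channel 0. bd0->mode.count is size / 2 because
 * the count is expressed in 16-bit program-memory words, not bytes (an
 * inference from this driver's usage, see also the unsigned short
 * ram_code pointer in sdma_load_firmware()).
 */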
static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
		u32 address)
{
	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
	void *buf_virt;
	dma_addr_t buf_phys;
	int ret;
	unsigned long flags;

	buf_virt = dma_alloc_coherent(NULL, size, &buf_phys, GFP_KERNEL);
	if (!buf_virt)
		return -ENOMEM;

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	bd0->mode.command = C0_SETPM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = size / 2;
	bd0->buffer_addr = buf_phys;
	bd0->ext_buffer_addr = address;

	memcpy(buf_virt, buf, size);

	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	dma_free_coherent(NULL, size, buf_virt, buf_phys);

	return ret;
}

static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long val;
	u32 chnenbl = chnenbl_ofs(sdma, event);

	val = readl_relaxed(sdma->regs + chnenbl);
	__set_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}

static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	u32 chnenbl = chnenbl_ofs(sdma, event);
	unsigned long val;

	val = readl_relaxed(sdma->regs + chnenbl);
	__clear_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}

static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct sdma_desc, vd.tx);
}

static void sdma_start_desc(struct sdma_channel *sdmac)
{
	struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
	struct sdma_desc *desc;
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	if (!vd) {
		sdmac->desc = NULL;
		return;
	}
	sdmac->desc = desc = to_sdma_desc(&vd->tx);
	/*
	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
	 * the desc allocated will never be freed in vchan_dma_desc_free_list
	 */
	if (!(sdmac->flags & IMX_DMA_SG_LOOP))
		list_del(&vd->node);

	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
	sdma_enable_channel(sdma, sdmac->channel);
}

static void sdma_update_channel_loop(struct sdma_channel *sdmac)
{
	struct sdma_buffer_descriptor *bd;
	int error = 0;
	enum dma_status old_status = sdmac->status;

	/*
	 * loop mode. Iterate over descriptors, re-setup them and
	 * call callback function.
	 */
	while (sdmac->desc) {
		struct sdma_desc *desc = sdmac->desc;

		bd = &desc->bd[desc->buf_tail];

		if (bd->mode.status & BD_DONE)
			break;

		if (bd->mode.status & BD_RROR) {
			bd->mode.status &= ~BD_RROR;
			sdmac->status = DMA_ERROR;
			error = -EIO;
		}

		/*
		 * We use bd->mode.count to calculate the residue, since it
		 * contains the number of bytes present in the current buffer
		 * descriptor.
		 */
		desc->chn_real_count = bd->mode.count;
		bd->mode.status |= BD_DONE;
		bd->mode.count = desc->period_len;
		desc->buf_ptail = desc->buf_tail;
		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;

		/*
		 * The callback is called from the interrupt context in order
		 * to reduce latency and to avoid the risk of altering the
		 * SDMA transaction status by the time the client tasklet is
		 * executed.
		 */
		spin_unlock(&sdmac->vc.lock);
		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
		spin_lock(&sdmac->vc.lock);

		if (error)
			sdmac->status = old_status;
	}
}

static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
{
	struct sdma_channel *sdmac = data;
	struct sdma_buffer_descriptor *bd;
	int i, error = 0;

	sdmac->desc->chn_real_count = 0;
	/*
	 * non loop mode. Iterate over all descriptors, collect
	 * errors and call callback function
	 */
	for (i = 0; i < sdmac->desc->num_bd; i++) {
		bd = &sdmac->desc->bd[i];

		if (bd->mode.status & (BD_DONE | BD_RROR))
			error = -EIO;
		sdmac->desc->chn_real_count += bd->mode.count;
	}

	if (error)
		sdmac->status = DMA_ERROR;
	else
		sdmac->status = DMA_COMPLETE;
}

static irqreturn_t sdma_int_handler(int irq, void *dev_id)
{
	struct sdma_engine *sdma = dev_id;
	unsigned long stat;

	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
	/* channel 0 is special and not handled here, see run_channel0() */
	stat &= ~1;

	while (stat) {
		int channel = fls(stat) - 1;
		struct sdma_channel *sdmac = &sdma->channel[channel];
		struct sdma_desc *desc;

		spin_lock(&sdmac->vc.lock);
		desc = sdmac->desc;
		if (desc) {
			if (sdmac->flags & IMX_DMA_SG_LOOP) {
				sdma_update_channel_loop(sdmac);
			} else {
				mxc_sdma_handle_channel_normal(sdmac);
				vchan_cookie_complete(&desc->vd);
				sdma_start_desc(sdmac);
			}
		}

		spin_unlock(&sdmac->vc.lock);
		__clear_bit(channel, &stat);
	}

	return IRQ_HANDLED;
}

/*
 * sets the pc of SDMA script according to the peripheral type
 */
static void sdma_get_pc(struct sdma_channel *sdmac,
		enum sdma_peripheral_type peripheral_type)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int per_2_emi = 0, emi_2_per = 0;
	/*
	 * These are needed once we start to support transfers between
	 * two peripherals or memory-to-memory transfers
	 */
	int per_2_per = 0, emi_2_emi = 0;

	sdmac->pc_from_device = 0;
	sdmac->pc_to_device = 0;
	sdmac->device_to_device = 0;
	sdmac->pc_to_pc = 0;

	switch (peripheral_type) {
	case IMX_DMATYPE_MEMORY:
		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
		break;
	case IMX_DMATYPE_DSP:
		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
		break;
	case IMX_DMATYPE_FIRI:
		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
		break;
	case IMX_DMATYPE_UART:
		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_UART_SP:
		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ATA:
		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
		break;
	case IMX_DMATYPE_CSPI:
	case IMX_DMATYPE_EXT:
	case IMX_DMATYPE_SSI:
	case IMX_DMATYPE_SAI:
		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_SSI_DUAL:
		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
		break;
	case IMX_DMATYPE_SSI_SP:
	case IMX_DMATYPE_MMC:
	case IMX_DMATYPE_SDHC:
	case IMX_DMATYPE_CSPI_SP:
	case IMX_DMATYPE_ESAI:
	case IMX_DMATYPE_MSHC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ASRC:
		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_ASRC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_MSHC:
		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
		break;
	case IMX_DMATYPE_CCM:
		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
		break;
	case IMX_DMATYPE_SPDIF:
		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
		break;
	case IMX_DMATYPE_IPU_MEMORY:
		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
		break;
	default:
		break;
	}

	sdmac->pc_from_device = per_2_emi;
	sdmac->pc_to_device = emi_2_per;
	sdmac->device_to_device = per_2_per;
	sdmac->pc_to_pc = emi_2_emi;
}

static int sdma_load_context(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	int load_address;
	struct sdma_context_data *context = sdma->context;
	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
	int ret;
	unsigned long flags;

	if (sdmac->direction == DMA_DEV_TO_MEM)
		load_address = sdmac->pc_from_device;
	else if (sdmac->direction == DMA_DEV_TO_DEV)
		load_address = sdmac->device_to_device;
	else if (sdmac->direction == DMA_MEM_TO_MEM)
		load_address = sdmac->pc_to_pc;
	else
		load_address = sdmac->pc_to_device;

	if (load_address < 0)
		return load_address;

	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	memset(context, 0, sizeof(*context));
	context->channel_state.pc = load_address;

	/*
	 * The context carries the event masks, the peripheral base address
	 * and the watermark level to the script's general registers.
	 */
	context->gReg[0] = sdmac->event_mask[1];
	context->gReg[1] = sdmac->event_mask[0];
	context->gReg[2] = sdmac->per_addr;
	context->gReg[6] = sdmac->shp_addr;
	context->gReg[7] = sdmac->watermark_level;

	bd0->mode.command = C0_SETDM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = sizeof(*context) / 4;
	bd0->buffer_addr = sdma->context_phys;
	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	return ret;
}

static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sdma_channel, vc.chan);
}

static int sdma_disable_channel(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
	sdmac->status = DMA_ERROR;

	return 0;
}

static void sdma_channel_terminate_work(struct work_struct *work)
{
	struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
						  terminate_worker);
	unsigned long flags;
	LIST_HEAD(head);

	/*
	 * According to NXP R&D team a delay of one BD SDMA cost time
	 * (maximum is 1ms) should be added after disable of the channel
	 * bit, to ensure SDMA core has really been stopped after SDMA
	 * clients call .device_terminate_all.
	 */
	usleep_range(1000, 2000);

	spin_lock_irqsave(&sdmac->vc.lock, flags);
	vchan_get_all_descriptors(&sdmac->vc, &head);
	sdmac->desc = NULL;
	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
	vchan_dma_desc_free_list(&sdmac->vc, &head);
}

static int sdma_disable_channel_async(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);

	sdma_disable_channel(chan);

	if (sdmac->desc)
		schedule_work(&sdmac->terminate_worker);

	return 0;
}

static void sdma_channel_synchronize(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);

	vchan_synchronize(&sdmac->vc);

	flush_work(&sdmac->terminate_worker);
}

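/*
 * Worked example (illustrative values): with src_maxburst = 4 and
 * dst_maxburst = 2, lwml = 4 > hwml = 2, so the two watermark fields
 * and the two event masks below are swapped; the final word carries
 * LWML = 2 and HWML = 4, which is what the p_2_p script expects in
 * gReg[7].
 */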
static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;

	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;

	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);

	if (sdmac->event_id0 > 31)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;

	if (sdmac->event_id1 > 31)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;

	/*
	 * If LWML(src_maxburst) > HWML(dst_maxburst), we need to swap
	 * LWML and HWML of INFO(A.3.2.5.1), and also swap
	 * r0(event_mask[1]) and r1(event_mask[0]).
	 */
	if (lwml > hwml) {
		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
						SDMA_WATERMARK_LEVEL_HWML);
		sdmac->watermark_level |= hwml;
		sdmac->watermark_level |= lwml << 16;
		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
	}

	if (sdmac->per_address2 >= sdma->spba_start_addr &&
			sdmac->per_address2 <= sdma->spba_end_addr)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;

	if (sdmac->per_address >= sdma->spba_start_addr &&
			sdmac->per_address <= sdma->spba_end_addr)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;

	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
}

static int sdma_config_channel(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	int ret;

	sdma_disable_channel(chan);

	sdmac->event_mask[0] = 0;
	sdmac->event_mask[1] = 0;
	sdmac->shp_addr = 0;
	sdmac->per_addr = 0;

	if (sdmac->event_id0) {
		if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
			return -EINVAL;
		sdma_event_enable(sdmac, sdmac->event_id0);
	}

	if (sdmac->event_id1) {
		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
			return -EINVAL;
		sdma_event_enable(sdmac, sdmac->event_id1);
	}

	switch (sdmac->peripheral_type) {
	case IMX_DMATYPE_DSP:
		sdma_config_ownership(sdmac, false, true, true);
		break;
	case IMX_DMATYPE_MEMORY:
		sdma_config_ownership(sdmac, false, true, false);
		break;
	default:
		sdma_config_ownership(sdmac, true, true, false);
		break;
	}

	sdma_get_pc(sdmac, sdmac->peripheral_type);

	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
		/* Handle multiple event channels differently */
		if (sdmac->event_id1) {
			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
				sdma_set_watermarklevel_for_p2p(sdmac);
		} else
			__set_bit(sdmac->event_id0, sdmac->event_mask);

		/* Address */
		sdmac->shp_addr = sdmac->per_address;
		sdmac->per_addr = sdmac->per_address2;
	} else {
		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
	}

	ret = sdma_load_context(sdmac);

	return ret;
}

static int sdma_set_channel_priority(struct sdma_channel *sdmac,
		unsigned int priority)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	if (priority < MXC_SDMA_MIN_PRIORITY
	    || priority > MXC_SDMA_MAX_PRIORITY) {
		return -EINVAL;
	}

	writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);

	return 0;
}

static int sdma_request_channel0(struct sdma_engine *sdma)
{
	int ret = -EBUSY;

	sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
					GFP_NOWAIT);
	if (!sdma->bd0) {
		ret = -ENOMEM;
		goto out;
	}

	sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
	sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;

	sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
	return 0;
out:
	return ret;
}


static int sdma_alloc_bd(struct sdma_desc *desc)
{
	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
	int ret = 0;

	desc->bd = dma_zalloc_coherent(NULL, bd_size, &desc->bd_phys,
				       GFP_NOWAIT);
	if (!desc->bd) {
		ret = -ENOMEM;
		goto out;
	}
out:
	return ret;
}

static void sdma_free_bd(struct sdma_desc *desc)
{
	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);

	dma_free_coherent(NULL, bd_size, desc->bd, desc->bd_phys);
}

static void sdma_desc_free(struct virt_dma_desc *vd)
{
	struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);

	sdma_free_bd(desc);
	kfree(desc);
}

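/*
 * A hedged sketch of the client-side filter function the comment in
 * sdma_alloc_chan_resources() below refers to; the names are
 * illustrative and not part of this driver:
 *
 *	static bool my_sdma_filter(struct dma_chan *chan, void *param)
 *	{
 *		chan->private = param;	// param: a struct imx_dma_data *
 *		return true;
 *	}
 *
 *	chan = dma_request_channel(mask, my_sdma_filter, &my_imx_dma_data);
 */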
static int sdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct imx_dma_data *data = chan->private;
	struct imx_dma_data mem_data;
	int prio, ret;

	/*
	 * MEMCPY may never setup chan->private by filter function such as
	 * dmatest, thus create 'struct imx_dma_data mem_data' for this case.
	 * Please note in any other slave case, you have to setup chan->private
	 * with 'struct imx_dma_data' in your own filter function if you want to
	 * request dma channel by dma_request_channel() rather than
	 * dma_request_slave_channel(). Otherwise, 'MEMCPY in case?' will appear
	 * to warn you to correct your filter function.
	 */
	if (!data) {
		dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
		mem_data.priority = 2;
		mem_data.peripheral_type = IMX_DMATYPE_MEMORY;
		mem_data.dma_request = 0;
		mem_data.dma_request2 = 0;
		data = &mem_data;

		sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
	}

	switch (data->priority) {
	case DMA_PRIO_HIGH:
		prio = 3;
		break;
	case DMA_PRIO_MEDIUM:
		prio = 2;
		break;
	case DMA_PRIO_LOW:
	default:
		prio = 1;
		break;
	}

	sdmac->peripheral_type = data->peripheral_type;
	sdmac->event_id0 = data->dma_request;
	sdmac->event_id1 = data->dma_request2;

	ret = clk_enable(sdmac->sdma->clk_ipg);
	if (ret)
		return ret;
	ret = clk_enable(sdmac->sdma->clk_ahb);
	if (ret)
		goto disable_clk_ipg;

	ret = sdma_set_channel_priority(sdmac, prio);
	if (ret)
		goto disable_clk_ahb;

	return 0;

disable_clk_ahb:
	clk_disable(sdmac->sdma->clk_ahb);
disable_clk_ipg:
	clk_disable(sdmac->sdma->clk_ipg);
	return ret;
}

static void sdma_free_chan_resources(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;

	sdma_disable_channel_async(chan);

	sdma_channel_synchronize(chan);

	if (sdmac->event_id0)
		sdma_event_disable(sdmac, sdmac->event_id0);
	if (sdmac->event_id1)
		sdma_event_disable(sdmac, sdmac->event_id1);

	sdmac->event_id0 = 0;
	sdmac->event_id1 = 0;

	sdma_set_channel_priority(sdmac, 0);

	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);
}

static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
				enum dma_transfer_direction direction, u32 bds)
{
	struct sdma_desc *desc;

	desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
	if (!desc)
		goto err_out;

	sdmac->status = DMA_IN_PROGRESS;
	sdmac->direction = direction;
	sdmac->flags = 0;

	desc->chn_count = 0;
	desc->chn_real_count = 0;
	desc->buf_tail = 0;
	desc->buf_ptail = 0;
	desc->sdmac = sdmac;
	desc->num_bd = bds;

	if (sdma_alloc_bd(desc))
		goto err_desc_out;

	/* No slave_config called in MEMCPY case, so do here */
	if (direction == DMA_MEM_TO_MEM)
		sdma_config_ownership(sdmac, false, true, false);

	if (sdma_load_context(sdmac))
		goto err_desc_out;

	return desc;

err_desc_out:
	kfree(desc);
err_out:
	return NULL;
}

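/*
 * A hedged sketch of how a generic dmaengine client would drive the
 * memcpy path below (standard dmaengine API, error handling elided):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len, flags);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */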
static struct dma_async_tx_descriptor *sdma_prep_memcpy(
		struct dma_chan *chan, dma_addr_t dma_dst,
		dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	size_t count;
	int i = 0, param;
	struct sdma_buffer_descriptor *bd;
	struct sdma_desc *desc;

	if (!chan || !len)
		return NULL;

	dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
		&dma_src, &dma_dst, len, channel);

	desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM,
				  len / SDMA_BD_MAX_CNT + 1);
	if (!desc)
		return NULL;

	do {
		count = min_t(size_t, len, SDMA_BD_MAX_CNT);
		bd = &desc->bd[i];
		bd->buffer_addr = dma_src;
		bd->ext_buffer_addr = dma_dst;
		bd->mode.count = count;
		desc->chn_count += count;
		bd->mode.command = 0;

		dma_src += count;
		dma_dst += count;
		len -= count;
		i++;

		param = BD_DONE | BD_EXTD | BD_CONT;
		/* last bd */
		if (!len) {
			param |= BD_INTR;
			param |= BD_LAST;
			param &= ~BD_CONT;
		}

		dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n",
			i, count, bd->buffer_addr,
			param & BD_WRAP ? "wrap" : "",
			param & BD_INTR ? " intr" : "");

		bd->mode.status = param;
	} while (len);

	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
}

static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int i, count;
	int channel = sdmac->channel;
	struct scatterlist *sg;
	struct sdma_desc *desc;

	desc = sdma_transfer_init(sdmac, direction, sg_len);
	if (!desc)
		goto err_out;

	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
		sg_len, channel);

	for_each_sg(sgl, sg, sg_len, i) {
		struct sdma_buffer_descriptor *bd = &desc->bd[i];
		int param;

		bd->buffer_addr = sg->dma_address;

		count = sg_dma_len(sg);

		if (count > SDMA_BD_MAX_CNT) {
			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
				channel, count, SDMA_BD_MAX_CNT);
			goto err_bd_out;
		}

		bd->mode.count = count;
		desc->chn_count += count;

		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
			goto err_bd_out;

		switch (sdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			bd->mode.command = 0;
			if (count & 3 || sg->dma_address & 3)
				goto err_bd_out;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			bd->mode.command = 2;
			if (count & 1 || sg->dma_address & 1)
				goto err_bd_out;
			break;
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			bd->mode.command = 1;
			break;
		default:
			goto err_bd_out;
		}

		param = BD_DONE | BD_EXTD | BD_CONT;

		if (i + 1 == sg_len) {
			param |= BD_INTR;
			param |= BD_LAST;
			param &= ~BD_CONT;
		}

		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
			i, count, (u64)sg->dma_address,
			param & BD_WRAP ? "wrap" : "",
			param & BD_INTR ? " intr" : "");

		bd->mode.status = param;
	}

	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
err_bd_out:
	sdma_free_bd(desc);
	kfree(desc);
err_out:
	sdmac->status = DMA_ERROR;
	return NULL;
}

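/*
 * Cyclic transfers (used e.g. for audio) mark every buffer descriptor
 * with BD_INTR and wrap the last one with BD_WRAP, so completion is
 * reported once per period from sdma_update_channel_loop() rather than
 * once for the whole descriptor.
 */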
static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int num_periods = buf_len / period_len;
	int channel = sdmac->channel;
	int i = 0, buf = 0;
	struct sdma_desc *desc;

	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);

	desc = sdma_transfer_init(sdmac, direction, num_periods);
	if (!desc)
		goto err_out;

	desc->period_len = period_len;

	sdmac->flags |= IMX_DMA_SG_LOOP;

	if (period_len > SDMA_BD_MAX_CNT) {
		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
			channel, period_len, SDMA_BD_MAX_CNT);
		goto err_bd_out;
	}

	while (buf < buf_len) {
		struct sdma_buffer_descriptor *bd = &desc->bd[i];
		int param;

		bd->buffer_addr = dma_addr;

		bd->mode.count = period_len;

		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
			goto err_bd_out;
		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
			bd->mode.command = 0;
		else
			bd->mode.command = sdmac->word_size;

		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
		if (i + 1 == num_periods)
			param |= BD_WRAP;

		dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
			i, period_len, (u64)dma_addr,
			param & BD_WRAP ? "wrap" : "",
			param & BD_INTR ? " intr" : "");

		bd->mode.status = param;

		dma_addr += period_len;
		buf += period_len;

		i++;
	}

	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
err_bd_out:
	sdma_free_bd(desc);
	kfree(desc);
err_out:
	sdmac->status = DMA_ERROR;
	return NULL;
}

static int sdma_config(struct dma_chan *chan,
		       struct dma_slave_config *dmaengine_cfg)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);

	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
		sdmac->per_address = dmaengine_cfg->src_addr;
		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
			dmaengine_cfg->src_addr_width;
		sdmac->word_size = dmaengine_cfg->src_addr_width;
	} else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
		sdmac->per_address2 = dmaengine_cfg->src_addr;
		sdmac->per_address = dmaengine_cfg->dst_addr;
		sdmac->watermark_level = dmaengine_cfg->src_maxburst &
			SDMA_WATERMARK_LEVEL_LWML;
		sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
			SDMA_WATERMARK_LEVEL_HWML;
		sdmac->word_size = dmaengine_cfg->dst_addr_width;
	} else {
		sdmac->per_address = dmaengine_cfg->dst_addr;
		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
			dmaengine_cfg->dst_addr_width;
		sdmac->word_size = dmaengine_cfg->dst_addr_width;
	}
	sdmac->direction = dmaengine_cfg->direction;
	return sdma_config_channel(chan);
}

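/*
 * Residue bookkeeping: in loop (cyclic) mode the residue is what is
 * left of the outstanding periods, (num_bd - buf_ptail) * period_len -
 * chn_real_count; otherwise it is the programmed transfer count minus
 * the bytes the buffer descriptors report as completed.
 */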
static enum dma_status sdma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_desc *desc;
	u32 residue;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&sdmac->vc.lock, flags);
	vd = vchan_find_desc(&sdmac->vc, cookie);
	if (vd) {
		desc = to_sdma_desc(&vd->tx);
		if (sdmac->flags & IMX_DMA_SG_LOOP)
			residue = (desc->num_bd - desc->buf_ptail) *
				desc->period_len - desc->chn_real_count;
		else
			residue = desc->chn_count - desc->chn_real_count;
	} else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
		residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
	} else {
		residue = 0;
	}
	spin_unlock_irqrestore(&sdmac->vc.lock, flags);

	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
			 residue);

	return sdmac->status;
}

static void sdma_issue_pending(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&sdmac->vc.lock, flags);
	if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
		sdma_start_desc(sdmac);
	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
}

#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3	41
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4	42

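/*
 * Both script-address structures are walked here as flat arrays of s32;
 * only positive (i.e. valid) entries taken from the firmware override
 * the ROM defaults already recorded in sdma->script_addrs.
 */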
static void sdma_add_scripts(struct sdma_engine *sdma,
		const struct sdma_script_start_addrs *addr)
{
	s32 *addr_arr = (u32 *)addr;
	s32 *saddr_arr = (u32 *)sdma->script_addrs;
	int i;

	/* use the default firmware in ROM if missing external firmware */
	if (!sdma->script_number)
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;

	for (i = 0; i < sdma->script_number; i++)
		if (addr_arr[i] > 0)
			saddr_arr[i] = addr_arr[i];
}

static void sdma_load_firmware(const struct firmware *fw, void *context)
{
	struct sdma_engine *sdma = context;
	const struct sdma_firmware_header *header;
	const struct sdma_script_start_addrs *addr;
	unsigned short *ram_code;

	if (!fw) {
		dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
		/* In this case we just use the ROM firmware. */
		return;
	}

	if (fw->size < sizeof(*header))
		goto err_firmware;

	header = (struct sdma_firmware_header *)fw->data;

	if (header->magic != SDMA_FIRMWARE_MAGIC)
		goto err_firmware;
	if (header->ram_code_start + header->ram_code_size > fw->size)
		goto err_firmware;
	switch (header->version_major) {
	case 1:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
		break;
	case 2:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
		break;
	case 3:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
		break;
	case 4:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4;
		break;
	default:
		dev_err(sdma->dev, "unknown firmware version\n");
		goto err_firmware;
	}

	addr = (void *)header + header->script_addrs_start;
	ram_code = (void *)header + header->ram_code_start;

	clk_enable(sdma->clk_ipg);
	clk_enable(sdma->clk_ahb);
	/* download the RAM image for SDMA */
	sdma_load_script(sdma, ram_code,
			 header->ram_code_size,
			 addr->ram_code_start_addr);
	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);

	sdma_add_scripts(sdma, addr);

	dev_info(sdma->dev, "loaded firmware %d.%d\n",
		 header->version_major,
		 header->version_minor);

err_firmware:
	release_firmware(fw);
}

#define EVENT_REMAP_CELLS 3

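/*
 * The "fsl,sdma-event-remap" property is consumed as <reg shift val>
 * triples, each routing one event by flipping a bit in a GPR register
 * reached through the "gpr" syscon phandle. An illustrative
 * (hypothetical, values made up) device-tree fragment:
 *
 *	gpr = <&gpr>;
 *	fsl,sdma-event-remap = <0 15 1>, <0 16 1>;
 */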
static int sdma_event_remap(struct sdma_engine *sdma)
{
	struct device_node *np = sdma->dev->of_node;
	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
	struct property *event_remap;
	struct regmap *gpr;
	char propname[] = "fsl,sdma-event-remap";
	u32 reg, val, shift, num_map, i;
	int ret = 0;

	/* of_parse_phandle() returns NULL on failure, not an ERR_PTR */
	if (!np || !gpr_np)
		goto out;

	event_remap = of_find_property(np, propname, NULL);
	num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
	if (!num_map) {
		dev_dbg(sdma->dev, "no event needs to be remapped\n");
		goto out;
	} else if (num_map % EVENT_REMAP_CELLS) {
		dev_err(sdma->dev, "the length of %s must be a multiple of %d\n",
			propname, EVENT_REMAP_CELLS);
		ret = -EINVAL;
		goto out;
	}

	gpr = syscon_node_to_regmap(gpr_np);
	if (IS_ERR(gpr)) {
		dev_err(sdma->dev, "failed to get gpr regmap\n");
		ret = PTR_ERR(gpr);
		goto out;
	}

	for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
		ret = of_property_read_u32_index(np, propname, i, &reg);
		if (ret) {
			dev_err(sdma->dev, "failed to read property %s index %d\n",
				propname, i);
			goto out;
		}

		ret = of_property_read_u32_index(np, propname, i + 1, &shift);
		if (ret) {
			dev_err(sdma->dev, "failed to read property %s index %d\n",
				propname, i + 1);
			goto out;
		}

		ret = of_property_read_u32_index(np, propname, i + 2, &val);
		if (ret) {
			dev_err(sdma->dev, "failed to read property %s index %d\n",
				propname, i + 2);
			goto out;
		}

		regmap_update_bits(gpr, reg, BIT(shift), val << shift);
	}

out:
	/* of_node_put() tolerates NULL, so no check is needed here */
	of_node_put(gpr_np);

	return ret;
}

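/*
 * Firmware is requested asynchronously so that probe does not block while
 * the filesystem holding the image may not be mounted yet;
 * sdma_load_firmware() runs once the lookup completes (or fails).
 */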
static int sdma_get_firmware(struct sdma_engine *sdma,
		const char *fw_name)
{
	int ret;

	ret = request_firmware_nowait(THIS_MODULE,
			FW_ACTION_HOTPLUG, fw_name, sdma->dev,
			GFP_KERNEL, sdma, sdma_load_firmware);

	return ret;
}

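/*
 * One-time engine bring-up: allocate the channel control block (CCB) array
 * plus the shared context buffer, mask all channels, and set up channel 0
 * as the command channel used for loading scripts and contexts.
 */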
static int sdma_init(struct sdma_engine *sdma)
{
	int i, ret;
	dma_addr_t ccb_phys;

	ret = clk_enable(sdma->clk_ipg);
	if (ret)
		return ret;
	ret = clk_enable(sdma->clk_ahb);
	if (ret)
		goto disable_clk_ipg;

	/* Be sure SDMA has not started yet */
	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);

	/* One coherent buffer holds the CCB array and the shared context */
	sdma->channel_control = dma_alloc_coherent(sdma->dev,
			MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) +
			sizeof(struct sdma_context_data),
			&ccb_phys, GFP_KERNEL);

	if (!sdma->channel_control) {
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	sdma->context = (void *)sdma->channel_control +
		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
	sdma->context_phys = ccb_phys +
		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);

	/* Zero-out the CCB structures array just allocated */
	memset(sdma->channel_control, 0,
			MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control));

	/* disable all channels */
	for (i = 0; i < sdma->drvdata->num_events; i++)
		writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));

	/* All channels have priority 0 */
	for (i = 0; i < MAX_DMA_CHANNELS; i++)
		writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);

	ret = sdma_request_channel0(sdma);
	if (ret)
		goto err_dma_alloc;

	sdma_config_ownership(&sdma->channel[0], false, true, false);

	/* Set Command Channel (Channel Zero) */
	writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);

	/* Set bits of CONFIG register but with static context switching */
	/* FIXME: Check whether to set ACR bit depending on clock ratios */
	writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);

	writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);

	/* Initialize the command channel's priority */
	sdma_set_channel_priority(&sdma->channel[0], 7);

	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);

	return 0;

err_dma_alloc:
	clk_disable(sdma->clk_ahb);
disable_clk_ipg:
	clk_disable(sdma->clk_ipg);
	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
	return ret;
}

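/*
 * Filter callback for dma_request_channel(): accept only general purpose
 * channels and stash the caller's imx_dma_data in the channel so that
 * device_alloc_chan_resources() can pick up request line and priority.
 */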
static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct imx_dma_data *data = fn_param;

	if (!imx_dma_is_general_purpose(chan))
		return false;

	sdmac->data = *data;
	chan->private = &sdmac->data;

	return true;
}

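/*
 * Translate the three-cell DT DMA specifier <request peripheral-type
 * priority> into a channel. Illustrative client snippet (values are
 * board-specific):
 *
 *	dmas = <&sdma 25 4 0>;
 *	dma-names = "rx";
 */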
static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
				struct of_dma *ofdma)
{
	struct sdma_engine *sdma = ofdma->of_dma_data;
	dma_cap_mask_t mask = sdma->dma_device.cap_mask;
	struct imx_dma_data data;

	if (dma_spec->args_count != 3)
		return NULL;

	data.dma_request = dma_spec->args[0];
	data.peripheral_type = dma_spec->args[1];
	data.priority = dma_spec->args[2];
	/*
	 * Initialize dma_request2 to zero; it is not encoded in the device
	 * tree. For P2P it is filled in via dma_request_channel():
	 * chan->private then points to this imx_dma_data, and
	 * device_alloc_chan_resources() derives sdmac->event_id1 from
	 * imx_dma_data.dma_request2.
	 */
	data.dma_request2 = 0;

	return dma_request_channel(mask, sdma_filter_fn, &data);
}

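/*
 * Probe order: resolve driver data, bring up clocks and the IRQ, initialise
 * the engine, resolve scripts/firmware, and only then register with the
 * dmaengine core and (for DT probing) the OF DMA layer.
 */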
static int sdma_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(sdma_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct device_node *spba_bus;
	const char *fw_name;
	int ret;
	int irq;
	struct resource *iores;
	struct resource spba_res;
	struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int i;
	struct sdma_engine *sdma;
	s32 *saddr_arr;
	const struct sdma_driver_data *drvdata = NULL;

	if (of_id)
		drvdata = of_id->data;
	else if (pdev->id_entry)
		drvdata = (void *)pdev->id_entry->driver_data;

	if (!drvdata) {
		dev_err(&pdev->dev, "unable to find driver data\n");
		return -EINVAL;
	}

	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma)
		return -ENOMEM;

	spin_lock_init(&sdma->channel_0_lock);

	sdma->dev = &pdev->dev;
	sdma->drvdata = drvdata;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(sdma->regs))
		return PTR_ERR(sdma->regs);

	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(sdma->clk_ipg))
		return PTR_ERR(sdma->clk_ipg);

	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(sdma->clk_ahb))
		return PTR_ERR(sdma->clk_ahb);

	ret = clk_prepare(sdma->clk_ipg);
	if (ret)
		return ret;

	ret = clk_prepare(sdma->clk_ahb);
	if (ret)
		goto err_clk;

	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
			       sdma);
	if (ret)
		goto err_irq;

	sdma->irq = irq;

	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
	if (!sdma->script_addrs) {
		ret = -ENOMEM;
		goto err_irq;
	}

	/* initially no scripts available */
	saddr_arr = (s32 *)sdma->script_addrs;
	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
		saddr_arr[i] = -EINVAL;

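	/*
	 * Advertise the transfer types this engine supports: slave
	 * (device<->memory), cyclic and plain memcpy transfers.
	 */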
	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
	dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);

	INIT_LIST_HEAD(&sdma->dma_device.channels);
	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct sdma_channel *sdmac = &sdma->channel[i];

		sdmac->sdma = sdma;

		sdmac->channel = i;
		sdmac->vc.desc_free = sdma_desc_free;
		INIT_WORK(&sdmac->terminate_worker,
				sdma_channel_terminate_work);
		/*
		 * Add the channel to the DMAC list. Do not add channel 0
		 * though because we need it internally in the SDMA driver.
		 * This also means that channel 0 in dmaengine counting
		 * matches sdma channel 1.
		 */
		if (i)
			vchan_init(&sdmac->vc, &sdma->dma_device);
	}

	ret = sdma_init(sdma);
	if (ret)
		goto err_init;

	ret = sdma_event_remap(sdma);
	if (ret)
		goto err_init;

	if (sdma->drvdata->script_addrs)
		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
	if (pdata && pdata->script_addrs)
		sdma_add_scripts(sdma, pdata->script_addrs);

	if (pdata) {
		ret = sdma_get_firmware(sdma, pdata->fw_name);
		if (ret)
			dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
	} else {
		/*
		 * Because the device tree does not encode ROM script
		 * addresses, the RAM script in external firmware is
		 * mandatory for device tree probe; otherwise it fails.
		 */
		ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
					      &fw_name);
		if (ret)
			dev_warn(&pdev->dev, "failed to get firmware name\n");
		else {
			ret = sdma_get_firmware(sdma, fw_name);
			if (ret)
				dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
		}
	}

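	/* Populate the dmaengine operations before registering the device */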
	sdma->dma_device.dev = &pdev->dev;

	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
	sdma->dma_device.device_tx_status = sdma_tx_status;
	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
	sdma->dma_device.device_config = sdma_config;
	sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
	sdma->dma_device.device_synchronize = sdma_channel_synchronize;
	sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
	sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
	sdma->dma_device.device_issue_pending = sdma_issue_pending;
	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
	dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);

	platform_set_drvdata(pdev, sdma);

	ret = dma_async_device_register(&sdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

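	/*
	 * For DT probing, register the OF translation hook and record the
	 * SPBA bus address window, later used to recognise peripherals
	 * sitting behind the SPBA bridge.
	 */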
	if (np) {
		ret = of_dma_controller_register(np, sdma_xlate, sdma);
		if (ret) {
			dev_err(&pdev->dev, "failed to register controller\n");
			goto err_register;
		}

		spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
		ret = of_address_to_resource(spba_bus, 0, &spba_res);
		if (!ret) {
			sdma->spba_start_addr = spba_res.start;
			sdma->spba_end_addr = spba_res.end;
		}
		of_node_put(spba_bus);
	}

	return 0;

err_register:
	dma_async_device_unregister(&sdma->dma_device);
err_init:
	kfree(sdma->script_addrs);
err_irq:
	clk_unprepare(sdma->clk_ahb);
err_clk:
	clk_unprepare(sdma->clk_ipg);
	return ret;
}

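/*
 * Tear down in roughly reverse order of probe: free the IRQ, unregister
 * from the dmaengine core, then release scripts, clocks and per-channel
 * state.
 */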
static int sdma_remove(struct platform_device *pdev)
{
	struct sdma_engine *sdma = platform_get_drvdata(pdev);
	int i;

	devm_free_irq(&pdev->dev, sdma->irq, sdma);
	dma_async_device_unregister(&sdma->dma_device);
	kfree(sdma->script_addrs);
	clk_unprepare(sdma->clk_ahb);
	clk_unprepare(sdma->clk_ipg);
	/* Kill the tasklets and free the channel resources */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct sdma_channel *sdmac = &sdma->channel[i];

		tasklet_kill(&sdmac->vc.task);
		sdma_free_chan_resources(&sdmac->vc.chan);
	}

	platform_set_drvdata(pdev, NULL);
	return 0;
}

static struct platform_driver sdma_driver = {
	.driver = {
		.name = "imx-sdma",
		.of_match_table = sdma_dt_ids,
	},
	.id_table = sdma_devtypes,
	.remove = sdma_remove,
	.probe = sdma_probe,
};

module_platform_driver(sdma_driver);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX SDMA driver");
#if IS_ENABLED(CONFIG_SOC_IMX6Q)
MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
#endif
#if IS_ENABLED(CONFIG_SOC_IMX7D)
MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
#endif
MODULE_LICENSE("GPL");