/*
 * Copyright (C) STMicroelectronics SA 2017
 * Author(s): M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *            Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 *
 * License terms: GPL V2.0.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
 * details.
 *
 * Driver for STM32 MDMA controller
 *
 * Inspired by stm32-dma.c and dma-jz4780.c
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "virt-dma.h"

/* MDMA Generic getter/setter */
#define STM32_MDMA_SHIFT(n)		(ffs(n) - 1)
#define STM32_MDMA_SET(n, mask)		(((n) << STM32_MDMA_SHIFT(mask)) & \
					 (mask))
#define STM32_MDMA_GET(n, mask)		(((n) & (mask)) >> \
					 STM32_MDMA_SHIFT(mask))
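
/*
 * For example, with mask = STM32_MDMA_CCR_PL_MASK (GENMASK(7, 6)),
 * STM32_MDMA_SHIFT(mask) is 6, so STM32_MDMA_SET(2, mask) yields 0x80 and
 * STM32_MDMA_GET(0x80, mask) recovers 2.
 */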

#define STM32_MDMA_GISR0		0x0000 /* MDMA Int Status Reg 0 */
#define STM32_MDMA_GISR1		0x0004 /* MDMA Int Status Reg 1 */

/* MDMA Channel x interrupt/status register */
#define STM32_MDMA_CISR(x)		(0x40 + 0x40 * (x)) /* x = 0..62 */
#define STM32_MDMA_CISR_CRQA		BIT(16)
#define STM32_MDMA_CISR_TCIF		BIT(4)
#define STM32_MDMA_CISR_BTIF		BIT(3)
#define STM32_MDMA_CISR_BRTIF		BIT(2)
#define STM32_MDMA_CISR_CTCIF		BIT(1)
#define STM32_MDMA_CISR_TEIF		BIT(0)

/* MDMA Channel x interrupt flag clear register */
#define STM32_MDMA_CIFCR(x)		(0x44 + 0x40 * (x))
#define STM32_MDMA_CIFCR_CLTCIF		BIT(4)
#define STM32_MDMA_CIFCR_CBTIF		BIT(3)
#define STM32_MDMA_CIFCR_CBRTIF		BIT(2)
#define STM32_MDMA_CIFCR_CCTCIF		BIT(1)
#define STM32_MDMA_CIFCR_CTEIF		BIT(0)
#define STM32_MDMA_CIFCR_CLEAR_ALL	(STM32_MDMA_CIFCR_CLTCIF \
					 | STM32_MDMA_CIFCR_CBTIF \
					 | STM32_MDMA_CIFCR_CBRTIF \
					 | STM32_MDMA_CIFCR_CCTCIF \
					 | STM32_MDMA_CIFCR_CTEIF)

/* MDMA Channel x error status register */
#define STM32_MDMA_CESR(x)		(0x48 + 0x40 * (x))
#define STM32_MDMA_CESR_BSE		BIT(11)
#define STM32_MDMA_CESR_ASR		BIT(10)
#define STM32_MDMA_CESR_TEMD		BIT(9)
#define STM32_MDMA_CESR_TELD		BIT(8)
#define STM32_MDMA_CESR_TED		BIT(7)
#define STM32_MDMA_CESR_TEA_MASK	GENMASK(6, 0)

/* MDMA Channel x control register */
#define STM32_MDMA_CCR(x)		(0x4C + 0x40 * (x))
#define STM32_MDMA_CCR_SWRQ		BIT(16)
#define STM32_MDMA_CCR_WEX		BIT(14)
#define STM32_MDMA_CCR_HEX		BIT(13)
#define STM32_MDMA_CCR_BEX		BIT(12)
#define STM32_MDMA_CCR_PL_MASK		GENMASK(7, 6)
#define STM32_MDMA_CCR_PL(n)		STM32_MDMA_SET(n, \
						STM32_MDMA_CCR_PL_MASK)
#define STM32_MDMA_CCR_TCIE		BIT(5)
#define STM32_MDMA_CCR_BTIE		BIT(4)
#define STM32_MDMA_CCR_BRTIE		BIT(3)
#define STM32_MDMA_CCR_CTCIE		BIT(2)
#define STM32_MDMA_CCR_TEIE		BIT(1)
#define STM32_MDMA_CCR_EN		BIT(0)
#define STM32_MDMA_CCR_IRQ_MASK		(STM32_MDMA_CCR_TCIE \
					 | STM32_MDMA_CCR_BTIE \
					 | STM32_MDMA_CCR_BRTIE \
					 | STM32_MDMA_CCR_CTCIE \
					 | STM32_MDMA_CCR_TEIE)

/* MDMA Channel x transfer configuration register */
#define STM32_MDMA_CTCR(x)		(0x50 + 0x40 * (x))
#define STM32_MDMA_CTCR_BWM		BIT(31)
#define STM32_MDMA_CTCR_SWRM		BIT(30)
#define STM32_MDMA_CTCR_TRGM_MSK	GENMASK(29, 28)
#define STM32_MDMA_CTCR_TRGM(n)		STM32_MDMA_SET((n), \
						STM32_MDMA_CTCR_TRGM_MSK)
#define STM32_MDMA_CTCR_TRGM_GET(n)	STM32_MDMA_GET((n), \
						STM32_MDMA_CTCR_TRGM_MSK)
#define STM32_MDMA_CTCR_PAM_MASK	GENMASK(27, 26)
#define STM32_MDMA_CTCR_PAM(n)		STM32_MDMA_SET(n, \
						STM32_MDMA_CTCR_PAM_MASK)
#define STM32_MDMA_CTCR_PKE		BIT(25)
#define STM32_MDMA_CTCR_TLEN_MSK	GENMASK(24, 18)
#define STM32_MDMA_CTCR_TLEN(n)		STM32_MDMA_SET((n), \
						STM32_MDMA_CTCR_TLEN_MSK)
#define STM32_MDMA_CTCR_TLEN_GET(n)	STM32_MDMA_GET((n), \
						STM32_MDMA_CTCR_TLEN_MSK)
#define STM32_MDMA_CTCR_LEN2_MSK	GENMASK(25, 18)
#define STM32_MDMA_CTCR_LEN2(n)		STM32_MDMA_SET((n), \
						STM32_MDMA_CTCR_LEN2_MSK)
#define STM32_MDMA_CTCR_LEN2_GET(n)	STM32_MDMA_GET((n), \
						STM32_MDMA_CTCR_LEN2_MSK)
#define STM32_MDMA_CTCR_DBURST_MASK	GENMASK(17, 15)
#define STM32_MDMA_CTCR_DBURST(n)	STM32_MDMA_SET(n, \
						STM32_MDMA_CTCR_DBURST_MASK)
#define STM32_MDMA_CTCR_SBURST_MASK	GENMASK(14, 12)
#define STM32_MDMA_CTCR_SBURST(n)	STM32_MDMA_SET(n, \
						STM32_MDMA_CTCR_SBURST_MASK)
#define STM32_MDMA_CTCR_DINCOS_MASK	GENMASK(11, 10)
#define STM32_MDMA_CTCR_DINCOS(n)	STM32_MDMA_SET((n), \
						STM32_MDMA_CTCR_DINCOS_MASK)
#define STM32_MDMA_CTCR_SINCOS_MASK	GENMASK(9, 8)
#define STM32_MDMA_CTCR_SINCOS(n)	STM32_MDMA_SET((n), \
						STM32_MDMA_CTCR_SINCOS_MASK)
#define STM32_MDMA_CTCR_DSIZE_MASK	GENMASK(7, 6)
#define STM32_MDMA_CTCR_DSIZE(n)	STM32_MDMA_SET(n, \
						STM32_MDMA_CTCR_DSIZE_MASK)
#define STM32_MDMA_CTCR_SSIZE_MASK	GENMASK(5, 4)
#define STM32_MDMA_CTCR_SSIZE(n)	STM32_MDMA_SET(n, \
						STM32_MDMA_CTCR_SSIZE_MASK)
#define STM32_MDMA_CTCR_DINC_MASK	GENMASK(3, 2)
#define STM32_MDMA_CTCR_DINC(n)		STM32_MDMA_SET((n), \
						STM32_MDMA_CTCR_DINC_MASK)
#define STM32_MDMA_CTCR_SINC_MASK	GENMASK(1, 0)
#define STM32_MDMA_CTCR_SINC(n)		STM32_MDMA_SET((n), \
						STM32_MDMA_CTCR_SINC_MASK)
#define STM32_MDMA_CTCR_CFG_MASK	(STM32_MDMA_CTCR_SINC_MASK \
					 | STM32_MDMA_CTCR_DINC_MASK \
					 | STM32_MDMA_CTCR_SINCOS_MASK \
					 | STM32_MDMA_CTCR_DINCOS_MASK \
					 | STM32_MDMA_CTCR_LEN2_MSK \
					 | STM32_MDMA_CTCR_TRGM_MSK)

/* MDMA Channel x block number of data register */
#define STM32_MDMA_CBNDTR(x)		(0x54 + 0x40 * (x))
#define STM32_MDMA_CBNDTR_BRC_MK	GENMASK(31, 20)
#define STM32_MDMA_CBNDTR_BRC(n)	STM32_MDMA_SET(n, \
						STM32_MDMA_CBNDTR_BRC_MK)
#define STM32_MDMA_CBNDTR_BRC_GET(n)	STM32_MDMA_GET((n), \
						STM32_MDMA_CBNDTR_BRC_MK)

#define STM32_MDMA_CBNDTR_BRDUM		BIT(19)
#define STM32_MDMA_CBNDTR_BRSUM		BIT(18)
#define STM32_MDMA_CBNDTR_BNDT_MASK	GENMASK(16, 0)
#define STM32_MDMA_CBNDTR_BNDT(n)	STM32_MDMA_SET(n, \
						STM32_MDMA_CBNDTR_BNDT_MASK)

/* MDMA Channel x source address register */
#define STM32_MDMA_CSAR(x)		(0x58 + 0x40 * (x))

/* MDMA Channel x destination address register */
#define STM32_MDMA_CDAR(x)		(0x5C + 0x40 * (x))

/* MDMA Channel x block repeat address update register */
#define STM32_MDMA_CBRUR(x)		(0x60 + 0x40 * (x))
#define STM32_MDMA_CBRUR_DUV_MASK	GENMASK(31, 16)
#define STM32_MDMA_CBRUR_DUV(n)		STM32_MDMA_SET(n, \
						STM32_MDMA_CBRUR_DUV_MASK)
#define STM32_MDMA_CBRUR_SUV_MASK	GENMASK(15, 0)
#define STM32_MDMA_CBRUR_SUV(n)		STM32_MDMA_SET(n, \
						STM32_MDMA_CBRUR_SUV_MASK)

/* MDMA Channel x link address register */
#define STM32_MDMA_CLAR(x)		(0x64 + 0x40 * (x))

/* MDMA Channel x trigger and bus selection register */
#define STM32_MDMA_CTBR(x)		(0x68 + 0x40 * (x))
#define STM32_MDMA_CTBR_DBUS		BIT(17)
#define STM32_MDMA_CTBR_SBUS		BIT(16)
#define STM32_MDMA_CTBR_TSEL_MASK	GENMASK(7, 0)
#define STM32_MDMA_CTBR_TSEL(n)		STM32_MDMA_SET(n, \
						STM32_MDMA_CTBR_TSEL_MASK)

/* MDMA Channel x mask address register */
#define STM32_MDMA_CMAR(x)		(0x70 + 0x40 * (x))

/* MDMA Channel x mask data register */
#define STM32_MDMA_CMDR(x)		(0x74 + 0x40 * (x))

#define STM32_MDMA_MAX_BUF_LEN		128
#define STM32_MDMA_MAX_BLOCK_LEN	65536
#define STM32_MDMA_MAX_CHANNELS		63
#define STM32_MDMA_MAX_REQUESTS		256
#define STM32_MDMA_MAX_BURST		128
#define STM32_MDMA_VERY_HIGH_PRIORITY	0x11

enum stm32_mdma_trigger_mode {
	STM32_MDMA_BUFFER,
	STM32_MDMA_BLOCK,
	STM32_MDMA_BLOCK_REP,
	STM32_MDMA_LINKED_LIST,
};

enum stm32_mdma_width {
	STM32_MDMA_BYTE,
	STM32_MDMA_HALF_WORD,
	STM32_MDMA_WORD,
	STM32_MDMA_DOUBLE_WORD,
};

enum stm32_mdma_inc_mode {
	STM32_MDMA_FIXED = 0,
	STM32_MDMA_INC = 2,
	STM32_MDMA_DEC = 3,
};
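
/*
 * These values are written directly into the 2-bit CTCR SINC/DINC fields
 * (fixed = 0, increment = 2, decrement = 3); the encoding 1 is not used by
 * this driver.
 */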

struct stm32_mdma_chan_config {
	u32 request;
	u32 priority_level;
	u32 transfer_config;
	u32 mask_addr;
	u32 mask_data;
};

struct stm32_mdma_hwdesc {
	u32 ctcr;
	u32 cbndtr;
	u32 csar;
	u32 cdar;
	u32 cbrur;
	u32 clar;
	u32 ctbr;
	u32 dummy;
	u32 cmar;
	u32 cmdr;
} __aligned(64);
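
/*
 * The hwdesc fields mirror the channel register block at CTCR..CMDR (dummy
 * covers the reserved word at offset 0x6C), so each node is an in-memory
 * image of the channel registers: in linked-list mode the controller reloads
 * them from the node pointed to by CLAR, while stm32_mdma_start_transfer()
 * writes the same fields one by one for the first node.
 */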

struct stm32_mdma_desc_node {
	struct stm32_mdma_hwdesc *hwdesc;
	dma_addr_t hwdesc_phys;
};

struct stm32_mdma_desc {
	struct virt_dma_desc vdesc;
	u32 ccr;
	bool cyclic;
	u32 count;
	struct stm32_mdma_desc_node node[];
};

struct stm32_mdma_chan {
	struct virt_dma_chan vchan;
	struct dma_pool *desc_pool;
	u32 id;
	struct stm32_mdma_desc *desc;
	u32 curr_hwdesc;
	struct dma_slave_config dma_config;
	struct stm32_mdma_chan_config chan_config;
	bool busy;
	u32 mem_burst;
	u32 mem_width;
};

struct stm32_mdma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	int irq;
	struct reset_control *rst;
	u32 nr_channels;
	u32 nr_requests;
	u32 nr_ahb_addr_masks;
	struct stm32_mdma_chan chan[STM32_MDMA_MAX_CHANNELS];
	u32 ahb_addr_masks[];
};

static struct stm32_mdma_device *stm32_mdma_get_dev(
	struct stm32_mdma_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct stm32_mdma_device,
			    ddev);
}

static struct stm32_mdma_chan *to_stm32_mdma_chan(struct dma_chan *c)
{
	return container_of(c, struct stm32_mdma_chan, vchan.chan);
}

static struct stm32_mdma_desc *to_stm32_mdma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct stm32_mdma_desc, vdesc);
}

static struct device *chan2dev(struct stm32_mdma_chan *chan)
{
	return &chan->vchan.chan.dev->device;
}

static struct device *mdma2dev(struct stm32_mdma_device *mdma_dev)
{
	return mdma_dev->ddev.dev;
}

static u32 stm32_mdma_read(struct stm32_mdma_device *dmadev, u32 reg)
{
	return readl_relaxed(dmadev->base + reg);
}

static void stm32_mdma_write(struct stm32_mdma_device *dmadev, u32 reg, u32 val)
{
	writel_relaxed(val, dmadev->base + reg);
}

static void stm32_mdma_set_bits(struct stm32_mdma_device *dmadev, u32 reg,
				u32 mask)
{
	void __iomem *addr = dmadev->base + reg;

	writel_relaxed(readl_relaxed(addr) | mask, addr);
}

static void stm32_mdma_clr_bits(struct stm32_mdma_device *dmadev, u32 reg,
				u32 mask)
{
	void __iomem *addr = dmadev->base + reg;

	writel_relaxed(readl_relaxed(addr) & ~mask, addr);
}

static struct stm32_mdma_desc *stm32_mdma_alloc_desc(
		struct stm32_mdma_chan *chan, u32 count)
{
	struct stm32_mdma_desc *desc;
	int i;

	desc = kzalloc(offsetof(typeof(*desc), node[count]), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < count; i++) {
		desc->node[i].hwdesc =
			dma_pool_alloc(chan->desc_pool, GFP_NOWAIT,
				       &desc->node[i].hwdesc_phys);
		if (!desc->node[i].hwdesc)
			goto err;
	}

	desc->count = count;

	return desc;

err:
	dev_err(chan2dev(chan), "Failed to allocate descriptor\n");
	while (--i >= 0)
		dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
			      desc->node[i].hwdesc_phys);
	kfree(desc);
	return NULL;
}

static void stm32_mdma_desc_free(struct virt_dma_desc *vdesc)
{
	struct stm32_mdma_desc *desc = to_stm32_mdma_desc(vdesc);
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(vdesc->tx.chan);
	int i;

	for (i = 0; i < desc->count; i++)
		dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
			      desc->node[i].hwdesc_phys);
	kfree(desc);
}

static int stm32_mdma_get_width(struct stm32_mdma_chan *chan,
				enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return ffs(width) - 1;
	default:
		dev_err(chan2dev(chan), "Dma bus width %i not supported\n",
			width);
		return -EINVAL;
	}
}

static enum dma_slave_buswidth stm32_mdma_get_max_width(dma_addr_t addr,
							u32 buf_len, u32 tlen)
{
	enum dma_slave_buswidth max_width = DMA_SLAVE_BUSWIDTH_8_BYTES;

	for (max_width = DMA_SLAVE_BUSWIDTH_8_BYTES;
	     max_width > DMA_SLAVE_BUSWIDTH_1_BYTE;
	     max_width >>= 1) {
		/*
		 * Address and buffer length both have to be aligned on
		 * bus width
		 */
		if ((((buf_len | addr) & (max_width - 1)) == 0) &&
		    tlen >= max_width)
			break;
	}

	return max_width;
}
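
/*
 * For example, stm32_mdma_get_max_width(0x20000002, 6, 128) steps down from
 * 8-byte accesses until both the address and the length are aligned, and
 * returns DMA_SLAVE_BUSWIDTH_2_BYTES.
 */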

static u32 stm32_mdma_get_best_burst(u32 buf_len, u32 tlen, u32 max_burst,
				     enum dma_slave_buswidth width)
{
	u32 best_burst;

	best_burst = min((u32)1 << __ffs(tlen | buf_len),
			 max_burst * width) / width;

	return (best_burst > 0) ? best_burst : 1;
}
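
/*
 * For example, with buf_len = 96, tlen = 128 and a 4-byte width, the lowest
 * set bit of (tlen | buf_len) gives 32 bytes, so the best burst is
 * min(32, max_burst * 4) / 4, i.e. 8 beats for any max_burst >= 8.
 */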

static int stm32_mdma_disable_chan(struct stm32_mdma_chan *chan)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	u32 ccr, cisr, id, reg;
	int ret;

	id = chan->id;
	reg = STM32_MDMA_CCR(id);

	/* Disable interrupts */
	stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_IRQ_MASK);

	ccr = stm32_mdma_read(dmadev, reg);
	if (ccr & STM32_MDMA_CCR_EN) {
		stm32_mdma_clr_bits(dmadev, reg, STM32_MDMA_CCR_EN);

		/* Ensure that any ongoing transfer has been completed */
		ret = readl_relaxed_poll_timeout_atomic(
				dmadev->base + STM32_MDMA_CISR(id), cisr,
				(cisr & STM32_MDMA_CISR_CTCIF), 10, 1000);
		if (ret) {
			dev_err(chan2dev(chan), "%s: timeout!\n", __func__);
			return -EBUSY;
		}
	}

	return 0;
}

static void stm32_mdma_stop(struct stm32_mdma_chan *chan)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	u32 status;
	int ret;

	/* Disable DMA */
	ret = stm32_mdma_disable_chan(chan);
	if (ret < 0)
		return;

	/* Clear interrupt status if it is there */
	status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
	if (status) {
		dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
			__func__, status);
		stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status);
	}

	chan->busy = false;
}

static void stm32_mdma_set_bus(struct stm32_mdma_device *dmadev, u32 *ctbr,
			       u32 ctbr_mask, u32 src_addr)
{
	u32 mask;
	int i;

	/* Check if memory device is on AHB or AXI */
	*ctbr &= ~ctbr_mask;
	mask = src_addr & 0xF0000000;
	for (i = 0; i < dmadev->nr_ahb_addr_masks; i++) {
		if (mask == dmadev->ahb_addr_masks[i]) {
			*ctbr |= ctbr_mask;
			break;
		}
	}
}
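
/*
 * For example, if the "st,ahb-addr-masks" DT property lists 0x20000000, a
 * buffer at 0x20001000 matches on its top nibble and the SBUS/DBUS bit is
 * set in CTBR so the channel reaches that memory through the AHB bus.
 */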

static int stm32_mdma_set_xfer_param(struct stm32_mdma_chan *chan,
				     enum dma_transfer_direction direction,
				     u32 *mdma_ccr, u32 *mdma_ctcr,
				     u32 *mdma_ctbr, dma_addr_t addr,
				     u32 buf_len)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	struct stm32_mdma_chan_config *chan_config = &chan->chan_config;
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	phys_addr_t src_addr, dst_addr;
	int src_bus_width, dst_bus_width;
	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
	u32 ccr, ctcr, ctbr, tlen;

	src_addr_width = chan->dma_config.src_addr_width;
	dst_addr_width = chan->dma_config.dst_addr_width;
	src_maxburst = chan->dma_config.src_maxburst;
	dst_maxburst = chan->dma_config.dst_maxburst;

	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
	ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
	ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));

	/* Enable HW request mode */
	ctcr &= ~STM32_MDMA_CTCR_SWRM;

	/* Set DINC, SINC, DINCOS, SINCOS, TRGM and TLEN retrieved from DT */
	ctcr &= ~STM32_MDMA_CTCR_CFG_MASK;
	ctcr |= chan_config->transfer_config & STM32_MDMA_CTCR_CFG_MASK;

	/*
	 * For buffer transfer length (TLEN) we have to set
	 * the number of bytes - 1 in CTCR register
	 */
	tlen = STM32_MDMA_CTCR_LEN2_GET(ctcr);
	ctcr &= ~STM32_MDMA_CTCR_LEN2_MSK;
	ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1));

	/* Disable Pack Enable */
	ctcr &= ~STM32_MDMA_CTCR_PKE;

	/* Check burst size constraints */
	if (src_maxburst * src_addr_width > STM32_MDMA_MAX_BURST ||
	    dst_maxburst * dst_addr_width > STM32_MDMA_MAX_BURST) {
		dev_err(chan2dev(chan),
			"burst size * bus width higher than %d bytes\n",
			STM32_MDMA_MAX_BURST);
		return -EINVAL;
	}

	if ((!is_power_of_2(src_maxburst) && src_maxburst > 0) ||
	    (!is_power_of_2(dst_maxburst) && dst_maxburst > 0)) {
		dev_err(chan2dev(chan), "burst size must be a power of 2\n");
		return -EINVAL;
	}

	/*
	 * Configure channel control:
	 * - Clear SW request as in this case this is a HW one
	 * - Clear WEX, HEX and BEX bits
	 * - Set priority level
	 */
	ccr &= ~(STM32_MDMA_CCR_SWRQ | STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX |
		 STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK);
	ccr |= STM32_MDMA_CCR_PL(chan_config->priority_level);

	/* Configure Trigger selection */
	ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK;
	ctbr |= STM32_MDMA_CTBR_TSEL(chan_config->request);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dst_addr = chan->dma_config.dst_addr;

		/* Set device data size */
		dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;
		ctcr &= ~STM32_MDMA_CTCR_DSIZE_MASK;
		ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width);

		/* Set device burst value */
		dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
							   dst_maxburst,
							   dst_addr_width);
		chan->mem_burst = dst_best_burst;
		ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK;
		ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst)));

		/* Set memory data size */
		src_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen);
		chan->mem_width = src_addr_width;
		src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;
		ctcr &= ~(STM32_MDMA_CTCR_SSIZE_MASK |
			  STM32_MDMA_CTCR_SINCOS_MASK);
		ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width) |
			STM32_MDMA_CTCR_SINCOS(src_bus_width);

		/* Set memory burst value */
		src_maxburst = STM32_MDMA_MAX_BUF_LEN / src_addr_width;
		src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
							   src_maxburst,
							   src_addr_width);
		chan->mem_burst = src_best_burst;
		ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK;
		ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst)));

		/* Select bus */
		stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
				   dst_addr);

		if (dst_bus_width != src_bus_width)
			ctcr |= STM32_MDMA_CTCR_PKE;

		/* Set destination address */
		stm32_mdma_write(dmadev, STM32_MDMA_CDAR(chan->id), dst_addr);
		break;

	case DMA_DEV_TO_MEM:
		src_addr = chan->dma_config.src_addr;

		/* Set device data size */
		src_bus_width = stm32_mdma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;
		ctcr &= ~STM32_MDMA_CTCR_SSIZE_MASK;
		ctcr |= STM32_MDMA_CTCR_SSIZE(src_bus_width);

		/* Set device burst value */
		src_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
							   src_maxburst,
							   src_addr_width);
		ctcr &= ~STM32_MDMA_CTCR_SBURST_MASK;
		ctcr |= STM32_MDMA_CTCR_SBURST((ilog2(src_best_burst)));

		/* Set memory data size */
		dst_addr_width = stm32_mdma_get_max_width(addr, buf_len, tlen);
		chan->mem_width = dst_addr_width;
		dst_bus_width = stm32_mdma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;
		ctcr &= ~(STM32_MDMA_CTCR_DSIZE_MASK |
			  STM32_MDMA_CTCR_DINCOS_MASK);
		ctcr |= STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
			STM32_MDMA_CTCR_DINCOS(dst_bus_width);

		/* Set memory burst value */
		dst_maxburst = STM32_MDMA_MAX_BUF_LEN / dst_addr_width;
		dst_best_burst = stm32_mdma_get_best_burst(buf_len, tlen,
							   dst_maxburst,
							   dst_addr_width);
		ctcr &= ~STM32_MDMA_CTCR_DBURST_MASK;
		ctcr |= STM32_MDMA_CTCR_DBURST((ilog2(dst_best_burst)));

		/* Select bus */
		stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
				   src_addr);

		if (dst_bus_width != src_bus_width)
			ctcr |= STM32_MDMA_CTCR_PKE;

		/* Set source address */
		stm32_mdma_write(dmadev, STM32_MDMA_CSAR(chan->id), src_addr);
		break;

	default:
		dev_err(chan2dev(chan), "Dma direction is not supported\n");
		return -EINVAL;
	}

	*mdma_ccr = ccr;
	*mdma_ctcr = ctcr;
	*mdma_ctbr = ctbr;

	return 0;
}

static void stm32_mdma_dump_hwdesc(struct stm32_mdma_chan *chan,
				   struct stm32_mdma_desc_node *node)
{
	dev_dbg(chan2dev(chan), "hwdesc: %pad\n", &node->hwdesc_phys);
	dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n", node->hwdesc->ctcr);
	dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n", node->hwdesc->cbndtr);
	dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n", node->hwdesc->csar);
	dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n", node->hwdesc->cdar);
	dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n", node->hwdesc->cbrur);
	dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n", node->hwdesc->clar);
	dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n", node->hwdesc->ctbr);
	dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n", node->hwdesc->cmar);
	dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n\n", node->hwdesc->cmdr);
}

static void stm32_mdma_setup_hwdesc(struct stm32_mdma_chan *chan,
				    struct stm32_mdma_desc *desc,
				    enum dma_transfer_direction dir, u32 count,
				    dma_addr_t src_addr, dma_addr_t dst_addr,
				    u32 len, u32 ctcr, u32 ctbr, bool is_last,
				    bool is_first, bool is_cyclic)
{
	struct stm32_mdma_chan_config *config = &chan->chan_config;
	struct stm32_mdma_hwdesc *hwdesc;
	u32 next = count + 1;

	hwdesc = desc->node[count].hwdesc;
	hwdesc->ctcr = ctcr;
	hwdesc->cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK |
			    STM32_MDMA_CBNDTR_BRDUM |
			    STM32_MDMA_CBNDTR_BRSUM |
			    STM32_MDMA_CBNDTR_BNDT_MASK);
	hwdesc->cbndtr |= STM32_MDMA_CBNDTR_BNDT(len);
	hwdesc->csar = src_addr;
	hwdesc->cdar = dst_addr;
	hwdesc->cbrur = 0;
	hwdesc->ctbr = ctbr;
	hwdesc->cmar = config->mask_addr;
	hwdesc->cmdr = config->mask_data;

	if (is_last) {
		if (is_cyclic)
			hwdesc->clar = desc->node[0].hwdesc_phys;
		else
			hwdesc->clar = 0;
	} else {
		hwdesc->clar = desc->node[next].hwdesc_phys;
	}

	stm32_mdma_dump_hwdesc(chan, &desc->node[count]);
}
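
/*
 * Each node's CLAR holds the DMA address of the next hardware descriptor:
 * a three-period cyclic transfer, for example, links node 0 -> 1 -> 2 and
 * back to node 0, while a scatter-gather list terminates with CLAR = 0.
 */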

static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
				 struct stm32_mdma_desc *desc,
				 struct scatterlist *sgl, u32 sg_len,
				 enum dma_transfer_direction direction)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	struct dma_slave_config *dma_config = &chan->dma_config;
	struct scatterlist *sg;
	dma_addr_t src_addr, dst_addr;
	u32 ccr, ctcr, ctbr;
	int i, ret = 0;

	for_each_sg(sgl, sg, sg_len, i) {
		if (sg_dma_len(sg) > STM32_MDMA_MAX_BLOCK_LEN) {
			dev_err(chan2dev(chan), "Invalid block len\n");
			return -EINVAL;
		}

		if (direction == DMA_MEM_TO_DEV) {
			src_addr = sg_dma_address(sg);
			dst_addr = dma_config->dst_addr;
			ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
							&ctcr, &ctbr, src_addr,
							sg_dma_len(sg));
			stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
					   src_addr);
		} else {
			src_addr = dma_config->src_addr;
			dst_addr = sg_dma_address(sg);
			ret = stm32_mdma_set_xfer_param(chan, direction, &ccr,
							&ctcr, &ctbr, dst_addr,
							sg_dma_len(sg));
			stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
					   dst_addr);
		}

		if (ret < 0)
			return ret;

		stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr,
					dst_addr, sg_dma_len(sg), ctcr, ctbr,
					i == sg_len - 1, i == 0, false);
	}

	/* Enable interrupts */
	ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
	ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE;
	if (sg_len > 1)
		ccr |= STM32_MDMA_CCR_BTIE;
	desc->ccr = ccr;

	return 0;
}

static struct dma_async_tx_descriptor *
stm32_mdma_prep_slave_sg(struct dma_chan *c, struct scatterlist *sgl,
			 u32 sg_len, enum dma_transfer_direction direction,
			 unsigned long flags, void *context)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	struct stm32_mdma_desc *desc;
	int i, ret;

	/*
	 * Once the DMA channel is set up in cyclic mode, we cannot assign
	 * this channel anymore. The DMA channel needs to be aborted or
	 * terminated to allow another request.
	 */
	if (chan->desc && chan->desc->cyclic) {
		dev_err(chan2dev(chan),
			"Request not allowed when dma in cyclic mode\n");
		return NULL;
	}

	desc = stm32_mdma_alloc_desc(chan, sg_len);
	if (!desc)
		return NULL;

	ret = stm32_mdma_setup_xfer(chan, desc, sgl, sg_len, direction);
	if (ret < 0)
		goto xfer_setup_err;

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

xfer_setup_err:
	for (i = 0; i < desc->count; i++)
		dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
			      desc->node[i].hwdesc_phys);
	kfree(desc);
	return NULL;
}

static struct dma_async_tx_descriptor *
stm32_mdma_prep_dma_cyclic(struct dma_chan *c, dma_addr_t buf_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction direction,
			   unsigned long flags)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	struct dma_slave_config *dma_config = &chan->dma_config;
	struct stm32_mdma_desc *desc;
	dma_addr_t src_addr, dst_addr;
	u32 ccr, ctcr, ctbr, count;
	int i, ret;

	/*
	 * Once the DMA channel is set up in cyclic mode, we cannot assign
	 * this channel anymore. The DMA channel needs to be aborted or
	 * terminated to allow another request.
	 */
	if (chan->desc && chan->desc->cyclic) {
		dev_err(chan2dev(chan),
			"Request not allowed when dma in cyclic mode\n");
		return NULL;
	}

	if (!buf_len || !period_len || period_len > STM32_MDMA_MAX_BLOCK_LEN) {
		dev_err(chan2dev(chan), "Invalid buffer/period len\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
		return NULL;
	}

	count = buf_len / period_len;

	desc = stm32_mdma_alloc_desc(chan, count);
	if (!desc)
		return NULL;

	/* Select bus */
	if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr,
						&ctbr, src_addr, period_len);
		stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS,
				   src_addr);
	} else {
		dst_addr = buf_addr;
		ret = stm32_mdma_set_xfer_param(chan, direction, &ccr, &ctcr,
						&ctbr, dst_addr, period_len);
		stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS,
				   dst_addr);
	}

	if (ret < 0)
		goto xfer_setup_err;

	/* Enable interrupts */
	ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
	ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE | STM32_MDMA_CCR_BTIE;
	desc->ccr = ccr;

	/* Configure hwdesc list */
	for (i = 0; i < count; i++) {
		if (direction == DMA_MEM_TO_DEV) {
			src_addr = buf_addr + i * period_len;
			dst_addr = dma_config->dst_addr;
		} else {
			src_addr = dma_config->src_addr;
			dst_addr = buf_addr + i * period_len;
		}

		stm32_mdma_setup_hwdesc(chan, desc, direction, i, src_addr,
					dst_addr, period_len, ctcr, ctbr,
					i == count - 1, i == 0, true);
	}

	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

xfer_setup_err:
	for (i = 0; i < desc->count; i++)
		dma_pool_free(chan->desc_pool, desc->node[i].hwdesc,
			      desc->node[i].hwdesc_phys);
	kfree(desc);
	return NULL;
}

static struct dma_async_tx_descriptor *
stm32_mdma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
			   size_t len, unsigned long flags)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	enum dma_slave_buswidth max_width;
	struct stm32_mdma_desc *desc;
	struct stm32_mdma_hwdesc *hwdesc;
	u32 ccr, ctcr, ctbr, cbndtr, count, max_burst, mdma_burst;
	u32 best_burst, tlen;
	size_t xfer_count, offset;
	int src_bus_width, dst_bus_width;
	int i;

	/*
	 * Once the DMA channel is set up in cyclic mode, we cannot assign
	 * this channel anymore. The DMA channel needs to be aborted or
	 * terminated to allow another request.
	 */
	if (chan->desc && chan->desc->cyclic) {
		dev_err(chan2dev(chan),
			"Request not allowed when dma in cyclic mode\n");
		return NULL;
	}

	count = DIV_ROUND_UP(len, STM32_MDMA_MAX_BLOCK_LEN);
	desc = stm32_mdma_alloc_desc(chan, count);
	if (!desc)
		return NULL;

	ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
	ctcr = stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id));
	ctbr = stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id));
	cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));

	/* Enable sw req, some interrupts and clear other bits */
	ccr &= ~(STM32_MDMA_CCR_WEX | STM32_MDMA_CCR_HEX |
		 STM32_MDMA_CCR_BEX | STM32_MDMA_CCR_PL_MASK |
		 STM32_MDMA_CCR_IRQ_MASK);
	ccr |= STM32_MDMA_CCR_TEIE;

	/* Enable SW request mode, dest/src inc and clear other bits */
	ctcr &= ~(STM32_MDMA_CTCR_BWM | STM32_MDMA_CTCR_TRGM_MSK |
		  STM32_MDMA_CTCR_PAM_MASK | STM32_MDMA_CTCR_PKE |
		  STM32_MDMA_CTCR_TLEN_MSK | STM32_MDMA_CTCR_DBURST_MASK |
		  STM32_MDMA_CTCR_SBURST_MASK | STM32_MDMA_CTCR_DINCOS_MASK |
		  STM32_MDMA_CTCR_SINCOS_MASK | STM32_MDMA_CTCR_DSIZE_MASK |
		  STM32_MDMA_CTCR_SSIZE_MASK | STM32_MDMA_CTCR_DINC_MASK |
		  STM32_MDMA_CTCR_SINC_MASK);
	ctcr |= STM32_MDMA_CTCR_SWRM | STM32_MDMA_CTCR_SINC(STM32_MDMA_INC) |
		STM32_MDMA_CTCR_DINC(STM32_MDMA_INC);

	/* Reset HW request */
	ctbr &= ~STM32_MDMA_CTBR_TSEL_MASK;

	/* Select bus */
	stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_SBUS, src);
	stm32_mdma_set_bus(dmadev, &ctbr, STM32_MDMA_CTBR_DBUS, dest);

	/* Clear CBNDTR registers */
	cbndtr &= ~(STM32_MDMA_CBNDTR_BRC_MK | STM32_MDMA_CBNDTR_BRDUM |
		    STM32_MDMA_CBNDTR_BRSUM | STM32_MDMA_CBNDTR_BNDT_MASK);

	if (len <= STM32_MDMA_MAX_BLOCK_LEN) {
		cbndtr |= STM32_MDMA_CBNDTR_BNDT(len);
		if (len <= STM32_MDMA_MAX_BUF_LEN) {
			/* Setup a buffer transfer */
			ccr |= STM32_MDMA_CCR_TCIE | STM32_MDMA_CCR_CTCIE;
			ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BUFFER);
		} else {
			/* Setup a block transfer */
			ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE;
			ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_BLOCK);
		}

		tlen = STM32_MDMA_MAX_BUF_LEN;
		ctcr |= STM32_MDMA_CTCR_TLEN((tlen - 1));

		/* Set source best burst size */
		max_width = stm32_mdma_get_max_width(src, len, tlen);
		src_bus_width = stm32_mdma_get_width(chan, max_width);

		max_burst = tlen / max_width;
		best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst,
						       max_width);
		mdma_burst = ilog2(best_burst);

		ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) |
			STM32_MDMA_CTCR_SSIZE(src_bus_width) |
			STM32_MDMA_CTCR_SINCOS(src_bus_width);

		/* Set destination best burst size */
		max_width = stm32_mdma_get_max_width(dest, len, tlen);
		dst_bus_width = stm32_mdma_get_width(chan, max_width);

		max_burst = tlen / max_width;
		best_burst = stm32_mdma_get_best_burst(len, tlen, max_burst,
						       max_width);
		mdma_burst = ilog2(best_burst);

		ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) |
			STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
			STM32_MDMA_CTCR_DINCOS(dst_bus_width);

		if (dst_bus_width != src_bus_width)
			ctcr |= STM32_MDMA_CTCR_PKE;

		/* Prepare hardware descriptor */
		hwdesc = desc->node[0].hwdesc;
		hwdesc->ctcr = ctcr;
		hwdesc->cbndtr = cbndtr;
		hwdesc->csar = src;
		hwdesc->cdar = dest;
		hwdesc->cbrur = 0;
		hwdesc->clar = 0;
		hwdesc->ctbr = ctbr;
		hwdesc->cmar = 0;
		hwdesc->cmdr = 0;

		stm32_mdma_dump_hwdesc(chan, &desc->node[0]);
	} else {
		/* Setup a LLI transfer */
		ctcr |= STM32_MDMA_CTCR_TRGM(STM32_MDMA_LINKED_LIST) |
			STM32_MDMA_CTCR_TLEN((STM32_MDMA_MAX_BUF_LEN - 1));
		ccr |= STM32_MDMA_CCR_BTIE | STM32_MDMA_CCR_CTCIE;
		tlen = STM32_MDMA_MAX_BUF_LEN;

		for (i = 0, offset = 0; offset < len;
		     i++, offset += xfer_count) {
			xfer_count = min_t(size_t, len - offset,
					   STM32_MDMA_MAX_BLOCK_LEN);

			/* Set source best burst size */
			max_width = stm32_mdma_get_max_width(src, len, tlen);
			src_bus_width = stm32_mdma_get_width(chan, max_width);

			max_burst = tlen / max_width;
			best_burst = stm32_mdma_get_best_burst(len, tlen,
							       max_burst,
							       max_width);
			mdma_burst = ilog2(best_burst);

			ctcr |= STM32_MDMA_CTCR_SBURST(mdma_burst) |
				STM32_MDMA_CTCR_SSIZE(src_bus_width) |
				STM32_MDMA_CTCR_SINCOS(src_bus_width);

			/* Set destination best burst size */
			max_width = stm32_mdma_get_max_width(dest, len, tlen);
			dst_bus_width = stm32_mdma_get_width(chan, max_width);

			max_burst = tlen / max_width;
			best_burst = stm32_mdma_get_best_burst(len, tlen,
							       max_burst,
							       max_width);
			mdma_burst = ilog2(best_burst);

			ctcr |= STM32_MDMA_CTCR_DBURST(mdma_burst) |
				STM32_MDMA_CTCR_DSIZE(dst_bus_width) |
				STM32_MDMA_CTCR_DINCOS(dst_bus_width);

			if (dst_bus_width != src_bus_width)
				ctcr |= STM32_MDMA_CTCR_PKE;

			/* Prepare hardware descriptor */
			stm32_mdma_setup_hwdesc(chan, desc, DMA_MEM_TO_MEM, i,
						src + offset, dest + offset,
						xfer_count, ctcr, ctbr,
						i == count - 1, i == 0, false);
		}
	}

	desc->ccr = ccr;

	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static void stm32_mdma_dump_reg(struct stm32_mdma_chan *chan)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);

	dev_dbg(chan2dev(chan), "CCR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)));
	dev_dbg(chan2dev(chan), "CTCR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CTCR(chan->id)));
	dev_dbg(chan2dev(chan), "CBNDTR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id)));
	dev_dbg(chan2dev(chan), "CSAR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CSAR(chan->id)));
	dev_dbg(chan2dev(chan), "CDAR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CDAR(chan->id)));
	dev_dbg(chan2dev(chan), "CBRUR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CBRUR(chan->id)));
	dev_dbg(chan2dev(chan), "CLAR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id)));
	dev_dbg(chan2dev(chan), "CTBR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CTBR(chan->id)));
	dev_dbg(chan2dev(chan), "CMAR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CMAR(chan->id)));
	dev_dbg(chan2dev(chan), "CMDR: 0x%08x\n",
		stm32_mdma_read(dmadev, STM32_MDMA_CMDR(chan->id)));
}

static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	struct virt_dma_desc *vdesc;
	struct stm32_mdma_hwdesc *hwdesc;
	u32 id = chan->id;
	u32 status, reg;

	vdesc = vchan_next_desc(&chan->vchan);
	if (!vdesc) {
		chan->desc = NULL;
		return;
	}

	chan->desc = to_stm32_mdma_desc(vdesc);
	hwdesc = chan->desc->node[0].hwdesc;
	chan->curr_hwdesc = 0;

	stm32_mdma_write(dmadev, STM32_MDMA_CCR(id), chan->desc->ccr);
	stm32_mdma_write(dmadev, STM32_MDMA_CTCR(id), hwdesc->ctcr);
	stm32_mdma_write(dmadev, STM32_MDMA_CBNDTR(id), hwdesc->cbndtr);
	stm32_mdma_write(dmadev, STM32_MDMA_CSAR(id), hwdesc->csar);
	stm32_mdma_write(dmadev, STM32_MDMA_CDAR(id), hwdesc->cdar);
	stm32_mdma_write(dmadev, STM32_MDMA_CBRUR(id), hwdesc->cbrur);
	stm32_mdma_write(dmadev, STM32_MDMA_CLAR(id), hwdesc->clar);
	stm32_mdma_write(dmadev, STM32_MDMA_CTBR(id), hwdesc->ctbr);
	stm32_mdma_write(dmadev, STM32_MDMA_CMAR(id), hwdesc->cmar);
	stm32_mdma_write(dmadev, STM32_MDMA_CMDR(id), hwdesc->cmdr);

	/* Clear interrupt status if it is there */
	status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(id));
	if (status)
		stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(id), status);

	stm32_mdma_dump_reg(chan);

	/* Start DMA */
	stm32_mdma_set_bits(dmadev, STM32_MDMA_CCR(id), STM32_MDMA_CCR_EN);

	/* Set SW request in case of MEM2MEM transfer */
	if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM) {
		reg = STM32_MDMA_CCR(id);
		stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ);
	}

	chan->busy = true;

	dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}

static void stm32_mdma_issue_pending(struct dma_chan *c)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (!vchan_issue_pending(&chan->vchan))
		goto end;

	dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);

	if (!chan->desc && !chan->busy)
		stm32_mdma_start_transfer(chan);

end:
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static int stm32_mdma_pause(struct dma_chan *c)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	ret = stm32_mdma_disable_chan(chan);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	if (!ret)
		dev_dbg(chan2dev(chan), "vchan %pK: pause\n", &chan->vchan);

	return ret;
}

static int stm32_mdma_resume(struct dma_chan *c)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	struct stm32_mdma_hwdesc *hwdesc;
	unsigned long flags;
	u32 status, reg;

	hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	/* Re-configure control register */
	stm32_mdma_write(dmadev, STM32_MDMA_CCR(chan->id), chan->desc->ccr);

	/* Clear interrupt status if it is there */
	status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
	if (status)
		stm32_mdma_set_bits(dmadev, STM32_MDMA_CIFCR(chan->id), status);

	stm32_mdma_dump_reg(chan);

	/* Re-start DMA */
	reg = STM32_MDMA_CCR(chan->id);
	stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_EN);

	/* Set SW request in case of MEM2MEM transfer */
	if (hwdesc->ctcr & STM32_MDMA_CTCR_SWRM)
		stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CCR_SWRQ);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	dev_dbg(chan2dev(chan), "vchan %pK: resume\n", &chan->vchan);

	return 0;
}

static int stm32_mdma_terminate_all(struct dma_chan *c)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (chan->busy) {
		stm32_mdma_stop(chan);
		chan->desc = NULL;
	}
	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void stm32_mdma_synchronize(struct dma_chan *c)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);

	vchan_synchronize(&chan->vchan);
}

static int stm32_mdma_slave_config(struct dma_chan *c,
				   struct dma_slave_config *config)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);

	memcpy(&chan->dma_config, config, sizeof(*config));

	return 0;
}

static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
				      struct stm32_mdma_desc *desc,
				      u32 curr_hwdesc)
{
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	struct stm32_mdma_hwdesc *hwdesc = desc->node[0].hwdesc;
	u32 cbndtr, residue, modulo, burst_size;
	int i;

	residue = 0;
	for (i = curr_hwdesc + 1; i < desc->count; i++) {
		hwdesc = desc->node[i].hwdesc;
		residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
	}
	cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
	residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;

	if (!chan->mem_burst)
		return residue;

	burst_size = chan->mem_burst * chan->mem_width;
	modulo = residue % burst_size;
	if (modulo)
		residue = residue - modulo + burst_size;

	return residue;
}
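
/*
 * The burst alignment above rounds the residue up to a full memory burst:
 * for example, with an 8-beat burst of 4-byte words (burst_size = 32), a
 * raw residue of 100 bytes is reported as 100 - 4 + 32 = 128 bytes.
 */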

static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	u32 residue = 0;

	status = dma_cookie_status(c, cookie, state);
	if ((status == DMA_COMPLETE) || (!state))
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	vdesc = vchan_find_desc(&chan->vchan, cookie);
	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
		residue = stm32_mdma_desc_residue(chan, chan->desc,
						  chan->curr_hwdesc);
	else if (vdesc)
		residue = stm32_mdma_desc_residue(chan,
						  to_stm32_mdma_desc(vdesc), 0);
	dma_set_residue(state, residue);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return status;
}

static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
{
	list_del(&chan->desc->vdesc.node);
	vchan_cookie_complete(&chan->desc->vdesc);
	chan->desc = NULL;
	chan->busy = false;

	/* Start the next transfer if this driver has a next desc */
	stm32_mdma_start_transfer(chan);
}

static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
{
	struct stm32_mdma_device *dmadev = devid;
	struct stm32_mdma_chan *chan;
	u32 reg, id, ien, status, flag;

	/* Find out which channel generates the interrupt */
	status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
	if (status) {
		id = __ffs(status);
	} else {
		status = readl_relaxed(dmadev->base + STM32_MDMA_GISR1);
		if (!status) {
			dev_dbg(mdma2dev(dmadev), "spurious it\n");
			return IRQ_NONE;
		}
		id = __ffs(status);
		/*
		 * GISR0 provides status for channels 0 to 31, while GISR1
		 * provides status for channels 32 to 62.
		 */
		id += 32;
	}

	chan = &dmadev->chan[id];
	if (!chan) {
		dev_err(chan2dev(chan), "MDMA channel not initialized\n");
		goto exit;
	}

	/* Handle interrupt for the channel */
	spin_lock(&chan->vchan.lock);
	status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
	ien = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
	ien &= STM32_MDMA_CCR_IRQ_MASK;
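	/*
	 * The CCR interrupt enable bits (bits 5..1) sit one position above
	 * the CISR flag bits (bits 4..0), so shift them down before
	 * comparing with the status.
	 */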
	ien >>= 1;

	if (!(status & ien)) {
		spin_unlock(&chan->vchan.lock);
		dev_dbg(chan2dev(chan),
			"spurious it (status=0x%04x, ien=0x%04x)\n",
			status, ien);
		return IRQ_NONE;
	}

	flag = __ffs(status & ien);
	reg = STM32_MDMA_CIFCR(chan->id);

	switch (1 << flag) {
	case STM32_MDMA_CISR_TEIF:
		id = chan->id;
		status = readl_relaxed(dmadev->base + STM32_MDMA_CESR(id));
		dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n", status);
		stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CTEIF);
		break;

	case STM32_MDMA_CISR_CTCIF:
		stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CCTCIF);
		stm32_mdma_xfer_end(chan);
		break;

	case STM32_MDMA_CISR_BRTIF:
		stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBRTIF);
		break;

	case STM32_MDMA_CISR_BTIF:
		stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBTIF);
		chan->curr_hwdesc++;
		if (chan->desc && chan->desc->cyclic) {
			if (chan->curr_hwdesc == chan->desc->count)
				chan->curr_hwdesc = 0;
			vchan_cyclic_callback(&chan->desc->vdesc);
		}
		break;

	case STM32_MDMA_CISR_TCIF:
		stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CLTCIF);
		break;

	default:
		dev_err(chan2dev(chan), "it %d unhandled (status=0x%04x)\n",
			1 << flag, status);
	}

	spin_unlock(&chan->vchan.lock);

exit:
	return IRQ_HANDLED;
}

static int stm32_mdma_alloc_chan_resources(struct dma_chan *c)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	int ret;

	chan->desc_pool = dmam_pool_create(dev_name(&c->dev->device),
					   c->device->dev,
					   sizeof(struct stm32_mdma_hwdesc),
					   __alignof__(struct stm32_mdma_hwdesc),
					   0);
	if (!chan->desc_pool) {
		dev_err(chan2dev(chan), "failed to allocate descriptor pool\n");
		return -ENOMEM;
	}

	ret = pm_runtime_get_sync(dmadev->ddev.dev);
	if (ret < 0)
		return ret;

	ret = stm32_mdma_disable_chan(chan);
	if (ret < 0)
		pm_runtime_put(dmadev->ddev.dev);

	return ret;
}

static void stm32_mdma_free_chan_resources(struct dma_chan *c)
{
	struct stm32_mdma_chan *chan = to_stm32_mdma_chan(c);
	struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);

	if (chan->busy) {
		spin_lock_irqsave(&chan->vchan.lock, flags);
		stm32_mdma_stop(chan);
		chan->desc = NULL;
		spin_unlock_irqrestore(&chan->vchan.lock, flags);
	}

	pm_runtime_put(dmadev->ddev.dev);
	vchan_free_chan_resources(to_virt_chan(c));
	dmam_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}
1491 | ||
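/*
 * Translate a DT "dmas" specifier into a channel. The five cells are,
 * in order: request line, priority level, transfer configuration,
 * mask address and mask data.
 */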
static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct stm32_mdma_device *dmadev = ofdma->of_dma_data;
	struct stm32_mdma_chan *chan;
	struct dma_chan *c;
	struct stm32_mdma_chan_config config;

	if (dma_spec->args_count < 5) {
		dev_err(mdma2dev(dmadev), "Bad number of args\n");
		return NULL;
	}

	config.request = dma_spec->args[0];
	config.priority_level = dma_spec->args[1];
	config.transfer_config = dma_spec->args[2];
	config.mask_addr = dma_spec->args[3];
	config.mask_data = dma_spec->args[4];

	if (config.request >= dmadev->nr_requests) {
		dev_err(mdma2dev(dmadev), "Bad request line\n");
		return NULL;
	}

	if (config.priority_level > STM32_MDMA_VERY_HIGH_PRIORITY) {
		dev_err(mdma2dev(dmadev), "Priority level not supported\n");
		return NULL;
	}

	c = dma_get_any_slave_channel(&dmadev->ddev);
	if (!c) {
		dev_err(mdma2dev(dmadev), "No more channels available\n");
		return NULL;
	}

	chan = to_stm32_mdma_chan(c);
	chan->chan_config = config;

	return c;
}

static const struct of_device_id stm32_mdma_of_match[] = {
	{ .compatible = "st,stm32h7-mdma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_mdma_of_match);

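/*
 * Probe: read the channel/request counts from the firmware properties
 * (falling back to the hardware maximums), map the registers, enable
 * the clock, reset the controller, then register with the dmaengine
 * core and the OF DMA translation layer before enabling runtime PM.
 */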
static int stm32_mdma_probe(struct platform_device *pdev)
{
	struct stm32_mdma_chan *chan;
	struct stm32_mdma_device *dmadev;
	struct dma_device *dd;
	struct device_node *of_node;
	struct resource *res;
	u32 nr_channels, nr_requests;
	int i, count, ret;

	of_node = pdev->dev.of_node;
	if (!of_node)
		return -ENODEV;

	ret = device_property_read_u32(&pdev->dev, "dma-channels",
				       &nr_channels);
	if (ret) {
		nr_channels = STM32_MDMA_MAX_CHANNELS;
		dev_warn(&pdev->dev, "MDMA defaulting to %i channels\n",
			 nr_channels);
	}

	ret = device_property_read_u32(&pdev->dev, "dma-requests",
				       &nr_requests);
	if (ret) {
		nr_requests = STM32_MDMA_MAX_REQUESTS;
		dev_warn(&pdev->dev, "MDMA defaulting to %i request lines\n",
			 nr_requests);
	}

	count = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
					       NULL, 0);
	if (count < 0)
		count = 0;

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev) + sizeof(u32) * count,
			      GFP_KERNEL);
	if (!dmadev)
		return -ENOMEM;

	dmadev->nr_channels = nr_channels;
	dmadev->nr_requests = nr_requests;
	ret = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
					     dmadev->ahb_addr_masks,
					     count);
	if (ret)
		return ret;
	dmadev->nr_ahb_addr_masks = count;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmadev->base))
		return PTR_ERR(dmadev->base);

	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmadev->clk)) {
		ret = PTR_ERR(dmadev->clk);
		if (ret == -EPROBE_DEFER)
			dev_info(&pdev->dev, "Missing controller clock\n");
		return ret;
	}

	ret = clk_prepare_enable(dmadev->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
		return ret;
	}

	dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(dmadev->rst)) {
		reset_control_assert(dmadev->rst);
		udelay(2);
		reset_control_deassert(dmadev->rst);
	}

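	/*
	 * Advertise slave, cyclic and memcpy capabilities with 1/2/4/8
	 * byte bus widths in all three transfer directions.
	 */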
	dd = &dmadev->ddev;
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dd->device_alloc_chan_resources = stm32_mdma_alloc_chan_resources;
	dd->device_free_chan_resources = stm32_mdma_free_chan_resources;
	dd->device_tx_status = stm32_mdma_tx_status;
	dd->device_issue_pending = stm32_mdma_issue_pending;
	dd->device_prep_slave_sg = stm32_mdma_prep_slave_sg;
	dd->device_prep_dma_cyclic = stm32_mdma_prep_dma_cyclic;
	dd->device_prep_dma_memcpy = stm32_mdma_prep_dma_memcpy;
	dd->device_config = stm32_mdma_slave_config;
	dd->device_pause = stm32_mdma_pause;
	dd->device_resume = stm32_mdma_resume;
	dd->device_terminate_all = stm32_mdma_terminate_all;
	dd->device_synchronize = stm32_mdma_synchronize;
	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			      BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			      BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			      BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			      BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			 BIT(DMA_MEM_TO_MEM);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->max_burst = STM32_MDMA_MAX_BURST;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	for (i = 0; i < dmadev->nr_channels; i++) {
		chan = &dmadev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = stm32_mdma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

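	/* A single interrupt line is shared by all channels. */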
	dmadev->irq = platform_get_irq(pdev, 0);
	if (dmadev->irq < 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return dmadev->irq;
	}

	ret = devm_request_irq(&pdev->dev, dmadev->irq, stm32_mdma_irq_handler,
			       0, dev_name(&pdev->dev), dmadev);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		return ret;
	}

	ret = dmaenginem_async_device_register(dd);
	if (ret)
		return ret;

	ret = of_dma_controller_register(of_node, stm32_mdma_of_xlate, dmadev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"STM32 MDMA DMA OF registration failed %d\n", ret);
		goto err_unregister;
	}

	platform_set_drvdata(pdev, dmadev);
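
	/*
	 * Mark the device active (the clock is already on), then balance
	 * the usage count so runtime PM can gate the clock until a
	 * channel is allocated.
	 */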
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_put(&pdev->dev);

	dev_info(&pdev->dev, "STM32 MDMA driver registered\n");

	return 0;

err_unregister:
	return ret;
}

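/*
 * Runtime PM gates the controller clock: channel allocation takes a
 * reference and channel release drops it, so the clock is switched off
 * whenever no channel is in use.
 */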
#ifdef CONFIG_PM
static int stm32_mdma_runtime_suspend(struct device *dev)
{
	struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);

	clk_disable_unprepare(dmadev->clk);

	return 0;
}

static int stm32_mdma_runtime_resume(struct device *dev)
{
	struct stm32_mdma_device *dmadev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(dmadev->clk);
	if (ret) {
		dev_err(dev, "failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

static const struct dev_pm_ops stm32_mdma_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_mdma_runtime_suspend,
			   stm32_mdma_runtime_resume, NULL)
};

static struct platform_driver stm32_mdma_driver = {
	.probe = stm32_mdma_probe,
	.driver = {
		.name = "stm32-mdma",
		.of_match_table = stm32_mdma_of_match,
		.pm = &stm32_mdma_pm_ops,
	},
};

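/*
 * Register at subsys_initcall time so the controller is available
 * before the client drivers that depend on it probe.
 */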
static int __init stm32_mdma_init(void)
{
	return platform_driver_register(&stm32_mdma_driver);
}

subsys_initcall(stm32_mdma_init);

MODULE_DESCRIPTION("Driver for STM32 MDMA controller");
MODULE_AUTHOR("M'boumba Cedric Madianga <cedric.madianga@gmail.com>");
MODULE_AUTHOR("Pierre-Yves Mordret <pierre-yves.mordret@st.com>");
MODULE_LICENSE("GPL v2");