// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for STM32 DMA controller
 *
 * Inspired by dma-jz4740.c and tegra20-apb-dma.c
 *
 * Copyright (C) M'boumba Cedric Madianga 2015
 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define STM32_DMA_LISR			0x0000 /* DMA Low Int Status Reg */
#define STM32_DMA_HISR			0x0004 /* DMA High Int Status Reg */
#define STM32_DMA_LIFCR			0x0008 /* DMA Low Int Flag Clear Reg */
#define STM32_DMA_HIFCR			0x000c /* DMA High Int Flag Clear Reg */
#define STM32_DMA_TCI			BIT(5) /* Transfer Complete Interrupt */
#define STM32_DMA_HTI			BIT(4) /* Half Transfer Interrupt */
#define STM32_DMA_TEI			BIT(3) /* Transfer Error Interrupt */
#define STM32_DMA_DMEI			BIT(2) /* Direct Mode Error Interrupt */
#define STM32_DMA_FEI			BIT(0) /* FIFO Error Interrupt */
#define STM32_DMA_MASKI			(STM32_DMA_TCI \
					 | STM32_DMA_TEI \
					 | STM32_DMA_DMEI \
					 | STM32_DMA_FEI)

/* DMA Stream x Configuration Register */
#define STM32_DMA_SCR(x)		(0x0010 + 0x18 * (x)) /* x = 0..7 */
#define STM32_DMA_SCR_REQ(n)		((n & 0x7) << 25)
#define STM32_DMA_SCR_MBURST_MASK	GENMASK(24, 23)
#define STM32_DMA_SCR_MBURST(n)		((n & 0x3) << 23)
#define STM32_DMA_SCR_PBURST_MASK	GENMASK(22, 21)
#define STM32_DMA_SCR_PBURST(n)		((n & 0x3) << 21)
#define STM32_DMA_SCR_PL_MASK		GENMASK(17, 16)
#define STM32_DMA_SCR_PL(n)		((n & 0x3) << 16)
#define STM32_DMA_SCR_MSIZE_MASK	GENMASK(14, 13)
#define STM32_DMA_SCR_MSIZE(n)		((n & 0x3) << 13)
#define STM32_DMA_SCR_PSIZE_MASK	GENMASK(12, 11)
#define STM32_DMA_SCR_PSIZE(n)		((n & 0x3) << 11)
#define STM32_DMA_SCR_PSIZE_GET(n)	((n & STM32_DMA_SCR_PSIZE_MASK) >> 11)
#define STM32_DMA_SCR_DIR_MASK		GENMASK(7, 6)
#define STM32_DMA_SCR_DIR(n)		((n & 0x3) << 6)
#define STM32_DMA_SCR_CT		BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM		BIT(18) /* Double Buffer Mode */
#define STM32_DMA_SCR_PINCOS		BIT(15) /* Peripheral inc offset size */
#define STM32_DMA_SCR_MINC		BIT(10) /* Memory increment mode */
#define STM32_DMA_SCR_PINC		BIT(9) /* Peripheral increment mode */
#define STM32_DMA_SCR_CIRC		BIT(8) /* Circular mode */
#define STM32_DMA_SCR_PFCTRL		BIT(5) /* Peripheral Flow Controller */
#define STM32_DMA_SCR_TCIE		BIT(4) /* Transfer Complete Int Enable */
#define STM32_DMA_SCR_TEIE		BIT(2) /* Transfer Error Int Enable */
#define STM32_DMA_SCR_DMEIE		BIT(1) /* Direct Mode Err Int Enable */
#define STM32_DMA_SCR_EN		BIT(0) /* Stream Enable */
#define STM32_DMA_SCR_CFG_MASK		(STM32_DMA_SCR_PINC \
					 | STM32_DMA_SCR_MINC \
					 | STM32_DMA_SCR_PINCOS \
					 | STM32_DMA_SCR_PL_MASK)
#define STM32_DMA_SCR_IRQ_MASK		(STM32_DMA_SCR_TCIE \
					 | STM32_DMA_SCR_TEIE \
					 | STM32_DMA_SCR_DMEIE)

/* DMA Stream x number of data register */
#define STM32_DMA_SNDTR(x)		(0x0014 + 0x18 * (x))

/* DMA stream peripheral address register */
#define STM32_DMA_SPAR(x)		(0x0018 + 0x18 * (x))

/* DMA stream x memory 0 address register */
#define STM32_DMA_SM0AR(x)		(0x001c + 0x18 * (x))

/* DMA stream x memory 1 address register */
#define STM32_DMA_SM1AR(x)		(0x0020 + 0x18 * (x))

/* DMA stream x FIFO control register */
#define STM32_DMA_SFCR(x)		(0x0024 + 0x18 * (x))
#define STM32_DMA_SFCR_FTH_MASK		GENMASK(1, 0)
#define STM32_DMA_SFCR_FTH(n)		(n & STM32_DMA_SFCR_FTH_MASK)
#define STM32_DMA_SFCR_FEIE		BIT(7) /* FIFO error interrupt enable */
#define STM32_DMA_SFCR_DMDIS		BIT(2) /* Direct mode disable */
#define STM32_DMA_SFCR_MASK		(STM32_DMA_SFCR_FEIE \
					 | STM32_DMA_SFCR_DMDIS)

/* DMA direction */
#define STM32_DMA_DEV_TO_MEM		0x00
#define STM32_DMA_MEM_TO_DEV		0x01
#define STM32_DMA_MEM_TO_MEM		0x02

/* DMA priority level */
#define STM32_DMA_PRIORITY_LOW		0x00
#define STM32_DMA_PRIORITY_MEDIUM	0x01
#define STM32_DMA_PRIORITY_HIGH		0x02
#define STM32_DMA_PRIORITY_VERY_HIGH	0x03

/* DMA FIFO threshold selection */
#define STM32_DMA_FIFO_THRESHOLD_1QUARTERFULL	0x00
#define STM32_DMA_FIFO_THRESHOLD_HALFFULL	0x01
#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL	0x02
#define STM32_DMA_FIFO_THRESHOLD_FULL		0x03
#define STM32_DMA_FIFO_THRESHOLD_NONE		0x04

#define STM32_DMA_MAX_DATA_ITEMS	0xffff
/*
 * A valid transfer length runs from 0 to 0xFFFE, which can leave a
 * scatter-gather element unaligned at the boundary. It is therefore safer
 * to round this limit down to the FIFO size (16 bytes).
 */
#define STM32_DMA_ALIGNED_MAX_DATA_ITEMS	\
	ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)
#define STM32_DMA_MAX_CHANNELS		0x08
#define STM32_DMA_MAX_REQUEST_ID	0x08
#define STM32_DMA_MAX_DATA_PARAM	0x03
#define STM32_DMA_FIFO_SIZE		16	/* FIFO is 16 bytes */
#define STM32_DMA_MIN_BURST		4
#define STM32_DMA_MAX_BURST		16

/* DMA Features */
#define STM32_DMA_THRESHOLD_FTR_MASK	GENMASK(1, 0)
#define STM32_DMA_THRESHOLD_FTR_GET(n)	((n) & STM32_DMA_THRESHOLD_FTR_MASK)
#define STM32_DMA_DIRECT_MODE_MASK	BIT(2)
#define STM32_DMA_DIRECT_MODE_GET(n)	(((n) & STM32_DMA_DIRECT_MODE_MASK) \
					 >> 2)

enum stm32_dma_width {
	STM32_DMA_BYTE,
	STM32_DMA_HALF_WORD,
	STM32_DMA_WORD,
};

enum stm32_dma_burst_size {
	STM32_DMA_BURST_SINGLE,
	STM32_DMA_BURST_INCR4,
	STM32_DMA_BURST_INCR8,
	STM32_DMA_BURST_INCR16,
};

/**
 * struct stm32_dma_cfg - STM32 DMA custom configuration
 * @channel_id: channel ID
 * @request_line: DMA request
 * @stream_config: 32bit mask specifying the DMA channel configuration
 * @features: 32bit mask specifying the DMA Feature list
 */
struct stm32_dma_cfg {
	u32 channel_id;
	u32 request_line;
	u32 stream_config;
	u32 features;
};

struct stm32_dma_chan_reg {
	u32 dma_lisr;
	u32 dma_hisr;
	u32 dma_lifcr;
	u32 dma_hifcr;
	u32 dma_scr;
	u32 dma_sndtr;
	u32 dma_spar;
	u32 dma_sm0ar;
	u32 dma_sm1ar;
	u32 dma_sfcr;
};

struct stm32_dma_sg_req {
	u32 len;
	struct stm32_dma_chan_reg chan_reg;
};

struct stm32_dma_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	u32 num_sgs;
	struct stm32_dma_sg_req sg_req[];
};

struct stm32_dma_chan {
	struct virt_dma_chan vchan;
	bool config_init;
	bool busy;
	u32 id;
	u32 irq;
	struct stm32_dma_desc *desc;
	u32 next_sg;
	struct dma_slave_config dma_sconfig;
	struct stm32_dma_chan_reg chan_reg;
	u32 threshold;
	u32 mem_burst;
	u32 mem_width;
};

struct stm32_dma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	bool mem2mem;
	struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
};

static struct stm32_dma_device *stm32_dma_get_dev(struct stm32_dma_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct stm32_dma_device,
			    ddev);
}

static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct stm32_dma_chan, vchan.chan);
}

static struct stm32_dma_desc *to_stm32_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct stm32_dma_desc, vdesc);
}

static struct device *chan2dev(struct stm32_dma_chan *chan)
{
	return &chan->vchan.chan.dev->device;
}

static u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
{
	return readl_relaxed(dmadev->base + reg);
}

static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
{
	writel_relaxed(val, dmadev->base + reg);
}

static int stm32_dma_get_width(struct stm32_dma_chan *chan,
			       enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return STM32_DMA_BYTE;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return STM32_DMA_HALF_WORD;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return STM32_DMA_WORD;
	default:
		dev_err(chan2dev(chan), "Dma bus width not supported\n");
		return -EINVAL;
	}
}

static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
						       dma_addr_t buf_addr,
						       u32 threshold)
{
	enum dma_slave_buswidth max_width;
	u64 addr = buf_addr;

	if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
		max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	else
		max_width = DMA_SLAVE_BUSWIDTH_2_BYTES;

	while ((buf_len < max_width || buf_len % max_width) &&
	       max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
		max_width = max_width >> 1;

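	/* The start address must also be aligned on the chosen width, otherwise fall back to byte accesses */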
	if (do_div(addr, max_width))
		max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	return max_width;
}

static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
						enum dma_slave_buswidth width)
{
	u32 remaining;

	if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
		return false;

	if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		if (burst != 0) {
			/*
			 * If the number of beats fits a whole number of
			 * bursts, this configuration is allowed.
			 */
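			/*
			 * e.g. half-word width with a half-full threshold:
			 * (16 / 2) * (1 + 1) / 4 = 4 beats at the threshold,
			 * so a burst of 4 beats divides evenly and is allowed.
			 */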
			remaining = ((STM32_DMA_FIFO_SIZE / width) *
				     (threshold + 1) / 4) % burst;

			if (remaining == 0)
				return true;
		} else {
			return true;
		}
	}

	return false;
}

static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{
	/* If FIFO direct mode, burst is not possible */
	if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
		return false;

	/*
	 * The buffer or period length has to be aligned on the FIFO depth.
	 * Otherwise bytes may be left stuck in the FIFO at the end of the
	 * buffer or period.
	 */
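	/* e.g. a FULL threshold (3) requires a multiple of (3 + 1) * 4 = 16 bytes */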
	return ((buf_len % ((threshold + 1) * 4)) == 0);
}

static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
				    enum dma_slave_buswidth width)
{
	u32 best_burst = max_burst;

	if (best_burst == 1 || !stm32_dma_is_burst_possible(buf_len, threshold))
		return 0;

	while ((buf_len < best_burst * width && best_burst > 1) ||
	       !stm32_dma_fifo_threshold_is_allowed(best_burst, threshold,
						    width)) {
		if (best_burst > STM32_DMA_MIN_BURST)
			best_burst = best_burst >> 1;
		else
			best_burst = 0;
	}

	return best_burst;
}

static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
{
	switch (maxburst) {
	case 0:
	case 1:
		return STM32_DMA_BURST_SINGLE;
	case 4:
		return STM32_DMA_BURST_INCR4;
	case 8:
		return STM32_DMA_BURST_INCR8;
	case 16:
		return STM32_DMA_BURST_INCR16;
	default:
		dev_err(chan2dev(chan), "Dma burst size not supported\n");
		return -EINVAL;
	}
}

static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
				      u32 src_burst, u32 dst_burst)
{
	chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;

	if (!src_burst && !dst_burst) {
		/* Using direct mode */
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
	} else {
		/* Using FIFO mode */
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
	}
}

static int stm32_dma_slave_config(struct dma_chan *c,
				  struct dma_slave_config *config)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	memcpy(&chan->dma_sconfig, config, sizeof(*config));

	chan->config_init = true;

	return 0;
}

static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 flags, dma_isr;

	/*
	 * Read "flags" from DMA_xISR register corresponding to the selected
	 * DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */
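	/*
	 * e.g. channel 3: ((3 & 2) << 3) | ((3 & 1) * 6) = 16 | 6 = 22,
	 * i.e. the flags of stream 3 sit at bits 27..22 of LISR.
	 */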

	if (chan->id & 4)
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_HISR);
	else
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_LISR);

	flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	return flags & STM32_DMA_MASKI;
}

static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_ifcr;

	/*
	 * Write "flags" to the DMA_xIFCR register corresponding to the selected
	 * DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */
	flags &= STM32_DMA_MASKI;
	dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	if (chan->id & 4)
		stm32_dma_write(dmadev, STM32_DMA_HIFCR, dma_ifcr);
	else
		stm32_dma_write(dmadev, STM32_DMA_LIFCR, dma_ifcr);
}

static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, id, reg;

	id = chan->id;
	reg = STM32_DMA_SCR(id);
	dma_scr = stm32_dma_read(dmadev, reg);

	if (dma_scr & STM32_DMA_SCR_EN) {
		dma_scr &= ~STM32_DMA_SCR_EN;
		stm32_dma_write(dmadev, reg, dma_scr);

		return readl_relaxed_poll_timeout_atomic(dmadev->base + reg,
					dma_scr, !(dma_scr & STM32_DMA_SCR_EN),
					10, 1000000);
	}

	return 0;
}

static void stm32_dma_stop(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, dma_sfcr, status;
	int ret;

	/* Disable interrupts */
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	dma_scr &= ~STM32_DMA_SCR_IRQ_MASK;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
	dma_sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
	dma_sfcr &= ~STM32_DMA_SFCR_FEIE;
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), dma_sfcr);

	/* Disable DMA */
	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status) {
		dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
			__func__, status);
		stm32_dma_irq_clear(chan, status);
	}

	chan->busy = false;
}

static int stm32_dma_terminate_all(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (chan->desc) {
		vchan_terminate_vdesc(&chan->desc->vdesc);
		if (chan->busy)
			stm32_dma_stop(chan);
		chan->desc = NULL;
	}

	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void stm32_dma_synchronize(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	u32 ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
	u32 spar = stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id));
	u32 sm0ar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id));
	u32 sm1ar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id));
	u32 sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	dev_dbg(chan2dev(chan), "SCR: 0x%08x\n", scr);
	dev_dbg(chan2dev(chan), "NDTR: 0x%08x\n", ndtr);
	dev_dbg(chan2dev(chan), "SPAR: 0x%08x\n", spar);
	dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n", sm0ar);
	dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n", sm1ar);
	dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr);
}

static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);

static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct virt_dma_desc *vdesc;
	struct stm32_dma_sg_req *sg_req;
	struct stm32_dma_chan_reg *reg;
	u32 status;
	int ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	if (!chan->desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);

		chan->desc = to_stm32_dma_desc(vdesc);
		chan->next_sg = 0;
	}

	if (chan->next_sg == chan->desc->num_sgs)
		chan->next_sg = 0;

	sg_req = &chan->desc->sg_req[chan->next_sg];
	reg = &sg_req->chan_reg;

	reg->dma_scr &= ~STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
	stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
	stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), reg->dma_sfcr);
	stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);

	chan->next_sg++;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status)
		stm32_dma_irq_clear(chan, status);

	if (chan->desc->cyclic)
		stm32_dma_configure_next_sg(chan);

	stm32_dma_dump_reg(chan);

	/* Start DMA */
	reg->dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);

	chan->busy = true;

	dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}

static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_sm0ar, dma_sm1ar, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (dma_scr & STM32_DMA_SCR_DBM) {
		if (chan->next_sg == chan->desc->num_sgs)
			chan->next_sg = 0;

		sg_req = &chan->desc->sg_req[chan->next_sg];

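		/* SCR.CT tells which memory target the hardware is using; reprogram the inactive one */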
		if (dma_scr & STM32_DMA_SCR_CT) {
			dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
			stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
			dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
		} else {
			dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
			stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
			dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
		}
	}
}

static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
{
	if (chan->desc) {
		if (chan->desc->cyclic) {
			vchan_cyclic_callback(&chan->desc->vdesc);
			chan->next_sg++;
			stm32_dma_configure_next_sg(chan);
		} else {
			chan->busy = false;
			if (chan->next_sg == chan->desc->num_sgs) {
				vchan_cookie_complete(&chan->desc->vdesc);
				chan->desc = NULL;
			}
			stm32_dma_start_transfer(chan);
		}
	}
}

static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
	struct stm32_dma_chan *chan = devid;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 status, scr, sfcr;

	spin_lock(&chan->vchan.lock);

	status = stm32_dma_irq_status(chan);
	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	if (status & STM32_DMA_FEI) {
		stm32_dma_irq_clear(chan, STM32_DMA_FEI);
		status &= ~STM32_DMA_FEI;
		if (sfcr & STM32_DMA_SFCR_FEIE) {
			if (!(scr & STM32_DMA_SCR_EN) &&
			    !(status & STM32_DMA_TCI))
				dev_err(chan2dev(chan), "FIFO Error\n");
			else
				dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
		}
	}
	if (status & STM32_DMA_DMEI) {
		stm32_dma_irq_clear(chan, STM32_DMA_DMEI);
		status &= ~STM32_DMA_DMEI;
		if (scr & STM32_DMA_SCR_DMEIE)	/* DMEIE lives in SCR, not SFCR */
			dev_dbg(chan2dev(chan), "Direct mode overrun\n");
	}

	if (status & STM32_DMA_TCI) {
		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
		if (scr & STM32_DMA_SCR_TCIE)
			stm32_dma_handle_chan_done(chan);
		status &= ~STM32_DMA_TCI;
	}

	if (status & STM32_DMA_HTI) {
		stm32_dma_irq_clear(chan, STM32_DMA_HTI);
		status &= ~STM32_DMA_HTI;
	}

	if (status) {
		stm32_dma_irq_clear(chan, status);
		dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
		if (!(scr & STM32_DMA_SCR_EN))
			dev_err(chan2dev(chan), "chan disabled by HW\n");
	}

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}

static void stm32_dma_issue_pending(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
		dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
		stm32_dma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
				    enum dma_transfer_direction direction,
				    enum dma_slave_buswidth *buswidth,
				    u32 buf_len, dma_addr_t buf_addr)
{
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	int src_bus_width, dst_bus_width;
	int src_burst_size, dst_burst_size;
	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
	u32 dma_scr, fifoth;

	src_addr_width = chan->dma_sconfig.src_addr_width;
	dst_addr_width = chan->dma_sconfig.dst_addr_width;
	src_maxburst = chan->dma_sconfig.src_maxburst;
	dst_maxburst = chan->dma_sconfig.dst_maxburst;
	fifoth = chan->threshold;

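	/*
	 * The device (peripheral) side takes its width and burst from the
	 * slave config; the memory side is derived below from the buffer
	 * length, its address alignment and the FIFO threshold.
	 */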
	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* Set device data size */
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set device burst size */
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  fifoth,
							  dst_addr_width);

		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		/* Set memory data size */
		src_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
							 fifoth);
		chan->mem_width = src_addr_width;
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set memory burst size */
		src_maxburst = STM32_DMA_MAX_BURST;
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  fifoth,
							  src_addr_width);
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_DEV) |
			STM32_DMA_SCR_PSIZE(dst_bus_width) |
			STM32_DMA_SCR_MSIZE(src_bus_width) |
			STM32_DMA_SCR_PBURST(dst_burst_size) |
			STM32_DMA_SCR_MBURST(src_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
			chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
		*buswidth = dst_addr_width;
		break;

	case DMA_DEV_TO_MEM:
		/* Set device data size */
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set device burst size */
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  fifoth,
							  src_addr_width);
		chan->mem_burst = src_best_burst;
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		/* Set memory data size */
		dst_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
							 fifoth);
		chan->mem_width = dst_addr_width;
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set memory burst size */
		dst_maxburst = STM32_DMA_MAX_BURST;
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  fifoth,
							  dst_addr_width);
		chan->mem_burst = dst_best_burst;
		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_DEV_TO_MEM) |
			STM32_DMA_SCR_PSIZE(src_bus_width) |
			STM32_DMA_SCR_MSIZE(dst_bus_width) |
			STM32_DMA_SCR_PBURST(src_burst_size) |
			STM32_DMA_SCR_MBURST(dst_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
			chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
		*buswidth = chan->dma_sconfig.src_addr_width;
		break;

	default:
		dev_err(chan2dev(chan), "Dma direction is not supported\n");
		return -EINVAL;
	}

	stm32_dma_set_fifo_config(chan, src_best_burst, dst_best_burst);

	/* Set DMA control register */
	chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK |
			STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK |
			STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK);
	chan->chan_reg.dma_scr |= dma_scr;

	return 0;
}

static void stm32_dma_clear_reg(struct stm32_dma_chan_reg *regs)
{
	memset(regs, 0, sizeof(struct stm32_dma_chan_reg));
}

static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	u32 sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 nb_data_items;
	int i, ret;

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (sg_len < 1) {
		dev_err(chan2dev(chan), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
	if (!desc)
		return NULL;

	/* Set peripheral flow controller */
	if (chan->dma_sconfig.device_fc)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;
	else
		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
					       sg_dma_len(sg),
					       sg_dma_address(sg));
		if (ret < 0)
			goto err;

		desc->sg_req[i].len = sg_dma_len(sg);

		nb_data_items = desc->sg_req[i].len / buswidth;
		if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
			dev_err(chan2dev(chan), "nb items not supported\n");
			goto err;
		}

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
	}

	desc->num_sgs = sg_len;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

err:
	kfree(desc);
	return NULL;
}

static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	enum dma_slave_buswidth buswidth;
	u32 num_periods, nb_data_items;
	int i, ret;

	if (!buf_len || !period_len) {
		dev_err(chan2dev(chan), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
		return NULL;
	}

	/*
	 * More requests can be queued as long as the DMA has not been
	 * started; the driver will loop over all of them. Once the DMA is
	 * started, new requests can only be queued after terminating it.
	 */
	if (chan->busy) {
		dev_err(chan2dev(chan), "Request not allowed when dma busy\n");
		return NULL;
	}

	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len,
				       buf_addr);
	if (ret < 0)
		return NULL;

	nb_data_items = period_len / buswidth;
	if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
		dev_err(chan2dev(chan), "number of items not supported\n");
		return NULL;
	}

	/* Enable Circular mode or double buffer mode */
	if (buf_len == period_len)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
	else
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
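	/* In double-buffer mode, successive periods ping-pong between SM0AR and SM1AR */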

	/* Clear periph ctrl if client set it */
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	num_periods = buf_len / period_len;

	desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_periods; i++) {
		desc->sg_req[i].len = period_len;

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
	struct dma_chan *c, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	enum dma_slave_buswidth max_width;
	struct stm32_dma_desc *desc;
	size_t xfer_count, offset;
	u32 num_sgs, best_burst, dma_burst, threshold;
	int i;

	num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
	desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	threshold = chan->threshold;

	for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
		xfer_count = min_t(size_t, len - offset,
				   STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

		/* Compute best burst size */
		max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
						      threshold, max_width);
		dma_burst = stm32_dma_get_burst(chan, best_burst);

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr =
			STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) |
			STM32_DMA_SCR_PBURST(dma_burst) |
			STM32_DMA_SCR_MBURST(dma_burst) |
			STM32_DMA_SCR_MINC |
			STM32_DMA_SCR_PINC |
			STM32_DMA_SCR_TCIE |
			STM32_DMA_SCR_TEIE;
		desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
		desc->sg_req[i].chan_reg.dma_sfcr |=
			STM32_DMA_SFCR_FTH(threshold);
		desc->sg_req[i].chan_reg.dma_spar = src + offset;
		desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
		desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
		desc->sg_req[i].len = xfer_count;
	}

	desc->num_sgs = num_sgs;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
{
	u32 dma_scr, width, ndtr;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);

	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
	ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));

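	/* PSIZE encodes 0 = byte, 1 = half-word, 2 = word, so the shift converts items to bytes */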
	return ndtr << width;
}

/**
 * stm32_dma_is_current_sg - check that expected sg_req is currently transferred
 * @chan: dma channel
 *
 * This function, called while IRQs are disabled, checks that the hardware has
 * not switched on the next transfer in double buffer mode. The test is done by
 * comparing the next_sg memory address with the hardware related register
 * (based on the CT bit value).
 *
 * Returns true if the expected current transfer is still running, or if
 * double buffer mode is not activated.
 */
static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_smar, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (!(dma_scr & STM32_DMA_SCR_DBM))
		return true;

	sg_req = &chan->desc->sg_req[chan->next_sg];

	if (dma_scr & STM32_DMA_SCR_CT) {
		dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
		return (dma_smar == sg_req->chan_reg.dma_sm0ar);
	}

	dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));

	return (dma_smar == sg_req->chan_reg.dma_sm1ar);
}

static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
				     struct stm32_dma_desc *desc,
				     u32 next_sg)
{
	u32 modulo, burst_size;
	u32 residue;
	u32 n_sg = next_sg;
	struct stm32_dma_sg_req *sg_req = &chan->desc->sg_req[chan->next_sg];
	int i;

	/*
	 * Calculating the residue means working out, from the descriptor
	 * information:
	 * - which sg_req is currently being transferred,
	 * - the hardware's remaining position within this sg (NDTR bit field).
	 *
	 * A race condition may occur if the DMA is running in cyclic or double
	 * buffer mode, since the DMA registers are automatically reloaded at
	 * the end of a period transfer. The hardware may have switched to the
	 * next transfer (CT bit updated) just before the position (SxNDTR reg)
	 * is read.
	 * In that case the SxNDTR reg may or may not correspond to the new
	 * transfer position rather than the expected one.
	 * The strategy implemented in this driver is to:
	 * - read the SxNDTR register,
	 * - crosscheck that the hardware is still in the current transfer.
	 * In case of a switch, we can assume that the DMA is at the beginning
	 * of the next transfer, so we approximate the residue accordingly by
	 * pointing at the beginning of the next transfer.
	 *
	 * This race condition doesn't apply to non-cyclic mode, as double
	 * buffer mode is not used there and the registers are updated by
	 * software.
	 */

	residue = stm32_dma_get_remaining_bytes(chan);

	if (!stm32_dma_is_current_sg(chan)) {
		n_sg++;
		if (n_sg == chan->desc->num_sgs)
			n_sg = 0;
		residue = sg_req->len;
	}

	/*
	 * In cyclic mode, for the last period, residue = remaining bytes
	 * from NDTR; for all other periods in cyclic mode, and in sg mode,
	 * residue = remaining bytes from NDTR + remaining
	 * periods/sg to be transferred.
	 */
	if (!chan->desc->cyclic || n_sg != 0)
		for (i = n_sg; i < desc->num_sgs; i++)
			residue += desc->sg_req[i].len;

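	/* Round the residue up to the next multiple of the memory burst (mem_burst beats of mem_width bytes) */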
	if (!chan->mem_burst)
		return residue;

	burst_size = chan->mem_burst * chan->mem_width;
	modulo = residue % burst_size;
	if (modulo)
		residue = residue - modulo + burst_size;

	return residue;
}

static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	u32 residue = 0;

	status = dma_cookie_status(c, cookie, state);
	if (status == DMA_COMPLETE || !state)
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&chan->vchan, cookie);
	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
		residue = stm32_dma_desc_residue(chan, chan->desc,
						 chan->next_sg);
	else if (vdesc)
		residue = stm32_dma_desc_residue(chan,
						 to_stm32_dma_desc(vdesc), 0);
	dma_set_residue(state, residue);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return status;
}

static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	int ret;

	chan->config_init = false;

	ret = pm_runtime_get_sync(dmadev->ddev.dev);
	if (ret < 0)
		return ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		pm_runtime_put(dmadev->ddev.dev);

	return ret;
}

static void stm32_dma_free_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);

	if (chan->busy) {
		spin_lock_irqsave(&chan->vchan.lock, flags);
		stm32_dma_stop(chan);
		chan->desc = NULL;
		spin_unlock_irqrestore(&chan->vchan.lock, flags);
	}

	pm_runtime_put(dmadev->ddev.dev);

	vchan_free_chan_resources(to_virt_chan(c));
	stm32_dma_clear_reg(&chan->chan_reg);
	chan->threshold = 0;
}

static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct stm32_dma_desc, vdesc));
}

static void stm32_dma_set_config(struct stm32_dma_chan *chan,
				 struct stm32_dma_cfg *cfg)
{
	stm32_dma_clear_reg(&chan->chan_reg);

	chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK;
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_REQ(cfg->request_line);

	/* Enable Interrupts */
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;

	chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
	if (STM32_DMA_DIRECT_MODE_GET(cfg->features))
		chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
}

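/*
 * Translate a four-cell DT DMA specifier (channel id, request line, stream
 * configuration mask, features mask) into a channel. A client node would
 * reference it with something like (cell values below are illustrative only):
 *	dmas = <&dma1 5 0x4 0x10400 0x1>;
 */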
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct stm32_dma_device *dmadev = ofdma->of_dma_data;
	struct device *dev = dmadev->ddev.dev;
	struct stm32_dma_cfg cfg;
	struct stm32_dma_chan *chan;
	struct dma_chan *c;

	if (dma_spec->args_count < 4) {
		dev_err(dev, "Bad number of cells\n");
		return NULL;
	}

	cfg.channel_id = dma_spec->args[0];
	cfg.request_line = dma_spec->args[1];
	cfg.stream_config = dma_spec->args[2];
	cfg.features = dma_spec->args[3];

	if (cfg.channel_id >= STM32_DMA_MAX_CHANNELS ||
	    cfg.request_line >= STM32_DMA_MAX_REQUEST_ID) {
		dev_err(dev, "Bad channel and/or request id\n");
		return NULL;
	}

	chan = &dmadev->chan[cfg.channel_id];

	c = dma_get_slave_channel(&chan->vchan.chan);
	if (!c) {
		dev_err(dev, "No more channels available\n");
		return NULL;
	}

	stm32_dma_set_config(chan, &cfg);

	return c;
}

static const struct of_device_id stm32_dma_of_match[] = {
	{ .compatible = "st,stm32-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_dma_of_match);

static int stm32_dma_probe(struct platform_device *pdev)
{
	struct stm32_dma_chan *chan;
	struct stm32_dma_device *dmadev;
	struct dma_device *dd;
	const struct of_device_id *match;
	struct resource *res;
	struct reset_control *rst;
	int i, ret;

	match = of_match_device(stm32_dma_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev)
		return -ENOMEM;

	dd = &dmadev->ddev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmadev->base))
		return PTR_ERR(dmadev->base);

	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmadev->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk), "Can't get clock\n");

	ret = clk_prepare_enable(dmadev->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
		return ret;
	}

	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
						"st,mem2mem");

	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rst)) {
		ret = PTR_ERR(rst);
		if (ret == -EPROBE_DEFER)
			goto clk_free;
	} else {
		reset_control_assert(rst);
		udelay(2);
		reset_control_deassert(rst);
	}

	dma_set_max_seg_size(&pdev->dev, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
	dd->device_free_chan_resources = stm32_dma_free_chan_resources;
	dd->device_tx_status = stm32_dma_tx_status;
	dd->device_issue_pending = stm32_dma_issue_pending;
	dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
	dd->device_config = stm32_dma_slave_config;
	dd->device_terminate_all = stm32_dma_terminate_all;
	dd->device_synchronize = stm32_dma_synchronize;
	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->copy_align = DMAENGINE_ALIGN_32_BYTES;
	dd->max_burst = STM32_DMA_MAX_BURST;
	dd->descriptor_reuse = true;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	if (dmadev->mem2mem) {
		dma_cap_set(DMA_MEMCPY, dd->cap_mask);
		dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
		dd->directions |= BIT(DMA_MEM_TO_MEM);
	}

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = stm32_dma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	ret = dma_async_device_register(dd);
	if (ret)
		goto clk_free;

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		ret = platform_get_irq(pdev, i);
		if (ret < 0)
			goto err_unregister;
		chan->irq = ret;

		ret = devm_request_irq(&pdev->dev, chan->irq,
				       stm32_dma_chan_irq, 0,
				       dev_name(chan2dev(chan)), chan);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_unregister;
		}
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 stm32_dma_of_xlate, dmadev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"STM32 DMA OF registration failed %d\n", ret);
		goto err_unregister;
	}

	platform_set_drvdata(pdev, dmadev);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_put(&pdev->dev);

	dev_info(&pdev->dev, "STM32 DMA driver registered\n");

	return 0;

err_unregister:
	dma_async_device_unregister(dd);
clk_free:
	clk_disable_unprepare(dmadev->clk);

	return ret;
}

#ifdef CONFIG_PM
static int stm32_dma_runtime_suspend(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);

	clk_disable_unprepare(dmadev->clk);

	return 0;
}

static int stm32_dma_runtime_resume(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(dmadev->clk);
	if (ret) {
		dev_err(dev, "failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int stm32_dma_suspend(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
	int id, ret, scr;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;

	for (id = 0; id < STM32_DMA_MAX_CHANNELS; id++) {
		scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
		if (scr & STM32_DMA_SCR_EN) {
			dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
			return -EBUSY;
		}
	}

	pm_runtime_put_sync(dev);

	pm_runtime_force_suspend(dev);

	return 0;
}

static int stm32_dma_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}
#endif

static const struct dev_pm_ops stm32_dma_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_suspend, stm32_dma_resume)
	SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
			   stm32_dma_runtime_resume, NULL)
};

static struct platform_driver stm32_dma_driver = {
	.driver = {
		.name = "stm32-dma",
		.of_match_table = stm32_dma_of_match,
		.pm = &stm32_dma_pm_ops,
	},
	.probe = stm32_dma_probe,
};

static int __init stm32_dma_init(void)
{
	return platform_driver_register(&stm32_dma_driver);
}
subsys_initcall(stm32_dma_init);