// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/of_dma.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"

#define SIRFSOC_DMA_VER_A7V1			1
#define SIRFSOC_DMA_VER_A7V2			2
#define SIRFSOC_DMA_VER_A6			4

#define SIRFSOC_DMA_DESCRIPTORS			16
#define SIRFSOC_DMA_CHANNELS			16
#define SIRFSOC_DMA_TABLE_NUM			256

#define SIRFSOC_DMA_CH_ADDR			0x00
#define SIRFSOC_DMA_CH_XLEN			0x04
#define SIRFSOC_DMA_CH_YLEN			0x08
#define SIRFSOC_DMA_CH_CTRL			0x0C

#define SIRFSOC_DMA_WIDTH_0			0x100
#define SIRFSOC_DMA_CH_VALID			0x140
#define SIRFSOC_DMA_CH_INT			0x144
#define SIRFSOC_DMA_INT_EN			0x148
#define SIRFSOC_DMA_INT_EN_CLR			0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL		0x150
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR		0x154
#define SIRFSOC_DMA_WIDTH_ATLAS7		0x10
#define SIRFSOC_DMA_VALID_ATLAS7		0x14
#define SIRFSOC_DMA_INT_ATLAS7			0x18
#define SIRFSOC_DMA_INT_EN_ATLAS7		0x1c
#define SIRFSOC_DMA_LOOP_CTRL_ATLAS7		0x20
#define SIRFSOC_DMA_CUR_DATA_ADDR		0x34
#define SIRFSOC_DMA_MUL_ATLAS7			0x38
#define SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7		0x158
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7	0x15C
#define SIRFSOC_DMA_IOBG_SCMD_EN		0x800
#define SIRFSOC_DMA_EARLY_RESP_SET		0x818
#define SIRFSOC_DMA_EARLY_RESP_CLR		0x81C

#define SIRFSOC_DMA_MODE_CTRL_BIT		4
#define SIRFSOC_DMA_DIR_CTRL_BIT		5
#define SIRFSOC_DMA_MODE_CTRL_BIT_ATLAS7	2
#define SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7	3
#define SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7		4
#define SIRFSOC_DMA_TAB_NUM_ATLAS7		7
#define SIRFSOC_DMA_CHAIN_INT_BIT_ATLAS7	5
#define SIRFSOC_DMA_CHAIN_FLAG_SHIFT_ATLAS7	25
#define SIRFSOC_DMA_CHAIN_ADDR_SHIFT		32

#define SIRFSOC_DMA_INT_FINI_INT_ATLAS7		BIT(0)
#define SIRFSOC_DMA_INT_CNT_INT_ATLAS7		BIT(1)
#define SIRFSOC_DMA_INT_PAU_INT_ATLAS7		BIT(2)
#define SIRFSOC_DMA_INT_LOOP_INT_ATLAS7		BIT(3)
#define SIRFSOC_DMA_INT_INV_INT_ATLAS7		BIT(4)
#define SIRFSOC_DMA_INT_END_INT_ATLAS7		BIT(5)
#define SIRFSOC_DMA_INT_ALL_ATLAS7		0x3F

/* The xlen and dma_width registers count transfers in 4-byte words */
#define SIRFSOC_DMA_WORD_LEN			4
#define SIRFSOC_DMA_XLEN_MAX_V1			0x800
#define SIRFSOC_DMA_XLEN_MAX_V2			0x1000

struct sirfsoc_dma_desc {
        struct dma_async_tx_descriptor	desc;
        struct list_head		node;

        /* SiRFprimaII 2D-DMA parameters */

        int		xlen;		/* DMA xlen */
        int		ylen;		/* DMA ylen */
        int		width;		/* DMA width */
        int		dir;
        bool		cyclic;		/* is loop DMA? */
        bool		chain;		/* is chain DMA? */
        u32		addr;		/* DMA buffer address */
        u64		chain_table[SIRFSOC_DMA_TABLE_NUM]; /* chain tbl */
};
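
/*
 * A note on the 2D geometry: xlen, ylen and width are all counted in
 * 4-byte words. Each line of a transfer moves xlen words out of a
 * buffer line that is width words long, and ylen + 1 lines make up
 * one transfer. As an illustrative example, xlen = 4, width = 6 and
 * ylen = 1 would move two lines of 16 data bytes each, advancing the
 * buffer address by 24 bytes per line.
 */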

struct sirfsoc_dma_chan {
        struct dma_chan			chan;
        struct list_head		free;
        struct list_head		prepared;
        struct list_head		queued;
        struct list_head		active;
        struct list_head		completed;
        unsigned long			happened_cyclic;
        unsigned long			completed_cyclic;

        /* Lock for this structure */
        spinlock_t			lock;

        int				mode;
};

struct sirfsoc_dma_regs {
        u32				ctrl[SIRFSOC_DMA_CHANNELS];
        u32				interrupt_en;
};

struct sirfsoc_dma {
        struct dma_device		dma;
        struct tasklet_struct		tasklet;
        struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
        void __iomem			*base;
        int				irq;
        struct clk			*clk;
        int				type;
        void (*exec_desc)(struct sirfsoc_dma_desc *sdesc,
                          int cid, int burst_mode, void __iomem *base);
        struct sirfsoc_dma_regs		regs_save;
};

struct sirfsoc_dmadata {
        void (*exec)(struct sirfsoc_dma_desc *sdesc,
                     int cid, int burst_mode, void __iomem *base);
        int type;
};

enum sirfsoc_dma_chain_flag {
        SIRFSOC_DMA_CHAIN_NORMAL = 0x01,
        SIRFSOC_DMA_CHAIN_PAUSE = 0x02,
        SIRFSOC_DMA_CHAIN_LOOP = 0x03,
        SIRFSOC_DMA_CHAIN_END = 0x04
};

#define DRV_NAME	"sirfsoc_dma"

static int sirfsoc_dma_runtime_suspend(struct device *dev);

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
        return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);

        return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

static void sirfsoc_dma_execute_hw_a7v2(struct sirfsoc_dma_desc *sdesc,
        int cid, int burst_mode, void __iomem *base)
{
        if (sdesc->chain) {
                /* DMA v2 HW chain mode */
                writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
                               (sdesc->chain <<
                                SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
                               (0x8 << SIRFSOC_DMA_TAB_NUM_ATLAS7) | 0x3,
                               base + SIRFSOC_DMA_CH_CTRL);
        } else {
                /* DMA v2 legacy mode */
                writel_relaxed(sdesc->xlen, base + SIRFSOC_DMA_CH_XLEN);
                writel_relaxed(sdesc->ylen, base + SIRFSOC_DMA_CH_YLEN);
                writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_ATLAS7);
                writel_relaxed((sdesc->width * ((sdesc->ylen + 1) >> 1)),
                               base + SIRFSOC_DMA_MUL_ATLAS7);
                writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
                               (sdesc->chain <<
                                SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
                               0x3, base + SIRFSOC_DMA_CH_CTRL);
        }
        writel_relaxed(sdesc->chain ? SIRFSOC_DMA_INT_END_INT_ATLAS7 :
                       (SIRFSOC_DMA_INT_FINI_INT_ATLAS7 |
                        SIRFSOC_DMA_INT_LOOP_INT_ATLAS7),
                       base + SIRFSOC_DMA_INT_EN_ATLAS7);
        writel(sdesc->addr, base + SIRFSOC_DMA_CH_ADDR);
        if (sdesc->cyclic)
                writel(0x10001, base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
}

static void sirfsoc_dma_execute_hw_a7v1(struct sirfsoc_dma_desc *sdesc,
        int cid, int burst_mode, void __iomem *base)
{
        writel_relaxed(1, base + SIRFSOC_DMA_IOBG_SCMD_EN);
        writel_relaxed((1 << cid), base + SIRFSOC_DMA_EARLY_RESP_SET);
        writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
        writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
                       (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
                       base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
        writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
        writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
        writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
                       (1 << cid), base + SIRFSOC_DMA_INT_EN);
        writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
        if (sdesc->cyclic) {
                writel((1 << cid) | 1 << (cid + 16) |
                       readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7),
                       base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
        }
}

static void sirfsoc_dma_execute_hw_a6(struct sirfsoc_dma_desc *sdesc,
        int cid, int burst_mode, void __iomem *base)
{
        writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
        writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
                       (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
                       base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
        writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
        writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
        writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
                       (1 << cid), base + SIRFSOC_DMA_INT_EN);
        writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
        if (sdesc->cyclic) {
                writel((1 << cid) | 1 << (cid + 16) |
                       readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL),
                       base + SIRFSOC_DMA_CH_LOOP_CTRL);
        }
}
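
/*
 * The three sirfsoc_dma_execute_hw_*() variants above are the per-SoC
 * register programming sequences. probe() picks one of them through
 * struct sirfsoc_dmadata, based on the matched compatible string, and
 * stores it in sdma->exec_desc so the rest of the driver stays
 * version-agnostic.
 */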

/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
        int cid = schan->chan.chan_id;
        struct sirfsoc_dma_desc *sdesc = NULL;
        void __iomem *base;

        /*
         * The lock is already held by the functions calling this one,
         * so we do not take it again here.
         */
        base = sdma->base;
        sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
                                 node);
        /* Move the first queued descriptor to active list */
        list_move_tail(&sdesc->node, &schan->active);

        if (sdma->type == SIRFSOC_DMA_VER_A7V2)
                cid = 0;

        /* Start the DMA transfer */
        sdma->exec_desc(sdesc, cid, schan->mode, base);

        if (sdesc->cyclic)
                schan->happened_cyclic = schan->completed_cyclic = 0;
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
        struct sirfsoc_dma *sdma = data;
        struct sirfsoc_dma_chan *schan;
        struct sirfsoc_dma_desc *sdesc = NULL;
        u32 is;
        bool chain;
        int ch;
        void __iomem *reg;

        switch (sdma->type) {
        case SIRFSOC_DMA_VER_A6:
        case SIRFSOC_DMA_VER_A7V1:
                is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
                reg = sdma->base + SIRFSOC_DMA_CH_INT;
                while ((ch = fls(is) - 1) >= 0) {
                        is &= ~(1 << ch);
                        writel_relaxed(1 << ch, reg);
                        schan = &sdma->channels[ch];
                        spin_lock(&schan->lock);
                        sdesc = list_first_entry(&schan->active,
                                                 struct sirfsoc_dma_desc, node);
                        if (!sdesc->cyclic) {
                                /* Execute queued descriptors */
                                list_splice_tail_init(&schan->active,
                                                      &schan->completed);
                                dma_cookie_complete(&sdesc->desc);
                                if (!list_empty(&schan->queued))
                                        sirfsoc_dma_execute(schan);
                        } else
                                schan->happened_cyclic++;
                        spin_unlock(&schan->lock);
                }
                break;

        case SIRFSOC_DMA_VER_A7V2:
                is = readl(sdma->base + SIRFSOC_DMA_INT_ATLAS7);

                reg = sdma->base + SIRFSOC_DMA_INT_ATLAS7;
                writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7, reg);
                schan = &sdma->channels[0];
                spin_lock(&schan->lock);
                sdesc = list_first_entry(&schan->active,
                                         struct sirfsoc_dma_desc, node);
                if (!sdesc->cyclic) {
                        chain = sdesc->chain;
                        if ((chain && (is & SIRFSOC_DMA_INT_END_INT_ATLAS7)) ||
                            (!chain &&
                             (is & SIRFSOC_DMA_INT_FINI_INT_ATLAS7))) {
                                /* Execute queued descriptors */
                                list_splice_tail_init(&schan->active,
                                                      &schan->completed);
                                dma_cookie_complete(&sdesc->desc);
                                if (!list_empty(&schan->queued))
                                        sirfsoc_dma_execute(schan);
                        }
                } else if (sdesc->cyclic && (is &
                           SIRFSOC_DMA_INT_LOOP_INT_ATLAS7))
                        schan->happened_cyclic++;

                spin_unlock(&schan->lock);
                break;

        default:
                break;
        }

        /* Schedule tasklet */
        tasklet_schedule(&sdma->tasklet);

        return IRQ_HANDLED;
}
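
/*
 * A6/A7V1 report one interrupt status bit per channel in
 * SIRFSOC_DMA_CH_INT, whereas A7V2 has a single channel whose status
 * bits in SIRFSOC_DMA_INT_ATLAS7 distinguish finish, loop and
 * chain-end events. Both paths above only mark completion and defer
 * the callback work to the tasklet below.
 */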

/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
        dma_cookie_t last_cookie = 0;
        struct sirfsoc_dma_chan *schan;
        struct sirfsoc_dma_desc *sdesc;
        struct dma_async_tx_descriptor *desc;
        unsigned long flags;
        unsigned long happened_cyclic;
        LIST_HEAD(list);
        int i;

        for (i = 0; i < sdma->dma.chancnt; i++) {
                schan = &sdma->channels[i];

                /* Get all completed descriptors */
                spin_lock_irqsave(&schan->lock, flags);
                if (!list_empty(&schan->completed)) {
                        list_splice_tail_init(&schan->completed, &list);
                        spin_unlock_irqrestore(&schan->lock, flags);

                        /* Execute callbacks and run dependencies */
                        list_for_each_entry(sdesc, &list, node) {
                                desc = &sdesc->desc;

                                dmaengine_desc_get_callback_invoke(desc, NULL);
                                last_cookie = desc->cookie;
                                dma_run_dependencies(desc);
                        }

                        /* Free descriptors */
                        spin_lock_irqsave(&schan->lock, flags);
                        list_splice_tail_init(&list, &schan->free);
                        schan->chan.completed_cookie = last_cookie;
                        spin_unlock_irqrestore(&schan->lock, flags);
                } else {
                        if (list_empty(&schan->active)) {
                                spin_unlock_irqrestore(&schan->lock, flags);
                                continue;
                        }

                        /* for cyclic channel, desc is always in active list */
                        sdesc = list_first_entry(&schan->active,
                                                 struct sirfsoc_dma_desc, node);

                        /* cyclic DMA */
                        happened_cyclic = schan->happened_cyclic;
                        spin_unlock_irqrestore(&schan->lock, flags);

                        desc = &sdesc->desc;
                        while (happened_cyclic != schan->completed_cyclic) {
                                dmaengine_desc_get_callback_invoke(desc, NULL);
                                schan->completed_cyclic++;
                        }
                }
        }
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(struct tasklet_struct *t)
{
        struct sirfsoc_dma *sdma = from_tasklet(sdma, t, tasklet);

        sirfsoc_dma_process_completed(sdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
        struct sirfsoc_dma_desc *sdesc;
        unsigned long flags;
        dma_cookie_t cookie;

        sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

        spin_lock_irqsave(&schan->lock, flags);

        /* Move descriptor to queue */
        list_move_tail(&sdesc->node, &schan->queued);

        cookie = dma_cookie_assign(txd);

        spin_unlock_irqrestore(&schan->lock, flags);

        return cookie;
}

static int sirfsoc_dma_slave_config(struct dma_chan *chan,
                                    struct dma_slave_config *config)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        unsigned long flags;

        if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
            (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
                return -EINVAL;

        spin_lock_irqsave(&schan->lock, flags);
        schan->mode = (config->src_maxburst == 4 ? 1 : 0);
        spin_unlock_irqrestore(&schan->lock, flags);

        return 0;
}
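
/*
 * schan->mode becomes the burst_mode argument of the exec_desc hook
 * and is shifted into CH_CTRL at SIRFSOC_DMA_MODE_CTRL_BIT: a
 * maxburst of 4 words selects burst mode, anything else single-word
 * mode.
 */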

static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
        int cid = schan->chan.chan_id;
        unsigned long flags;

        spin_lock_irqsave(&schan->lock, flags);

        switch (sdma->type) {
        case SIRFSOC_DMA_VER_A7V1:
                writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
                writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_INT);
                writel_relaxed((1 << cid) | 1 << (cid + 16),
                               sdma->base +
                               SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
                writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
                break;
        case SIRFSOC_DMA_VER_A7V2:
                writel_relaxed(0, sdma->base + SIRFSOC_DMA_INT_EN_ATLAS7);
                writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7,
                               sdma->base + SIRFSOC_DMA_INT_ATLAS7);
                writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
                writel_relaxed(0, sdma->base + SIRFSOC_DMA_VALID_ATLAS7);
                break;
        case SIRFSOC_DMA_VER_A6:
                writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
                               ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
                writel_relaxed(readl_relaxed(sdma->base +
                                             SIRFSOC_DMA_CH_LOOP_CTRL) &
                               ~((1 << cid) | 1 << (cid + 16)),
                               sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
                writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
                break;
        default:
                break;
        }

        list_splice_tail_init(&schan->active, &schan->free);
        list_splice_tail_init(&schan->queued, &schan->free);

        spin_unlock_irqrestore(&schan->lock, flags);

        return 0;
}

static int sirfsoc_dma_pause_chan(struct dma_chan *chan)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
        int cid = schan->chan.chan_id;
        unsigned long flags;

        spin_lock_irqsave(&schan->lock, flags);

        switch (sdma->type) {
        case SIRFSOC_DMA_VER_A7V1:
                writel_relaxed((1 << cid) | 1 << (cid + 16),
                               sdma->base +
                               SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
                break;
        case SIRFSOC_DMA_VER_A7V2:
                writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
                break;
        case SIRFSOC_DMA_VER_A6:
                writel_relaxed(readl_relaxed(sdma->base +
                                             SIRFSOC_DMA_CH_LOOP_CTRL) &
                               ~((1 << cid) | 1 << (cid + 16)),
                               sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
                break;
        default:
                break;
        }

        spin_unlock_irqrestore(&schan->lock, flags);

        return 0;
}

static int sirfsoc_dma_resume_chan(struct dma_chan *chan)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
        int cid = schan->chan.chan_id;
        unsigned long flags;

        spin_lock_irqsave(&schan->lock, flags);
        switch (sdma->type) {
        case SIRFSOC_DMA_VER_A7V1:
                writel_relaxed((1 << cid) | 1 << (cid + 16),
                               sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
                break;
        case SIRFSOC_DMA_VER_A7V2:
                writel_relaxed(0x10001,
                               sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
                break;
        case SIRFSOC_DMA_VER_A6:
                writel_relaxed(readl_relaxed(sdma->base +
                                             SIRFSOC_DMA_CH_LOOP_CTRL) |
                               ((1 << cid) | 1 << (cid + 16)),
                               sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
                break;
        default:
                break;
        }

        spin_unlock_irqrestore(&schan->lock, flags);

        return 0;
}

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma_desc *sdesc;
        unsigned long flags;
        LIST_HEAD(descs);
        int i;

        pm_runtime_get_sync(sdma->dma.dev);

        /* Alloc descriptors for this channel */
        for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
                sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
                if (!sdesc) {
                        dev_notice(sdma->dma.dev,
                                   "Memory allocation error. Allocated only %u descriptors\n",
                                   i);
                        break;
                }

                dma_async_tx_descriptor_init(&sdesc->desc, chan);
                sdesc->desc.flags = DMA_CTRL_ACK;
                sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

                list_add_tail(&sdesc->node, &descs);
        }

        /* Return error only if no descriptors were allocated */
        if (i == 0)
                return -ENOMEM;

        spin_lock_irqsave(&schan->lock, flags);

        list_splice_tail_init(&descs, &schan->free);
        spin_unlock_irqrestore(&schan->lock, flags);

        return i;
}

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
        struct sirfsoc_dma_desc *sdesc, *tmp;
        unsigned long flags;
        LIST_HEAD(descs);

        spin_lock_irqsave(&schan->lock, flags);

        /* Channel must be idle */
        BUG_ON(!list_empty(&schan->prepared));
        BUG_ON(!list_empty(&schan->queued));
        BUG_ON(!list_empty(&schan->active));
        BUG_ON(!list_empty(&schan->completed));

        /* Move data */
        list_splice_tail_init(&schan->free, &descs);

        spin_unlock_irqrestore(&schan->lock, flags);

        /* Free descriptors */
        list_for_each_entry_safe(sdesc, tmp, &descs, node)
                kfree(sdesc);

        pm_runtime_put(sdma->dma.dev);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&schan->lock, flags);

        if (list_empty(&schan->active) && !list_empty(&schan->queued))
                sirfsoc_dma_execute(schan);

        spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        struct dma_tx_state *txstate)
{
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        unsigned long flags;
        enum dma_status ret;
        struct sirfsoc_dma_desc *sdesc;
        int cid = schan->chan.chan_id;
        unsigned long dma_pos;
        unsigned long dma_request_bytes;
        unsigned long residue;

        spin_lock_irqsave(&schan->lock, flags);

        if (list_empty(&schan->active)) {
                ret = dma_cookie_status(chan, cookie, txstate);
                dma_set_residue(txstate, 0);
                spin_unlock_irqrestore(&schan->lock, flags);
                return ret;
        }
        sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
                                 node);
        if (sdesc->cyclic)
                dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
                        (sdesc->width * SIRFSOC_DMA_WORD_LEN);
        else
                dma_request_bytes = sdesc->xlen * SIRFSOC_DMA_WORD_LEN;

        ret = dma_cookie_status(chan, cookie, txstate);

        if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
                cid = 0;
                dma_pos = readl_relaxed(sdma->base + SIRFSOC_DMA_CUR_DATA_ADDR);
        } else {
                dma_pos = readl_relaxed(
                        sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) << 2;
        }

        residue = dma_request_bytes - (dma_pos - sdesc->addr);
        dma_set_residue(txstate, residue);

        spin_unlock_irqrestore(&schan->lock, flags);

        return ret;
}
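
/*
 * Residue arithmetic, as a worked example: for a non-cyclic transfer
 * of xlen = 0x100 words starting at addr, once the channel address
 * register has advanced by 0x200 bytes the reported residue is
 * 0x100 * SIRFSOC_DMA_WORD_LEN - 0x200 = 0x200 bytes left.
 */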

static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
        struct dma_chan *chan, struct dma_interleaved_template *xt,
        unsigned long flags)
{
        struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma_desc *sdesc = NULL;
        unsigned long iflags;
        int ret;

        if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
                ret = -EINVAL;
                goto err_dir;
        }

        /* Get free descriptor */
        spin_lock_irqsave(&schan->lock, iflags);
        if (!list_empty(&schan->free)) {
                sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
                                         node);
                list_del(&sdesc->node);
        }
        spin_unlock_irqrestore(&schan->lock, iflags);

        if (!sdesc) {
                /* try to free completed descriptors */
                sirfsoc_dma_process_completed(sdma);
                ret = 0;
                goto no_desc;
        }

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&schan->lock, iflags);

        /*
         * The number of chunks in a frame can only be 1 for prima2,
         * and ylen (the number of frames - 1) must be at least 0.
         */
        if ((xt->frame_size == 1) && (xt->numf > 0)) {
                sdesc->cyclic = 0;
                sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
                sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
                               SIRFSOC_DMA_WORD_LEN;
                sdesc->ylen = xt->numf - 1;
                if (xt->dir == DMA_MEM_TO_DEV) {
                        sdesc->addr = xt->src_start;
                        sdesc->dir = 1;
                } else {
                        sdesc->addr = xt->dst_start;
                        sdesc->dir = 0;
                }

                list_add_tail(&sdesc->node, &schan->prepared);
        } else {
                pr_err("sirfsoc DMA: invalid xfer\n");
                ret = -EINVAL;
                goto err_xfer;
        }
        spin_unlock_irqrestore(&schan->lock, iflags);

        return &sdesc->desc;
err_xfer:
        spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
        return ERR_PTR(ret);
}
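
/*
 * Illustrative client-side sketch (not part of this driver): a 2D
 * transfer of 16 frames of 64 data bytes with a 16-byte gap between
 * frames could be described with one chunk per frame, roughly:
 *
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = buf_phys;	(hypothetical DMA address)
 *	xt->numf = 16;
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = 64;
 *	xt->sgl[0].icg = 16;
 *
 * which maps to xlen = 16, width = 20 and ylen = 15 words here.
 */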

static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
        size_t buf_len, size_t period_len,
        enum dma_transfer_direction direction, unsigned long flags)
{
        struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
        struct sirfsoc_dma_desc *sdesc = NULL;
        unsigned long iflags;

        /*
         * We only support cyclic transfers with 2 periods.
         * If the X-length is set to 0, the controller runs in loop mode:
         * the DMA address keeps increasing until it reaches the end of a
         * loop area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)),
         * then wraps back to the beginning of this area. In loop mode the
         * DMA data region is divided into two parts, BUFA and BUFB, and
         * the controller raises an interrupt twice per loop: once when
         * the DMA address reaches the end of BUFA and once at the end of
         * BUFB.
         */
        if (buf_len != 2 * period_len)
                return ERR_PTR(-EINVAL);

        /* Get free descriptor */
        spin_lock_irqsave(&schan->lock, iflags);
        if (!list_empty(&schan->free)) {
                sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
                                         node);
                list_del(&sdesc->node);
        }
        spin_unlock_irqrestore(&schan->lock, iflags);

        if (!sdesc)
                return NULL;

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&schan->lock, iflags);
        sdesc->addr = addr;
        sdesc->cyclic = 1;
        sdesc->xlen = 0;
        sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
        sdesc->width = 1;
        list_add_tail(&sdesc->node, &schan->prepared);
        spin_unlock_irqrestore(&schan->lock, iflags);

        return &sdesc->desc;
}
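
/*
 * Because of the BUFA/BUFB split described above, callers must pass
 * buf_len == 2 * period_len; e.g. an 8192-byte ring buffer would be
 * prepared with period_len = 4096 and raises a loop interrupt at the
 * end of each half.
 */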

/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function.
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
        unsigned int ch_nr = (unsigned int)chan_id;

        if (ch_nr == chan->chan_id +
                     chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
                return true;

        return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);
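
/*
 * Illustrative usage (not part of this driver): a client could claim
 * channel 12 of DMAC 0 through the generic dmaengine request API:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id,
 *				   (void *)(0 * SIRFSOC_DMA_CHANNELS + 12));
 */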

#define SIRFSOC_DMA_BUSWIDTHS \
        (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
         BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
         BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
         BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
         BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
                                             struct of_dma *ofdma)
{
        struct sirfsoc_dma *sdma = ofdma->of_dma_data;
        unsigned int request = dma_spec->args[0];

        if (request >= SIRFSOC_DMA_CHANNELS)
                return NULL;

        return dma_get_slave_channel(&sdma->channels[request].chan);
}
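
/*
 * Device-tree sketch (assumed binding with one cell naming the
 * channel; node names and addresses are illustrative):
 *
 *	uart: serial@b0060000 {
 *		dmas = <&dmac1 5>, <&dmac0 2>;
 *		dma-names = "rx", "tx";
 *	};
 *
 * args[0] selects the channel within this DMAC, as range-checked
 * above.
 */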

static int sirfsoc_dma_probe(struct platform_device *op)
{
        struct device_node *dn = op->dev.of_node;
        struct device *dev = &op->dev;
        struct dma_device *dma;
        struct sirfsoc_dma *sdma;
        struct sirfsoc_dma_chan *schan;
        struct sirfsoc_dmadata *data;
        struct resource res;
        ulong regs_start, regs_size;
        u32 id;
        int ret, i;

        sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
        if (!sdma)
                return -ENOMEM;

        data = (struct sirfsoc_dmadata *)
                (of_match_device(op->dev.driver->of_match_table,
                                 &op->dev)->data);
        sdma->exec_desc = data->exec;
        sdma->type = data->type;

        if (of_property_read_u32(dn, "cell-index", &id)) {
                dev_err(dev, "Failed to get DMAC index\n");
                return -ENODEV;
        }

        sdma->irq = irq_of_parse_and_map(dn, 0);
        if (!sdma->irq) {
                dev_err(dev, "Error mapping IRQ!\n");
                return -EINVAL;
        }

        sdma->clk = devm_clk_get(dev, NULL);
        if (IS_ERR(sdma->clk)) {
                dev_err(dev, "failed to get a clock.\n");
                return PTR_ERR(sdma->clk);
        }

        ret = of_address_to_resource(dn, 0, &res);
        if (ret) {
                dev_err(dev, "Error parsing memory region!\n");
                goto irq_dispose;
        }

        regs_start = res.start;
        regs_size = resource_size(&res);

        sdma->base = devm_ioremap(dev, regs_start, regs_size);
        if (!sdma->base) {
                dev_err(dev, "Error mapping memory region!\n");
                ret = -ENOMEM;
                goto irq_dispose;
        }

        ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
        if (ret) {
                dev_err(dev, "Error requesting IRQ!\n");
                ret = -EINVAL;
                goto irq_dispose;
        }

        dma = &sdma->dma;
        dma->dev = dev;

        dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
        dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
        dma->device_issue_pending = sirfsoc_dma_issue_pending;
        dma->device_config = sirfsoc_dma_slave_config;
        dma->device_pause = sirfsoc_dma_pause_chan;
        dma->device_resume = sirfsoc_dma_resume_chan;
        dma->device_terminate_all = sirfsoc_dma_terminate_all;
        dma->device_tx_status = sirfsoc_dma_tx_status;
        dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
        dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
        dma->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
        dma->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
        dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);

        INIT_LIST_HEAD(&dma->channels);
        dma_cap_set(DMA_SLAVE, dma->cap_mask);
        dma_cap_set(DMA_CYCLIC, dma->cap_mask);
        dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
        dma_cap_set(DMA_PRIVATE, dma->cap_mask);

        for (i = 0; i < SIRFSOC_DMA_CHANNELS; i++) {
                schan = &sdma->channels[i];

                schan->chan.device = dma;
                dma_cookie_init(&schan->chan);

                INIT_LIST_HEAD(&schan->free);
                INIT_LIST_HEAD(&schan->prepared);
                INIT_LIST_HEAD(&schan->queued);
                INIT_LIST_HEAD(&schan->active);
                INIT_LIST_HEAD(&schan->completed);

                spin_lock_init(&schan->lock);
                list_add_tail(&schan->chan.device_node, &dma->channels);
        }

        tasklet_setup(&sdma->tasklet, sirfsoc_dma_tasklet);

        /* Register DMA engine */
        dev_set_drvdata(dev, sdma);

        ret = dma_async_device_register(dma);
        if (ret)
                goto free_irq;

        /* Device-tree DMA controller registration */
        ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
        if (ret) {
                dev_err(dev, "failed to register DMA controller\n");
                goto unreg_dma_dev;
        }

        pm_runtime_enable(&op->dev);
        dev_info(dev, "initialized SIRFSOC DMAC driver\n");

        return 0;

unreg_dma_dev:
        dma_async_device_unregister(dma);
free_irq:
        free_irq(sdma->irq, sdma);
irq_dispose:
        irq_dispose_mapping(sdma->irq);
        return ret;
}

static int sirfsoc_dma_remove(struct platform_device *op)
{
        struct device *dev = &op->dev;
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

        of_dma_controller_free(op->dev.of_node);
        dma_async_device_unregister(&sdma->dma);
        free_irq(sdma->irq, sdma);
        tasklet_kill(&sdma->tasklet);
        irq_dispose_mapping(sdma->irq);
        pm_runtime_disable(&op->dev);
        if (!pm_runtime_status_suspended(&op->dev))
                sirfsoc_dma_runtime_suspend(&op->dev);

        return 0;
}

static int __maybe_unused sirfsoc_dma_runtime_suspend(struct device *dev)
{
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

        clk_disable_unprepare(sdma->clk);
        return 0;
}

static int __maybe_unused sirfsoc_dma_runtime_resume(struct device *dev)
{
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
        int ret;

        ret = clk_prepare_enable(sdma->clk);
        if (ret < 0) {
                dev_err(dev, "clk_enable failed: %d\n", ret);
                return ret;
        }
        return 0;
}

static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
{
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
        struct sirfsoc_dma_regs *save = &sdma->regs_save;
        struct sirfsoc_dma_chan *schan;
        int ch;
        int ret;
        int count;
        u32 int_offset;

        /*
         * If we were runtime-suspended before, resume to enable the clock
         * before accessing registers.
         */
        if (pm_runtime_status_suspended(dev)) {
                ret = sirfsoc_dma_runtime_resume(dev);
                if (ret < 0)
                        return ret;
        }

        if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
                count = 1;
                int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
        } else {
                count = SIRFSOC_DMA_CHANNELS;
                int_offset = SIRFSOC_DMA_INT_EN;
        }

        /*
         * The DMA controller loses all register contents while suspended,
         * so save the registers of the active channels here.
         */
        for (ch = 0; ch < count; ch++) {
                schan = &sdma->channels[ch];
                if (list_empty(&schan->active))
                        continue;
                save->ctrl[ch] = readl_relaxed(sdma->base +
                                               ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
        }
        save->interrupt_en = readl_relaxed(sdma->base + int_offset);

        /* Disable clock */
        sirfsoc_dma_runtime_suspend(dev);

        return 0;
}

static int __maybe_unused sirfsoc_dma_pm_resume(struct device *dev)
{
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
        struct sirfsoc_dma_regs *save = &sdma->regs_save;
        struct sirfsoc_dma_desc *sdesc;
        struct sirfsoc_dma_chan *schan;
        int ch;
        int ret;
        int count;
        u32 int_offset;
        u32 width_offset;

        /* Enable clock before accessing registers */
        ret = sirfsoc_dma_runtime_resume(dev);
        if (ret < 0)
                return ret;

        if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
                count = 1;
                int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
                width_offset = SIRFSOC_DMA_WIDTH_ATLAS7;
        } else {
                count = SIRFSOC_DMA_CHANNELS;
                int_offset = SIRFSOC_DMA_INT_EN;
                width_offset = SIRFSOC_DMA_WIDTH_0;
        }

        writel_relaxed(save->interrupt_en, sdma->base + int_offset);
        for (ch = 0; ch < count; ch++) {
                schan = &sdma->channels[ch];
                if (list_empty(&schan->active))
                        continue;
                sdesc = list_first_entry(&schan->active,
                                         struct sirfsoc_dma_desc,
                                         node);
                writel_relaxed(sdesc->width,
                               sdma->base + width_offset + ch * 4);
                writel_relaxed(sdesc->xlen,
                               sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
                writel_relaxed(sdesc->ylen,
                               sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
                writel_relaxed(save->ctrl[ch],
                               sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
                if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
                        writel_relaxed(sdesc->addr,
                                       sdma->base + SIRFSOC_DMA_CH_ADDR);
                } else {
                        writel_relaxed(sdesc->addr >> 2,
                                       sdma->base + ch * 0x10 +
                                       SIRFSOC_DMA_CH_ADDR);
                }
        }

        /* if we were runtime-suspended before, suspend again */
        if (pm_runtime_status_suspended(dev))
                sirfsoc_dma_runtime_suspend(dev);

        return 0;
}

static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
        SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend,
                           sirfsoc_dma_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
};

static struct sirfsoc_dmadata sirfsoc_dmadata_a6 = {
        .exec = sirfsoc_dma_execute_hw_a6,
        .type = SIRFSOC_DMA_VER_A6,
};

static struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = {
        .exec = sirfsoc_dma_execute_hw_a7v1,
        .type = SIRFSOC_DMA_VER_A7V1,
};

static struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = {
        .exec = sirfsoc_dma_execute_hw_a7v2,
        .type = SIRFSOC_DMA_VER_A7V2,
};

static const struct of_device_id sirfsoc_dma_match[] = {
        { .compatible = "sirf,prima2-dmac", .data = &sirfsoc_dmadata_a6,},
        { .compatible = "sirf,atlas7-dmac", .data = &sirfsoc_dmadata_a7v1,},
        { .compatible = "sirf,atlas7-dmac-v2", .data = &sirfsoc_dmadata_a7v2,},
        {},
};
MODULE_DEVICE_TABLE(of, sirfsoc_dma_match);

static struct platform_driver sirfsoc_dma_driver = {
        .probe = sirfsoc_dma_probe,
        .remove = sirfsoc_dma_remove,
        .driver = {
                .name = DRV_NAME,
                .pm = &sirfsoc_dma_pm_ops,
                .of_match_table = sirfsoc_dma_match,
        },
};

static int __init sirfsoc_dma_init(void)
{
        return platform_driver_register(&sirfsoc_dma_driver);
}

static void __exit sirfsoc_dma_exit(void)
{
        platform_driver_unregister(&sirfsoc_dma_driver);
}

subsys_initcall(sirfsoc_dma_init);
module_exit(sirfsoc_dma_exit);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>");
MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");