/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

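/*
 * Descriptive note (added): mv_desc_init() marks a hardware descriptor
 * as owned/enabled by setting bit 31 of both the status and command
 * words, and clears the next-descriptor pointer so the descriptor
 * initially terminates the chain.
 */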
static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = (1 << 31);
	hw_desc->phy_next_desc = 0;
	hw_desc->desc_command = (1 << 31);
}

static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
				   u32 byte_count)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_dest_addr(struct mv_xor_desc_slot *desc,
				  dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_dest_addr = addr;
}

static int mv_chan_memset_slot_count(size_t len)
{
	return 1;
}

#define mv_chan_memcpy_slot_count(c) mv_chan_memset_slot_count(c)

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

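/*
 * Descriptive note (added): the channel register accessors below use
 * readl_relaxed()/writel_relaxed(), i.e. no memory barriers. The one
 * deliberate exception is mv_chan_activate(), which uses plain writel()
 * so that the descriptor writes in memory are ordered before the
 * activation write (see the comment there).
 */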
static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static int mv_is_err_intr(u32 intr_cause)
{
	if (intr_cause & ((1<<4)|(1<<5)|(1<<6)|(1<<7)|(1<<8)|(1<<9)))
		return 1;

	return 0;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val = ~(1 << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

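/*
 * Descriptive note (added): a descriptor can only be appended to the
 * live hardware chain when it performs the same operation type as the
 * current chain tail; switching between XOR and memcpy requires
 * reprogramming the mode via mv_set_mode() and starting a new chain.
 */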
static int mv_can_chain(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc_slot *chain_old_tail = list_entry(
		desc->chain_node.prev, struct mv_xor_desc_slot, chain_node);

	if (chain_old_tail->type != desc->type)
		return 0;

	return 1;
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

static int mv_chan_xor_slot_count(size_t len, int src_cnt)
{
	return 1;
}

/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slots_per_op = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on a new chain
 * headed by sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);
	if (sw_desc->type != mv_chan->current_type)
		mv_set_mode(mv_chan, sw_desc->type);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending += sw_desc->slot_cnt;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

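/*
 * Descriptive note (added): completion path. For every finished
 * descriptor we record its cookie, invoke the client callback (in
 * tasklet context, so it must not sleep), drop the DMA mappings via
 * dma_descriptor_unmap(), and finally kick any async_tx transactions
 * that depend on this one.
 */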
static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
	struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
		if (desc->group_head)
			desc->group_head = NULL;
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
		  struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

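/*
 * Descriptive note (added): __mv_xor_slot_cleanup() walks the software
 * chain from the oldest descriptor, completing everything the engine
 * has already passed. The walk stops at the descriptor currently
 * loaded in the hardware while the channel is busy, and restarts the
 * chain if the engine went idle with work still queued.
 */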
static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void
mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	spin_lock_bh(&mv_chan->lock);
	__mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
	mv_xor_slot_cleanup(chan);
}

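/*
 * Descriptive note (added): slot allocator. Scans the all_slots list
 * for num_slots free, contiguous descriptor slots, starting just after
 * the last allocation; on the retry pass it restarts from the list
 * head and gives up at the first busy slot, scheduling the cleanup
 * tasklet to reclaim slots. In this driver the slot-count helpers
 * always return 1, so num_slots and slots_per_op are 1 in practice.
 */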
static struct mv_xor_desc_slot *
mv_xor_alloc_slots(struct mv_xor_chan *mv_chan, int num_slots,
		   int slots_per_op)
{
	struct mv_xor_desc_slot *iter, *_iter, *alloc_start = NULL;
	LIST_HEAD(chain);
	int slots_found, retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation can not be found start searching
	 * from the beginning of the list
	 */
retry:
	slots_found = 0;
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slots_per_op) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;

			slots_found = 0;
			continue;
		}

		/* start the allocation if the slot is correctly aligned */
		if (!slots_found++)
			alloc_start = iter;

		if (slots_found == num_slots) {
			struct mv_xor_desc_slot *alloc_tail = NULL;
			struct mv_xor_desc_slot *last_used = NULL;
			iter = alloc_start;
			while (num_slots) {
				int i;

				/* pre-ack all but the last descriptor */
				async_tx_ack(&iter->async_tx);

				list_add_tail(&iter->chain_node, &chain);
				alloc_tail = iter;
				iter->async_tx.cookie = 0;
				iter->slot_cnt = num_slots;
				iter->xor_check_result = NULL;
				for (i = 0; i < slots_per_op; i++) {
					iter->slots_per_op = slots_per_op - i;
					last_used = iter;
					iter = list_entry(iter->slot_node.next,
						struct mv_xor_desc_slot,
						slot_node);
				}
				num_slots -= slots_per_op;
			}
			alloc_tail->group_head = alloc_start;
			alloc_tail->async_tx.cookie = -EBUSY;
			list_splice(&chain, &alloc_tail->tx_list);
			mv_chan->last_used = last_used;
			mv_desc_clear_next_desc(alloc_start);
			mv_desc_clear_next_desc(alloc_tail);
			return alloc_tail;
		}
	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *grp_start, *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	grp_start = sw_desc->group_head;

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_splice_init(&sw_desc->tx_list, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_splice_init(&grp_start->tx_list,
				 &old_chain_tail->chain_node);

		if (!mv_can_chain(grp_start))
			goto submit_done;

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, grp_start);

submit_done:
	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			printk(KERN_INFO "MV XOR Channel only initialized %d descriptor slots\n",
			       idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		INIT_LIST_HEAD(&slot->tx_list);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

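/*
 * Descriptive note (added): memcpy preparation uses a single hardware
 * descriptor carrying source, destination and byte count. Transfers
 * shorter than MV_XOR_MIN_BYTE_COUNT are rejected by returning NULL,
 * which lets the async_tx layer fall back to a CPU copy.
 */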
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		       size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s dest: %pad src %pad len: %u flags: %ld\n",
		__func__, &dest, &src, len, flags);
	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_memcpy_slot_count(len);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_MEMCPY;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		mv_desc_set_src_addr(grp_start, 0, src);
		sw_desc->unmap_src_cnt = 1;
		sw_desc->unmap_len = len;
	}
	spin_unlock_bh(&mv_chan->lock);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);

	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc, *grp_start;
	int slot_cnt;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %u dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	spin_lock_bh(&mv_chan->lock);
	slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
	sw_desc = mv_xor_alloc_slots(mv_chan, slot_cnt, 1);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		grp_start = sw_desc->group_head;
		mv_desc_init(grp_start, flags);
		/* the byte count field is the same as in memcpy desc */
		mv_desc_set_byte_count(grp_start, len);
		mv_desc_set_dest_addr(sw_desc->group_head, dest);
		sw_desc->unmap_src_cnt = src_cnt;
		sw_desc->unmap_len = len;
		while (src_cnt--)
			mv_desc_set_src_addr(grp_start, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, sw_desc ? &sw_desc->async_tx : NULL);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	mv_xor_slot_cleanup(mv_chan);

	spin_lock_bh(&mv_chan->lock);
	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transaction's state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		mv_xor_clean_completed_slots(mv_chan);
		return ret;
	}
	mv_xor_slot_cleanup(mv_chan);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & (1 << 4)) {
		dev_dbg(mv_chan_to_devp(chan),
			"ignore this error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan),
		"error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	BUG();
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (mv_is_err_intr(intr_cause))
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

/*
 * Perform a transaction to verify the HW works.
 */
static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
			       PAGE_SIZE, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[0] = src_dma;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;
	unmap->addr[1] = dest_dma;

	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

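/*
 * Descriptive note (added): in the XOR self-test below, source page i
 * is filled with the byte (1 << i), so XOR-ing all MV_XOR_NUM_SRC_TEST
 * pages must produce cmp_byte in every byte of the destination;
 * cmp_word is simply that byte replicated across 32 bits.
 */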
#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0,
					      PAGE_SIZE, DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);

	cookie = mv_xor_tx_submit(tx);
	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

/* This driver does not implement any of the optional DMA operations. */
static int
mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	       unsigned long arg)
{
	return -ENOSYS;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

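/*
 * Descriptive note (added): channel bring-up order below is: allocate
 * the coherent descriptor pool, advertise the requested capabilities,
 * hook up the interrupt, clear and unmask error/EOC causes, run the
 * memcpy and XOR self-tests, and only then register the channel with
 * the dmaengine core.
 */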
static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;

	dma_dev = &mv_chan->dmadev;

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->device_control = mv_xor_control;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_MEMCPY);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

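/*
 * Descriptive note (added): programs the XOR engine's MBus
 * address-decoding windows so each DRAM chip select is reachable for
 * DMA. All eight windows are first disabled, then one window per chip
 * select is configured from the mbus_dram_target_info and enabled with
 * read/write access in the window-enable register.
 */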
static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

#ifdef CONFIG_OF
static struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.owner	        = THIS_MODULE,
		.name	        = MV_XOR_NAME,
		.of_match_table = of_match_ptr(mv_xor_dt_ids),
	},
};

static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");