/*
 * offload engine driver for the Marvell XOR engine
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/memory.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_data/dma-mv_xor.h>

#include "dmaengine.h"
#include "mv_xor.h"

static void mv_xor_issue_pending(struct dma_chan *chan);

#define to_mv_xor_chan(chan)		\
	container_of(chan, struct mv_xor_chan, dmachan)

#define to_mv_xor_slot(tx)		\
	container_of(tx, struct mv_xor_desc_slot, async_tx)

#define mv_chan_to_devp(chan)		\
	((chan)->dmadev.dev)

static void mv_desc_init(struct mv_xor_desc_slot *desc,
			 dma_addr_t addr, u32 byte_count,
			 enum dma_ctrl_flags flags)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;

	hw_desc->status = XOR_DESC_DMA_OWNED;
	hw_desc->phy_next_desc = 0;
	/* Enable end-of-descriptor interrupts only for DMA_PREP_INTERRUPT */
	hw_desc->desc_command = (flags & DMA_PREP_INTERRUPT) ?
				XOR_DESC_EOD_INT_EN : 0;
	hw_desc->phy_dest_addr = addr;
	hw_desc->byte_count = byte_count;
}

static void mv_desc_set_next_desc(struct mv_xor_desc_slot *desc,
				  u32 next_desc_addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	BUG_ON(hw_desc->phy_next_desc);
	hw_desc->phy_next_desc = next_desc_addr;
}

static void mv_desc_clear_next_desc(struct mv_xor_desc_slot *desc)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_next_desc = 0;
}

static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
				 int index, dma_addr_t addr)
{
	struct mv_xor_desc *hw_desc = desc->hw_desc;
	hw_desc->phy_src_addr[mv_phy_src_idx(index)] = addr;
	if (desc->type == DMA_XOR)
		hw_desc->desc_command |= (1 << index);
}

static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
{
	return readl_relaxed(XOR_CURR_DESC(chan));
}

static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
					u32 next_desc_addr)
{
	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
}

static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
{
	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_MASK(chan));
}

static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
{
	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
	return intr_cause;
}

static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
{
	u32 val;

	val = XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | XOR_INT_STOPPED;
	val = ~(val << (chan->idx * 16));
	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
{
	u32 val = 0xFFFF0000 >> (chan->idx * 16);
	writel_relaxed(val, XOR_INTR_CAUSE(chan));
}

static void mv_set_mode(struct mv_xor_chan *chan,
			enum dma_transaction_type type)
{
	u32 op_mode;
	u32 config = readl_relaxed(XOR_CONFIG(chan));

	switch (type) {
	case DMA_XOR:
		op_mode = XOR_OPERATION_MODE_XOR;
		break;
	case DMA_MEMCPY:
		op_mode = XOR_OPERATION_MODE_MEMCPY;
		break;
	default:
		dev_err(mv_chan_to_devp(chan),
			"error: unsupported operation %d\n",
			type);
		BUG();
		return;
	}

	config &= ~0x7;
	config |= op_mode;

#if defined(__BIG_ENDIAN)
	config |= XOR_DESCRIPTOR_SWAP;
#else
	config &= ~XOR_DESCRIPTOR_SWAP;
#endif

	writel_relaxed(config, XOR_CONFIG(chan));
	chan->current_type = type;
}

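/*
 * Note on the register writes above: the operation mode lives in the
 * low three bits of XOR_CONFIG (hence "config &= ~0x7"), and the
 * XOR_DESCRIPTOR_SWAP bit makes the engine byte-swap each descriptor
 * word it fetches, so that descriptors written by a big-endian CPU
 * are interpreted correctly by the little-endian engine.
 */
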
static void mv_chan_activate(struct mv_xor_chan *chan)
{
	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");

	/* writel ensures all descriptors are flushed before activation */
	writel(BIT(0), XOR_ACTIVATION(chan));
}

static char mv_chan_is_busy(struct mv_xor_chan *chan)
{
	u32 state = readl_relaxed(XOR_ACTIVATION(chan));

	state = (state >> 4) & 0x3;

	return (state == 1) ? 1 : 0;
}

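/*
 * Per the decoding in mv_chan_is_busy() above, the channel state is
 * reported in bits [5:4] of the activation register, and a value of 1
 * is taken to mean the engine is actively executing a descriptor
 * chain.
 */
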
/**
 * mv_xor_free_slots - flags descriptor slots for reuse
 * @mv_chan: XOR channel on which the slot was allocated
 * @slot: Slot to free
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_free_slots(struct mv_xor_chan *mv_chan,
			      struct mv_xor_desc_slot *slot)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d slot %p\n",
		__func__, __LINE__, slot);

	slot->slot_used = 0;
}

/*
 * mv_xor_start_new_chain - program the engine to operate on new chain headed by
 * sw_desc
 * Caller must hold &mv_chan->lock while calling this function
 */
static void mv_xor_start_new_chain(struct mv_xor_chan *mv_chan,
				   struct mv_xor_desc_slot *sw_desc)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: sw_desc %p\n",
		__func__, __LINE__, sw_desc);

	/* set the hardware chain */
	mv_chan_set_next_descriptor(mv_chan, sw_desc->async_tx.phys);

	mv_chan->pending++;
	mv_xor_issue_pending(&mv_chan->dmachan);
}

static dma_cookie_t
mv_xor_run_tx_complete_actions(struct mv_xor_desc_slot *desc,
			       struct mv_xor_chan *mv_chan, dma_cookie_t cookie)
{
	BUG_ON(desc->async_tx.cookie < 0);

	if (desc->async_tx.cookie > 0) {
		cookie = desc->async_tx.cookie;

		/* call the callback (must not sleep or submit new
		 * operations to this channel)
		 */
		if (desc->async_tx.callback)
			desc->async_tx.callback(
				desc->async_tx.callback_param);

		dma_descriptor_unmap(&desc->async_tx);
	}

	/* run dependent operations */
	dma_run_dependencies(&desc->async_tx);

	return cookie;
}

static int
mv_xor_clean_completed_slots(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {

		if (async_tx_test_ack(&iter->async_tx)) {
			list_del(&iter->completed_node);
			mv_xor_free_slots(mv_chan, iter);
		}
	}
	return 0;
}

static int
mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
		  struct mv_xor_chan *mv_chan)
{
	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d: desc %p flags %d\n",
		__func__, __LINE__, desc, desc->async_tx.flags);
	list_del(&desc->chain_node);
	/* the client is allowed to attach dependent operations
	 * until 'ack' is set
	 */
	if (!async_tx_test_ack(&desc->async_tx)) {
		/* move this slot to the completed_slots */
		list_add_tail(&desc->completed_node, &mv_chan->completed_slots);
		return 0;
	}

	mv_xor_free_slots(mv_chan, desc);
	return 0;
}

/* This function must be called with the mv_xor_chan spinlock held */
static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	dma_cookie_t cookie = 0;
	int busy = mv_chan_is_busy(mv_chan);
	u32 current_desc = mv_chan_get_current_desc(mv_chan);
	int seen_current = 0;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s %d\n", __func__, __LINE__);
	dev_dbg(mv_chan_to_devp(mv_chan), "current_desc %x\n", current_desc);
	mv_xor_clean_completed_slots(mv_chan);

	/* free completed slots from the chain starting with
	 * the oldest descriptor
	 */

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		prefetch(_iter);
		prefetch(&_iter->async_tx);

		/* do not advance past the current descriptor loaded into the
		 * hardware channel, subsequent descriptors are either in
		 * process or have not been submitted
		 */
		if (seen_current)
			break;

		/* stop the search if we reach the current descriptor and the
		 * channel is busy
		 */
		if (iter->async_tx.phys == current_desc) {
			seen_current = 1;
			if (busy)
				break;
		}

		cookie = mv_xor_run_tx_complete_actions(iter, mv_chan, cookie);

		if (mv_xor_clean_slot(iter, mv_chan))
			break;
	}

	if ((busy == 0) && !list_empty(&mv_chan->chain)) {
		struct mv_xor_desc_slot *chain_head;
		chain_head = list_entry(mv_chan->chain.next,
					struct mv_xor_desc_slot,
					chain_node);

		mv_xor_start_new_chain(mv_chan, chain_head);
	}

	if (cookie > 0)
		mv_chan->dmachan.completed_cookie = cookie;
}

static void mv_xor_tasklet(unsigned long data)
{
	struct mv_xor_chan *chan = (struct mv_xor_chan *) data;

	spin_lock_bh(&chan->lock);
	mv_xor_slot_cleanup(chan);
	spin_unlock_bh(&chan->lock);
}

static struct mv_xor_desc_slot *
mv_xor_alloc_slot(struct mv_xor_chan *mv_chan)
{
	struct mv_xor_desc_slot *iter, *_iter;
	int retry = 0;

	/* start search from the last allocated descriptor
	 * if a contiguous allocation cannot be found start searching
	 * from the beginning of the list
	 */
retry:
	if (retry == 0)
		iter = mv_chan->last_used;
	else
		iter = list_entry(&mv_chan->all_slots,
				  struct mv_xor_desc_slot,
				  slot_node);

	list_for_each_entry_safe_continue(
		iter, _iter, &mv_chan->all_slots, slot_node) {

		prefetch(_iter);
		prefetch(&_iter->async_tx);
		if (iter->slot_used) {
			/* give up after finding the first busy slot
			 * on the second pass through the list
			 */
			if (retry)
				break;
			continue;
		}

		/* pre-ack descriptor */
		async_tx_ack(&iter->async_tx);

		iter->slot_used = 1;
		INIT_LIST_HEAD(&iter->chain_node);
		iter->async_tx.cookie = -EBUSY;
		mv_chan->last_used = iter;
		mv_desc_clear_next_desc(iter);

		return iter;

	}
	if (!retry++)
		goto retry;

	/* try to free some slots if the allocation fails */
	tasklet_schedule(&mv_chan->irq_tasklet);

	return NULL;
}

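/*
 * Note: when both passes over the slot list fail, mv_xor_alloc_slot()
 * schedules the cleanup tasklet before returning NULL, so completed
 * descriptors get reclaimed and a later prep call from the client has
 * a chance to succeed.
 */
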
/************************ DMA engine API functions ****************************/
static dma_cookie_t
mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mv_xor_desc_slot *sw_desc = to_mv_xor_slot(tx);
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(tx->chan);
	struct mv_xor_desc_slot *old_chain_tail;
	dma_cookie_t cookie;
	int new_hw_chain = 1;

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p: async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);

	spin_lock_bh(&mv_chan->lock);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&mv_chan->chain))
		list_add_tail(&sw_desc->chain_node, &mv_chan->chain);
	else {
		new_hw_chain = 0;

		old_chain_tail = list_entry(mv_chan->chain.prev,
					    struct mv_xor_desc_slot,
					    chain_node);
		list_add_tail(&sw_desc->chain_node, &mv_chan->chain);

		dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
			&old_chain_tail->async_tx.phys);

		/* fix up the hardware chain */
		mv_desc_set_next_desc(old_chain_tail, sw_desc->async_tx.phys);

		/* if the channel is not busy */
		if (!mv_chan_is_busy(mv_chan)) {
			u32 current_desc = mv_chan_get_current_desc(mv_chan);
			/*
			 * and the current desc is the end of the chain before
			 * the append, then we need to start the channel
			 */
			if (current_desc == old_chain_tail->async_tx.phys)
				new_hw_chain = 1;
		}
	}

	if (new_hw_chain)
		mv_xor_start_new_chain(mv_chan, sw_desc);

	spin_unlock_bh(&mv_chan->lock);

	return cookie;
}

/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
	void *virt_desc;
	dma_addr_t dma_desc;
	int idx;
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *slot = NULL;
	int num_descs_in_pool = MV_XOR_POOL_SIZE/MV_XOR_SLOT_SIZE;

	/* Allocate descriptor slots */
	idx = mv_chan->slots_allocated;
	while (idx < num_descs_in_pool) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			dev_info(mv_chan_to_devp(mv_chan),
				 "channel only initialized %d descriptor slots",
				 idx);
			break;
		}
		virt_desc = mv_chan->dma_desc_pool_virt;
		slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;

		dma_async_tx_descriptor_init(&slot->async_tx, chan);
		slot->async_tx.tx_submit = mv_xor_tx_submit;
		INIT_LIST_HEAD(&slot->chain_node);
		INIT_LIST_HEAD(&slot->slot_node);
		dma_desc = mv_chan->dma_desc_pool;
		slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
		slot->idx = idx++;

		spin_lock_bh(&mv_chan->lock);
		mv_chan->slots_allocated = idx;
		list_add_tail(&slot->slot_node, &mv_chan->all_slots);
		spin_unlock_bh(&mv_chan->lock);
	}

	if (mv_chan->slots_allocated && !mv_chan->last_used)
		mv_chan->last_used = list_entry(mv_chan->all_slots.next,
						struct mv_xor_desc_slot,
						slot_node);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"allocated %d descriptor slots last_used: %p\n",
		mv_chan->slots_allocated, mv_chan->last_used);

	return mv_chan->slots_allocated ? : -ENOMEM;
}

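/*
 * Note: slots are not allocated individually above. Each channel owns
 * a single MV_XOR_POOL_SIZE coherent pool, and slot idx gets the
 * hardware descriptor at idx * MV_XOR_SLOT_SIZE within it, with
 * slot->async_tx.phys holding the matching DMA address used for
 * hardware chaining.
 */
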
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		    unsigned int src_cnt, size_t len, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *sw_desc;

	if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
		return NULL;

	BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);

	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s src_cnt: %d len: %u dest %pad flags: %ld\n",
		__func__, src_cnt, len, &dest, flags);

	spin_lock_bh(&mv_chan->lock);
	sw_desc = mv_xor_alloc_slot(mv_chan);
	if (sw_desc) {
		sw_desc->type = DMA_XOR;
		sw_desc->async_tx.flags = flags;
		mv_desc_init(sw_desc, dest, len, flags);
		while (src_cnt--)
			mv_desc_set_src_addr(sw_desc, src_cnt, src[src_cnt]);
	}
	spin_unlock_bh(&mv_chan->lock);
	dev_dbg(mv_chan_to_devp(mv_chan),
		"%s sw_desc %p async_tx %p\n",
		__func__, sw_desc, &sw_desc->async_tx);
	return sw_desc ? &sw_desc->async_tx : NULL;
}

static struct dma_async_tx_descriptor *
mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	/*
	 * A MEMCPY operation is identical to an XOR operation with only
	 * a single source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

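/*
 * For illustration only: a minimal sketch of how a generic dmaengine
 * client could drive a copy through this prep routine. This is not
 * part of the driver; buf_src, buf_dst and len are hypothetical, and
 * error handling is omitted.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_addr_t src_dma, dst_dma;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *
 *	src_dma = dma_map_single(chan->device->dev, buf_src, len,
 *				 DMA_TO_DEVICE);
 *	dst_dma = dma_map_single(chan->device->dev, buf_dst, len,
 *				 DMA_FROM_DEVICE);
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);
 *	dma_release_channel(chan);
 */
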
static struct dma_async_tx_descriptor *
mv_xor_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	dma_addr_t src, dest;
	size_t len;

	src = mv_chan->dummy_src_addr;
	dest = mv_chan->dummy_dst_addr;
	len = MV_XOR_MIN_BYTE_COUNT;

	/*
	 * We implement the DMA_INTERRUPT operation as a minimum sized
	 * XOR operation with a single dummy source address.
	 */
	return mv_xor_prep_dma_xor(chan, dest, &src, 1, len, flags);
}

static void mv_xor_free_chan_resources(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	struct mv_xor_desc_slot *iter, *_iter;
	int in_use_descs = 0;

	spin_lock_bh(&mv_chan->lock);

	mv_xor_slot_cleanup(mv_chan);

	list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
				 chain_node) {
		in_use_descs++;
		list_del(&iter->chain_node);
	}
	list_for_each_entry_safe(iter, _iter, &mv_chan->completed_slots,
				 completed_node) {
		in_use_descs++;
		list_del(&iter->completed_node);
	}
	list_for_each_entry_safe_reverse(
		iter, _iter, &mv_chan->all_slots, slot_node) {
		list_del(&iter->slot_node);
		kfree(iter);
		mv_chan->slots_allocated--;
	}
	mv_chan->last_used = NULL;

	dev_dbg(mv_chan_to_devp(mv_chan), "%s slots_allocated %d\n",
		__func__, mv_chan->slots_allocated);
	spin_unlock_bh(&mv_chan->lock);

	if (in_use_descs)
		dev_err(mv_chan_to_devp(mv_chan),
			"freeing %d in use descriptors!\n", in_use_descs);
}

/**
 * mv_xor_status - poll the status of an XOR transaction
 * @chan: XOR channel handle
 * @cookie: XOR transaction identifier
 * @txstate: XOR transaction state holder (or NULL)
 */
static enum dma_status mv_xor_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_bh(&mv_chan->lock);
	mv_xor_slot_cleanup(mv_chan);
	spin_unlock_bh(&mv_chan->lock);

	return dma_cookie_status(chan, cookie, txstate);
}

static void mv_dump_xor_regs(struct mv_xor_chan *chan)
{
	u32 val;

	val = readl_relaxed(XOR_CONFIG(chan));
	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);

	val = readl_relaxed(XOR_ACTIVATION(chan));
	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);

	val = readl_relaxed(XOR_INTR_MASK(chan));
	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);

	val = readl_relaxed(XOR_ERROR_ADDR(chan));
	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
}

static void mv_xor_err_interrupt_handler(struct mv_xor_chan *chan,
					 u32 intr_cause)
{
	if (intr_cause & XOR_INT_ERR_DECODE) {
		dev_dbg(mv_chan_to_devp(chan), "ignoring address decode error\n");
		return;
	}

	dev_err(mv_chan_to_devp(chan), "error on chan %d. intr cause 0x%08x\n",
		chan->idx, intr_cause);

	mv_dump_xor_regs(chan);
	WARN_ON(1);
}

static irqreturn_t mv_xor_interrupt_handler(int irq, void *data)
{
	struct mv_xor_chan *chan = data;
	u32 intr_cause = mv_chan_get_intr_cause(chan);

	dev_dbg(mv_chan_to_devp(chan), "intr cause %x\n", intr_cause);

	if (intr_cause & XOR_INTR_ERRORS)
		mv_xor_err_interrupt_handler(chan, intr_cause);

	tasklet_schedule(&chan->irq_tasklet);

	mv_xor_device_clear_eoc_cause(chan);

	return IRQ_HANDLED;
}

static void mv_xor_issue_pending(struct dma_chan *chan)
{
	struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);

	if (mv_chan->pending >= MV_XOR_THRESHOLD) {
		mv_chan->pending = 0;
		mv_chan_activate(mv_chan);
	}
}

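/*
 * Note: MV_XOR_THRESHOLD is defined as 1 in mv_xor.h at the time of
 * writing, which makes the check above effectively unconditional; the
 * pending counter nevertheless allows several submissions to be
 * batched behind a single channel activation should the threshold
 * ever be raised.
 */
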
/*
 * Perform a transaction to verify the HW works.
 */

static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
	int i, ret;
	void *src, *dest;
	dma_addr_t src_dma, dest_dma;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	int err = 0;

	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;

	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < PAGE_SIZE; i++)
		((u8 *) src)[i] = (u8)i;

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
			       PAGE_SIZE, DMA_TO_DEVICE);
	unmap->addr[0] = src_dma;

	ret = dma_mapping_error(dma_chan->device->dev, src_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->to_cnt = 1;

	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
				PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->addr[1] = dest_dma;

	ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
				    PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(1);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	if (memcmp(src, dest, PAGE_SIZE)) {
		dev_err(dma_chan->device->dev,
			"Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

#define MV_XOR_NUM_SRC_TEST 4 /* must be <= 15 */
static int
mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
{
	int i, src_idx, ret;
	struct page *dest;
	struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
	dma_addr_t dest_dma;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	struct dma_chan *dma_chan;
	dma_cookie_t cookie;
	u8 cmp_byte = 0;
	u32 cmp_word;
	int err = 0;
	int src_count = MV_XOR_NUM_SRC_TEST;

	for (src_idx = 0; src_idx < src_count; src_idx++) {
		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
		if (!xor_srcs[src_idx]) {
			while (src_idx--)
				__free_page(xor_srcs[src_idx]);
			return -ENOMEM;
		}
	}

	dest = alloc_page(GFP_KERNEL);
	if (!dest) {
		while (src_idx--)
			__free_page(xor_srcs[src_idx]);
		return -ENOMEM;
	}

	/* Fill in src buffers */
	for (src_idx = 0; src_idx < src_count; src_idx++) {
		u8 *ptr = page_address(xor_srcs[src_idx]);
		for (i = 0; i < PAGE_SIZE; i++)
			ptr[i] = (1 << src_idx);
	}

	for (src_idx = 0; src_idx < src_count; src_idx++)
		cmp_byte ^= (u8) (1 << src_idx);

	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
		(cmp_byte << 8) | cmp_byte;

	memset(page_address(dest), 0, PAGE_SIZE);

	dma_chan = &mv_chan->dmachan;
	if (mv_xor_alloc_chan_resources(dma_chan) < 1) {
		err = -ENODEV;
		goto out;
	}

	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
					 GFP_KERNEL);
	if (!unmap) {
		err = -ENOMEM;
		goto free_resources;
	}

	/* test xor */
	for (i = 0; i < src_count; i++) {
		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
					      0, PAGE_SIZE, DMA_TO_DEVICE);
		dma_srcs[i] = unmap->addr[i];
		ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
		if (ret) {
			err = -ENOMEM;
			goto free_resources;
		}
		unmap->to_cnt++;
	}

	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
					      DMA_FROM_DEVICE);
	dest_dma = unmap->addr[src_count];
	ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
	if (ret) {
		err = -ENOMEM;
		goto free_resources;
	}
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
				 src_count, PAGE_SIZE, 0);
	if (!tx) {
		dev_err(dma_chan->device->dev,
			"Self-test cannot prepare operation, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	cookie = mv_xor_tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dev_err(dma_chan->device->dev,
			"Self-test submit error, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	mv_xor_issue_pending(dma_chan);
	async_tx_ack(tx);
	msleep(8);

	if (mv_xor_status(dma_chan, cookie, NULL) !=
	    DMA_COMPLETE) {
		dev_err(dma_chan->device->dev,
			"Self-test xor timed out, disabling\n");
		err = -ENODEV;
		goto free_resources;
	}

	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
				PAGE_SIZE, DMA_FROM_DEVICE);
	for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) {
		u32 *ptr = page_address(dest);
		if (ptr[i] != cmp_word) {
			dev_err(dma_chan->device->dev,
				"Self-test xor failed compare, disabling. index %d, data %x, expected %x\n",
				i, ptr[i], cmp_word);
			err = -ENODEV;
			goto free_resources;
		}
	}

free_resources:
	dmaengine_unmap_put(unmap);
	mv_xor_free_chan_resources(dma_chan);
out:
	src_idx = src_count;
	while (src_idx--)
		__free_page(xor_srcs[src_idx]);
	__free_page(dest);
	return err;
}

static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan)
{
	struct dma_chan *chan, *_chan;
	struct device *dev = mv_chan->dmadev.dev;

	dma_async_device_unregister(&mv_chan->dmadev);

	dma_free_coherent(dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	dma_unmap_single(dev, mv_chan->dummy_src_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mv_chan->dummy_dst_addr,
			 MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	list_for_each_entry_safe(chan, _chan, &mv_chan->dmadev.channels,
				 device_node) {
		list_del(&chan->device_node);
	}

	free_irq(mv_chan->irq, mv_chan);

	return 0;
}

static struct mv_xor_chan *
mv_xor_channel_add(struct mv_xor_device *xordev,
		   struct platform_device *pdev,
		   int idx, dma_cap_mask_t cap_mask, int irq)
{
	int ret = 0;
	struct mv_xor_chan *mv_chan;
	struct dma_device *dma_dev;

	mv_chan = devm_kzalloc(&pdev->dev, sizeof(*mv_chan), GFP_KERNEL);
	if (!mv_chan)
		return ERR_PTR(-ENOMEM);

	mv_chan->idx = idx;
	mv_chan->irq = irq;

	dma_dev = &mv_chan->dmadev;

	/*
	 * These source and destination dummy buffers are used to implement
	 * a DMA_INTERRUPT operation as a minimum-sized XOR operation.
	 * Hence, we only need to map the buffers at initialization-time.
	 */
	mv_chan->dummy_src_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_src, MV_XOR_MIN_BYTE_COUNT, DMA_FROM_DEVICE);
	mv_chan->dummy_dst_addr = dma_map_single(dma_dev->dev,
		mv_chan->dummy_dst, MV_XOR_MIN_BYTE_COUNT, DMA_TO_DEVICE);

	/* allocate coherent memory for hardware descriptors
	 * note: writecombine gives slightly better performance, but
	 * requires that we explicitly flush the writes
	 */
	mv_chan->dma_desc_pool_virt =
	  dma_alloc_writecombine(&pdev->dev, MV_XOR_POOL_SIZE,
				 &mv_chan->dma_desc_pool, GFP_KERNEL);
	if (!mv_chan->dma_desc_pool_virt)
		return ERR_PTR(-ENOMEM);

	/* discover transaction capabilities from the platform data */
	dma_dev->cap_mask = cap_mask;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* set base routines */
	dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources;
	dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
	dma_dev->device_tx_status = mv_xor_status;
	dma_dev->device_issue_pending = mv_xor_issue_pending;
	dma_dev->dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = mv_xor_prep_dma_interrupt;
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = mv_xor_prep_dma_memcpy;
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->max_xor = 8;
		dma_dev->device_prep_dma_xor = mv_xor_prep_dma_xor;
	}

	mv_chan->mmr_base = xordev->xor_base;
	mv_chan->mmr_high_base = xordev->xor_high_base;
	tasklet_init(&mv_chan->irq_tasklet, mv_xor_tasklet, (unsigned long)
		     mv_chan);

	/* clear errors before enabling interrupts */
	mv_xor_device_clear_err_status(mv_chan);

	ret = request_irq(mv_chan->irq, mv_xor_interrupt_handler,
			  0, dev_name(&pdev->dev), mv_chan);
	if (ret)
		goto err_free_dma;

	mv_chan_unmask_interrupts(mv_chan);

	mv_set_mode(mv_chan, DMA_XOR);

	spin_lock_init(&mv_chan->lock);
	INIT_LIST_HEAD(&mv_chan->chain);
	INIT_LIST_HEAD(&mv_chan->completed_slots);
	INIT_LIST_HEAD(&mv_chan->all_slots);
	mv_chan->dmachan.device = dma_dev;
	dma_cookie_init(&mv_chan->dmachan);

	list_add_tail(&mv_chan->dmachan.device_node, &dma_dev->channels);

	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
		ret = mv_xor_memcpy_self_test(mv_chan);
		dev_dbg(&pdev->dev, "memcpy self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		ret = mv_xor_xor_self_test(mv_chan);
		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
		if (ret)
			goto err_free_irq;
	}

	dev_info(&pdev->dev, "Marvell XOR: ( %s%s%s)\n",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : "");

	dma_async_device_register(dma_dev);
	return mv_chan;

err_free_irq:
	free_irq(mv_chan->irq, mv_chan);
err_free_dma:
	dma_free_coherent(&pdev->dev, MV_XOR_POOL_SIZE,
			  mv_chan->dma_desc_pool_virt, mv_chan->dma_desc_pool);
	return ERR_PTR(ret);
}

static void
mv_xor_conf_mbus_windows(struct mv_xor_device *xordev,
			 const struct mbus_dram_target_info *dram)
{
	void __iomem *base = xordev->xor_high_base;
	u32 win_enable = 0;
	int i;

	for (i = 0; i < 8; i++) {
		writel(0, base + WINDOW_BASE(i));
		writel(0, base + WINDOW_SIZE(i));
		if (i < 4)
			writel(0, base + WINDOW_REMAP_HIGH(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel((cs->base & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       dram->mbus_dram_target_id, base + WINDOW_BASE(i));
		writel((cs->size - 1) & 0xffff0000, base + WINDOW_SIZE(i));

		win_enable |= (1 << i);
		win_enable |= 3 << (16 + (2 * i));
	}

	writel(win_enable, base + WINDOW_BAR_ENABLE(0));
	writel(win_enable, base + WINDOW_BAR_ENABLE(1));
	writel(0, base + WINDOW_OVERRIDE_CTRL(0));
	writel(0, base + WINDOW_OVERRIDE_CTRL(1));
}

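/*
 * Worked example for the window programming above, assuming a single
 * DRAM chip-select of 512 MB at physical address 0: window 0's base
 * register receives the target ID and attribute of that chip-select,
 * its size register receives 0x1fff0000 (size - 1, truncated to 64 KB
 * granularity), and win_enable ends up as 0x00030001: bit 0 enables
 * window 0 and bits [17:16] grant it full read/write access.
 */
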
static int mv_xor_probe(struct platform_device *pdev)
{
	const struct mbus_dram_target_info *dram;
	struct mv_xor_device *xordev;
	struct mv_xor_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct resource *res;
	int i, ret;

	dev_notice(&pdev->dev, "Marvell shared XOR driver\n");

	xordev = devm_kzalloc(&pdev->dev, sizeof(*xordev), GFP_KERNEL);
	if (!xordev)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	xordev->xor_base = devm_ioremap(&pdev->dev, res->start,
					resource_size(res));
	if (!xordev->xor_base)
		return -EBUSY;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res)
		return -ENODEV;

	xordev->xor_high_base = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
	if (!xordev->xor_high_base)
		return -EBUSY;

	platform_set_drvdata(pdev, xordev);

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_xor_conf_mbus_windows(xordev, dram);

	/* Not all platforms can gate the clock, so it is not
	 * an error if the clock does not exist.
	 */
	xordev->clk = clk_get(&pdev->dev, NULL);
	if (!IS_ERR(xordev->clk))
		clk_prepare_enable(xordev->clk);

	if (pdev->dev.of_node) {
		struct device_node *np;
		int i = 0;

		for_each_child_of_node(pdev->dev.of_node, np) {
			struct mv_xor_chan *chan;
			dma_cap_mask_t cap_mask;
			int irq;

			dma_cap_zero(cap_mask);
			if (of_property_read_bool(np, "dmacap,memcpy"))
				dma_cap_set(DMA_MEMCPY, cap_mask);
			if (of_property_read_bool(np, "dmacap,xor"))
				dma_cap_set(DMA_XOR, cap_mask);
			if (of_property_read_bool(np, "dmacap,interrupt"))
				dma_cap_set(DMA_INTERRUPT, cap_mask);

			irq = irq_of_parse_and_map(np, 0);
			if (!irq) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				irq_dispose_mapping(irq);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
			i++;
		}
	} else if (pdata && pdata->channels) {
		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
			struct mv_xor_channel_data *cd;
			struct mv_xor_chan *chan;
			int irq;

			cd = &pdata->channels[i];
			if (!cd) {
				ret = -ENODEV;
				goto err_channel_add;
			}

			irq = platform_get_irq(pdev, i);
			if (irq < 0) {
				ret = irq;
				goto err_channel_add;
			}

			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
			if (IS_ERR(chan)) {
				ret = PTR_ERR(chan);
				goto err_channel_add;
			}

			xordev->channels[i] = chan;
		}
	}

	return 0;

err_channel_add:
	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++)
		if (xordev->channels[i]) {
			mv_xor_channel_remove(xordev->channels[i]);
			if (pdev->dev.of_node)
				irq_dispose_mapping(xordev->channels[i]->irq);
		}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return ret;
}

static int mv_xor_remove(struct platform_device *pdev)
{
	struct mv_xor_device *xordev = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
		if (xordev->channels[i])
			mv_xor_channel_remove(xordev->channels[i]);
	}

	if (!IS_ERR(xordev->clk)) {
		clk_disable_unprepare(xordev->clk);
		clk_put(xordev->clk);
	}

	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id mv_xor_dt_ids[] = {
	{ .compatible = "marvell,orion-xor", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_xor_dt_ids);
#endif

static struct platform_driver mv_xor_driver = {
	.probe		= mv_xor_probe,
	.remove		= mv_xor_remove,
	.driver		= {
		.name		= MV_XOR_NAME,
		.of_match_table = of_match_ptr(mv_xor_dt_ids),
	},
};


static int __init mv_xor_init(void)
{
	return platform_driver_register(&mv_xor_driver);
}
module_init(mv_xor_init);

/* it's currently unsafe to unload this module */
#if 0
static void __exit mv_xor_exit(void)
{
	platform_driver_unregister(&mv_xor_driver);
	return;
}

module_exit(mv_xor_exit);
#endif

MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>");
MODULE_DESCRIPTION("DMA engine driver for Marvell's XOR engine");
MODULE_LICENSE("GPL");