/*
 * Filename: dma.c
 *
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include "rsxx_priv.h"

struct rsxx_dma {
	struct list_head	list;
	u8			cmd;
	unsigned int		laddr;	/* Logical address */
	struct {
		u32		off;
		u32		cnt;
	} sub_page;
	dma_addr_t		dma_addr;
	struct page		*page;
	unsigned int		pg_off;	/* Page Offset */
	rsxx_dma_cb		cb;
	void			*cb_data;
};

/* This timeout is used to detect a stalled DMA channel */
#define DMA_ACTIVITY_TIMEOUT	msecs_to_jiffies(10000)

struct hw_status {
	u8	status;
	u8	tag;
	__le16	count;
	__le32	_rsvd2;
	__le64	_rsvd3;
} __packed;

enum rsxx_dma_status {
	DMA_SW_ERR	= 0x1,
	DMA_HW_FAULT	= 0x2,
	DMA_CANCELLED	= 0x4,
};

struct hw_cmd {
	u8	command;
	u8	tag;
	u8	_rsvd;
	u8	sub_page; /* Bit[0:2]: 512byte offset */
			  /* Bit[4:6]: 512byte count */
	__le32	device_addr;
	__le64	host_addr;
} __packed;
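
/*
 * Example encoding (illustrative, assuming RSXX_HW_BLK_SIZE is 4 KiB): a
 * 1024-byte transfer starting 1024 bytes into a hardware block is queued
 * with sub_page.off = 2 and sub_page.cnt = 2 (both in 512-byte units),
 * which the issue path packs into the sub_page field as (2 << 4) | 2 = 0x22.
 */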

enum rsxx_hw_cmd {
	HW_CMD_BLK_DISCARD	= 0x70,
	HW_CMD_BLK_WRITE	= 0x80,
	HW_CMD_BLK_READ		= 0xC0,
	HW_CMD_BLK_RECON_READ	= 0xE0,
};

enum rsxx_hw_status {
	HW_STATUS_CRC		= 0x01,
	HW_STATUS_HARD_ERR	= 0x02,
	HW_STATUS_SOFT_ERR	= 0x04,
	HW_STATUS_FAULT		= 0x08,
};

static struct kmem_cache *rsxx_dma_pool;

struct dma_tracker {
	int			next_tag;
	struct rsxx_dma		*dma;
};

#define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \
		(sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS))

struct dma_tracker_list {
	spinlock_t		lock;
	int			head;
	struct dma_tracker	list[0];
};


/*----------------- Misc Utility Functions -------------------*/
static unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
{
	unsigned long long tgt_addr8;

	tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) &
		      card->_stripe.upper_mask) |
		    ((addr8) & card->_stripe.lower_mask);
	do_div(tgt_addr8, RSXX_HW_BLK_SIZE);
	return tgt_addr8;
}

static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
{
	unsigned int tgt;

	tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;

	return tgt;
}
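
/*
 * Worked example (illustrative values): with four targets and a 4096-byte
 * stripe, rsxx_dma_stripe_setup() later in this file yields target_shift =
 * 12, target_mask = 0x3, upper_shift = 2 and lower_mask = 0xfff. For
 * addr8 = 0x6200 the target is (0x6200 >> 12) & 0x3 = 2, and the folded
 * address is ((0x6200 >> 2) & ~0xfff) | (0x6200 & 0xfff) = 0x1200, giving
 * laddr = 0x1200 / RSXX_HW_BLK_SIZE = 1 for a 4 KiB hardware block.
 */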

void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
{
	/* Reset all DMA Command/Status Queues */
	iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
}

static unsigned int get_dma_size(struct rsxx_dma *dma)
{
	if (dma->sub_page.cnt)
		return dma->sub_page.cnt << 9;
	else
		return RSXX_HW_BLK_SIZE;
}


/*----------------- DMA Tracker -------------------*/
static void set_tracker_dma(struct dma_tracker_list *trackers,
			    int tag,
			    struct rsxx_dma *dma)
{
	trackers->list[tag].dma = dma;
}

static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
					int tag)
{
	return trackers->list[tag].dma;
}

static int pop_tracker(struct dma_tracker_list *trackers)
{
	int tag;

	spin_lock(&trackers->lock);
	tag = trackers->head;
	if (tag != -1) {
		trackers->head = trackers->list[tag].next_tag;
		trackers->list[tag].next_tag = -1;
	}
	spin_unlock(&trackers->lock);

	return tag;
}

static void push_tracker(struct dma_tracker_list *trackers, int tag)
{
	spin_lock(&trackers->lock);
	trackers->list[tag].next_tag = trackers->head;
	trackers->head = tag;
	trackers->list[tag].dma = NULL;
	spin_unlock(&trackers->lock);
}
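
/*
 * The trackers form a simple singly linked free list of command tags:
 * 'head' indexes the next free tag, each free entry's next_tag points to
 * the one after it, and -1 marks both the end of the list and an in-use
 * entry. pop_tracker() returns -1 when all tags are outstanding, which
 * throttles the issue path until completions push tags back.
 */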

/*----------------- Interrupt Coalescing -------------*/
/*
 * Interrupt Coalescing Register Format:
 *	Interrupt Timer (64ns units) [15:0]
 *	Interrupt Count [24:16]
 *	Reserved [31:25]
 */
#define INTR_COAL_LATENCY_MASK       (0x0000ffff)

#define INTR_COAL_COUNT_SHIFT        16
#define INTR_COAL_COUNT_BITS         9
#define INTR_COAL_COUNT_MASK         (((1 << INTR_COAL_COUNT_BITS) - 1) << \
					INTR_COAL_COUNT_SHIFT)
#define INTR_COAL_LATENCY_UNITS_NS   64


static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency)
{
	u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS;

	if (mode == RSXX_INTR_COAL_DISABLED)
		return 0;

	return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
			(latency_units & INTR_COAL_LATENCY_MASK);
}
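
/*
 * Example (illustrative numbers): count = 16 and latency = 64000 ns encode
 * as (16 << 16) | (64000 / 64) = 0x001003e8, i.e. a 16-completion count
 * threshold with a 1000-unit (64 us) interrupt timer.
 */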

static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
{
	int i;
	u32 q_depth = 0;
	u32 intr_coal;

	if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
	    unlikely(card->eeh_state))
		return;

	for (i = 0; i < card->n_targets; i++)
		q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);

	intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
				      q_depth / 2,
				      card->config.data.intr_coal.latency);
	iowrite32(intr_coal, card->regmap + INTR_COAL);
}

/*----------------- RSXX DMA Handling -------------------*/
static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
{
	if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
		pci_unmap_page(ctrl->card->dev, dma->dma_addr,
			       get_dma_size(dma),
			       dma->cmd == HW_CMD_BLK_WRITE ?
			       PCI_DMA_TODEVICE :
			       PCI_DMA_FROMDEVICE);
	}

	kmem_cache_free(rsxx_dma_pool, dma);
}

static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
			      struct rsxx_dma *dma,
			      unsigned int status)
{
	if (status & DMA_SW_ERR)
		ctrl->stats.dma_sw_err++;
	if (status & DMA_HW_FAULT)
		ctrl->stats.dma_hw_fault++;
	if (status & DMA_CANCELLED)
		ctrl->stats.dma_cancelled++;

	if (dma->cb)
		dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);

	rsxx_free_dma(ctrl, dma);
}

int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
			   struct list_head *q, unsigned int done)
{
	struct rsxx_dma *dma;
	struct rsxx_dma *tmp;
	int cnt = 0;

	list_for_each_entry_safe(dma, tmp, q, list) {
		list_del(&dma->list);
		if (done & COMPLETE_DMA)
			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
		else
			rsxx_free_dma(ctrl, dma);
		cnt++;
	}

	return cnt;
}

static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
			     struct rsxx_dma *dma)
{
	/*
	 * Requeued DMAs go to the front of the queue so they are issued
	 * first.
	 */
	spin_lock_bh(&ctrl->queue_lock);
	ctrl->stats.sw_q_depth++;
	list_add(&dma->list, &ctrl->queue);
	spin_unlock_bh(&ctrl->queue_lock);
}

static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
				  struct rsxx_dma *dma,
				  u8 hw_st)
{
	unsigned int status = 0;
	int requeue_cmd = 0;

	dev_dbg(CARD_TO_DEV(ctrl->card),
		"Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n",
		dma->cmd, dma->laddr, hw_st);

	if (hw_st & HW_STATUS_CRC)
		ctrl->stats.crc_errors++;
	if (hw_st & HW_STATUS_HARD_ERR)
		ctrl->stats.hard_errors++;
	if (hw_st & HW_STATUS_SOFT_ERR)
		ctrl->stats.soft_errors++;

	switch (dma->cmd) {
	case HW_CMD_BLK_READ:
		if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
			if (ctrl->card->scrub_hard) {
				dma->cmd = HW_CMD_BLK_RECON_READ;
				requeue_cmd = 1;
				ctrl->stats.reads_retried++;
			} else {
				status |= DMA_HW_FAULT;
				ctrl->stats.reads_failed++;
			}
		} else if (hw_st & HW_STATUS_FAULT) {
			status |= DMA_HW_FAULT;
			ctrl->stats.reads_failed++;
		}

		break;
	case HW_CMD_BLK_RECON_READ:
		if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
			/* Data could not be reconstructed. */
			status |= DMA_HW_FAULT;
			ctrl->stats.reads_failed++;
		}

		break;
	case HW_CMD_BLK_WRITE:
		status |= DMA_HW_FAULT;
		ctrl->stats.writes_failed++;

		break;
	case HW_CMD_BLK_DISCARD:
		status |= DMA_HW_FAULT;
		ctrl->stats.discards_failed++;

		break;
	default:
		dev_err(CARD_TO_DEV(ctrl->card),
			"Unknown command in DMA!(cmd: x%02x laddr x%08x st: x%02x)\n",
			dma->cmd, dma->laddr, hw_st);
		status |= DMA_SW_ERR;

		break;
	}

	if (requeue_cmd)
		rsxx_requeue_dma(ctrl, dma);
	else
		rsxx_complete_dma(ctrl, dma, status);
}

static void dma_engine_stalled(unsigned long data)
{
	struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
	int cnt;

	if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
	    unlikely(ctrl->card->eeh_state))
		return;

	if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
		/*
		 * The dma engine was stalled because the SW_CMD_IDX write
		 * was lost. Issue it again to recover.
		 */
		dev_warn(CARD_TO_DEV(ctrl->card),
			"SW_CMD_IDX write was lost, re-writing...\n");
		iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
		mod_timer(&ctrl->activity_timer,
			  jiffies + DMA_ACTIVITY_TIMEOUT);
	} else {
		dev_warn(CARD_TO_DEV(ctrl->card),
			"DMA channel %d has stalled, faulting interface.\n",
			ctrl->id);
		ctrl->card->dma_fault = 1;

		/* Clean up the DMA queue */
		spin_lock(&ctrl->queue_lock);
		cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
		spin_unlock(&ctrl->queue_lock);

		cnt += rsxx_dma_cancel(ctrl);

		if (cnt)
			dev_info(CARD_TO_DEV(ctrl->card),
				"Freed %d queued DMAs on channel %d\n",
				cnt, ctrl->id);
	}
}

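/*
 * Issue path: DMAs are pulled off the software queue one at a time, given a
 * tag from the tracker free list, and staged into the command ring at
 * cmd.idx. Once a batch has been staged, a single SW_CMD_IDX write tells
 * the hardware how far the ring has been filled.
 */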
static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
{
	struct rsxx_dma *dma;
	int tag;
	int cmds_pending = 0;
	struct hw_cmd *hw_cmd_buf;

	hw_cmd_buf = ctrl->cmd.buf;

	if (unlikely(ctrl->card->halt) ||
	    unlikely(ctrl->card->eeh_state))
		return;

	while (1) {
		spin_lock_bh(&ctrl->queue_lock);
		if (list_empty(&ctrl->queue)) {
			spin_unlock_bh(&ctrl->queue_lock);
			break;
		}
		spin_unlock_bh(&ctrl->queue_lock);

		tag = pop_tracker(ctrl->trackers);
		if (tag == -1)
			break;

		spin_lock_bh(&ctrl->queue_lock);
		dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
		list_del(&dma->list);
		ctrl->stats.sw_q_depth--;
		spin_unlock_bh(&ctrl->queue_lock);

		/*
		 * This will catch any DMAs that slipped in right before the
		 * fault, but were queued after all the other DMAs were
		 * cancelled.
		 */
		if (unlikely(ctrl->card->dma_fault)) {
			push_tracker(ctrl->trackers, tag);
			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
			continue;
		}

		set_tracker_dma(ctrl->trackers, tag, dma);
		hw_cmd_buf[ctrl->cmd.idx].command  = dma->cmd;
		hw_cmd_buf[ctrl->cmd.idx].tag      = tag;
		hw_cmd_buf[ctrl->cmd.idx]._rsvd    = 0;
		hw_cmd_buf[ctrl->cmd.idx].sub_page =
					((dma->sub_page.cnt & 0x7) << 4) |
					 (dma->sub_page.off & 0x7);

		hw_cmd_buf[ctrl->cmd.idx].device_addr =
					cpu_to_le32(dma->laddr);

		hw_cmd_buf[ctrl->cmd.idx].host_addr =
					cpu_to_le64(dma->dma_addr);

		dev_dbg(CARD_TO_DEV(ctrl->card),
			"Issue DMA%d(laddr %d tag %d) to idx %d\n",
			ctrl->id, dma->laddr, tag, ctrl->cmd.idx);

		ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
		cmds_pending++;

		if (dma->cmd == HW_CMD_BLK_WRITE)
			ctrl->stats.writes_issued++;
		else if (dma->cmd == HW_CMD_BLK_DISCARD)
			ctrl->stats.discards_issued++;
		else
			ctrl->stats.reads_issued++;
	}

	/* Let HW know we've queued commands. */
	if (cmds_pending) {
		atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
		mod_timer(&ctrl->activity_timer,
			  jiffies + DMA_ACTIVITY_TIMEOUT);

		if (unlikely(ctrl->card->eeh_state)) {
			del_timer_sync(&ctrl->activity_timer);
			return;
		}

		iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
	}
}

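/*
 * Completion path: the hardware writes hw_status entries in order, bumping
 * the 16-bit count field by one per entry. The loop below consumes entries
 * for as long as that count matches e_cnt, the driver's running event
 * count, so a stale (not yet rewritten) status entry terminates the scan.
 */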
static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
{
	struct rsxx_dma *dma;
	unsigned long flags;
	u16 count;
	u8 status;
	u8 tag;
	struct hw_status *hw_st_buf;

	hw_st_buf = ctrl->status.buf;

	if (unlikely(ctrl->card->halt) ||
	    unlikely(ctrl->card->dma_fault) ||
	    unlikely(ctrl->card->eeh_state))
		return;

	count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);

	while (count == ctrl->e_cnt) {
		/*
		 * The read memory-barrier is necessary to keep aggressive
		 * processors/optimizers (such as the PPC Apple G5) from
		 * reordering the following status-buffer tag & status read
		 * *before* the count read on subsequent iterations of the
		 * loop!
		 */
		rmb();

		status = hw_st_buf[ctrl->status.idx].status;
		tag    = hw_st_buf[ctrl->status.idx].tag;

		dma = get_tracker_dma(ctrl->trackers, tag);
		if (dma == NULL) {
			spin_lock_irqsave(&ctrl->card->irq_lock, flags);
			rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
			spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

			dev_err(CARD_TO_DEV(ctrl->card),
				"No tracker for tag %d (idx %d id %d)\n",
				tag, ctrl->status.idx, ctrl->id);
			return;
		}

		dev_dbg(CARD_TO_DEV(ctrl->card),
			"Completing DMA%d(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n",
			ctrl->id, dma->laddr, tag, status, count,
			ctrl->status.idx);

		atomic_dec(&ctrl->stats.hw_q_depth);

		mod_timer(&ctrl->activity_timer,
			  jiffies + DMA_ACTIVITY_TIMEOUT);

		if (status)
			rsxx_handle_dma_error(ctrl, dma, status);
		else
			rsxx_complete_dma(ctrl, dma, 0);

		push_tracker(ctrl->trackers, tag);

		ctrl->status.idx = (ctrl->status.idx + 1) &
				   RSXX_CS_IDX_MASK;
		ctrl->e_cnt++;

		count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
	}

	dma_intr_coal_auto_tune(ctrl->card);

	if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
		del_timer_sync(&ctrl->activity_timer);

	spin_lock_irqsave(&ctrl->card->irq_lock, flags);
	rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
	spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

	spin_lock_bh(&ctrl->queue_lock);
	if (ctrl->stats.sw_q_depth)
		queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
	spin_unlock_bh(&ctrl->queue_lock);
}

static void rsxx_schedule_issue(struct work_struct *work)
{
	struct rsxx_dma_ctrl *ctrl;

	ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);

	mutex_lock(&ctrl->work_lock);
	rsxx_issue_dmas(ctrl);
	mutex_unlock(&ctrl->work_lock);
}

static void rsxx_schedule_done(struct work_struct *work)
{
	struct rsxx_dma_ctrl *ctrl;

	ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);

	mutex_lock(&ctrl->work_lock);
	rsxx_dma_done(ctrl);
	mutex_unlock(&ctrl->work_lock);
}

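/*
 * A discard carries no data payload: dma_addr is 0, page is NULL and
 * sub_page is zeroed, so only the logical address travels to the hardware.
 */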
static int rsxx_queue_discard(struct rsxx_cardinfo *card,
			      struct list_head *q,
			      unsigned int laddr,
			      rsxx_dma_cb cb,
			      void *cb_data)
{
	struct rsxx_dma *dma;

	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->cmd          = HW_CMD_BLK_DISCARD;
	dma->laddr        = laddr;
	dma->dma_addr     = 0;
	dma->sub_page.off = 0;
	dma->sub_page.cnt = 0;
	dma->page         = NULL;
	dma->pg_off       = 0;
	dma->cb           = cb;
	dma->cb_data      = cb_data;

	dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);

	list_add_tail(&dma->list, q);

	return 0;
}

static int rsxx_queue_dma(struct rsxx_cardinfo *card,
			  struct list_head *q,
			  int dir,
			  unsigned int dma_off,
			  unsigned int dma_len,
			  unsigned int laddr,
			  struct page *page,
			  unsigned int pg_off,
			  rsxx_dma_cb cb,
			  void *cb_data)
{
	struct rsxx_dma *dma;

	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
				     dir ? PCI_DMA_TODEVICE :
				     PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(card->dev, dma->dma_addr)) {
		kmem_cache_free(rsxx_dma_pool, dma);
		return -ENOMEM;
	}

	dma->cmd          = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
	dma->laddr        = laddr;
	dma->sub_page.off = (dma_off >> 9);
	dma->sub_page.cnt = (dma_len >> 9);
	dma->page         = page;
	dma->pg_off       = pg_off;
	dma->cb           = cb;
	dma->cb_data      = cb_data;

	dev_dbg(CARD_TO_DEV(card),
		"Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n",
		dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
		dma->sub_page.cnt, dma->page, dma->pg_off);

	/* Queue the DMA */
	list_add_tail(&dma->list, q);

	return 0;
}

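/*
 * A bio is carved into per-target DMA lists here: each segment is walked in
 * chunks that never cross a hardware-block boundary (dma_len is clipped to
 * RSXX_HW_BLK_SIZE - dma_off), each chunk is routed to its striping target,
 * and the lists are then spliced onto the per-channel software queues under
 * the queue lock before the issue workers are kicked.
 */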
int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
		       struct bio *bio,
		       atomic_t *n_dmas,
		       rsxx_dma_cb cb,
		       void *cb_data)
{
	struct list_head dma_list[RSXX_MAX_TARGETS];
	struct bio_vec *bvec;
	unsigned long long addr8;
	unsigned int laddr;
	unsigned int bv_len;
	unsigned int bv_off;
	unsigned int dma_off;
	unsigned int dma_len;
	int dma_cnt[RSXX_MAX_TARGETS];
	int tgt;
	int st;
	int i;

	addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
	atomic_set(n_dmas, 0);

	for (i = 0; i < card->n_targets; i++) {
		INIT_LIST_HEAD(&dma_list[i]);
		dma_cnt[i] = 0;
	}

	if (bio->bi_rw & REQ_DISCARD) {
		bv_len = bio->bi_size;

		while (bv_len > 0) {
			tgt   = rsxx_get_dma_tgt(card, addr8);
			laddr = rsxx_addr8_to_laddr(addr8, card);

			st = rsxx_queue_discard(card, &dma_list[tgt], laddr,
						cb, cb_data);
			if (st)
				goto bvec_err;

			dma_cnt[tgt]++;
			atomic_inc(n_dmas);
			addr8  += RSXX_HW_BLK_SIZE;
			bv_len -= RSXX_HW_BLK_SIZE;
		}
	} else {
		bio_for_each_segment(bvec, bio, i) {
			bv_len = bvec->bv_len;
			bv_off = bvec->bv_offset;

			while (bv_len > 0) {
				tgt   = rsxx_get_dma_tgt(card, addr8);
				laddr = rsxx_addr8_to_laddr(addr8, card);
				dma_off = addr8 & RSXX_HW_BLK_MASK;
				dma_len = min(bv_len,
					      RSXX_HW_BLK_SIZE - dma_off);

				st = rsxx_queue_dma(card, &dma_list[tgt],
						    bio_data_dir(bio),
						    dma_off, dma_len,
						    laddr, bvec->bv_page,
						    bv_off, cb, cb_data);
				if (st)
					goto bvec_err;

				dma_cnt[tgt]++;
				atomic_inc(n_dmas);
				addr8  += dma_len;
				bv_off += dma_len;
				bv_len -= dma_len;
			}
		}
	}

	for (i = 0; i < card->n_targets; i++) {
		if (!list_empty(&dma_list[i])) {
			spin_lock_bh(&card->ctrl[i].queue_lock);
			card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
			list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
			spin_unlock_bh(&card->ctrl[i].queue_lock);

			queue_work(card->ctrl[i].issue_wq,
				   &card->ctrl[i].issue_dma_work);
		}
	}

	return 0;

bvec_err:
	for (i = 0; i < card->n_targets; i++)
		rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
				       FREE_DMA);

	return st;
}


/*----------------- DMA Engine Initialization & Setup -------------------*/
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
{
	ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
				&ctrl->status.dma_addr);
	ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
				&ctrl->cmd.dma_addr);
	if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
		return -ENOMEM;

	memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
	iowrite32(lower_32_bits(ctrl->status.dma_addr),
		  ctrl->regmap + SB_ADD_LO);
	iowrite32(upper_32_bits(ctrl->status.dma_addr),
		  ctrl->regmap + SB_ADD_HI);

	memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
	iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
	iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);

	ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
	if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
		dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
			 ctrl->status.idx);
		return -EINVAL;
	}
	iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
	iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);

	ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
	if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
		dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
			 ctrl->cmd.idx);
		return -EINVAL;
	}
	iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
	iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);

	return 0;
}

static int rsxx_dma_ctrl_init(struct pci_dev *dev,
			      struct rsxx_dma_ctrl *ctrl)
{
	int i;
	int st;

	memset(&ctrl->stats, 0, sizeof(ctrl->stats));

	ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
	if (!ctrl->trackers)
		return -ENOMEM;

	ctrl->trackers->head = 0;
	for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
		ctrl->trackers->list[i].next_tag = i + 1;
		ctrl->trackers->list[i].dma = NULL;
	}
	ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
	spin_lock_init(&ctrl->trackers->lock);

	spin_lock_init(&ctrl->queue_lock);
	mutex_init(&ctrl->work_lock);
	INIT_LIST_HEAD(&ctrl->queue);

	setup_timer(&ctrl->activity_timer, dma_engine_stalled,
		    (unsigned long)ctrl);

	ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
	if (!ctrl->issue_wq)
		return -ENOMEM;

	ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);
	if (!ctrl->done_wq)
		return -ENOMEM;

	INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
	INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);

	st = rsxx_hw_buffers_init(dev, ctrl);
	if (st)
		return st;

	return 0;
}

static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
				 unsigned int stripe_size8)
{
	if (!is_power_of_2(stripe_size8)) {
		dev_err(CARD_TO_DEV(card),
			"stripe_size is NOT a power of 2!\n");
		return -EINVAL;
	}

	card->_stripe.lower_mask = stripe_size8 - 1;

	card->_stripe.upper_mask  = ~(card->_stripe.lower_mask);
	card->_stripe.upper_shift = ffs(card->n_targets) - 1;

	card->_stripe.target_mask  = card->n_targets - 1;
	card->_stripe.target_shift = ffs(stripe_size8) - 1;

	dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask   = x%016llx\n",
		card->_stripe.lower_mask);
	dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift  = x%016llx\n",
		card->_stripe.upper_shift);
	dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask   = x%016llx\n",
		card->_stripe.upper_mask);
	dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask  = x%016llx\n",
		card->_stripe.target_mask);
	dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n",
		card->_stripe.target_shift);

	return 0;
}
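
/*
 * For example (illustrative values): stripe_size8 = 4096 and n_targets = 4
 * give lower_mask = 0xfff, upper_mask = ~0xfff, upper_shift = ffs(4) - 1 = 2,
 * target_mask = 0x3 and target_shift = ffs(4096) - 1 = 12, i.e. bits [13:12]
 * of a byte address pick the target and the rest fold into the local offset.
 */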

int rsxx_dma_configure(struct rsxx_cardinfo *card)
{
	u32 intr_coal;

	intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
				      card->config.data.intr_coal.count,
				      card->config.data.intr_coal.latency);
	iowrite32(intr_coal, card->regmap + INTR_COAL);

	return rsxx_dma_stripe_setup(card, card->config.data.stripe_size);
}

int rsxx_dma_setup(struct rsxx_cardinfo *card)
{
	unsigned long flags;
	int st;
	int i;

	dev_info(CARD_TO_DEV(card),
		"Initializing %d DMA targets\n",
		card->n_targets);

	/* Regmap is divided up into 4K chunks. One for each DMA channel */
	for (i = 0; i < card->n_targets; i++)
		card->ctrl[i].regmap = card->regmap + (i * 4096);

	card->dma_fault = 0;

	/* Reset the DMA queues */
	rsxx_dma_queue_reset(card);

	/************* Setup DMA Control *************/
	for (i = 0; i < card->n_targets; i++) {
		st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
		if (st)
			goto failed_dma_setup;

		card->ctrl[i].card = card;
		card->ctrl[i].id = i;
	}

	card->scrub_hard = 1;

	if (card->config_valid)
		rsxx_dma_configure(card);

	/* Enable the interrupts after all setup has completed. */
	for (i = 0; i < card->n_targets; i++) {
		spin_lock_irqsave(&card->irq_lock, flags);
		rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i));
		spin_unlock_irqrestore(&card->irq_lock, flags);
	}

	return 0;

failed_dma_setup:
	for (i = 0; i < card->n_targets; i++) {
		struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];

		if (ctrl->issue_wq) {
			destroy_workqueue(ctrl->issue_wq);
			ctrl->issue_wq = NULL;
		}

		if (ctrl->done_wq) {
			destroy_workqueue(ctrl->done_wq);
			ctrl->done_wq = NULL;
		}

		if (ctrl->trackers)
			vfree(ctrl->trackers);

		if (ctrl->status.buf)
			pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
					    ctrl->status.buf,
					    ctrl->status.dma_addr);
		if (ctrl->cmd.buf)
			pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
					    ctrl->cmd.buf, ctrl->cmd.dma_addr);
	}

	return st;
}

int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
{
	struct rsxx_dma *dma;
	int i;
	int cnt = 0;

	/* Clean up issued DMAs */
	for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
		dma = get_tracker_dma(ctrl->trackers, i);
		if (dma) {
			atomic_dec(&ctrl->stats.hw_q_depth);
			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
			push_tracker(ctrl->trackers, i);
			cnt++;
		}
	}

	return cnt;
}

void rsxx_dma_destroy(struct rsxx_cardinfo *card)
{
	struct rsxx_dma_ctrl *ctrl;
	int i;

	for (i = 0; i < card->n_targets; i++) {
		ctrl = &card->ctrl[i];

		if (ctrl->issue_wq) {
			destroy_workqueue(ctrl->issue_wq);
			ctrl->issue_wq = NULL;
		}

		if (ctrl->done_wq) {
			destroy_workqueue(ctrl->done_wq);
			ctrl->done_wq = NULL;
		}

		if (timer_pending(&ctrl->activity_timer))
			del_timer_sync(&ctrl->activity_timer);

		/* Clean up the DMA queue */
		spin_lock_bh(&ctrl->queue_lock);
		rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
		spin_unlock_bh(&ctrl->queue_lock);

		rsxx_dma_cancel(ctrl);

		vfree(ctrl->trackers);

		pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
				    ctrl->status.buf, ctrl->status.dma_addr);
		pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
				    ctrl->cmd.buf, ctrl->cmd.dma_addr);
	}
}

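/*
 * EEH support: after a PCI error, every DMA still held by a tracker is
 * pulled back onto the software queue and its page unmapped, so the whole
 * queue can be remapped and reissued once the slot has been reset.
 */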
int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
{
	int i;
	int j;
	int cnt;
	struct rsxx_dma *dma;
	struct list_head *issued_dmas;

	issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets,
			      GFP_KERNEL);
	if (!issued_dmas)
		return -ENOMEM;

	for (i = 0; i < card->n_targets; i++) {
		INIT_LIST_HEAD(&issued_dmas[i]);
		cnt = 0;
		for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
			dma = get_tracker_dma(card->ctrl[i].trackers, j);
			if (dma == NULL)
				continue;

			if (dma->cmd == HW_CMD_BLK_WRITE)
				card->ctrl[i].stats.writes_issued--;
			else if (dma->cmd == HW_CMD_BLK_DISCARD)
				card->ctrl[i].stats.discards_issued--;
			else
				card->ctrl[i].stats.reads_issued--;

			list_add_tail(&dma->list, &issued_dmas[i]);
			push_tracker(card->ctrl[i].trackers, j);
			cnt++;
		}

		spin_lock_bh(&card->ctrl[i].queue_lock);
		list_splice(&issued_dmas[i], &card->ctrl[i].queue);

		atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
		card->ctrl[i].stats.sw_q_depth += cnt;
		card->ctrl[i].e_cnt = 0;

		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
			if (!pci_dma_mapping_error(card->dev, dma->dma_addr))
				pci_unmap_page(card->dev, dma->dma_addr,
					       get_dma_size(dma),
					       dma->cmd == HW_CMD_BLK_WRITE ?
					       PCI_DMA_TODEVICE :
					       PCI_DMA_FROMDEVICE);
		}
		spin_unlock_bh(&card->ctrl[i].queue_lock);
	}

	kfree(issued_dmas);

	return 0;
}

int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
{
	struct rsxx_dma *dma;
	int i;

	for (i = 0; i < card->n_targets; i++) {
		spin_lock_bh(&card->ctrl[i].queue_lock);
		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
			dma->dma_addr = pci_map_page(card->dev, dma->page,
					dma->pg_off, get_dma_size(dma),
					dma->cmd == HW_CMD_BLK_WRITE ?
					PCI_DMA_TODEVICE :
					PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(card->dev, dma->dma_addr)) {
				spin_unlock_bh(&card->ctrl[i].queue_lock);
				kmem_cache_free(rsxx_dma_pool, dma);
				return -ENOMEM;
			}
		}
		spin_unlock_bh(&card->ctrl[i].queue_lock);
	}

	return 0;
}

int rsxx_dma_init(void)
{
	rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
	if (!rsxx_dma_pool)
		return -ENOMEM;

	return 0;
}


void rsxx_dma_cleanup(void)
{
	kmem_cache_destroy(rsxx_dma_pool);
}