// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Filename: dma.c
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 */

#include <linux/slab.h>
#include "rsxx_priv.h"

struct rsxx_dma {
	struct list_head	list;
	u8			cmd;
	unsigned int		laddr;	/* Logical address */
	struct {
		u32		off;
		u32		cnt;
	} sub_page;
	dma_addr_t		dma_addr;
	struct page		*page;
	unsigned int		pg_off;	/* Page Offset */
	rsxx_dma_cb		cb;
	void			*cb_data;
};

/* This timeout is used to detect a stalled DMA channel */
#define DMA_ACTIVITY_TIMEOUT	msecs_to_jiffies(10000)

struct hw_status {
	u8	status;
	u8	tag;
	__le16	count;
	__le32	_rsvd2;
	__le64	_rsvd3;
} __packed;

enum rsxx_dma_status {
	DMA_SW_ERR	= 0x1,
	DMA_HW_FAULT	= 0x2,
	DMA_CANCELLED	= 0x4,
};

struct hw_cmd {
	u8	command;
	u8	tag;
	u8	_rsvd;
	u8	sub_page; /* Bit[0:2]: 512byte offset */
			  /* Bit[4:6]: 512byte count */
	__le32	device_addr;
	__le64	host_addr;
} __packed;
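
/*
 * Worked example of the sub_page encoding (inferred from the packing done
 * in rsxx_issue_dmas() below, not from a hardware spec): a 1 KiB transfer
 * starting 512 bytes into a block has off = 1 and cnt = 2, so the byte is
 * ((2 & 0x7) << 4) | (1 & 0x7) = 0x21. A full-block DMA carries cnt = 8,
 * which wraps to 0 in the 3-bit count field; a count of zero evidently
 * denotes a whole-block transfer (get_dma_size() treats cnt == 0 the
 * same way).
 */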

enum rsxx_hw_cmd {
	HW_CMD_BLK_DISCARD	= 0x70,
	HW_CMD_BLK_WRITE	= 0x80,
	HW_CMD_BLK_READ		= 0xC0,
	HW_CMD_BLK_RECON_READ	= 0xE0,
};

enum rsxx_hw_status {
	HW_STATUS_CRC		= 0x01,
	HW_STATUS_HARD_ERR	= 0x02,
	HW_STATUS_SOFT_ERR	= 0x04,
	HW_STATUS_FAULT		= 0x08,
};

static struct kmem_cache *rsxx_dma_pool;

struct dma_tracker {
	int			next_tag;
	struct rsxx_dma		*dma;
};

#define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \
		(sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS))

struct dma_tracker_list {
	spinlock_t		lock;
	int			head;
	struct dma_tracker	list[];
};


/*----------------- Misc Utility Functions -------------------*/
static unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
{
	unsigned long long tgt_addr8;

	tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) &
		     card->_stripe.upper_mask) |
		    ((addr8) & card->_stripe.lower_mask);
	do_div(tgt_addr8, RSXX_HW_BLK_SIZE);
	return tgt_addr8;
}

static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
{
	unsigned int tgt;

	tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;

	return tgt;
}

void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
{
	/* Reset all DMA Command/Status Queues */
	iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
}

static unsigned int get_dma_size(struct rsxx_dma *dma)
{
	if (dma->sub_page.cnt)
		return dma->sub_page.cnt << 9;
	else
		return RSXX_HW_BLK_SIZE;
}
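
/*
 * For example, cnt = 2 maps to 2 << 9 = 1024 bytes, while cnt = 0 is
 * taken to mean a full hardware block (RSXX_HW_BLK_SIZE bytes; the
 * mapping comment in rsxx_issue_dmas() below puts that at 4096).
 */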


/*----------------- DMA Tracker -------------------*/
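/*
 * A sketch of the scheme below, as read from the code rather than any
 * spec: trackers->list is indexed by the hardware command tag, and free
 * tags are chained through next_tag into a LIFO free list headed by
 * trackers->head. pop_tracker() claims a tag for an in-flight DMA,
 * push_tracker() returns it on completion, and a result of -1 means all
 * RSXX_MAX_OUTSTANDING_CMDS tags are in flight.
 */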
static void set_tracker_dma(struct dma_tracker_list *trackers,
			    int tag,
			    struct rsxx_dma *dma)
{
	trackers->list[tag].dma = dma;
}

static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
					int tag)
{
	return trackers->list[tag].dma;
}

static int pop_tracker(struct dma_tracker_list *trackers)
{
	int tag;

	spin_lock(&trackers->lock);
	tag = trackers->head;
	if (tag != -1) {
		trackers->head = trackers->list[tag].next_tag;
		trackers->list[tag].next_tag = -1;
	}
	spin_unlock(&trackers->lock);

	return tag;
}

static void push_tracker(struct dma_tracker_list *trackers, int tag)
{
	spin_lock(&trackers->lock);
	trackers->list[tag].next_tag = trackers->head;
	trackers->head = tag;
	trackers->list[tag].dma = NULL;
	spin_unlock(&trackers->lock);
}


/*----------------- Interrupt Coalescing -------------*/
/*
 * Interrupt Coalescing Register Format:
 *	Interrupt Timer (64ns units) [15:0]
 *	Interrupt Count [24:16]
 *	Reserved [31:25]
 */
#define INTR_COAL_LATENCY_MASK		(0x0000ffff)

#define INTR_COAL_COUNT_SHIFT		16
#define INTR_COAL_COUNT_BITS		9
#define INTR_COAL_COUNT_MASK		(((1 << INTR_COAL_COUNT_BITS) - 1) << \
					 INTR_COAL_COUNT_SHIFT)
#define INTR_COAL_LATENCY_UNITS_NS	64
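
/*
 * Illustrative encoding (the values here are made up): a count of 8 with
 * a latency of 5000 ns yields (8 << 16) | (5000 / 64) = 0x0008004e. The
 * integer division rounds the latency down to a multiple of 64 ns.
 */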

static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency)
{
	u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS;

	if (mode == RSXX_INTR_COAL_DISABLED)
		return 0;

	return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
			(latency_units & INTR_COAL_LATENCY_MASK);
}

static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
{
	int i;
	u32 q_depth = 0;
	u32 intr_coal;

	if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
	    unlikely(card->eeh_state))
		return;

	for (i = 0; i < card->n_targets; i++)
		q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);

	intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
				      q_depth / 2,
				      card->config.data.intr_coal.latency);
	iowrite32(intr_coal, card->regmap + INTR_COAL);
}

/*----------------- RSXX DMA Handling -------------------*/
static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
{
	if (dma->cmd != HW_CMD_BLK_DISCARD) {
		if (!dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
			dma_unmap_page(&ctrl->card->dev->dev, dma->dma_addr,
				       get_dma_size(dma),
				       dma->cmd == HW_CMD_BLK_WRITE ?
				       DMA_TO_DEVICE :
				       DMA_FROM_DEVICE);
		}
	}

	kmem_cache_free(rsxx_dma_pool, dma);
}

static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
			      struct rsxx_dma *dma,
			      unsigned int status)
{
	if (status & DMA_SW_ERR)
		ctrl->stats.dma_sw_err++;
	if (status & DMA_HW_FAULT)
		ctrl->stats.dma_hw_fault++;
	if (status & DMA_CANCELLED)
		ctrl->stats.dma_cancelled++;

	if (dma->cb)
		dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);

	rsxx_free_dma(ctrl, dma);
}

int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
			   struct list_head *q, unsigned int done)
{
	struct rsxx_dma *dma;
	struct rsxx_dma *tmp;
	int cnt = 0;

	list_for_each_entry_safe(dma, tmp, q, list) {
		list_del(&dma->list);
		if (done & COMPLETE_DMA)
			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
		else
			rsxx_free_dma(ctrl, dma);
		cnt++;
	}

	return cnt;
}

static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
			     struct rsxx_dma *dma)
{
	/*
	 * Requeued DMAs go to the front of the queue so they are issued
	 * first.
	 */
	spin_lock_bh(&ctrl->queue_lock);
	ctrl->stats.sw_q_depth++;
	list_add(&dma->list, &ctrl->queue);
	spin_unlock_bh(&ctrl->queue_lock);
}

static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
				  struct rsxx_dma *dma,
				  u8 hw_st)
{
	unsigned int status = 0;
	int requeue_cmd = 0;

	dev_dbg(CARD_TO_DEV(ctrl->card),
		"Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n",
		dma->cmd, dma->laddr, hw_st);

	if (hw_st & HW_STATUS_CRC)
		ctrl->stats.crc_errors++;
	if (hw_st & HW_STATUS_HARD_ERR)
		ctrl->stats.hard_errors++;
	if (hw_st & HW_STATUS_SOFT_ERR)
		ctrl->stats.soft_errors++;

	switch (dma->cmd) {
	case HW_CMD_BLK_READ:
		if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
			if (ctrl->card->scrub_hard) {
				dma->cmd = HW_CMD_BLK_RECON_READ;
				requeue_cmd = 1;
				ctrl->stats.reads_retried++;
			} else {
				status |= DMA_HW_FAULT;
				ctrl->stats.reads_failed++;
			}
		} else if (hw_st & HW_STATUS_FAULT) {
			status |= DMA_HW_FAULT;
			ctrl->stats.reads_failed++;
		}

		break;
	case HW_CMD_BLK_RECON_READ:
		if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
			/* Data could not be reconstructed. */
			status |= DMA_HW_FAULT;
			ctrl->stats.reads_failed++;
		}

		break;
	case HW_CMD_BLK_WRITE:
		status |= DMA_HW_FAULT;
		ctrl->stats.writes_failed++;

		break;
	case HW_CMD_BLK_DISCARD:
		status |= DMA_HW_FAULT;
		ctrl->stats.discards_failed++;

		break;
	default:
		dev_err(CARD_TO_DEV(ctrl->card),
			"Unknown command in DMA!(cmd: x%02x laddr x%08x st: x%02x)\n",
			dma->cmd, dma->laddr, hw_st);
		status |= DMA_SW_ERR;

		break;
	}

	if (requeue_cmd)
		rsxx_requeue_dma(ctrl, dma);
	else
		rsxx_complete_dma(ctrl, dma, status);
}

static void dma_engine_stalled(struct timer_list *t)
{
	struct rsxx_dma_ctrl *ctrl = from_timer(ctrl, t, activity_timer);
	int cnt;

	if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
	    unlikely(ctrl->card->eeh_state))
		return;

	if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
		/*
		 * The dma engine was stalled because the SW_CMD_IDX write
		 * was lost. Issue it again to recover.
		 */
		dev_warn(CARD_TO_DEV(ctrl->card),
			 "SW_CMD_IDX write was lost, re-writing...\n");
		iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
		mod_timer(&ctrl->activity_timer,
			  jiffies + DMA_ACTIVITY_TIMEOUT);
	} else {
		dev_warn(CARD_TO_DEV(ctrl->card),
			 "DMA channel %d has stalled, faulting interface.\n",
			 ctrl->id);
		ctrl->card->dma_fault = 1;

		/* Clean up the DMA queue */
		spin_lock(&ctrl->queue_lock);
		cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
		spin_unlock(&ctrl->queue_lock);

		cnt += rsxx_dma_cancel(ctrl);

		if (cnt)
			dev_info(CARD_TO_DEV(ctrl->card),
				 "Freed %d queued DMAs on channel %d\n",
				 cnt, ctrl->id);
	}
}

static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
{
	struct rsxx_dma *dma;
	int tag;
	int cmds_pending = 0;
	struct hw_cmd *hw_cmd_buf;
	int dir;

	hw_cmd_buf = ctrl->cmd.buf;

	if (unlikely(ctrl->card->halt) ||
	    unlikely(ctrl->card->eeh_state))
		return;

	while (1) {
		spin_lock_bh(&ctrl->queue_lock);
		if (list_empty(&ctrl->queue)) {
			spin_unlock_bh(&ctrl->queue_lock);
			break;
		}
		spin_unlock_bh(&ctrl->queue_lock);

		tag = pop_tracker(ctrl->trackers);
		if (tag == -1)
			break;

		spin_lock_bh(&ctrl->queue_lock);
		dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
		list_del(&dma->list);
		ctrl->stats.sw_q_depth--;
		spin_unlock_bh(&ctrl->queue_lock);

		/*
		 * This will catch any DMAs that slipped in right before the
		 * fault, but were queued after all the other DMAs were
		 * cancelled.
		 */
		if (unlikely(ctrl->card->dma_fault)) {
			push_tracker(ctrl->trackers, tag);
			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
			continue;
		}

		if (dma->cmd != HW_CMD_BLK_DISCARD) {
			if (dma->cmd == HW_CMD_BLK_WRITE)
				dir = DMA_TO_DEVICE;
			else
				dir = DMA_FROM_DEVICE;

			/*
			 * The function dma_map_page is placed here because we
			 * can only, by design, issue up to 255 commands to the
			 * hardware at one time per DMA channel. So the maximum
			 * amount of mapped memory would be 255 * 4 channels *
			 * 4096 Bytes, which is less than 2GB, the limit of an
			 * x8 Non-HWWD PCIe slot. This way the dma_map_page
			 * function should never fail because of a lack of
			 * mappable memory.
			 */
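			/*
			 * For scale: 255 tags x 4 channels x 4096 bytes is
			 * only about 4 MiB mapped concurrently, comfortably
			 * inside the 2GB limit cited above.
			 */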
			dma->dma_addr = dma_map_page(&ctrl->card->dev->dev,
					dma->page, dma->pg_off,
					dma->sub_page.cnt << 9, dir);
			if (dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
				push_tracker(ctrl->trackers, tag);
				rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
				continue;
			}
		}

		set_tracker_dma(ctrl->trackers, tag, dma);
		hw_cmd_buf[ctrl->cmd.idx].command  = dma->cmd;
		hw_cmd_buf[ctrl->cmd.idx].tag      = tag;
		hw_cmd_buf[ctrl->cmd.idx]._rsvd    = 0;
		hw_cmd_buf[ctrl->cmd.idx].sub_page =
					((dma->sub_page.cnt & 0x7) << 4) |
					 (dma->sub_page.off & 0x7);

		hw_cmd_buf[ctrl->cmd.idx].device_addr =
					cpu_to_le32(dma->laddr);

		hw_cmd_buf[ctrl->cmd.idx].host_addr =
					cpu_to_le64(dma->dma_addr);

		dev_dbg(CARD_TO_DEV(ctrl->card),
			"Issue DMA%d(laddr %d tag %d) to idx %d\n",
			ctrl->id, dma->laddr, tag, ctrl->cmd.idx);

		ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
		cmds_pending++;

		if (dma->cmd == HW_CMD_BLK_WRITE)
			ctrl->stats.writes_issued++;
		else if (dma->cmd == HW_CMD_BLK_DISCARD)
			ctrl->stats.discards_issued++;
		else
			ctrl->stats.reads_issued++;
	}

	/* Let HW know we've queued commands. */
	if (cmds_pending) {
		atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
		mod_timer(&ctrl->activity_timer,
			  jiffies + DMA_ACTIVITY_TIMEOUT);

		if (unlikely(ctrl->card->eeh_state)) {
			del_timer_sync(&ctrl->activity_timer);
			return;
		}

		iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
	}
}

static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
{
	struct rsxx_dma *dma;
	unsigned long flags;
	u16 count;
	u8 status;
	u8 tag;
	struct hw_status *hw_st_buf;

	hw_st_buf = ctrl->status.buf;

	if (unlikely(ctrl->card->halt) ||
	    unlikely(ctrl->card->dma_fault) ||
	    unlikely(ctrl->card->eeh_state))
		return;

	count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);

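	/*
	 * How completions are consumed (as read from the code, not a spec):
	 * the hardware stamps each status entry with a running completion
	 * count, and ctrl->e_cnt counts how many entries this driver has
	 * consumed. An entry is ready exactly when the two match, so the
	 * loop below walks the status ring until it hits an entry the
	 * hardware has not written yet.
	 */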
	while (count == ctrl->e_cnt) {
		/*
		 * The read memory-barrier is necessary to keep aggressive
		 * processors/optimizers (such as the PPC Apple G5) from
		 * reordering the following status-buffer tag & status read
		 * *before* the count read on subsequent iterations of the
		 * loop!
		 */
		rmb();

		status = hw_st_buf[ctrl->status.idx].status;
		tag    = hw_st_buf[ctrl->status.idx].tag;

		dma = get_tracker_dma(ctrl->trackers, tag);
		if (dma == NULL) {
			spin_lock_irqsave(&ctrl->card->irq_lock, flags);
			rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
			spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

			dev_err(CARD_TO_DEV(ctrl->card),
				"No tracker for tag %d (idx %d id %d)\n",
				tag, ctrl->status.idx, ctrl->id);
			return;
		}

		dev_dbg(CARD_TO_DEV(ctrl->card),
			"Completing DMA%d(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n",
			ctrl->id, dma->laddr, tag, status, count,
			ctrl->status.idx);

		atomic_dec(&ctrl->stats.hw_q_depth);

		mod_timer(&ctrl->activity_timer,
			  jiffies + DMA_ACTIVITY_TIMEOUT);

		if (status)
			rsxx_handle_dma_error(ctrl, dma, status);
		else
			rsxx_complete_dma(ctrl, dma, 0);

		push_tracker(ctrl->trackers, tag);

		ctrl->status.idx = (ctrl->status.idx + 1) &
				   RSXX_CS_IDX_MASK;
		ctrl->e_cnt++;

		count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
	}

	dma_intr_coal_auto_tune(ctrl->card);

	if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
		del_timer_sync(&ctrl->activity_timer);

	spin_lock_irqsave(&ctrl->card->irq_lock, flags);
	rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
	spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);

	spin_lock_bh(&ctrl->queue_lock);
	if (ctrl->stats.sw_q_depth)
		queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
	spin_unlock_bh(&ctrl->queue_lock);
}

static void rsxx_schedule_issue(struct work_struct *work)
{
	struct rsxx_dma_ctrl *ctrl;

	ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);

	mutex_lock(&ctrl->work_lock);
	rsxx_issue_dmas(ctrl);
	mutex_unlock(&ctrl->work_lock);
}

static void rsxx_schedule_done(struct work_struct *work)
{
	struct rsxx_dma_ctrl *ctrl;

	ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);

	mutex_lock(&ctrl->work_lock);
	rsxx_dma_done(ctrl);
	mutex_unlock(&ctrl->work_lock);
}

static blk_status_t rsxx_queue_discard(struct rsxx_cardinfo *card,
				       struct list_head *q,
				       unsigned int laddr,
				       rsxx_dma_cb cb,
				       void *cb_data)
{
	struct rsxx_dma *dma;

	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
	if (!dma)
		return BLK_STS_RESOURCE;

	dma->cmd          = HW_CMD_BLK_DISCARD;
	dma->laddr        = laddr;
	dma->dma_addr     = 0;
	dma->sub_page.off = 0;
	dma->sub_page.cnt = 0;
	dma->page         = NULL;
	dma->pg_off       = 0;
	dma->cb           = cb;
	dma->cb_data      = cb_data;

	dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);

	list_add_tail(&dma->list, q);

	return 0;
}

static blk_status_t rsxx_queue_dma(struct rsxx_cardinfo *card,
				   struct list_head *q,
				   int dir,
				   unsigned int dma_off,
				   unsigned int dma_len,
				   unsigned int laddr,
				   struct page *page,
				   unsigned int pg_off,
				   rsxx_dma_cb cb,
				   void *cb_data)
{
	struct rsxx_dma *dma;

	dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
	if (!dma)
		return BLK_STS_RESOURCE;

	dma->cmd          = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
	dma->laddr        = laddr;
	dma->sub_page.off = (dma_off >> 9);
	dma->sub_page.cnt = (dma_len >> 9);
	dma->page         = page;
	dma->pg_off       = pg_off;
	dma->cb           = cb;
	dma->cb_data      = cb_data;

	dev_dbg(CARD_TO_DEV(card),
		"Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n",
		dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
		dma->sub_page.cnt, dma->page, dma->pg_off);

	/* Queue the DMA */
	list_add_tail(&dma->list, q);

	return 0;
}

blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
				struct bio *bio,
				atomic_t *n_dmas,
				rsxx_dma_cb cb,
				void *cb_data)
{
	struct list_head dma_list[RSXX_MAX_TARGETS];
	struct bio_vec bvec;
	struct bvec_iter iter;
	unsigned long long addr8;
	unsigned int laddr;
	unsigned int bv_len;
	unsigned int bv_off;
	unsigned int dma_off;
	unsigned int dma_len;
	int dma_cnt[RSXX_MAX_TARGETS];
	int tgt;
	blk_status_t st;
	int i;

	addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
	atomic_set(n_dmas, 0);

	for (i = 0; i < card->n_targets; i++) {
		INIT_LIST_HEAD(&dma_list[i]);
		dma_cnt[i] = 0;
	}

	if (bio_op(bio) == REQ_OP_DISCARD) {
		bv_len = bio->bi_iter.bi_size;

		while (bv_len > 0) {
			tgt   = rsxx_get_dma_tgt(card, addr8);
			laddr = rsxx_addr8_to_laddr(addr8, card);

			st = rsxx_queue_discard(card, &dma_list[tgt], laddr,
						cb, cb_data);
			if (st)
				goto bvec_err;

			dma_cnt[tgt]++;
			atomic_inc(n_dmas);
			addr8  += RSXX_HW_BLK_SIZE;
			bv_len -= RSXX_HW_BLK_SIZE;
		}
	} else {
		bio_for_each_segment(bvec, bio, iter) {
			bv_len = bvec.bv_len;
			bv_off = bvec.bv_offset;

			while (bv_len > 0) {
				tgt   = rsxx_get_dma_tgt(card, addr8);
				laddr = rsxx_addr8_to_laddr(addr8, card);
				dma_off = addr8 & RSXX_HW_BLK_MASK;
				dma_len = min(bv_len,
					      RSXX_HW_BLK_SIZE - dma_off);

				st = rsxx_queue_dma(card, &dma_list[tgt],
						    bio_data_dir(bio),
						    dma_off, dma_len,
						    laddr, bvec.bv_page,
						    bv_off, cb, cb_data);
				if (st)
					goto bvec_err;

				dma_cnt[tgt]++;
				atomic_inc(n_dmas);
				addr8  += dma_len;
				bv_off += dma_len;
				bv_len -= dma_len;
			}
		}
	}

	for (i = 0; i < card->n_targets; i++) {
		if (!list_empty(&dma_list[i])) {
			spin_lock_bh(&card->ctrl[i].queue_lock);
			card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
			list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
			spin_unlock_bh(&card->ctrl[i].queue_lock);

			queue_work(card->ctrl[i].issue_wq,
				   &card->ctrl[i].issue_dma_work);
		}
	}

	return 0;

bvec_err:
	for (i = 0; i < card->n_targets; i++)
		rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
				       FREE_DMA);
	return st;
}
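
/*
 * Worked example of the split above (illustrative only; it assumes 4 KiB
 * hardware blocks, a 4 KiB stripe and four targets): an 8 KiB write
 * starting at sector 16 has addr8 = 16 << 9 = 0x2000, covering global
 * blocks 2 and 3. Those stripe onto channels 2 and 3 respectively, each
 * as one full-block DMA at laddr 0, and *n_dmas ends up as 2.
 */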


/*----------------- DMA Engine Initialization & Setup -------------------*/
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
{
	ctrl->status.buf = dma_alloc_coherent(&dev->dev, STATUS_BUFFER_SIZE8,
					      &ctrl->status.dma_addr, GFP_KERNEL);
	ctrl->cmd.buf = dma_alloc_coherent(&dev->dev, COMMAND_BUFFER_SIZE8,
					   &ctrl->cmd.dma_addr, GFP_KERNEL);
	if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
		return -ENOMEM;

	memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
	iowrite32(lower_32_bits(ctrl->status.dma_addr),
		  ctrl->regmap + SB_ADD_LO);
	iowrite32(upper_32_bits(ctrl->status.dma_addr),
		  ctrl->regmap + SB_ADD_HI);

	memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
	iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
	iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);

	ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
	if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
		dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
			 ctrl->status.idx);
		return -EINVAL;
	}
	iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
	iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);

	ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
	if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
		dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
			 ctrl->cmd.idx);
		return -EINVAL;
	}
	iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
	iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);

	return 0;
}

static int rsxx_dma_ctrl_init(struct pci_dev *dev,
			      struct rsxx_dma_ctrl *ctrl)
{
	int i;
	int st;

	memset(&ctrl->stats, 0, sizeof(ctrl->stats));

	ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
	if (!ctrl->trackers)
		return -ENOMEM;

	ctrl->trackers->head = 0;
	for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
		ctrl->trackers->list[i].next_tag = i + 1;
		ctrl->trackers->list[i].dma = NULL;
	}
	ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
	spin_lock_init(&ctrl->trackers->lock);

	spin_lock_init(&ctrl->queue_lock);
	mutex_init(&ctrl->work_lock);
	INIT_LIST_HEAD(&ctrl->queue);

	timer_setup(&ctrl->activity_timer, dma_engine_stalled, 0);

	ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
	if (!ctrl->issue_wq)
		return -ENOMEM;

	ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);
	if (!ctrl->done_wq)
		return -ENOMEM;

	INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
	INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);

	st = rsxx_hw_buffers_init(dev, ctrl);
	if (st)
		return st;

	return 0;
}

static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
				 unsigned int stripe_size8)
{
	if (!is_power_of_2(stripe_size8)) {
		dev_err(CARD_TO_DEV(card),
			"stripe_size is NOT a power of 2!\n");
		return -EINVAL;
	}

	card->_stripe.lower_mask = stripe_size8 - 1;

	card->_stripe.upper_mask  = ~(card->_stripe.lower_mask);
	card->_stripe.upper_shift = ffs(card->n_targets) - 1;

	card->_stripe.target_mask  = card->n_targets - 1;
	card->_stripe.target_shift = ffs(stripe_size8) - 1;

	dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask = x%016llx\n",
		card->_stripe.lower_mask);
	dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift = x%016llx\n",
		card->_stripe.upper_shift);
	dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask = x%016llx\n",
		card->_stripe.upper_mask);
	dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask = x%016llx\n",
		card->_stripe.target_mask);
	dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n",
		card->_stripe.target_shift);

	return 0;
}
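
/*
 * Hypothetical example of the resulting layout (the numbers are assumed,
 * not taken from hardware docs): with four targets and stripe_size8 =
 * 4096, lower_mask = 0xfff, upper_mask = ~0xfff, upper_shift = 2,
 * target_shift = 12 and target_mask = 0x3. Byte address 0x5000 then hits
 * target (0x5000 >> 12) & 3 = 1, and rsxx_addr8_to_laddr() folds the
 * target bits out: ((0x5000 >> 2) & ~0xfff) | 0 = 0x1000, which divided
 * by the 4096-byte block size gives laddr 1.
 */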

int rsxx_dma_configure(struct rsxx_cardinfo *card)
{
	u32 intr_coal;

	intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
				      card->config.data.intr_coal.count,
				      card->config.data.intr_coal.latency);
	iowrite32(intr_coal, card->regmap + INTR_COAL);

	return rsxx_dma_stripe_setup(card, card->config.data.stripe_size);
}

int rsxx_dma_setup(struct rsxx_cardinfo *card)
{
	unsigned long flags;
	int st;
	int i;

	dev_info(CARD_TO_DEV(card),
		 "Initializing %d DMA targets\n",
		 card->n_targets);

	/* Regmap is divided up into 4K chunks. One for each DMA channel */
	for (i = 0; i < card->n_targets; i++)
		card->ctrl[i].regmap = card->regmap + (i * 4096);

	card->dma_fault = 0;

	/* Reset the DMA queues */
	rsxx_dma_queue_reset(card);

	/************* Setup DMA Control *************/
	for (i = 0; i < card->n_targets; i++) {
		st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
		if (st)
			goto failed_dma_setup;

		card->ctrl[i].card = card;
		card->ctrl[i].id = i;
	}

	card->scrub_hard = 1;

	if (card->config_valid)
		rsxx_dma_configure(card);

	/* Enable the interrupts after all setup has completed. */
	for (i = 0; i < card->n_targets; i++) {
		spin_lock_irqsave(&card->irq_lock, flags);
		rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i));
		spin_unlock_irqrestore(&card->irq_lock, flags);
	}

	return 0;

failed_dma_setup:
	for (i = 0; i < card->n_targets; i++) {
		struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];

		if (ctrl->issue_wq) {
			destroy_workqueue(ctrl->issue_wq);
			ctrl->issue_wq = NULL;
		}

		if (ctrl->done_wq) {
			destroy_workqueue(ctrl->done_wq);
			ctrl->done_wq = NULL;
		}

		vfree(ctrl->trackers);

		if (ctrl->status.buf)
			dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
					  ctrl->status.buf,
					  ctrl->status.dma_addr);
		if (ctrl->cmd.buf)
			dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
					  ctrl->cmd.buf, ctrl->cmd.dma_addr);
	}

	return st;
}

int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
{
	struct rsxx_dma *dma;
	int i;
	int cnt = 0;

	/* Clean up issued DMAs */
	for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
		dma = get_tracker_dma(ctrl->trackers, i);
		if (dma) {
			atomic_dec(&ctrl->stats.hw_q_depth);
			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
			push_tracker(ctrl->trackers, i);
			cnt++;
		}
	}

	return cnt;
}

void rsxx_dma_destroy(struct rsxx_cardinfo *card)
{
	struct rsxx_dma_ctrl *ctrl;
	int i;

	for (i = 0; i < card->n_targets; i++) {
		ctrl = &card->ctrl[i];

		if (ctrl->issue_wq) {
			destroy_workqueue(ctrl->issue_wq);
			ctrl->issue_wq = NULL;
		}

		if (ctrl->done_wq) {
			destroy_workqueue(ctrl->done_wq);
			ctrl->done_wq = NULL;
		}

		if (timer_pending(&ctrl->activity_timer))
			del_timer_sync(&ctrl->activity_timer);

		/* Clean up the DMA queue */
		spin_lock_bh(&ctrl->queue_lock);
		rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
		spin_unlock_bh(&ctrl->queue_lock);

		rsxx_dma_cancel(ctrl);

		vfree(ctrl->trackers);

		dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
				  ctrl->status.buf, ctrl->status.dma_addr);
		dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
				  ctrl->cmd.buf, ctrl->cmd.dma_addr);
	}
}

int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
{
	int i;
	int j;
	int cnt;
	struct rsxx_dma *dma;
	struct list_head *issued_dmas;

	issued_dmas = kcalloc(card->n_targets, sizeof(*issued_dmas),
			      GFP_KERNEL);
	if (!issued_dmas)
		return -ENOMEM;

	for (i = 0; i < card->n_targets; i++) {
		INIT_LIST_HEAD(&issued_dmas[i]);
		cnt = 0;
		for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
			dma = get_tracker_dma(card->ctrl[i].trackers, j);
			if (dma == NULL)
				continue;

			if (dma->cmd == HW_CMD_BLK_WRITE)
				card->ctrl[i].stats.writes_issued--;
			else if (dma->cmd == HW_CMD_BLK_DISCARD)
				card->ctrl[i].stats.discards_issued--;
			else
				card->ctrl[i].stats.reads_issued--;

			if (dma->cmd != HW_CMD_BLK_DISCARD) {
				dma_unmap_page(&card->dev->dev, dma->dma_addr,
					       get_dma_size(dma),
					       dma->cmd == HW_CMD_BLK_WRITE ?
					       DMA_TO_DEVICE :
					       DMA_FROM_DEVICE);
			}

			list_add_tail(&dma->list, &issued_dmas[i]);
			push_tracker(card->ctrl[i].trackers, j);
			cnt++;
		}

		spin_lock_bh(&card->ctrl[i].queue_lock);
		list_splice(&issued_dmas[i], &card->ctrl[i].queue);

		atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
		card->ctrl[i].stats.sw_q_depth += cnt;
		card->ctrl[i].e_cnt = 0;
		spin_unlock_bh(&card->ctrl[i].queue_lock);
	}

	kfree(issued_dmas);

	return 0;
}

int rsxx_dma_init(void)
{
	rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
	if (!rsxx_dma_pool)
		return -ENOMEM;

	return 0;
}


void rsxx_dma_cleanup(void)
{
	kmem_cache_destroy(rsxx_dma_pool);
}