/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/spinlock.h>
#include <linux/seqlock.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "hfi.h"
#include "common.h"
#include "qp.h"
#include "sdma.h"
#include "iowait.h"
#include "trace.h"

/* must be a power of 2 >= 64 <= 32768 */
#define SDMA_DESCQ_CNT 2048
#define SDMA_DESC_INTR 64
#define INVALID_TAIL 0xffff
#define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32))

static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
module_param(sdma_descq_cnt, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");

static uint sdma_idle_cnt = 250;
module_param(sdma_idle_cnt, uint, S_IRUGO);
MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns, default 250)");

uint mod_num_sdma;
module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
MODULE_PARM_DESC(num_sdma, "Set max number of SDMA engines to use");

static uint sdma_desct_intr = SDMA_DESC_INTR;
module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before an interrupt");

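/*
 * Example (illustrative values, not a recommendation): loading the
 * driver with
 *	modprobe hfi1 sdma_descq_cnt=4096 num_sdma=8
 * requests 4096-entry descriptor rings and caps the number of engines
 * used at 8.  sdma_descq_cnt is validated by sdma_get_descq_cnt() below;
 * out-of-range or non-power-of-2 values fall back to SDMA_DESCQ_CNT.
 */
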
#define SDMA_WAIT_BATCH_SIZE 20
/* max wait time for a SDMA engine to indicate it has halted */
#define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
/* all SDMA engine errors that cause a halt */

#define SD(name) SEND_DMA_##name
#define ALL_SDMA_ENG_HALT_ERRS \
	(SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
	| SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))

/* sdma_sendctrl operations */
#define SDMA_SENDCTRL_OP_ENABLE    BIT(0)
#define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
#define SDMA_SENDCTRL_OP_HALT      BIT(2)
#define SDMA_SENDCTRL_OP_CLEANUP   BIT(3)

/* handle long defines */
#define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
#define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT

static const char * const sdma_state_names[] = {
	[sdma_state_s00_hw_down]                = "s00_HwDown",
	[sdma_state_s10_hw_start_up_halt_wait]  = "s10_HwStartUpHaltWait",
	[sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
	[sdma_state_s20_idle]                   = "s20_Idle",
	[sdma_state_s30_sw_clean_up_wait]       = "s30_SwCleanUpWait",
	[sdma_state_s40_hw_clean_up_wait]       = "s40_HwCleanUpWait",
	[sdma_state_s50_hw_halt_wait]           = "s50_HwHaltWait",
	[sdma_state_s60_idle_halt_wait]         = "s60_IdleHaltWait",
	[sdma_state_s80_hw_freeze]              = "s80_HwFreeze",
	[sdma_state_s82_freeze_sw_clean]        = "s82_FreezeSwClean",
	[sdma_state_s99_running]                = "s99_Running",
};

#ifdef CONFIG_SDMA_VERBOSITY
static const char * const sdma_event_names[] = {
	[sdma_event_e00_go_hw_down]   = "e00_GoHwDown",
	[sdma_event_e10_go_hw_start]  = "e10_GoHwStart",
	[sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
	[sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
	[sdma_event_e30_go_running]   = "e30_GoRunning",
	[sdma_event_e40_sw_cleaned]   = "e40_SwCleaned",
	[sdma_event_e50_hw_cleaned]   = "e50_HwCleaned",
	[sdma_event_e60_hw_halted]    = "e60_HwHalted",
	[sdma_event_e70_go_idle]      = "e70_GoIdle",
	[sdma_event_e80_hw_freeze]    = "e80_HwFreeze",
	[sdma_event_e81_hw_frozen]    = "e81_HwFrozen",
	[sdma_event_e82_hw_unfreeze]  = "e82_HwUnfreeze",
	[sdma_event_e85_link_down]    = "e85_LinkDown",
	[sdma_event_e90_sw_halted]    = "e90_SwHalted",
};
#endif

static const struct sdma_set_state_action sdma_action_table[] = {
	[sdma_state_s00_hw_down] = {
		.go_s99_running_tofalse = 1,
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s10_hw_start_up_halt_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 1,
		.op_cleanup = 0,
	},
	[sdma_state_s15_hw_start_up_clean_wait] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 0,
		.op_cleanup = 1,
	},
	[sdma_state_s20_idle] = {
		.op_enable = 0,
		.op_intenable = 1,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s30_sw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s40_hw_clean_up_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 1,
	},
	[sdma_state_s50_hw_halt_wait] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s60_idle_halt_wait] = {
		.go_s99_running_tofalse = 1,
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 1,
		.op_cleanup = 0,
	},
	[sdma_state_s80_hw_freeze] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s82_freeze_sw_clean] = {
		.op_enable = 0,
		.op_intenable = 0,
		.op_halt = 0,
		.op_cleanup = 0,
	},
	[sdma_state_s99_running] = {
		.op_enable = 1,
		.op_intenable = 1,
		.op_halt = 0,
		.op_cleanup = 0,
		.go_s99_running_totrue = 1,
	},
};

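/*
 * Note: each sdma_set_state() transition consults sdma_action_table for
 * the new state and turns the op_* flags above into the corresponding
 * SDMA_SENDCTRL_OP_* bits, which sdma_sendctrl() then applies to the
 * engine's send control CSR.  The go_s99_running_to{true,false} fields
 * only track whether the engine should return to running after an
 * interruption; they do not touch hardware.
 */
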
#define SDMA_TAIL_UPDATE_THRESH 0x1F

/* declare all statics here rather than keep sorting */
static void sdma_complete(struct kref *);
static void sdma_finalput(struct sdma_state *);
static void sdma_get(struct sdma_state *);
static void sdma_hw_clean_up_task(unsigned long);
static void sdma_put(struct sdma_state *);
static void sdma_set_state(struct sdma_engine *, enum sdma_states);
static void sdma_start_hw_clean_up(struct sdma_engine *);
static void sdma_sw_clean_up_task(unsigned long);
static void sdma_sendctrl(struct sdma_engine *, unsigned);
static void init_sdma_regs(struct sdma_engine *, u32, uint);
static void sdma_process_event(
	struct sdma_engine *sde,
	enum sdma_events event);
static void __sdma_process_event(
	struct sdma_engine *sde,
	enum sdma_events event);
static void dump_sdma_state(struct sdma_engine *sde);
static void sdma_make_progress(struct sdma_engine *sde, u64 status);
static void sdma_desc_avail(struct sdma_engine *sde, uint avail);
static void sdma_flush_descq(struct sdma_engine *sde);

/**
 * sdma_state_name() - return state string from enum
 * @state: state
 */
static const char *sdma_state_name(enum sdma_states state)
{
	return sdma_state_names[state];
}

static void sdma_get(struct sdma_state *ss)
{
	kref_get(&ss->kref);
}

static void sdma_complete(struct kref *kref)
{
	struct sdma_state *ss =
		container_of(kref, struct sdma_state, kref);

	complete(&ss->comp);
}

static void sdma_put(struct sdma_state *ss)
{
	kref_put(&ss->kref, sdma_complete);
}

static void sdma_finalput(struct sdma_state *ss)
{
	sdma_put(ss);
	wait_for_completion(&ss->comp);
}

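/*
 * Reference counting note: the state machine holds the initial kref on
 * struct sdma_state.  sdma_finalput() drops that last reference and then
 * blocks on the completion that sdma_complete() signals, so when it
 * returns the state machine is guaranteed to have stopped (see
 * sdma_exit()).
 */
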
static inline void write_sde_csr(
	struct sdma_engine *sde,
	u32 offset0,
	u64 value)
{
	write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
}

static inline u64 read_sde_csr(
	struct sdma_engine *sde,
	u32 offset0)
{
	return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
}

/*
 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
 * sdma engine 'sde' to drop to 0.
 */
static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
					int pause)
{
	u64 off = 8 * sde->this_idx;
	struct hfi1_devdata *dd = sde->dd;
	int lcnt = 0;
	u64 reg_prev;
	u64 reg = 0;

	while (1) {
		reg_prev = reg;
		reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);

		reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
		reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
		if (reg == 0)
			break;
		/* counter is reset if occupancy count changes */
		if (reg != reg_prev)
			lcnt = 0;
		if (lcnt++ > 500) {
			/* timed out - bounce the link */
			dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
				   __func__, sde->this_idx, (u32)reg);
			queue_work(dd->pport->link_wq,
				   &dd->pport->link_bounce_work);
			break;
		}
		udelay(1);
	}
}

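/*
 * The loop above polls in 1 usec steps and gives up only after ~500
 * consecutive polls with no change in the occupancy count, i.e. roughly
 * half a millisecond of no forward progress, at which point the link is
 * bounced to recover.
 */
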
/*
 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
 * and pause for credit return.
 */
void sdma_wait(struct hfi1_devdata *dd)
{
	int i;

	for (i = 0; i < dd->num_sdma; i++) {
		struct sdma_engine *sde = &dd->per_sdma[i];

		sdma_wait_for_packet_egress(sde, 0);
	}
}

static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
{
	u64 reg;

	if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
		return;
	reg = cnt;
	reg &= SD(DESC_CNT_CNT_MASK);
	reg <<= SD(DESC_CNT_CNT_SHIFT);
	write_sde_csr(sde, SD(DESC_CNT), reg);
}

static inline void complete_tx(struct sdma_engine *sde,
			       struct sdma_txreq *tx,
			       int res)
{
	/* protect against complete modifying */
	struct iowait *wait = tx->wait;
	callback_t complete = tx->complete;

#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
	trace_hfi1_sdma_out_sn(sde, tx->sn);
	if (WARN_ON_ONCE(sde->head_sn != tx->sn))
		dd_dev_err(sde->dd, "expected %llu got %llu\n",
			   sde->head_sn, tx->sn);
	sde->head_sn++;
#endif
	__sdma_txclean(sde->dd, tx);
	if (complete)
		(*complete)(tx, res);
	if (iowait_sdma_dec(wait))
		iowait_drain_wakeup(wait);
}

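/*
 * Under CONFIG_HFI1_DEBUG_SDMA_ORDER the sequence-number check above
 * asserts that txreqs complete in exactly the order they were
 * submitted: head_sn is the serial number the engine expects to retire
 * next, and any mismatch is reported once via WARN_ON_ONCE.
 */
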
/*
 * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
 *
 * Depending on timing there can be txreqs in two places:
 * - in the descq ring
 * - in the flush list
 *
 * To avoid ordering issues the descq ring needs to be flushed
 * first followed by the flush list.
 *
 * This routine is called from two places
 * - From a work queue item
 * - Directly from the state machine just before setting the
 *   state to running
 *
 * Must be called with head_lock held
 *
 */
static void sdma_flush(struct sdma_engine *sde)
{
	struct sdma_txreq *txp, *txp_next;
	LIST_HEAD(flushlist);
	unsigned long flags;
	uint seq;

	/* flush from head to tail */
	sdma_flush_descq(sde);
	spin_lock_irqsave(&sde->flushlist_lock, flags);
	/* copy flush list */
	list_splice_init(&sde->flushlist, &flushlist);
	spin_unlock_irqrestore(&sde->flushlist_lock, flags);
	/* flush from flush list */
	list_for_each_entry_safe(txp, txp_next, &flushlist, list)
		complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
	/* wakeup QPs orphaned on the dmawait list */
	do {
		struct iowait *w, *nw;

		seq = read_seqbegin(&sde->waitlock);
		if (!list_empty(&sde->dmawait)) {
			write_seqlock(&sde->waitlock);
			list_for_each_entry_safe(w, nw, &sde->dmawait, list) {
				if (w->wakeup) {
					w->wakeup(w, SDMA_AVAIL_REASON);
					list_del_init(&w->list);
				}
			}
			write_sequnlock(&sde->waitlock);
		}
	} while (read_seqretry(&sde->waitlock, seq));
}

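/*
 * The read_seqbegin()/read_seqretry() loop above re-checks the dmawait
 * list after each pass: if a waiter was added concurrently (bumping the
 * seqlock sequence), the loop runs again, so no iowait entry can be
 * stranded on the list once sdma_flush() returns.
 */
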
/*
 * Fields a work request for flushing the descq ring
 * and the flush list
 *
 * If the engine has been brought to running during
 * the scheduling delay, the flush is ignored, assuming
 * that the process of bringing the engine to running
 * would have done this flush prior to going to running.
 *
 */
static void sdma_field_flush(struct work_struct *work)
{
	unsigned long flags;
	struct sdma_engine *sde =
		container_of(work, struct sdma_engine, flush_worker);

	write_seqlock_irqsave(&sde->head_lock, flags);
	if (!__sdma_running(sde))
		sdma_flush(sde);
	write_sequnlock_irqrestore(&sde->head_lock, flags);
}

static void sdma_err_halt_wait(struct work_struct *work)
{
	struct sdma_engine *sde = container_of(work, struct sdma_engine,
					       err_halt_worker);
	u64 statuscsr;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
	while (1) {
		statuscsr = read_sde_csr(sde, SD(STATUS));
		statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
		if (statuscsr)
			break;
		if (time_after(jiffies, timeout)) {
			dd_dev_err(sde->dd,
				   "SDMA engine %d - timeout waiting for engine to halt\n",
				   sde->this_idx);
			/*
			 * Continue anyway.  This could happen if there was
			 * an uncorrectable error in the wrong spot.
			 */
			break;
		}
		usleep_range(80, 120);
	}

	sdma_process_event(sde, sdma_event_e15_hw_halt_done);
}

static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
{
	if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
		unsigned index;
		struct hfi1_devdata *dd = sde->dd;

		for (index = 0; index < dd->num_sdma; index++) {
			struct sdma_engine *curr_sdma = &dd->per_sdma[index];

			if (curr_sdma != sde)
				curr_sdma->progress_check_head =
							curr_sdma->descq_head;
		}
		dd_dev_err(sde->dd,
			   "SDMA engine %d - check scheduled\n",
			   sde->this_idx);
		mod_timer(&sde->err_progress_check_timer, jiffies + 10);
	}
}

static void sdma_err_progress_check(struct timer_list *t)
{
	unsigned index;
	struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer);

	dd_dev_err(sde->dd, "SDE progress check event\n");
	for (index = 0; index < sde->dd->num_sdma; index++) {
		struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
		unsigned long flags;

		/* check progress on each engine except the current one */
		if (curr_sde == sde)
			continue;
		/*
		 * We must lock interrupts when acquiring sde->lock,
		 * to avoid a deadlock if interrupt triggers and spins on
		 * the same lock on same CPU
		 */
		spin_lock_irqsave(&curr_sde->tail_lock, flags);
		write_seqlock(&curr_sde->head_lock);

		/* skip non-running queues */
		if (curr_sde->state.current_state != sdma_state_s99_running) {
			write_sequnlock(&curr_sde->head_lock);
			spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
			continue;
		}

		if ((curr_sde->descq_head != curr_sde->descq_tail) &&
		    (curr_sde->descq_head ==
		     curr_sde->progress_check_head))
			__sdma_process_event(curr_sde,
					     sdma_event_e90_sw_halted);
		write_sequnlock(&curr_sde->head_lock);
		spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
	}
	schedule_work(&sde->err_halt_worker);
}

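/*
 * Progress-check scheme: when one engine halts on an error,
 * sdma_err_progress_check_schedule() snapshots every other engine's
 * descq_head and arms a short timer.  When the timer fires (above), any
 * engine whose ring is non-empty but whose head has not advanced past
 * the snapshot is treated as stuck and fed the e90_sw_halted event.
 */
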
static void sdma_hw_clean_up_task(unsigned long opaque)
{
	struct sdma_engine *sde = (struct sdma_engine *)opaque;
	u64 statuscsr;

	while (1) {
#ifdef CONFIG_SDMA_VERBOSITY
		dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
			   sde->this_idx, slashstrip(__FILE__), __LINE__,
			   __func__);
#endif
		statuscsr = read_sde_csr(sde, SD(STATUS));
		statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
		if (statuscsr)
			break;
		udelay(10);
	}

	sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
}

static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
{
	return sde->tx_ring[sde->tx_head & sde->sdma_mask];
}

/*
 * flush ring for recovery
 */
static void sdma_flush_descq(struct sdma_engine *sde)
{
	u16 head, tail;
	int progress = 0;
	struct sdma_txreq *txp = get_txhead(sde);

	/* The reason for some of the complexity of this code is that
	 * not all descriptors have corresponding txps.  So, we have to
	 * be able to skip over descs until we wander into the range of
	 * the next txp on the list.
	 */
	head = sde->descq_head & sde->sdma_mask;
	tail = sde->descq_tail & sde->sdma_mask;
	while (head != tail) {
		/* advance head, wrap if needed */
		head = ++sde->descq_head & sde->sdma_mask;
		/* if now past this txp's descs, do the callback */
		if (txp && txp->next_descq_idx == head) {
			/* remove from list */
			sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
			complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
			trace_hfi1_sdma_progress(sde, head, tail, txp);
			txp = get_txhead(sde);
		}
		progress++;
	}
	if (progress)
		sdma_desc_avail(sde, sdma_descq_freecnt(sde));
}

static void sdma_sw_clean_up_task(unsigned long opaque)
{
	struct sdma_engine *sde = (struct sdma_engine *)opaque;
	unsigned long flags;

	spin_lock_irqsave(&sde->tail_lock, flags);
	write_seqlock(&sde->head_lock);

	/*
	 * At this point, the following should always be true:
	 * - We are halted, so no more descriptors are getting retired.
	 * - We are not running, so no one is submitting new work.
	 * - Only we can send the e40_sw_cleaned, so we can't start
	 *   running again until we say so.  So, the active list and
	 *   descq are ours to play with.
	 */

	/*
	 * In the error clean up sequence, software clean must be called
	 * before the hardware clean so we can use the hardware head in
	 * the progress routine.  A hardware clean or SPC unfreeze will
	 * reset the hardware head.
	 *
	 * Process all retired requests.  The progress routine will use the
	 * latest physical hardware head - we are not running so speed does
	 * not matter.
	 */
	sdma_make_progress(sde, 0);

	sdma_flush(sde);

	/*
	 * Reset our notion of head and tail.
	 * Note that the HW registers have been reset via an earlier
	 * clean up.
	 */
	sde->descq_tail = 0;
	sde->descq_head = 0;
	sde->desc_avail = sdma_descq_freecnt(sde);
	*sde->head_dma = 0;

	__sdma_process_event(sde, sdma_event_e40_sw_cleaned);

	write_sequnlock(&sde->head_lock);
	spin_unlock_irqrestore(&sde->tail_lock, flags);
}

static void sdma_sw_tear_down(struct sdma_engine *sde)
{
	struct sdma_state *ss = &sde->state;

	/* Releasing this reference means the state machine has stopped. */
	sdma_put(ss);

	/* stop waiting for all unfreeze events to complete */
	atomic_set(&sde->dd->sdma_unfreeze_count, -1);
	wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
}

static void sdma_start_hw_clean_up(struct sdma_engine *sde)
{
	tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
}

static void sdma_set_state(struct sdma_engine *sde,
			   enum sdma_states next_state)
{
	struct sdma_state *ss = &sde->state;
	const struct sdma_set_state_action *action = sdma_action_table;
	unsigned op = 0;

	trace_hfi1_sdma_state(
		sde,
		sdma_state_names[ss->current_state],
		sdma_state_names[next_state]);

	/* debugging bookkeeping */
	ss->previous_state = ss->current_state;
	ss->previous_op = ss->current_op;
	ss->current_state = next_state;

	if (ss->previous_state != sdma_state_s99_running &&
	    next_state == sdma_state_s99_running)
		sdma_flush(sde);

	if (action[next_state].op_enable)
		op |= SDMA_SENDCTRL_OP_ENABLE;

	if (action[next_state].op_intenable)
		op |= SDMA_SENDCTRL_OP_INTENABLE;

	if (action[next_state].op_halt)
		op |= SDMA_SENDCTRL_OP_HALT;

	if (action[next_state].op_cleanup)
		op |= SDMA_SENDCTRL_OP_CLEANUP;

	if (action[next_state].go_s99_running_tofalse)
		ss->go_s99_running = 0;

	if (action[next_state].go_s99_running_totrue)
		ss->go_s99_running = 1;

	ss->current_op = op;
	sdma_sendctrl(sde, ss->current_op);
}

/**
 * sdma_get_descq_cnt() - called when device probed
 *
 * Return a validated descq count.
 *
 * This is currently only used in the verbs initialization to build the tx
 * list.
 *
 * This will probably be deleted in favor of a more scalable approach to
 * alloc tx's.
 *
 */
u16 sdma_get_descq_cnt(void)
{
	u16 count = sdma_descq_cnt;

	if (!count)
		return SDMA_DESCQ_CNT;
	/* count must be a power of 2 between 64 and 32768, inclusive.
	 * Otherwise return default.
	 */
	if (!is_power_of_2(count))
		return SDMA_DESCQ_CNT;
	if (count < 64 || count > 32768)
		return SDMA_DESCQ_CNT;
	return count;
}

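/*
 * For example (illustrative values): sdma_descq_cnt = 0 or 4096 yields
 * 2048 and 4096 respectively, while 100 (not a power of 2) and 32
 * (below 64) both fall back to the 2048 default.
 */
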
/**
 * sdma_engine_get_vl() - return vl for a given sdma engine
 * @sde: sdma engine
 *
 * This function returns the vl mapped to a given engine, or an error if
 * the mapping can't be found.  The mapping fields are protected by RCU.
 */
int sdma_engine_get_vl(struct sdma_engine *sde)
{
	struct hfi1_devdata *dd = sde->dd;
	struct sdma_vl_map *m;
	u8 vl;

	if (sde->this_idx >= TXE_NUM_SDMA_ENGINES)
		return -EINVAL;

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	if (unlikely(!m)) {
		rcu_read_unlock();
		return -EINVAL;
	}
	vl = m->engine_to_vl[sde->this_idx];
	rcu_read_unlock();

	return vl;
}

/**
 * sdma_select_engine_vl() - select sdma engine
 * @dd: devdata
 * @selector: a spreading factor
 * @vl: this vl
 *
 * This function returns an engine based on the selector and a vl.  The
 * mapping fields are protected by RCU.
 */
struct sdma_engine *sdma_select_engine_vl(
	struct hfi1_devdata *dd,
	u32 selector,
	u8 vl)
{
	struct sdma_vl_map *m;
	struct sdma_map_elem *e;
	struct sdma_engine *rval;

	/* NOTE This should only happen if SC->VL changed after the initial
	 * checks on the QP/AH
	 * Default will return engine 0 below
	 */
	if (vl >= num_vls) {
		rval = NULL;
		goto done;
	}

	rcu_read_lock();
	m = rcu_dereference(dd->sdma_map);
	if (unlikely(!m)) {
		rcu_read_unlock();
		return &dd->per_sdma[0];
	}
	e = m->map[vl & m->mask];
	rval = e->sde[selector & e->mask];
	rcu_read_unlock();

done:
	rval = !rval ? &dd->per_sdma[0] : rval;
	trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
	return rval;
}

813 | ||
814 | /** | |
815 | * sdma_select_engine_sc() - select sdma engine | |
816 | * @dd: devdata | |
817 | * @selector: a spreading factor | |
818 | * @sc5: the 5 bit sc | |
819 | * | |
820 | * | |
821 | * This function returns an engine based on the selector and an sc. | |
822 | */ | |
823 | struct sdma_engine *sdma_select_engine_sc( | |
824 | struct hfi1_devdata *dd, | |
825 | u32 selector, | |
826 | u8 sc5) | |
827 | { | |
828 | u8 vl = sc_to_vlt(dd, sc5); | |
829 | ||
830 | return sdma_select_engine_vl(dd, selector, vl); | |
831 | } | |
832 | ||
struct sdma_rht_map_elem {
	u32 mask;
	u8 ctr;
	struct sdma_engine *sde[0];
};

struct sdma_rht_node {
	unsigned long cpu_id;
	struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED];
	struct rhash_head node;
};

#define NR_CPUS_HINT 192

static const struct rhashtable_params sdma_rht_params = {
	.nelem_hint = NR_CPUS_HINT,
	.head_offset = offsetof(struct sdma_rht_node, node),
	.key_offset = offsetof(struct sdma_rht_node, cpu_id),
	.key_len = sizeof_field(struct sdma_rht_node, cpu_id),
	.max_size = NR_CPUS,
	.min_size = 8,
	.automatic_shrinking = true,
};

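/*
 * The rhashtable above implements the per-process engine affinity: it is
 * keyed by CPU id, and each node carries one sdma_rht_map_elem per VL.
 * Within a map element, ctr is the number of distinct engines assigned
 * and mask is roundup_pow_of_two(ctr) - 1, so an engine can be picked
 * with a simple 'selector & mask' index (duplicate entries pad the
 * array out to the power-of-2 size).
 */
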
/*
 * sdma_select_user_engine() - select sdma engine based on user setup
 * @dd: devdata
 * @selector: a spreading factor
 * @vl: this vl
 *
 * This function returns an sdma engine for a user sdma request.
 * User defined sdma engine affinity setting is honored when applicable,
 * otherwise system default sdma engine mapping is used.  To ensure correct
 * ordering, the mapping from <selector, vl> to sde must remain unchanged.
 */
struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
					    u32 selector, u8 vl)
{
	struct sdma_rht_node *rht_node;
	struct sdma_engine *sde = NULL;
	unsigned long cpu_id;

	/*
	 * To ensure that always the same sdma engine(s) will be
	 * selected make sure the process is pinned to this CPU only.
	 */
	if (current->nr_cpus_allowed != 1)
		goto out;

	cpu_id = smp_processor_id();
	rcu_read_lock();
	rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id,
				     sdma_rht_params);

	if (rht_node && rht_node->map[vl]) {
		struct sdma_rht_map_elem *map = rht_node->map[vl];

		sde = map->sde[selector & map->mask];
	}
	rcu_read_unlock();

	if (sde)
		return sde;

out:
	return sdma_select_engine_vl(dd, selector, vl);
}

static void sdma_populate_sde_map(struct sdma_rht_map_elem *map)
{
	int i;

	for (i = 0; i < roundup_pow_of_two(map->ctr ? : 1) - map->ctr; i++)
		map->sde[map->ctr + i] = map->sde[i];
}

static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map,
				 struct sdma_engine *sde)
{
	unsigned int i, pow;

	/* only need to check the first ctr entries for a match */
	for (i = 0; i < map->ctr; i++) {
		if (map->sde[i] == sde) {
			memmove(&map->sde[i], &map->sde[i + 1],
				(map->ctr - i - 1) * sizeof(map->sde[0]));
			map->ctr--;
			pow = roundup_pow_of_two(map->ctr ? : 1);
			map->mask = pow - 1;
			sdma_populate_sde_map(map);
			break;
		}
	}
}

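/*
 * Worked example (illustrative): with ctr = 3 engines {A, B, C},
 * roundup_pow_of_two(3) = 4, so sdma_populate_sde_map() sets sde[3] = A
 * and mask = 3; 'selector & mask' then lands on {A, B, C, A}.  Removing
 * B via sdma_cleanup_sde_map() shifts C down, drops ctr to 2, and
 * leaves {A, C} with mask = 1.
 */
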
/*
 * Prevents concurrent reads and writes of the sdma engine cpu_mask
 */
static DEFINE_MUTEX(process_to_sde_mutex);

ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
				size_t count)
{
	struct hfi1_devdata *dd = sde->dd;
	cpumask_var_t mask, new_mask;
	unsigned long cpu;
	int ret, vl, sz;
	struct sdma_rht_node *rht_node;

	vl = sdma_engine_get_vl(sde);
	if (unlikely(vl < 0 || vl >= ARRAY_SIZE(rht_node->map)))
		return -EINVAL;

	ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	ret = zalloc_cpumask_var(&new_mask, GFP_KERNEL);
	if (!ret) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}
	ret = cpulist_parse(buf, mask);
	if (ret)
		goto out_free;

	if (!cpumask_subset(mask, cpu_online_mask)) {
		dd_dev_warn(sde->dd, "Invalid CPU mask\n");
		ret = -EINVAL;
		goto out_free;
	}

	sz = sizeof(struct sdma_rht_map_elem) +
			(TXE_NUM_SDMA_ENGINES * sizeof(struct sdma_engine *));

	mutex_lock(&process_to_sde_mutex);

	for_each_cpu(cpu, mask) {
		/* Check if we have this already mapped */
		if (cpumask_test_cpu(cpu, &sde->cpu_mask)) {
			cpumask_set_cpu(cpu, new_mask);
			continue;
		}

		rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
						  sdma_rht_params);
		if (!rht_node) {
			rht_node = kzalloc(sizeof(*rht_node), GFP_KERNEL);
			if (!rht_node) {
				ret = -ENOMEM;
				goto out;
			}

			rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
			if (!rht_node->map[vl]) {
				kfree(rht_node);
				ret = -ENOMEM;
				goto out;
			}
			rht_node->cpu_id = cpu;
			rht_node->map[vl]->mask = 0;
			rht_node->map[vl]->ctr = 1;
			rht_node->map[vl]->sde[0] = sde;

			ret = rhashtable_insert_fast(dd->sdma_rht,
						     &rht_node->node,
						     sdma_rht_params);
			if (ret) {
				kfree(rht_node->map[vl]);
				kfree(rht_node);
				dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n",
					   cpu);
				goto out;
			}

		} else {
			int ctr, pow;

			/* Add new user mappings */
			if (!rht_node->map[vl])
				rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);

			if (!rht_node->map[vl]) {
				ret = -ENOMEM;
				goto out;
			}

			rht_node->map[vl]->ctr++;
			ctr = rht_node->map[vl]->ctr;
			rht_node->map[vl]->sde[ctr - 1] = sde;
			pow = roundup_pow_of_two(ctr);
			rht_node->map[vl]->mask = pow - 1;

			/* Populate the sde map table */
			sdma_populate_sde_map(rht_node->map[vl]);
		}
		cpumask_set_cpu(cpu, new_mask);
	}

	/* Clean up old mappings */
	for_each_cpu(cpu, cpu_online_mask) {
		struct sdma_rht_node *rht_node;

		/* Don't cleanup sdes that are set in the new mask */
		if (cpumask_test_cpu(cpu, mask))
			continue;

		rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
						  sdma_rht_params);
		if (rht_node) {
			bool empty = true;
			int i;

			/* Remove mappings for old sde */
			for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
				if (rht_node->map[i])
					sdma_cleanup_sde_map(rht_node->map[i],
							     sde);

			/* Free empty hash table entries */
			for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
				if (!rht_node->map[i])
					continue;

				if (rht_node->map[i]->ctr) {
					empty = false;
					break;
				}
			}

			if (empty) {
				ret = rhashtable_remove_fast(dd->sdma_rht,
							     &rht_node->node,
							     sdma_rht_params);
				WARN_ON(ret);

				for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
					kfree(rht_node->map[i]);

				kfree(rht_node);
			}
		}
	}

	cpumask_copy(&sde->cpu_mask, new_mask);
out:
	mutex_unlock(&process_to_sde_mutex);
out_free:
	free_cpumask_var(mask);
	free_cpumask_var(new_mask);
	return ret ? : strnlen(buf, PAGE_SIZE);
}

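/*
 * This is the store side of a sysfs attribute: buf is expected to be a
 * cpulist such as "0-3,8" (the exact attribute path is defined outside
 * this file).  Any previously mapped CPU that is absent from the new
 * list has this engine removed from its map, and nodes whose maps
 * become empty are deleted from the hash table.
 */
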
ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf)
{
	mutex_lock(&process_to_sde_mutex);
	if (cpumask_empty(&sde->cpu_mask))
		snprintf(buf, PAGE_SIZE, "%s\n", "empty");
	else
		cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask);
	mutex_unlock(&process_to_sde_mutex);
	return strnlen(buf, PAGE_SIZE);
}

static void sdma_rht_free(void *ptr, void *arg)
{
	struct sdma_rht_node *rht_node = ptr;
	int i;

	for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
		kfree(rht_node->map[i]);

	kfree(rht_node);
}

/**
 * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings
 * @s: seq file
 * @dd: hfi1_devdata
 * @cpuid: cpu id
 *
 * This routine dumps the process to sde mappings per cpu
 */
void sdma_seqfile_dump_cpu_list(struct seq_file *s,
				struct hfi1_devdata *dd,
				unsigned long cpuid)
{
	struct sdma_rht_node *rht_node;
	int i, j;

	rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid,
					  sdma_rht_params);
	if (!rht_node)
		return;

	seq_printf(s, "cpu%3lu: ", cpuid);
	for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
		if (!rht_node->map[i] || !rht_node->map[i]->ctr)
			continue;

		seq_printf(s, " vl%d: [", i);

		for (j = 0; j < rht_node->map[i]->ctr; j++) {
			if (!rht_node->map[i]->sde[j])
				continue;

			if (j > 0)
				seq_puts(s, ",");

			seq_printf(s, " sdma%2d",
				   rht_node->map[i]->sde[j]->this_idx);
		}
		seq_puts(s, " ]");
	}

	seq_puts(s, "\n");
}

/*
 * Free the indicated map struct
 */
static void sdma_map_free(struct sdma_vl_map *m)
{
	int i;

	for (i = 0; m && i < m->actual_vls; i++)
		kfree(m->map[i]);
	kfree(m);
}

/*
 * Handle RCU callback
 */
static void sdma_map_rcu_callback(struct rcu_head *list)
{
	struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);

	sdma_map_free(m);
}

/**
 * sdma_map_init - called when # vls change
 * @dd: hfi1_devdata
 * @port: port number
 * @num_vls: number of vls
 * @vl_engines: per vl engine mapping (optional)
 *
 * This routine changes the mapping based on the number of vls.
 *
 * vl_engines is used to specify a non-uniform vl/engine loading.  NULL
 * implies auto computing the loading and giving each VL a uniform
 * distribution of engines per VL.
 *
 * The auto algorithm computes the sde_per_vl and the number of extra
 * engines.  Any extra engines are added from the last VL on down.
 *
 * rcu locking is used here to control access to the mapping fields.
 *
 * If either the num_vls or num_sdma are non-power of 2, the array sizes
 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
 * up to the next highest power of 2 and the first entry is reused
 * in a round robin fashion.
 *
 * If an error occurs the map change is not done and the mapping is
 * not changed.
 *
 */
int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
{
	int i, j;
	int extra, sde_per_vl;
	int engine = 0;
	u8 lvl_engines[OPA_MAX_VLS];
	struct sdma_vl_map *oldmap, *newmap;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return 0;

	if (!vl_engines) {
		/* truncate divide */
		sde_per_vl = dd->num_sdma / num_vls;
		/* extras */
		extra = dd->num_sdma % num_vls;
		vl_engines = lvl_engines;
		/* add extras from last vl down */
		for (i = num_vls - 1; i >= 0; i--, extra--)
			vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
	}
	/* build new map */
	newmap = kzalloc(
		sizeof(struct sdma_vl_map) +
			roundup_pow_of_two(num_vls) *
			sizeof(struct sdma_map_elem *),
		GFP_KERNEL);
	if (!newmap)
		goto bail;
	newmap->actual_vls = num_vls;
	newmap->vls = roundup_pow_of_two(num_vls);
	newmap->mask = (1 << ilog2(newmap->vls)) - 1;
	/* initialize back-map */
	for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++)
		newmap->engine_to_vl[i] = -1;
	for (i = 0; i < newmap->vls; i++) {
		/* save for wrap around */
		int first_engine = engine;

		if (i < newmap->actual_vls) {
			int sz = roundup_pow_of_two(vl_engines[i]);

			/* only allocate once */
			newmap->map[i] = kzalloc(
				sizeof(struct sdma_map_elem) +
					sz * sizeof(struct sdma_engine *),
				GFP_KERNEL);
			if (!newmap->map[i])
				goto bail;
			newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
			/* assign engines */
			for (j = 0; j < sz; j++) {
				newmap->map[i]->sde[j] =
					&dd->per_sdma[engine];
				if (++engine >= first_engine + vl_engines[i])
					/* wrap back to first engine */
					engine = first_engine;
			}
			/* assign back-map */
			for (j = 0; j < vl_engines[i]; j++)
				newmap->engine_to_vl[first_engine + j] = i;
		} else {
			/* just re-use entry without allocating */
			newmap->map[i] = newmap->map[i % num_vls];
		}
		engine = first_engine + vl_engines[i];
	}
	/* newmap in hand, save old map */
	spin_lock_irq(&dd->sde_map_lock);
	oldmap = rcu_dereference_protected(dd->sdma_map,
					   lockdep_is_held(&dd->sde_map_lock));

	/* publish newmap */
	rcu_assign_pointer(dd->sdma_map, newmap);

	spin_unlock_irq(&dd->sde_map_lock);
	/* success, free any old map after grace period */
	if (oldmap)
		call_rcu(&oldmap->list, sdma_map_rcu_callback);
	return 0;
bail:
	/* free any partial allocation */
	sdma_map_free(newmap);
	return -ENOMEM;
}

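/*
 * Worked example of the auto loading (illustrative numbers): with
 * num_sdma = 12 and num_vls = 8, sde_per_vl = 1 and extra = 4, so VLs
 * 4-7 each get 2 engines and VLs 0-3 each get 1.  num_vls is rounded up
 * to a power of 2 (already 8 here) and each map element's engine array
 * is rounded up likewise, with the assigned engines repeated round
 * robin to fill it.
 */
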
/**
 * sdma_clean() - Clean up allocated memory
 * @dd: struct hfi1_devdata
 * @num_engines: num sdma engines
 *
 * This routine can be called regardless of the success of
 * sdma_init()
 */
void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
{
	size_t i;
	struct sdma_engine *sde;

	if (dd->sdma_pad_dma) {
		dma_free_coherent(&dd->pcidev->dev, SDMA_PAD,
				  (void *)dd->sdma_pad_dma,
				  dd->sdma_pad_phys);
		dd->sdma_pad_dma = NULL;
		dd->sdma_pad_phys = 0;
	}
	if (dd->sdma_heads_dma) {
		dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
				  (void *)dd->sdma_heads_dma,
				  dd->sdma_heads_phys);
		dd->sdma_heads_dma = NULL;
		dd->sdma_heads_phys = 0;
	}
	for (i = 0; dd->per_sdma && i < num_engines; ++i) {
		sde = &dd->per_sdma[i];

		sde->head_dma = NULL;
		sde->head_phys = 0;

		if (sde->descq) {
			dma_free_coherent(
				&dd->pcidev->dev,
				sde->descq_cnt * sizeof(u64[2]),
				sde->descq,
				sde->descq_phys
			);
			sde->descq = NULL;
			sde->descq_phys = 0;
		}
		kvfree(sde->tx_ring);
		sde->tx_ring = NULL;
	}
	spin_lock_irq(&dd->sde_map_lock);
	sdma_map_free(rcu_access_pointer(dd->sdma_map));
	RCU_INIT_POINTER(dd->sdma_map, NULL);
	spin_unlock_irq(&dd->sde_map_lock);
	synchronize_rcu();
	kfree(dd->per_sdma);
	dd->per_sdma = NULL;

	if (dd->sdma_rht) {
		rhashtable_free_and_destroy(dd->sdma_rht, sdma_rht_free, NULL);
		kfree(dd->sdma_rht);
		dd->sdma_rht = NULL;
	}
}

/**
 * sdma_init() - called when device probed
 * @dd: hfi1_devdata
 * @port: port number (currently only zero)
 *
 * Initializes each sde and its csrs.
 * Interrupts are not required to be enabled.
 *
 * Returns:
 * 0 - success, -errno on failure
 */
int sdma_init(struct hfi1_devdata *dd, u8 port)
{
	unsigned this_idx;
	struct sdma_engine *sde;
	struct rhashtable *tmp_sdma_rht;
	u16 descq_cnt;
	void *curr_head;
	struct hfi1_pportdata *ppd = dd->pport + port;
	u32 per_sdma_credits;
	uint idle_cnt = sdma_idle_cnt;
	size_t num_engines = chip_sdma_engines(dd);
	int ret = -ENOMEM;

	if (!HFI1_CAP_IS_KSET(SDMA)) {
		HFI1_CAP_CLEAR(SDMA_AHG);
		return 0;
	}
	if (mod_num_sdma &&
	    /* can't exceed chip support */
	    mod_num_sdma <= chip_sdma_engines(dd) &&
	    /* count must be >= vls */
	    mod_num_sdma >= num_vls)
		num_engines = mod_num_sdma;

	dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
	dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", chip_sdma_engines(dd));
	dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
		    chip_sdma_mem_size(dd));

	per_sdma_credits =
		chip_sdma_mem_size(dd) / (num_engines * SDMA_BLOCK_SIZE);

	/* set up freeze waitqueue */
	init_waitqueue_head(&dd->sdma_unfreeze_wq);
	atomic_set(&dd->sdma_unfreeze_count, 0);

	descq_cnt = sdma_get_descq_cnt();
	dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
		    num_engines, descq_cnt);

	/* alloc memory for array of send engines */
	dd->per_sdma = kcalloc_node(num_engines, sizeof(*dd->per_sdma),
				    GFP_KERNEL, dd->node);
	if (!dd->per_sdma)
		return ret;

	idle_cnt = ns_to_cclock(dd, idle_cnt);
	if (idle_cnt)
		dd->default_desc1 =
			SDMA_DESC1_HEAD_TO_HOST_FLAG;
	else
		dd->default_desc1 =
			SDMA_DESC1_INT_REQ_FLAG;

	if (!sdma_desct_intr)
		sdma_desct_intr = SDMA_DESC_INTR;

	/* Allocate memory for SendDMA descriptor FIFOs */
	for (this_idx = 0; this_idx < num_engines; ++this_idx) {
		sde = &dd->per_sdma[this_idx];
		sde->dd = dd;
		sde->ppd = ppd;
		sde->this_idx = this_idx;
		sde->descq_cnt = descq_cnt;
		sde->desc_avail = sdma_descq_freecnt(sde);
		sde->sdma_shift = ilog2(descq_cnt);
		sde->sdma_mask = (1 << sde->sdma_shift) - 1;

		/* Create a mask specifically for each interrupt source */
		sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES +
					   this_idx);
		sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES +
						this_idx);
		sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES +
					    this_idx);
		/* Create a combined mask to cover all 3 interrupt sources */
		sde->imask = sde->int_mask | sde->progress_mask |
			     sde->idle_mask;

		spin_lock_init(&sde->tail_lock);
		seqlock_init(&sde->head_lock);
		spin_lock_init(&sde->senddmactrl_lock);
		spin_lock_init(&sde->flushlist_lock);
		seqlock_init(&sde->waitlock);
		/* insure there is always a zero bit */
		sde->ahg_bits = 0xfffffffe00000000ULL;

		sdma_set_state(sde, sdma_state_s00_hw_down);

		/* set up reference counting */
		kref_init(&sde->state.kref);
		init_completion(&sde->state.comp);

		INIT_LIST_HEAD(&sde->flushlist);
		INIT_LIST_HEAD(&sde->dmawait);

		sde->tail_csr =
			get_kctxt_csr_addr(dd, this_idx, SD(TAIL));

		tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
			     (unsigned long)sde);

		tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
			     (unsigned long)sde);
		INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
		INIT_WORK(&sde->flush_worker, sdma_field_flush);

		sde->progress_check_head = 0;

		timer_setup(&sde->err_progress_check_timer,
			    sdma_err_progress_check, 0);

		sde->descq = dma_alloc_coherent(&dd->pcidev->dev,
						descq_cnt * sizeof(u64[2]),
						&sde->descq_phys, GFP_KERNEL);
		if (!sde->descq)
			goto bail;
		sde->tx_ring =
			kvzalloc_node(array_size(descq_cnt,
						 sizeof(struct sdma_txreq *)),
				      GFP_KERNEL, dd->node);
		if (!sde->tx_ring)
			goto bail;
	}

	dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
	/* Allocate memory for DMA of head registers to memory */
	dd->sdma_heads_dma = dma_alloc_coherent(&dd->pcidev->dev,
						dd->sdma_heads_size,
						&dd->sdma_heads_phys,
						GFP_KERNEL);
	if (!dd->sdma_heads_dma) {
		dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
		goto bail;
	}

	/* Allocate memory for pad */
	dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
					      &dd->sdma_pad_phys, GFP_KERNEL);
	if (!dd->sdma_pad_dma) {
		dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
		goto bail;
	}

	/* assign each engine to different cacheline and init registers */
	curr_head = (void *)dd->sdma_heads_dma;
	for (this_idx = 0; this_idx < num_engines; ++this_idx) {
		unsigned long phys_offset;

		sde = &dd->per_sdma[this_idx];

		sde->head_dma = curr_head;
		curr_head += L1_CACHE_BYTES;
		phys_offset = (unsigned long)sde->head_dma -
			      (unsigned long)dd->sdma_heads_dma;
		sde->head_phys = dd->sdma_heads_phys + phys_offset;
		init_sdma_regs(sde, per_sdma_credits, idle_cnt);
	}
	dd->flags |= HFI1_HAS_SEND_DMA;
	dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
	dd->num_sdma = num_engines;
	ret = sdma_map_init(dd, port, ppd->vls_operational, NULL);
	if (ret < 0)
		goto bail;

	tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL);
	if (!tmp_sdma_rht) {
		ret = -ENOMEM;
		goto bail;
	}

	ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
	if (ret < 0) {
		kfree(tmp_sdma_rht);
		goto bail;
	}

	dd->sdma_rht = tmp_sdma_rht;

	dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
	return 0;

bail:
	sdma_clean(dd, num_engines);
	return ret;
}

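/*
 * Lifecycle note: sdma_init() runs at probe time with interrupts not yet
 * required, sdma_start() kicks the state machines once interrupts work,
 * and sdma_exit() plus sdma_clean() unwind everything on removal.  The
 * bail: path above relies on sdma_clean() being safe to call no matter
 * how far initialization got.
 */
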
/**
 * sdma_all_running() - called when the link goes up
 * @dd: hfi1_devdata
 *
 * This routine moves all engines to the running state.
 */
void sdma_all_running(struct hfi1_devdata *dd)
{
	struct sdma_engine *sde;
	unsigned int i;

	/* move all engines to running */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e30_go_running);
	}
}

/**
 * sdma_all_idle() - called when the link goes down
 * @dd: hfi1_devdata
 *
 * This routine moves all engines to the idle state.
 */
void sdma_all_idle(struct hfi1_devdata *dd)
{
	struct sdma_engine *sde;
	unsigned int i;

	/* idle all engines */
	for (i = 0; i < dd->num_sdma; ++i) {
		sde = &dd->per_sdma[i];
		sdma_process_event(sde, sdma_event_e70_go_idle);
	}
}

1581 | /** | |
1582 | * sdma_start() - called to kick off state processing for all engines | |
1583 | * @dd: hfi1_devdata | |
1584 | * | |
1585 | * This routine is for kicking off the state processing for all required | |
1586 | * sdma engines. Interrupts need to be working at this point. | |
1587 | * | |
1588 | */ | |
1589 | void sdma_start(struct hfi1_devdata *dd) | |
1590 | { | |
1591 | unsigned i; | |
1592 | struct sdma_engine *sde; | |
1593 | ||
1594 | /* kick off the engines state processing */ | |
1595 | for (i = 0; i < dd->num_sdma; ++i) { | |
1596 | sde = &dd->per_sdma[i]; | |
1597 | sdma_process_event(sde, sdma_event_e10_go_hw_start); | |
1598 | } | |
1599 | } | |
1600 | ||
1601 | /** | |
1602 | * sdma_exit() - used when module is removed | |
1603 | * @dd: hfi1_devdata | |
1604 | */ | |
1605 | void sdma_exit(struct hfi1_devdata *dd) | |
1606 | { | |
1607 | unsigned this_idx; | |
1608 | struct sdma_engine *sde; | |
1609 | ||
1610 | for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma; | |
1611 | ++this_idx) { | |
77241056 MM |
1612 | sde = &dd->per_sdma[this_idx]; |
1613 | if (!list_empty(&sde->dmawait)) | |
1614 | dd_dev_err(dd, "sde %u: dmawait list not empty!\n", | |
17fb4f29 | 1615 | sde->this_idx); |
77241056 MM |
1616 | sdma_process_event(sde, sdma_event_e00_go_hw_down); |
1617 | ||
1618 | del_timer_sync(&sde->err_progress_check_timer); | |
1619 | ||
1620 | /* | |
1621 | * This waits for the state machine to exit so it is not | |
1622 | * necessary to kill the sdma_sw_clean_up_task to make sure | |
1623 | * it is not running. | |
1624 | */ | |
1625 | sdma_finalput(&sde->state); | |
1626 | } | |
77241056 MM |
1627 | } |
1628 | ||
1629 | /* | |
1630 | * unmap the indicated descriptor | |
1631 | */ | |
1632 | static inline void sdma_unmap_desc( | |
1633 | struct hfi1_devdata *dd, | |
1634 | struct sdma_desc *descp) | |
1635 | { | |
1636 | switch (sdma_mapping_type(descp)) { | |
1637 | case SDMA_MAP_SINGLE: | |
1638 | dma_unmap_single( | |
1639 | &dd->pcidev->dev, | |
1640 | sdma_mapping_addr(descp), | |
1641 | sdma_mapping_len(descp), | |
1642 | DMA_TO_DEVICE); | |
1643 | break; | |
1644 | case SDMA_MAP_PAGE: | |
1645 | dma_unmap_page( | |
1646 | &dd->pcidev->dev, | |
1647 | sdma_mapping_addr(descp), | |
1648 | sdma_mapping_len(descp), | |
1649 | DMA_TO_DEVICE); | |
1650 | break; | |
1651 | } | |
1652 | } | |
1653 | ||
1654 | /* | |
1655 | * return the mode as indicated by the first | |
1656 | * descriptor in the tx. | |
1657 | */ | |
1658 | static inline u8 ahg_mode(struct sdma_txreq *tx) | |
1659 | { | |
1660 | return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK) | |
1661 | >> SDMA_DESC1_HEADER_MODE_SHIFT; | |
1662 | } | |
1663 | ||
1664 | /** | |
63df8e09 | 1665 | * __sdma_txclean() - clean tx of mappings, descp *kmalloc's |
77241056 MM |
1666 | * @dd: hfi1_devdata for unmapping |
1667 | * @tx: tx request to clean | |
1668 | * | |
1669 | * This is used in the progress routine to clean the tx or | |
1670 | * by the ULP to toss an in-process tx build. | |
1671 | * | |
1672 | * The code can be called multiple times without issue. | |
1673 | * | |
1674 | */ | |
63df8e09 | 1675 | void __sdma_txclean( |
77241056 MM |
1676 | struct hfi1_devdata *dd, |
1677 | struct sdma_txreq *tx) | |
1678 | { | |
1679 | u16 i; | |
1680 | ||
1681 | if (tx->num_desc) { | |
1682 | u8 skip = 0, mode = ahg_mode(tx); | |
1683 | ||
1684 | /* unmap first */ | |
1685 | sdma_unmap_desc(dd, &tx->descp[0]); | |
1686 | /* determine number of AHG descriptors to skip */ | |
1687 | if (mode > SDMA_AHG_APPLY_UPDATE1) | |
1688 | skip = mode >> 1; | |
1689 | for (i = 1 + skip; i < tx->num_desc; i++) | |
1690 | sdma_unmap_desc(dd, &tx->descp[i]); | |
1691 | tx->num_desc = 0; | |
1692 | } | |
1693 | kfree(tx->coalesce_buf); | |
1694 | tx->coalesce_buf = NULL; | |
1695 | /* kmalloc'ed descp */ | |
1696 | if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) { | |
1697 | tx->desc_limit = ARRAY_SIZE(tx->descs); | |
1698 | kfree(tx->descp); | |
1699 | } | |
1700 | } | |
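/*
 * Worked example (illustrative only): for AHG modes above
 * SDMA_AHG_APPLY_UPDATE1, the first 'skip = mode >> 1' descriptors after
 * descriptor 0 carry inline header-update words rather than DMA
 * mappings, so __sdma_txclean() must not unmap them. For a hypothetical
 * mode of 5, skip = 2 and unmapping resumes at descp[3]:
 *
 *	u8 skip = (mode > SDMA_AHG_APPLY_UPDATE1) ? mode >> 1 : 0;
 *
 *	sdma_unmap_desc(dd, &tx->descp[0]);
 *	for (i = 1 + skip; i < tx->num_desc; i++)
 *		sdma_unmap_desc(dd, &tx->descp[i]);
 */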
1701 | ||
1702 | static inline u16 sdma_gethead(struct sdma_engine *sde) | |
1703 | { | |
1704 | struct hfi1_devdata *dd = sde->dd; | |
1705 | int use_dmahead; | |
1706 | u16 hwhead; | |
1707 | ||
1708 | #ifdef CONFIG_SDMA_VERBOSITY | |
1709 | dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", | |
1710 | sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); | |
1711 | #endif | |
1712 | ||
1713 | retry: | |
1714 | use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) && | |
1715 | (dd->flags & HFI1_HAS_SDMA_TIMEOUT); | |
1716 | hwhead = use_dmahead ? | |
50e5dcbe JJ |
1717 | (u16)le64_to_cpu(*sde->head_dma) : |
1718 | (u16)read_sde_csr(sde, SD(HEAD)); | |
77241056 MM |
1719 | |
1720 | if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) { | |
1721 | u16 cnt; | |
1722 | u16 swtail; | |
1723 | u16 swhead; | |
1724 | int sane; | |
1725 | ||
1726 | swhead = sde->descq_head & sde->sdma_mask; | |
1727 | /* this code is really bad for cache line trading */ | |
6aa7de05 | 1728 | swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; |
77241056 MM |
1729 | cnt = sde->descq_cnt; |
1730 | ||
1731 | if (swhead < swtail) | |
1732 | /* not wrapped */ | |
1733 | sane = (hwhead >= swhead) & (hwhead <= swtail); | |
1734 | else if (swhead > swtail) | |
1735 | /* wrapped around */ | |
1736 | sane = ((hwhead >= swhead) && (hwhead < cnt)) || | |
1737 | (hwhead <= swtail); | |
1738 | else | |
1739 | /* empty */ | |
1740 | sane = (hwhead == swhead); | |
1741 | ||
1742 | if (unlikely(!sane)) { | |
1743 | dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n", | |
17fb4f29 JJ |
1744 | sde->this_idx, |
1745 | use_dmahead ? "dma" : "kreg", | |
1746 | hwhead, swhead, swtail, cnt); | |
77241056 MM |
1747 | if (use_dmahead) { |
1748 | /* try one more time, using csr */ | |
1749 | use_dmahead = 0; | |
1750 | goto retry; | |
1751 | } | |
1752 | /* proceed as if no progress */ | |
1753 | hwhead = swhead; | |
1754 | } | |
1755 | } | |
1756 | return hwhead; | |
1757 | } | |
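/*
 * Sketch of the sanity predicate above, pulled out as a stand-alone
 * helper for illustration (hypothetical name, not in the driver):
 *
 *	static bool hwhead_sane(u16 hwhead, u16 swhead, u16 swtail, u16 cnt)
 *	{
 *		if (swhead < swtail)
 *			return hwhead >= swhead && hwhead <= swtail;
 *		if (swhead > swtail)
 *			return (hwhead >= swhead && hwhead < cnt) ||
 *			       hwhead <= swtail;
 *		return hwhead == swhead;
 *	}
 *
 * The hardware head must always lie inside the region software believes
 * is in flight (wrapped or not); anything else indicates a dropped or
 * corrupt head update, so the caller falls back to the CSR and then to
 * "no progress".
 */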
1758 | ||
1759 | /* | |
1760 | * This is called when there are send DMA descriptors that might be | |
1761 | * available. | |
1762 | * | |
1763 | * This is called with head_lock held. | |
1764 | */ | |
bcad2913 | 1765 | static void sdma_desc_avail(struct sdma_engine *sde, uint avail) |
77241056 | 1766 | { |
34025fb0 | 1767 | struct iowait *wait, *nw, *twait; |
77241056 | 1768 | struct iowait *waits[SDMA_WAIT_BATCH_SIZE]; |
34025fb0 | 1769 | uint i, n = 0, seq, tidx = 0; |
77241056 MM |
1770 | |
1771 | #ifdef CONFIG_SDMA_VERBOSITY | |
1772 | dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx, | |
1773 | slashstrip(__FILE__), __LINE__, __func__); | |
1774 | dd_dev_err(sde->dd, "avail: %u\n", avail); | |
1775 | #endif | |
1776 | ||
1777 | do { | |
9aefcabe | 1778 | seq = read_seqbegin(&sde->waitlock); |
77241056 MM |
1779 | if (!list_empty(&sde->dmawait)) { |
1780 | /* at least one item */ | |
9aefcabe | 1781 | write_seqlock(&sde->waitlock); |
77241056 MM |
1782 | /* Harvest waiters wanting DMA descriptors */ |
1783 | list_for_each_entry_safe( | |
1784 | wait, | |
1785 | nw, | |
1786 | &sde->dmawait, | |
1787 | list) { | |
5da0fc9d | 1788 | u32 num_desc; |
77241056 MM |
1789 | |
1790 | if (!wait->wakeup) | |
1791 | continue; | |
1792 | if (n == ARRAY_SIZE(waits)) | |
1793 | break; | |
34025fb0 | 1794 | iowait_init_priority(wait); |
5da0fc9d | 1795 | num_desc = iowait_get_all_desc(wait); |
77241056 MM |
1796 | if (num_desc > avail) |
1797 | break; | |
1798 | avail -= num_desc; | |
34025fb0 KW |
1799 | /* Find the top-priority wait member */ | |
1800 | if (n) { | |
1801 | twait = waits[tidx]; | |
1802 | tidx = | |
1803 | iowait_priority_update_top(wait, | |
1804 | twait, | |
1805 | n, | |
1806 | tidx); | |
1807 | } | |
77241056 MM |
1808 | list_del_init(&wait->list); |
1809 | waits[n++] = wait; | |
1810 | } | |
9aefcabe | 1811 | write_sequnlock(&sde->waitlock); |
77241056 MM |
1812 | break; |
1813 | } | |
9aefcabe | 1814 | } while (read_seqretry(&sde->waitlock, seq)); |
77241056 | 1815 | |
34025fb0 | 1816 | /* Schedule the top-priority entry first */ |
bcad2913 | 1817 | if (n) |
34025fb0 | 1818 | waits[tidx]->wakeup(waits[tidx], SDMA_AVAIL_REASON); |
bcad2913 | 1819 | |
77241056 | 1820 | for (i = 0; i < n; i++) |
34025fb0 | 1821 | if (i != tidx) |
bcad2913 | 1822 | waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON); |
77241056 MM |
1823 | } |
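/*
 * The locking above is an optimistic-read upgrade on the engine's
 * waitlock: take a seqlock read pass first and acquire the write side
 * only when the wait list is non-empty. A minimal sketch of the idiom
 * (illustrative, with a hypothetical seqlock_t 'lock' and list 'head'):
 *
 *	do {
 *		seq = read_seqbegin(&lock);
 *		if (!list_empty(&head)) {
 *			write_seqlock(&lock);
 *			... harvest and delete entries ...
 *			write_sequnlock(&lock);
 *			break;
 *		}
 *	} while (read_seqretry(&lock, seq));
 *
 * The common empty-list case therefore never takes the write lock.
 */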
1824 | ||
1825 | /* head_lock must be held */ | |
1826 | static void sdma_make_progress(struct sdma_engine *sde, u64 status) | |
1827 | { | |
1828 | struct sdma_txreq *txp = NULL; | |
1829 | int progress = 0; | |
a545f530 | 1830 | u16 hwhead, swhead; |
77241056 MM |
1831 | int idle_check_done = 0; |
1832 | ||
1833 | hwhead = sdma_gethead(sde); | |
1834 | ||
1835 | /* The reason for some of the complexity of this code is that | |
1836 | * not all descriptors have corresponding txps. So, we have to | |
1837 | * be able to skip over descs until we wander into the range of | |
1838 | * the next txp on the list. | |
1839 | */ | |
1840 | ||
1841 | retry: | |
1842 | txp = get_txhead(sde); | |
1843 | swhead = sde->descq_head & sde->sdma_mask; | |
1844 | trace_hfi1_sdma_progress(sde, hwhead, swhead, txp); | |
1845 | while (swhead != hwhead) { | |
1846 | /* advance head, wrap if needed */ | |
1847 | swhead = ++sde->descq_head & sde->sdma_mask; | |
1848 | ||
1849 | /* if now past this txp's descs, do the callback */ | |
1850 | if (txp && txp->next_descq_idx == swhead) { | |
77241056 MM |
1851 | /* remove from list */ |
1852 | sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL; | |
a545f530 | 1853 | complete_tx(sde, txp, SDMA_TXREQ_S_OK); |
77241056 MM |
1854 | /* see if there is another txp */ |
1855 | txp = get_txhead(sde); | |
1856 | } | |
1857 | trace_hfi1_sdma_progress(sde, hwhead, swhead, txp); | |
1858 | progress++; | |
1859 | } | |
1860 | ||
1861 | /* | |
1862 | * The SDMA idle interrupt is not guaranteed to be ordered with respect | |
1864 | * to updates to the dma_head location in host memory. The head | |
1864 | * value read might not be fully up to date. If there are pending | |
1865 | * descriptors and the SDMA idle interrupt fired, then read from the | |
1866 | * CSR SDMA head instead to get the latest value from the hardware. | |
1867 | * The hardware SDMA head should be read at most once in this invocation | |
1868 | * of sdma_make_progress(..), which is ensured by the idle_check_done flag. | |
1869 | */ | |
1870 | if ((status & sde->idle_mask) && !idle_check_done) { | |
a545f530 MM |
1871 | u16 swtail; |
1872 | ||
6aa7de05 | 1873 | swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; |
77241056 MM |
1874 | if (swtail != hwhead) { |
1875 | hwhead = (u16)read_sde_csr(sde, SD(HEAD)); | |
1876 | idle_check_done = 1; | |
1877 | goto retry; | |
1878 | } | |
1879 | } | |
1880 | ||
1881 | sde->last_status = status; | |
1882 | if (progress) | |
1883 | sdma_desc_avail(sde, sdma_descq_freecnt(sde)); | |
1884 | } | |
1885 | ||
1886 | /* | |
1887 | * sdma_engine_interrupt() - interrupt handler for engine | |
1888 | * @sde: sdma engine | |
1889 | * @status: sdma interrupt reason | |
1890 | * | |
1891 | * Status is a mask of the 3 possible interrupts for this engine. It will | |
1892 | * contain bits _only_ for this SDMA engine. It will contain at least one | |
1893 | * bit; it may contain more. | |
1894 | */ | |
1895 | void sdma_engine_interrupt(struct sdma_engine *sde, u64 status) | |
1896 | { | |
1897 | trace_hfi1_sdma_engine_interrupt(sde, status); | |
1898 | write_seqlock(&sde->head_lock); | |
ee947859 | 1899 | sdma_set_desc_cnt(sde, sdma_desct_intr); |
a699c6c2 VM |
1900 | if (status & sde->idle_mask) |
1901 | sde->idle_int_cnt++; | |
1902 | else if (status & sde->progress_mask) | |
1903 | sde->progress_int_cnt++; | |
1904 | else if (status & sde->int_mask) | |
1905 | sde->sdma_int_cnt++; | |
77241056 MM |
1906 | sdma_make_progress(sde, status); |
1907 | write_sequnlock(&sde->head_lock); | |
1908 | } | |
1909 | ||
1910 | /** | |
1911 | * sdma_engine_error() - error handler for engine | |
1912 | * @sde: sdma engine | |
1913 | * @status: sdma interrupt reason | |
1914 | */ | |
1915 | void sdma_engine_error(struct sdma_engine *sde, u64 status) | |
1916 | { | |
1917 | unsigned long flags; | |
1918 | ||
1919 | #ifdef CONFIG_SDMA_VERBOSITY | |
1920 | dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n", | |
1921 | sde->this_idx, | |
1922 | (unsigned long long)status, | |
1923 | sdma_state_names[sde->state.current_state]); | |
1924 | #endif | |
1925 | spin_lock_irqsave(&sde->tail_lock, flags); | |
1926 | write_seqlock(&sde->head_lock); | |
1927 | if (status & ALL_SDMA_ENG_HALT_ERRS) | |
1928 | __sdma_process_event(sde, sdma_event_e60_hw_halted); | |
1929 | if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) { | |
1930 | dd_dev_err(sde->dd, | |
17fb4f29 JJ |
1931 | "SDMA (%u) engine error: 0x%llx state %s\n", |
1932 | sde->this_idx, | |
1933 | (unsigned long long)status, | |
1934 | sdma_state_names[sde->state.current_state]); | |
77241056 MM |
1935 | dump_sdma_state(sde); |
1936 | } | |
1937 | write_sequnlock(&sde->head_lock); | |
1938 | spin_unlock_irqrestore(&sde->tail_lock, flags); | |
1939 | } | |
1940 | ||
1941 | static void sdma_sendctrl(struct sdma_engine *sde, unsigned op) | |
1942 | { | |
1943 | u64 set_senddmactrl = 0; | |
1944 | u64 clr_senddmactrl = 0; | |
1945 | unsigned long flags; | |
1946 | ||
1947 | #ifdef CONFIG_SDMA_VERBOSITY | |
1948 | dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n", | |
1949 | sde->this_idx, | |
1950 | (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0, | |
1951 | (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0, | |
1952 | (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0, | |
1953 | (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0); | |
1954 | #endif | |
1955 | ||
1956 | if (op & SDMA_SENDCTRL_OP_ENABLE) | |
1957 | set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK); | |
1958 | else | |
1959 | clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK); | |
1960 | ||
1961 | if (op & SDMA_SENDCTRL_OP_INTENABLE) | |
1962 | set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK); | |
1963 | else | |
1964 | clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK); | |
1965 | ||
1966 | if (op & SDMA_SENDCTRL_OP_HALT) | |
1967 | set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK); | |
1968 | else | |
1969 | clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK); | |
1970 | ||
1971 | spin_lock_irqsave(&sde->senddmactrl_lock, flags); | |
1972 | ||
1973 | sde->p_senddmactrl |= set_senddmactrl; | |
1974 | sde->p_senddmactrl &= ~clr_senddmactrl; | |
1975 | ||
1976 | if (op & SDMA_SENDCTRL_OP_CLEANUP) | |
1977 | write_sde_csr(sde, SD(CTRL), | |
17fb4f29 JJ |
1978 | sde->p_senddmactrl | |
1979 | SD(CTRL_SDMA_CLEANUP_SMASK)); | |
77241056 MM |
1980 | else |
1981 | write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl); | |
1982 | ||
1983 | spin_unlock_irqrestore(&sde->senddmactrl_lock, flags); | |
1984 | ||
1985 | #ifdef CONFIG_SDMA_VERBOSITY | |
1986 | sdma_dumpstate(sde); | |
1987 | #endif | |
1988 | } | |
1989 | ||
1990 | static void sdma_setlengen(struct sdma_engine *sde) | |
1991 | { | |
1992 | #ifdef CONFIG_SDMA_VERBOSITY | |
1993 | dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", | |
1994 | sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); | |
1995 | #endif | |
1996 | ||
1997 | /* | |
1998 | * Set SendDmaLenGen and clear-then-set the MSB of the generation | |
1999 | * count to enable generation checking and load the internal | |
2000 | * generation counter. | |
2001 | */ | |
2002 | write_sde_csr(sde, SD(LEN_GEN), | |
17fb4f29 | 2003 | (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)); |
77241056 | 2004 | write_sde_csr(sde, SD(LEN_GEN), |
17fb4f29 JJ |
2005 | ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) | |
2006 | (4ULL << SD(LEN_GEN_GENERATION_SHIFT))); | |
77241056 MM |
2007 | } |
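/*
 * Worked example (illustrative): with the default 2048-entry ring, the
 * LEN field written above is 2048 / 64 = 32 units of 64 descriptors.
 * The register is written twice on purpose: first with the generation
 * bits clear, then with the generation MSB set. That clear-then-set
 * sequence is what reloads the internal generation counter and re-arms
 * generation checking, per the comment above.
 */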
2008 | ||
2009 | static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail) | |
2010 | { | |
2011 | /* Commit writes to memory and advance the tail on the chip */ | |
2012 | smp_wmb(); /* see get_txhead() */ | |
2013 | writeq(tail, sde->tail_csr); | |
2014 | } | |
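/*
 * Note (illustrative): the smp_wmb() in sdma_update_tail() orders the
 * descriptor stores into sde->descq ahead of the tail doorbell write,
 * pairing with the matching barrier on the consumer side (see the
 * reference to get_txhead() in the comment). Without it, the engine
 * could fetch a descriptor whose qwords are not yet visible in memory.
 */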
2015 | ||
2016 | /* | |
2017 | * This is called when changing to state s10_hw_start_up_halt_wait as | |
2018 | * a result of send buffer errors or send DMA descriptor errors. | |
2019 | */ | |
2020 | static void sdma_hw_start_up(struct sdma_engine *sde) | |
2021 | { | |
2022 | u64 reg; | |
2023 | ||
2024 | #ifdef CONFIG_SDMA_VERBOSITY | |
2025 | dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", | |
2026 | sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); | |
2027 | #endif | |
2028 | ||
2029 | sdma_setlengen(sde); | |
2030 | sdma_update_tail(sde, 0); /* Set SendDmaTail */ | |
2031 | *sde->head_dma = 0; | |
2032 | ||
2033 | reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) << | |
2034 | SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT); | |
2035 | write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg); | |
2036 | } | |
2037 | ||
77241056 MM |
2038 | /* |
2039 | * set_sdma_integrity | |
2040 | * | |
2041 | * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'. | |
2042 | */ | |
2043 | static void set_sdma_integrity(struct sdma_engine *sde) | |
2044 | { | |
2045 | struct hfi1_devdata *dd = sde->dd; | |
77241056 | 2046 | |
d9ac4555 JP |
2047 | write_sde_csr(sde, SD(CHECK_ENABLE), |
2048 | hfi1_pkt_base_sdma_integrity(dd)); | |
77241056 MM |
2049 | } |
2050 | ||
77241056 MM |
2051 | static void init_sdma_regs( |
2052 | struct sdma_engine *sde, | |
2053 | u32 credits, | |
2054 | uint idle_cnt) | |
2055 | { | |
2056 | u8 opval, opmask; | |
2057 | #ifdef CONFIG_SDMA_VERBOSITY | |
2058 | struct hfi1_devdata *dd = sde->dd; | |
2059 | ||
2060 | dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", | |
2061 | sde->this_idx, slashstrip(__FILE__), __LINE__, __func__); | |
2062 | #endif | |
2063 | ||
2064 | write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys); | |
2065 | sdma_setlengen(sde); | |
2066 | sdma_update_tail(sde, 0); /* Set SendDmaTail */ | |
2067 | write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt); | |
2068 | write_sde_csr(sde, SD(DESC_CNT), 0); | |
2069 | write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys); | |
2070 | write_sde_csr(sde, SD(MEMORY), | |
17fb4f29 JJ |
2071 | ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) | |
2072 | ((u64)(credits * sde->this_idx) << | |
2073 | SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT))); | |
77241056 MM |
2074 | write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull); |
2075 | set_sdma_integrity(sde); | |
2076 | opmask = OPCODE_CHECK_MASK_DISABLED; | |
2077 | opval = OPCODE_CHECK_VAL_DISABLED; | |
2078 | write_sde_csr(sde, SD(CHECK_OPCODE), | |
17fb4f29 JJ |
2079 | (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) | |
2080 | (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT)); | |
77241056 MM |
2081 | } |
2082 | ||
2083 | #ifdef CONFIG_SDMA_VERBOSITY | |
2084 | ||
2085 | #define sdma_dumpstate_helper0(reg) do { \ | |
2086 | csr = read_csr(sde->dd, reg); \ | |
2087 | dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \ | |
2088 | } while (0) | |
2089 | ||
2090 | #define sdma_dumpstate_helper(reg) do { \ | |
2091 | csr = read_sde_csr(sde, reg); \ | |
2092 | dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \ | |
2093 | #reg, sde->this_idx, csr); \ | |
2094 | } while (0) | |
2095 | ||
2096 | #define sdma_dumpstate_helper2(reg) do { \ | |
2097 | csr = read_csr(sde->dd, reg + (8 * i)); \ | |
2098 | dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \ | |
2099 | #reg, i, csr); \ | |
2100 | } while (0) | |
2101 | ||
2102 | void sdma_dumpstate(struct sdma_engine *sde) | |
2103 | { | |
2104 | u64 csr; | |
2105 | unsigned i; | |
2106 | ||
2107 | sdma_dumpstate_helper(SD(CTRL)); | |
2108 | sdma_dumpstate_helper(SD(STATUS)); | |
2109 | sdma_dumpstate_helper0(SD(ERR_STATUS)); | |
2110 | sdma_dumpstate_helper0(SD(ERR_MASK)); | |
2111 | sdma_dumpstate_helper(SD(ENG_ERR_STATUS)); | |
2112 | sdma_dumpstate_helper(SD(ENG_ERR_MASK)); | |
2113 | ||
2114 | for (i = 0; i < CCE_NUM_INT_CSRS; ++i) { | |
6fd8edab | 2115 | sdma_dumpstate_helper2(CCE_INT_STATUS); |
77241056 MM |
2116 | sdma_dumpstate_helper2(CCE_INT_MASK); |
2117 | sdma_dumpstate_helper2(CCE_INT_BLOCKED); | |
2118 | } | |
2119 | ||
2120 | sdma_dumpstate_helper(SD(TAIL)); | |
2121 | sdma_dumpstate_helper(SD(HEAD)); | |
2122 | sdma_dumpstate_helper(SD(PRIORITY_THLD)); | |
6fd8edab | 2123 | sdma_dumpstate_helper(SD(IDLE_CNT)); |
77241056 MM |
2124 | sdma_dumpstate_helper(SD(RELOAD_CNT)); |
2125 | sdma_dumpstate_helper(SD(DESC_CNT)); | |
2126 | sdma_dumpstate_helper(SD(DESC_FETCHED_CNT)); | |
2127 | sdma_dumpstate_helper(SD(MEMORY)); | |
2128 | sdma_dumpstate_helper0(SD(ENGINES)); | |
2129 | sdma_dumpstate_helper0(SD(MEM_SIZE)); | |
2130 | /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */ | |
2131 | sdma_dumpstate_helper(SD(BASE_ADDR)); | |
2132 | sdma_dumpstate_helper(SD(LEN_GEN)); | |
2133 | sdma_dumpstate_helper(SD(HEAD_ADDR)); | |
2134 | sdma_dumpstate_helper(SD(CHECK_ENABLE)); | |
2135 | sdma_dumpstate_helper(SD(CHECK_VL)); | |
2136 | sdma_dumpstate_helper(SD(CHECK_JOB_KEY)); | |
2137 | sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY)); | |
2138 | sdma_dumpstate_helper(SD(CHECK_SLID)); | |
2139 | sdma_dumpstate_helper(SD(CHECK_OPCODE)); | |
2140 | } | |
2141 | #endif | |
2142 | ||
2143 | static void dump_sdma_state(struct sdma_engine *sde) | |
2144 | { | |
77241056 MM |
2145 | struct hw_sdma_desc *descqp; |
2146 | u64 desc[2]; | |
2147 | u64 addr; | |
2148 | u8 gen; | |
2149 | u16 len; | |
2150 | u16 head, tail, cnt; | |
2151 | ||
2152 | head = sde->descq_head & sde->sdma_mask; | |
2153 | tail = sde->descq_tail & sde->sdma_mask; | |
2154 | cnt = sdma_descq_freecnt(sde); | |
77241056 MM |
2155 | |
2156 | dd_dev_err(sde->dd, | |
17fb4f29 JJ |
2157 | "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n", |
2158 | sde->this_idx, head, tail, cnt, | |
2159 | !list_empty(&sde->flushlist)); | |
77241056 MM |
2160 | |
2161 | /* print info for each entry in the descriptor queue */ | |
2162 | while (head != tail) { | |
2163 | char flags[6] = { 'x', 'x', 'x', 'x', 0 }; | |
2164 | ||
2165 | descqp = &sde->descq[head]; | |
2166 | desc[0] = le64_to_cpu(descqp->qw[0]); | |
2167 | desc[1] = le64_to_cpu(descqp->qw[1]); | |
2168 | flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-'; | |
2169 | flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? | |
2170 | 'H' : '-'; | |
2171 | flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-'; | |
2172 | flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-'; | |
2173 | addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT) | |
2174 | & SDMA_DESC0_PHY_ADDR_MASK; | |
2175 | gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT) | |
2176 | & SDMA_DESC1_GENERATION_MASK; | |
2177 | len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT) | |
2178 | & SDMA_DESC0_BYTE_COUNT_MASK; | |
2179 | dd_dev_err(sde->dd, | |
17fb4f29 JJ |
2180 | "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n", |
2181 | head, flags, addr, gen, len); | |
77241056 | 2182 | dd_dev_err(sde->dd, |
17fb4f29 JJ |
2183 | "\tdesc0:0x%016llx desc1 0x%016llx\n", |
2184 | desc[0], desc[1]); | |
77241056 MM |
2185 | if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) |
2186 | dd_dev_err(sde->dd, | |
17fb4f29 JJ |
2187 | "\taidx: %u amode: %u alen: %u\n", |
2188 | (u8)((desc[1] & | |
2189 | SDMA_DESC1_HEADER_INDEX_SMASK) >> | |
2190 | SDMA_DESC1_HEADER_INDEX_SHIFT), | |
2191 | (u8)((desc[1] & | |
2192 | SDMA_DESC1_HEADER_MODE_SMASK) >> | |
2193 | SDMA_DESC1_HEADER_MODE_SHIFT), | |
2194 | (u8)((desc[1] & | |
2195 | SDMA_DESC1_HEADER_DWS_SMASK) >> | |
2196 | SDMA_DESC1_HEADER_DWS_SHIFT)); | |
77241056 MM |
2197 | head++; |
2198 | head &= sde->sdma_mask; | |
2199 | } | |
2200 | } | |
2201 | ||
2202 | #define SDE_FMT \ | |
0a226edd | 2203 | "SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n" |
77241056 MM |
2204 | /** |
2205 | * sdma_seqfile_dump_sde() - debugfs dump of sde | |
2206 | * @s: seq file | |
2207 | * @sde: send dma engine to dump | |
2208 | * | |
2209 | * This routine dumps the sde to the indicated seq file. | |
2210 | */ | |
2211 | void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde) | |
2212 | { | |
2213 | u16 head, tail; | |
2214 | struct hw_sdma_desc *descqp; | |
2215 | u64 desc[2]; | |
2216 | u64 addr; | |
2217 | u8 gen; | |
2218 | u16 len; | |
2219 | ||
2220 | head = sde->descq_head & sde->sdma_mask; | |
6aa7de05 | 2221 | tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; |
77241056 | 2222 | seq_printf(s, SDE_FMT, sde->this_idx, |
17fb4f29 JJ |
2223 | sde->cpu, |
2224 | sdma_state_name(sde->state.current_state), | |
2225 | (unsigned long long)read_sde_csr(sde, SD(CTRL)), | |
2226 | (unsigned long long)read_sde_csr(sde, SD(STATUS)), | |
2227 | (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)), | |
2228 | (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail, | |
2229 | (unsigned long long)read_sde_csr(sde, SD(HEAD)), head, | |
2230 | (unsigned long long)le64_to_cpu(*sde->head_dma), | |
2231 | (unsigned long long)read_sde_csr(sde, SD(MEMORY)), | |
2232 | (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)), | |
2233 | (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)), | |
2234 | (unsigned long long)sde->last_status, | |
2235 | (unsigned long long)sde->ahg_bits, | |
2236 | sde->tx_tail, | |
2237 | sde->tx_head, | |
2238 | sde->descq_tail, | |
2239 | sde->descq_head, | |
77241056 | 2240 | !list_empty(&sde->flushlist), |
17fb4f29 JJ |
2241 | sde->descq_full_count, |
2242 | (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID)); | |
77241056 MM |
2243 | |
2244 | /* print info for each entry in the descriptor queue */ | |
2245 | while (head != tail) { | |
2246 | char flags[6] = { 'x', 'x', 'x', 'x', 0 }; | |
2247 | ||
2248 | descqp = &sde->descq[head]; | |
2249 | desc[0] = le64_to_cpu(descqp->qw[0]); | |
2250 | desc[1] = le64_to_cpu(descqp->qw[1]); | |
2251 | flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-'; | |
2252 | flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ? | |
2253 | 'H' : '-'; | |
2254 | flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-'; | |
2255 | flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-'; | |
2256 | addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT) | |
2257 | & SDMA_DESC0_PHY_ADDR_MASK; | |
2258 | gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT) | |
2259 | & SDMA_DESC1_GENERATION_MASK; | |
2260 | len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT) | |
2261 | & SDMA_DESC0_BYTE_COUNT_MASK; | |
2262 | seq_printf(s, | |
17fb4f29 JJ |
2263 | "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n", |
2264 | head, flags, addr, gen, len); | |
77241056 MM |
2265 | if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) |
2266 | seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n", | |
17fb4f29 JJ |
2267 | (u8)((desc[1] & |
2268 | SDMA_DESC1_HEADER_INDEX_SMASK) >> | |
2269 | SDMA_DESC1_HEADER_INDEX_SHIFT), | |
2270 | (u8)((desc[1] & | |
2271 | SDMA_DESC1_HEADER_MODE_SMASK) >> | |
2272 | SDMA_DESC1_HEADER_MODE_SHIFT)); | |
77241056 MM |
2273 | head = (head + 1) & sde->sdma_mask; |
2274 | } | |
2275 | } | |
2276 | ||
2277 | /* | |
2278 | * add the generation number into | |
2279 | * the qw1 and return | |
2280 | */ | |
2281 | static inline u64 add_gen(struct sdma_engine *sde, u64 qw1) | |
2282 | { | |
2283 | u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3; | |
2284 | ||
2285 | qw1 &= ~SDMA_DESC1_GENERATION_SMASK; | |
2286 | qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK) | |
2287 | << SDMA_DESC1_GENERATION_SHIFT; | |
2288 | return qw1; | |
2289 | } | |
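/*
 * Worked example (illustrative, assuming sdma_shift is log2 of the ring
 * size): with the default 2048-entry ring the free-running tail maps to
 * a 2-bit generation that advances on every wrap:
 *
 *	descq_tail = 0    -> gen = (0    >> 11) & 3 = 0
 *	descq_tail = 2048 -> gen = (2048 >> 11) & 3 = 1
 *	descq_tail = 8192 -> gen = (8192 >> 11) & 3 = 0  (full cycle)
 *
 * The engine checks the 2-bit generation in each descriptor against its
 * internal counter, so stale descriptors left over from a previous wrap
 * are rejected.
 */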
2290 | ||
2291 | /* | |
2292 | * This routine submits the indicated tx | |
2293 | * | |
2294 | * Space has already been guaranteed and | |
2295 | * tail side of ring is locked. | |
2296 | * | |
2297 | * The hardware tail update is done | |
2298 | * in the caller and that is facilitated | |
2299 | * by returning the new tail. | |
2300 | * | |
2301 | * There is special case logic for ahg | |
2302 | * to not add the generation number for | |
2303 | * up to 2 descriptors that follow the | |
2304 | * first descriptor. | |
2305 | * | |
2306 | */ | |
2307 | static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx) | |
2308 | { | |
2309 | int i; | |
2310 | u16 tail; | |
2311 | struct sdma_desc *descp = tx->descp; | |
2312 | u8 skip = 0, mode = ahg_mode(tx); | |
2313 | ||
2314 | tail = sde->descq_tail & sde->sdma_mask; | |
2315 | sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]); | |
2316 | sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1])); | |
2317 | trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1], | |
2318 | tail, &sde->descq[tail]); | |
2319 | tail = ++sde->descq_tail & sde->sdma_mask; | |
2320 | descp++; | |
2321 | if (mode > SDMA_AHG_APPLY_UPDATE1) | |
2322 | skip = mode >> 1; | |
2323 | for (i = 1; i < tx->num_desc; i++, descp++) { | |
2324 | u64 qw1; | |
2325 | ||
2326 | sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]); | |
2327 | if (skip) { | |
2328 | /* edits don't have generation */ | |
2329 | qw1 = descp->qw[1]; | |
2330 | skip--; | |
2331 | } else { | |
2332 | /* replace generation with real one for non-edits */ | |
2333 | qw1 = add_gen(sde, descp->qw[1]); | |
2334 | } | |
2335 | sde->descq[tail].qw[1] = cpu_to_le64(qw1); | |
2336 | trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1, | |
2337 | tail, &sde->descq[tail]); | |
2338 | tail = ++sde->descq_tail & sde->sdma_mask; | |
2339 | } | |
2340 | tx->next_descq_idx = tail; | |
2341 | #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER | |
2342 | tx->sn = sde->tail_sn++; | |
2343 | trace_hfi1_sdma_in_sn(sde, tx->sn); | |
2344 | WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]); | |
2345 | #endif | |
2346 | sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx; | |
2347 | sde->desc_avail -= tx->num_desc; | |
2348 | return tail; | |
2349 | } | |
2350 | ||
2351 | /* | |
2352 | * Check for progress | |
2353 | */ | |
2354 | static int sdma_check_progress( | |
2355 | struct sdma_engine *sde, | |
5da0fc9d | 2356 | struct iowait_work *wait, |
bcad2913 KW |
2357 | struct sdma_txreq *tx, |
2358 | bool pkts_sent) | |
77241056 MM |
2359 | { |
2360 | int ret; | |
2361 | ||
2362 | sde->desc_avail = sdma_descq_freecnt(sde); | |
2363 | if (tx->num_desc <= sde->desc_avail) | |
2364 | return -EAGAIN; | |
2365 | /* pulse the head_lock */ | |
5da0fc9d | 2366 | if (wait && iowait_ioww_to_iow(wait)->sleep) { |
77241056 MM |
2367 | unsigned seq; |
2368 | ||
2369 | seq = raw_seqcount_begin( | |
2370 | (const seqcount_t *)&sde->head_lock.seqcount); | |
5da0fc9d | 2371 | ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent); |
77241056 MM |
2372 | if (ret == -EAGAIN) |
2373 | sde->desc_avail = sdma_descq_freecnt(sde); | |
e490974e | 2374 | } else { |
77241056 | 2375 | ret = -EBUSY; |
e490974e | 2376 | } |
77241056 MM |
2377 | return ret; |
2378 | } | |
2379 | ||
2380 | /** | |
2381 | * sdma_send_txreq() - submit a tx req to ring | |
2382 | * @sde: sdma engine to use | |
5da0fc9d | 2383 | * @wait: SE wait structure to use when full (may be NULL) |
77241056 | 2384 | * @tx: sdma_txreq to submit |
bcad2913 | 2385 | * @pkts_sent: has any packet been sent yet? |
77241056 MM |
2386 | * |
2387 | * The call submits the tx into the ring. If an iowait structure is non-NULL, | |
2388 | * the packet will be queued to the list in wait. | |
2389 | * | |
2390 | * Return: | |
2391 | * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in | |
2392 | * ring (wait == NULL) | |
2393 | * -EIOCBQUEUED - tx queued to iowait, -ECOMM - bad sdma state | |
2394 | */ | |
2395 | int sdma_send_txreq(struct sdma_engine *sde, | |
5da0fc9d | 2396 | struct iowait_work *wait, |
bcad2913 KW |
2397 | struct sdma_txreq *tx, |
2398 | bool pkts_sent) | |
77241056 MM |
2399 | { |
2400 | int ret = 0; | |
2401 | u16 tail; | |
2402 | unsigned long flags; | |
2403 | ||
2404 | /* user should have supplied entire packet */ | |
2405 | if (unlikely(tx->tlen)) | |
2406 | return -EINVAL; | |
5da0fc9d | 2407 | tx->wait = iowait_ioww_to_iow(wait); |
77241056 MM |
2408 | spin_lock_irqsave(&sde->tail_lock, flags); |
2409 | retry: | |
2410 | if (unlikely(!__sdma_running(sde))) | |
2411 | goto unlock_noconn; | |
2412 | if (unlikely(tx->num_desc > sde->desc_avail)) | |
2413 | goto nodesc; | |
2414 | tail = submit_tx(sde, tx); | |
2415 | if (wait) | |
5da0fc9d | 2416 | iowait_sdma_inc(iowait_ioww_to_iow(wait)); |
77241056 MM |
2417 | sdma_update_tail(sde, tail); |
2418 | unlock: | |
2419 | spin_unlock_irqrestore(&sde->tail_lock, flags); | |
2420 | return ret; | |
2421 | unlock_noconn: | |
2422 | if (wait) | |
5da0fc9d | 2423 | iowait_sdma_inc(iowait_ioww_to_iow(wait)); |
77241056 MM |
2424 | tx->next_descq_idx = 0; |
2425 | #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER | |
2426 | tx->sn = sde->tail_sn++; | |
2427 | trace_hfi1_sdma_in_sn(sde, tx->sn); | |
2428 | #endif | |
f4f30031 | 2429 | spin_lock(&sde->flushlist_lock); |
77241056 | 2430 | list_add_tail(&tx->list, &sde->flushlist); |
f4f30031 | 2431 | spin_unlock(&sde->flushlist_lock); |
5da0fc9d | 2432 | iowait_inc_wait_count(wait, tx->num_desc); |
cf131a81 | 2433 | queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); |
77241056 MM |
2434 | ret = -ECOMM; |
2435 | goto unlock; | |
2436 | nodesc: | |
bcad2913 | 2437 | ret = sdma_check_progress(sde, wait, tx, pkts_sent); |
77241056 MM |
2438 | if (ret == -EAGAIN) { |
2439 | ret = 0; | |
2440 | goto retry; | |
2441 | } | |
2442 | sde->descq_full_count++; | |
2443 | goto unlock; | |
2444 | } | |
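/*
 * Typical caller pattern (illustrative sketch; 'wait', 'tx' and
 * fail_tx() stand for hypothetical caller state and cleanup, not driver
 * symbols):
 *
 *	ret = sdma_send_txreq(sde, wait, tx, pkts_sent);
 *	if (ret && ret != -ECOMM && ret != -EIOCBQUEUED)
 *		fail_tx(tx);
 *
 * A return of 0 means the descriptors were posted and completion will
 * arrive through the tx callback; -EIOCBQUEUED means the tx is parked
 * on the iowait and will be resubmitted on wakeup; -ECOMM means the
 * engine is not running and the tx was queued to the flush list. Only
 * -EINVAL and -EBUSY leave the tx with the caller.
 */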
2445 | ||
2446 | /** | |
2447 | * sdma_send_txlist() - submit a list of tx req to ring | |
2448 | * @sde: sdma engine to use | |
5da0fc9d | 2449 | * @wait: SE wait structure to use when full (may be NULL) |
77241056 | 2450 | * @tx_list: list of sdma_txreqs to submit |
3ca633f1 | 2451 | * @count_out: pointer to a u16 which, after return, will contain the total number of |
0b115ef1 HC |
2452 | * sdma_txreqs removed from the tx_list. This will include sdma_txreqs |
2453 | * whose SDMA descriptors are submitted to the ring and the sdma_txreqs | |
2454 | * which are added to SDMA engine flush list if the SDMA engine state is | |
2455 | * not running. | |
77241056 MM |
2456 | * |
2457 | * The call submits the list into the ring. | |
2458 | * | |
2459 | * If the iowait structure is non-NULL and not equal to the iowait list, | |
2460 | * the unprocessed part of the list will be appended to the list in wait. | |
2461 | * | |
2462 | * In all cases, the tx_list will be updated so the head of the tx_list is | |
2463 | * the list of descriptors that have yet to be transmitted. | |
2464 | * | |
2465 | * The intent of this call is to provide a more efficient | |
2466 | * way of submitting multiple packets to SDMA while holding the tail | |
2467 | * side lock. | |
2468 | * | |
2469 | * Return: | |
0b115ef1 | 2470 | * 0 - Success, |
c7cbf2fa | 2471 | * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL) |
77241056 MM |
2472 | * -EIOCBQUEUED - tx queued to iowait, -ECOMM - bad sdma state | |
2473 | */ | |
5da0fc9d | 2474 | int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait, |
3ca633f1 | 2475 | struct list_head *tx_list, u16 *count_out) |
77241056 MM |
2476 | { |
2477 | struct sdma_txreq *tx, *tx_next; | |
2478 | int ret = 0; | |
2479 | unsigned long flags; | |
2480 | u16 tail = INVALID_TAIL; | |
0b115ef1 | 2481 | u32 submit_count = 0, flush_count = 0, total_count; |
77241056 MM |
2482 | |
2483 | spin_lock_irqsave(&sde->tail_lock, flags); | |
2484 | retry: | |
2485 | list_for_each_entry_safe(tx, tx_next, tx_list, list) { | |
5da0fc9d | 2486 | tx->wait = iowait_ioww_to_iow(wait); |
77241056 MM |
2487 | if (unlikely(!__sdma_running(sde))) |
2488 | goto unlock_noconn; | |
2489 | if (unlikely(tx->num_desc > sde->desc_avail)) | |
2490 | goto nodesc; | |
2491 | if (unlikely(tx->tlen)) { | |
2492 | ret = -EINVAL; | |
2493 | goto update_tail; | |
2494 | } | |
2495 | list_del_init(&tx->list); | |
2496 | tail = submit_tx(sde, tx); | |
0b115ef1 | 2497 | submit_count++; |
77241056 | 2498 | if (tail != INVALID_TAIL && |
0b115ef1 | 2499 | (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) { |
77241056 MM |
2500 | sdma_update_tail(sde, tail); |
2501 | tail = INVALID_TAIL; | |
2502 | } | |
2503 | } | |
2504 | update_tail: | |
0b115ef1 | 2505 | total_count = submit_count + flush_count; |
bcad2913 | 2506 | if (wait) { |
5da0fc9d DD |
2507 | iowait_sdma_add(iowait_ioww_to_iow(wait), total_count); |
2508 | iowait_starve_clear(submit_count > 0, | |
2509 | iowait_ioww_to_iow(wait)); | |
bcad2913 | 2510 | } |
77241056 MM |
2511 | if (tail != INVALID_TAIL) |
2512 | sdma_update_tail(sde, tail); | |
2513 | spin_unlock_irqrestore(&sde->tail_lock, flags); | |
0b115ef1 HC |
2514 | *count_out = total_count; |
2515 | return ret; | |
77241056 MM |
2516 | unlock_noconn: |
2517 | spin_lock(&sde->flushlist_lock); | |
2518 | list_for_each_entry_safe(tx, tx_next, tx_list, list) { | |
5da0fc9d | 2519 | tx->wait = iowait_ioww_to_iow(wait); |
77241056 | 2520 | list_del_init(&tx->list); |
77241056 MM |
2521 | tx->next_descq_idx = 0; |
2522 | #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER | |
2523 | tx->sn = sde->tail_sn++; | |
2524 | trace_hfi1_sdma_in_sn(sde, tx->sn); | |
2525 | #endif | |
2526 | list_add_tail(&tx->list, &sde->flushlist); | |
0b115ef1 | 2527 | flush_count++; |
5da0fc9d | 2528 | iowait_inc_wait_count(wait, tx->num_desc); |
77241056 MM |
2529 | } |
2530 | spin_unlock(&sde->flushlist_lock); | |
cf131a81 | 2531 | queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker); |
77241056 MM |
2532 | ret = -ECOMM; |
2533 | goto update_tail; | |
2534 | nodesc: | |
bcad2913 | 2535 | ret = sdma_check_progress(sde, wait, tx, submit_count > 0); |
77241056 MM |
2536 | if (ret == -EAGAIN) { |
2537 | ret = 0; | |
2538 | goto retry; | |
2539 | } | |
2540 | sde->descq_full_count++; | |
2541 | goto update_tail; | |
2542 | } | |
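/*
 * Design note (illustrative): tail updates in sdma_send_txlist() are
 * batched. The doorbell CSR is written only when submit_count wraps the
 * SDMA_TAIL_UPDATE_THRESH mask, plus once at update_tail for any
 * remainder, so a long tx_list costs a handful of MMIO writes rather
 * than one per packet.
 */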
2543 | ||
17fb4f29 | 2544 | static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event) |
77241056 MM |
2545 | { |
2546 | unsigned long flags; | |
2547 | ||
2548 | spin_lock_irqsave(&sde->tail_lock, flags); | |
2549 | write_seqlock(&sde->head_lock); | |
2550 | ||
2551 | __sdma_process_event(sde, event); | |
2552 | ||
2553 | if (sde->state.current_state == sdma_state_s99_running) | |
2554 | sdma_desc_avail(sde, sdma_descq_freecnt(sde)); | |
2555 | ||
2556 | write_sequnlock(&sde->head_lock); | |
2557 | spin_unlock_irqrestore(&sde->tail_lock, flags); | |
2558 | } | |
2559 | ||
2560 | static void __sdma_process_event(struct sdma_engine *sde, | |
17fb4f29 | 2561 | enum sdma_events event) |
77241056 MM |
2562 | { |
2563 | struct sdma_state *ss = &sde->state; | |
2564 | int need_progress = 0; | |
2565 | ||
2566 | /* CONFIG SDMA temporary */ | |
2567 | #ifdef CONFIG_SDMA_VERBOSITY | |
2568 | dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx, | |
2569 | sdma_state_names[ss->current_state], | |
2570 | sdma_event_names[event]); | |
2571 | #endif | |
2572 | ||
2573 | switch (ss->current_state) { | |
2574 | case sdma_state_s00_hw_down: | |
2575 | switch (event) { | |
2576 | case sdma_event_e00_go_hw_down: | |
2577 | break; | |
2578 | case sdma_event_e30_go_running: | |
2579 | /* | |
2580 | * If down, but running requested (usually result | |
2581 | * of link up), then we need to start up. | |
2582 | * This can happen when hw down is requested while | |
2583 | * bringing the link up with traffic active on | |
4d114fdd JJ |
2584 | * the 7220, for example. | |
2585 | */ | |
77241056 | 2586 | ss->go_s99_running = 1; |
6ffeb21f | 2587 | /* fall through -- and start dma engine */ |
77241056 MM |
2588 | case sdma_event_e10_go_hw_start: |
2589 | /* This reference means the state machine is started */ | |
2590 | sdma_get(&sde->state); | |
2591 | sdma_set_state(sde, | |
17fb4f29 | 2592 | sdma_state_s10_hw_start_up_halt_wait); |
77241056 MM |
2593 | break; |
2594 | case sdma_event_e15_hw_halt_done: | |
2595 | break; | |
2596 | case sdma_event_e25_hw_clean_up_done: | |
2597 | break; | |
2598 | case sdma_event_e40_sw_cleaned: | |
2599 | sdma_sw_tear_down(sde); | |
2600 | break; | |
2601 | case sdma_event_e50_hw_cleaned: | |
2602 | break; | |
2603 | case sdma_event_e60_hw_halted: | |
2604 | break; | |
2605 | case sdma_event_e70_go_idle: | |
2606 | break; | |
2607 | case sdma_event_e80_hw_freeze: | |
2608 | break; | |
2609 | case sdma_event_e81_hw_frozen: | |
2610 | break; | |
2611 | case sdma_event_e82_hw_unfreeze: | |
2612 | break; | |
2613 | case sdma_event_e85_link_down: | |
2614 | break; | |
2615 | case sdma_event_e90_sw_halted: | |
2616 | break; | |
2617 | } | |
2618 | break; | |
2619 | ||
2620 | case sdma_state_s10_hw_start_up_halt_wait: | |
2621 | switch (event) { | |
2622 | case sdma_event_e00_go_hw_down: | |
2623 | sdma_set_state(sde, sdma_state_s00_hw_down); | |
2624 | sdma_sw_tear_down(sde); | |
2625 | break; | |
2626 | case sdma_event_e10_go_hw_start: | |
2627 | break; | |
2628 | case sdma_event_e15_hw_halt_done: | |
2629 | sdma_set_state(sde, | |
17fb4f29 | 2630 | sdma_state_s15_hw_start_up_clean_wait); |
77241056 MM |
2631 | sdma_start_hw_clean_up(sde); |
2632 | break; | |
2633 | case sdma_event_e25_hw_clean_up_done: | |
2634 | break; | |
2635 | case sdma_event_e30_go_running: | |
2636 | ss->go_s99_running = 1; | |
2637 | break; | |
2638 | case sdma_event_e40_sw_cleaned: | |
2639 | break; | |
2640 | case sdma_event_e50_hw_cleaned: | |
2641 | break; | |
2642 | case sdma_event_e60_hw_halted: | |
8edf7502 | 2643 | schedule_work(&sde->err_halt_worker); |
77241056 MM |
2644 | break; |
2645 | case sdma_event_e70_go_idle: | |
2646 | ss->go_s99_running = 0; | |
2647 | break; | |
2648 | case sdma_event_e80_hw_freeze: | |
2649 | break; | |
2650 | case sdma_event_e81_hw_frozen: | |
2651 | break; | |
2652 | case sdma_event_e82_hw_unfreeze: | |
2653 | break; | |
2654 | case sdma_event_e85_link_down: | |
2655 | break; | |
2656 | case sdma_event_e90_sw_halted: | |
2657 | break; | |
2658 | } | |
2659 | break; | |
2660 | ||
2661 | case sdma_state_s15_hw_start_up_clean_wait: | |
2662 | switch (event) { | |
2663 | case sdma_event_e00_go_hw_down: | |
2664 | sdma_set_state(sde, sdma_state_s00_hw_down); | |
2665 | sdma_sw_tear_down(sde); | |
2666 | break; | |
2667 | case sdma_event_e10_go_hw_start: | |
2668 | break; | |
2669 | case sdma_event_e15_hw_halt_done: | |
2670 | break; | |
2671 | case sdma_event_e25_hw_clean_up_done: | |
2672 | sdma_hw_start_up(sde); | |
2673 | sdma_set_state(sde, ss->go_s99_running ? | |
2674 | sdma_state_s99_running : | |
2675 | sdma_state_s20_idle); | |
2676 | break; | |
2677 | case sdma_event_e30_go_running: | |
2678 | ss->go_s99_running = 1; | |
2679 | break; | |
2680 | case sdma_event_e40_sw_cleaned: | |
2681 | break; | |
2682 | case sdma_event_e50_hw_cleaned: | |
2683 | break; | |
2684 | case sdma_event_e60_hw_halted: | |
2685 | break; | |
2686 | case sdma_event_e70_go_idle: | |
2687 | ss->go_s99_running = 0; | |
2688 | break; | |
2689 | case sdma_event_e80_hw_freeze: | |
2690 | break; | |
2691 | case sdma_event_e81_hw_frozen: | |
2692 | break; | |
2693 | case sdma_event_e82_hw_unfreeze: | |
2694 | break; | |
2695 | case sdma_event_e85_link_down: | |
2696 | break; | |
2697 | case sdma_event_e90_sw_halted: | |
2698 | break; | |
2699 | } | |
2700 | break; | |
2701 | ||
2702 | case sdma_state_s20_idle: | |
2703 | switch (event) { | |
2704 | case sdma_event_e00_go_hw_down: | |
2705 | sdma_set_state(sde, sdma_state_s00_hw_down); | |
2706 | sdma_sw_tear_down(sde); | |
2707 | break; | |
2708 | case sdma_event_e10_go_hw_start: | |
2709 | break; | |
2710 | case sdma_event_e15_hw_halt_done: | |
2711 | break; | |
2712 | case sdma_event_e25_hw_clean_up_done: | |
2713 | break; | |
2714 | case sdma_event_e30_go_running: | |
2715 | sdma_set_state(sde, sdma_state_s99_running); | |
2716 | ss->go_s99_running = 1; | |
2717 | break; | |
2718 | case sdma_event_e40_sw_cleaned: | |
2719 | break; | |
2720 | case sdma_event_e50_hw_cleaned: | |
2721 | break; | |
2722 | case sdma_event_e60_hw_halted: | |
2723 | sdma_set_state(sde, sdma_state_s50_hw_halt_wait); | |
8edf7502 | 2724 | schedule_work(&sde->err_halt_worker); |
77241056 MM |
2725 | break; |
2726 | case sdma_event_e70_go_idle: | |
2727 | break; | |
2728 | case sdma_event_e85_link_down: | |
2729 | /* fall through */ | |
2730 | case sdma_event_e80_hw_freeze: | |
2731 | sdma_set_state(sde, sdma_state_s80_hw_freeze); | |
2732 | atomic_dec(&sde->dd->sdma_unfreeze_count); | |
2733 | wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); | |
2734 | break; | |
2735 | case sdma_event_e81_hw_frozen: | |
2736 | break; | |
2737 | case sdma_event_e82_hw_unfreeze: | |
2738 | break; | |
2739 | case sdma_event_e90_sw_halted: | |
2740 | break; | |
2741 | } | |
2742 | break; | |
2743 | ||
2744 | case sdma_state_s30_sw_clean_up_wait: | |
2745 | switch (event) { | |
2746 | case sdma_event_e00_go_hw_down: | |
2747 | sdma_set_state(sde, sdma_state_s00_hw_down); | |
2748 | break; | |
2749 | case sdma_event_e10_go_hw_start: | |
2750 | break; | |
2751 | case sdma_event_e15_hw_halt_done: | |
2752 | break; | |
2753 | case sdma_event_e25_hw_clean_up_done: | |
2754 | break; | |
2755 | case sdma_event_e30_go_running: | |
2756 | ss->go_s99_running = 1; | |
2757 | break; | |
2758 | case sdma_event_e40_sw_cleaned: | |
2759 | sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait); | |
2760 | sdma_start_hw_clean_up(sde); | |
2761 | break; | |
2762 | case sdma_event_e50_hw_cleaned: | |
2763 | break; | |
2764 | case sdma_event_e60_hw_halted: | |
2765 | break; | |
2766 | case sdma_event_e70_go_idle: | |
2767 | ss->go_s99_running = 0; | |
2768 | break; | |
2769 | case sdma_event_e80_hw_freeze: | |
2770 | break; | |
2771 | case sdma_event_e81_hw_frozen: | |
2772 | break; | |
2773 | case sdma_event_e82_hw_unfreeze: | |
2774 | break; | |
2775 | case sdma_event_e85_link_down: | |
2776 | ss->go_s99_running = 0; | |
2777 | break; | |
2778 | case sdma_event_e90_sw_halted: | |
2779 | break; | |
2780 | } | |
2781 | break; | |
2782 | ||
2783 | case sdma_state_s40_hw_clean_up_wait: | |
2784 | switch (event) { | |
2785 | case sdma_event_e00_go_hw_down: | |
2786 | sdma_set_state(sde, sdma_state_s00_hw_down); | |
8edf7502 | 2787 | tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); |
77241056 MM |
2788 | break; |
2789 | case sdma_event_e10_go_hw_start: | |
2790 | break; | |
2791 | case sdma_event_e15_hw_halt_done: | |
2792 | break; | |
2793 | case sdma_event_e25_hw_clean_up_done: | |
2794 | sdma_hw_start_up(sde); | |
2795 | sdma_set_state(sde, ss->go_s99_running ? | |
2796 | sdma_state_s99_running : | |
2797 | sdma_state_s20_idle); | |
2798 | break; | |
2799 | case sdma_event_e30_go_running: | |
2800 | ss->go_s99_running = 1; | |
2801 | break; | |
2802 | case sdma_event_e40_sw_cleaned: | |
2803 | break; | |
2804 | case sdma_event_e50_hw_cleaned: | |
2805 | break; | |
2806 | case sdma_event_e60_hw_halted: | |
2807 | break; | |
2808 | case sdma_event_e70_go_idle: | |
2809 | ss->go_s99_running = 0; | |
2810 | break; | |
2811 | case sdma_event_e80_hw_freeze: | |
2812 | break; | |
2813 | case sdma_event_e81_hw_frozen: | |
2814 | break; | |
2815 | case sdma_event_e82_hw_unfreeze: | |
2816 | break; | |
2817 | case sdma_event_e85_link_down: | |
2818 | ss->go_s99_running = 0; | |
2819 | break; | |
2820 | case sdma_event_e90_sw_halted: | |
2821 | break; | |
2822 | } | |
2823 | break; | |
2824 | ||
2825 | case sdma_state_s50_hw_halt_wait: | |
2826 | switch (event) { | |
2827 | case sdma_event_e00_go_hw_down: | |
2828 | sdma_set_state(sde, sdma_state_s00_hw_down); | |
8edf7502 | 2829 | tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); |
77241056 MM |
2830 | break; |
2831 | case sdma_event_e10_go_hw_start: | |
2832 | break; | |
2833 | case sdma_event_e15_hw_halt_done: | |
2834 | sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); | |
8edf7502 | 2835 | tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); |
77241056 MM |
2836 | break; |
2837 | case sdma_event_e25_hw_clean_up_done: | |
2838 | break; | |
2839 | case sdma_event_e30_go_running: | |
2840 | ss->go_s99_running = 1; | |
2841 | break; | |
2842 | case sdma_event_e40_sw_cleaned: | |
2843 | break; | |
2844 | case sdma_event_e50_hw_cleaned: | |
2845 | break; | |
2846 | case sdma_event_e60_hw_halted: | |
8edf7502 | 2847 | schedule_work(&sde->err_halt_worker); |
77241056 MM |
2848 | break; |
2849 | case sdma_event_e70_go_idle: | |
2850 | ss->go_s99_running = 0; | |
2851 | break; | |
2852 | case sdma_event_e80_hw_freeze: | |
2853 | break; | |
2854 | case sdma_event_e81_hw_frozen: | |
2855 | break; | |
2856 | case sdma_event_e82_hw_unfreeze: | |
2857 | break; | |
2858 | case sdma_event_e85_link_down: | |
2859 | ss->go_s99_running = 0; | |
2860 | break; | |
2861 | case sdma_event_e90_sw_halted: | |
2862 | break; | |
2863 | } | |
2864 | break; | |
2865 | ||
2866 | case sdma_state_s60_idle_halt_wait: | |
2867 | switch (event) { | |
2868 | case sdma_event_e00_go_hw_down: | |
2869 | sdma_set_state(sde, sdma_state_s00_hw_down); | |
8edf7502 | 2870 | tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); |
77241056 MM |
2871 | break; |
2872 | case sdma_event_e10_go_hw_start: | |
2873 | break; | |
2874 | case sdma_event_e15_hw_halt_done: | |
2875 | sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait); | |
8edf7502 | 2876 | tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); |
77241056 MM |
2877 | break; |
2878 | case sdma_event_e25_hw_clean_up_done: | |
2879 | break; | |
2880 | case sdma_event_e30_go_running: | |
2881 | ss->go_s99_running = 1; | |
2882 | break; | |
2883 | case sdma_event_e40_sw_cleaned: | |
2884 | break; | |
2885 | case sdma_event_e50_hw_cleaned: | |
2886 | break; | |
2887 | case sdma_event_e60_hw_halted: | |
8edf7502 | 2888 | schedule_work(&sde->err_halt_worker); |
77241056 MM |
2889 | break; |
2890 | case sdma_event_e70_go_idle: | |
2891 | ss->go_s99_running = 0; | |
2892 | break; | |
2893 | case sdma_event_e80_hw_freeze: | |
2894 | break; | |
2895 | case sdma_event_e81_hw_frozen: | |
2896 | break; | |
2897 | case sdma_event_e82_hw_unfreeze: | |
2898 | break; | |
2899 | case sdma_event_e85_link_down: | |
2900 | break; | |
2901 | case sdma_event_e90_sw_halted: | |
2902 | break; | |
2903 | } | |
2904 | break; | |
2905 | ||
2906 | case sdma_state_s80_hw_freeze: | |
2907 | switch (event) { | |
2908 | case sdma_event_e00_go_hw_down: | |
2909 | sdma_set_state(sde, sdma_state_s00_hw_down); | |
8edf7502 | 2910 | tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); |
77241056 MM |
2911 | break; |
2912 | case sdma_event_e10_go_hw_start: | |
2913 | break; | |
2914 | case sdma_event_e15_hw_halt_done: | |
2915 | break; | |
2916 | case sdma_event_e25_hw_clean_up_done: | |
2917 | break; | |
2918 | case sdma_event_e30_go_running: | |
2919 | ss->go_s99_running = 1; | |
2920 | break; | |
2921 | case sdma_event_e40_sw_cleaned: | |
2922 | break; | |
2923 | case sdma_event_e50_hw_cleaned: | |
2924 | break; | |
2925 | case sdma_event_e60_hw_halted: | |
2926 | break; | |
2927 | case sdma_event_e70_go_idle: | |
2928 | ss->go_s99_running = 0; | |
2929 | break; | |
2930 | case sdma_event_e80_hw_freeze: | |
2931 | break; | |
2932 | case sdma_event_e81_hw_frozen: | |
2933 | sdma_set_state(sde, sdma_state_s82_freeze_sw_clean); | |
8edf7502 | 2934 | tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); |
77241056 MM |
2935 | break; |
2936 | case sdma_event_e82_hw_unfreeze: | |
2937 | break; | |
2938 | case sdma_event_e85_link_down: | |
2939 | break; | |
2940 | case sdma_event_e90_sw_halted: | |
2941 | break; | |
2942 | } | |
2943 | break; | |
2944 | ||
2945 | case sdma_state_s82_freeze_sw_clean: | |
2946 | switch (event) { | |
2947 | case sdma_event_e00_go_hw_down: | |
2948 | sdma_set_state(sde, sdma_state_s00_hw_down); | |
8edf7502 | 2949 | tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); |
77241056 MM |
2950 | break; |
2951 | case sdma_event_e10_go_hw_start: | |
2952 | break; | |
2953 | case sdma_event_e15_hw_halt_done: | |
2954 | break; | |
2955 | case sdma_event_e25_hw_clean_up_done: | |
2956 | break; | |
2957 | case sdma_event_e30_go_running: | |
2958 | ss->go_s99_running = 1; | |
2959 | break; | |
2960 | case sdma_event_e40_sw_cleaned: | |
2961 | /* notify caller this engine is done cleaning */ | |
2962 | atomic_dec(&sde->dd->sdma_unfreeze_count); | |
2963 | wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); | |
2964 | break; | |
2965 | case sdma_event_e50_hw_cleaned: | |
2966 | break; | |
2967 | case sdma_event_e60_hw_halted: | |
2968 | break; | |
2969 | case sdma_event_e70_go_idle: | |
2970 | ss->go_s99_running = 0; | |
2971 | break; | |
2972 | case sdma_event_e80_hw_freeze: | |
2973 | break; | |
2974 | case sdma_event_e81_hw_frozen: | |
2975 | break; | |
2976 | case sdma_event_e82_hw_unfreeze: | |
2977 | sdma_hw_start_up(sde); | |
2978 | sdma_set_state(sde, ss->go_s99_running ? | |
2979 | sdma_state_s99_running : | |
2980 | sdma_state_s20_idle); | |
2981 | break; | |
2982 | case sdma_event_e85_link_down: | |
2983 | break; | |
2984 | case sdma_event_e90_sw_halted: | |
2985 | break; | |
2986 | } | |
2987 | break; | |
2988 | ||
2989 | case sdma_state_s99_running: | |
2990 | switch (event) { | |
2991 | case sdma_event_e00_go_hw_down: | |
2992 | sdma_set_state(sde, sdma_state_s00_hw_down); | |
8edf7502 | 2993 | tasklet_hi_schedule(&sde->sdma_sw_clean_up_task); |
77241056 MM |
2994 | break; |
2995 | case sdma_event_e10_go_hw_start: | |
2996 | break; | |
2997 | case sdma_event_e15_hw_halt_done: | |
2998 | break; | |
2999 | case sdma_event_e25_hw_clean_up_done: | |
3000 | break; | |
3001 | case sdma_event_e30_go_running: | |
3002 | break; | |
3003 | case sdma_event_e40_sw_cleaned: | |
3004 | break; | |
3005 | case sdma_event_e50_hw_cleaned: | |
3006 | break; | |
3007 | case sdma_event_e60_hw_halted: | |
3008 | need_progress = 1; | |
3009 | sdma_err_progress_check_schedule(sde); | |
6ffeb21f | 3010 | /* fall through */ |
77241056 MM |
3011 | case sdma_event_e90_sw_halted: |
3012 | /* | |
3013 | * SW initiated halt does not perform engines | |
3014 | * progress check | |
3015 | */ | |
3016 | sdma_set_state(sde, sdma_state_s50_hw_halt_wait); | |
8edf7502 | 3017 | schedule_work(&sde->err_halt_worker); |
77241056 MM |
3018 | break; |
3019 | case sdma_event_e70_go_idle: | |
3020 | sdma_set_state(sde, sdma_state_s60_idle_halt_wait); | |
3021 | break; | |
3022 | case sdma_event_e85_link_down: | |
3023 | ss->go_s99_running = 0; | |
3024 | /* fall through */ | |
3025 | case sdma_event_e80_hw_freeze: | |
3026 | sdma_set_state(sde, sdma_state_s80_hw_freeze); | |
3027 | atomic_dec(&sde->dd->sdma_unfreeze_count); | |
3028 | wake_up_interruptible(&sde->dd->sdma_unfreeze_wq); | |
3029 | break; | |
3030 | case sdma_event_e81_hw_frozen: | |
3031 | break; | |
3032 | case sdma_event_e82_hw_unfreeze: | |
3033 | break; | |
3034 | } | |
3035 | break; | |
3036 | } | |
3037 | ||
3038 | ss->last_event = event; | |
3039 | if (need_progress) | |
3040 | sdma_make_progress(sde, 0); | |
3041 | } | |
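/*
 * Shape of the state machine above (illustrative note): every
 * (state, event) pair is enumerated explicitly, with ignored events as
 * empty cases, so each legal transition is visible and anything
 * unhandled is a deliberate no-op rather than an accidental
 * fall-through. The ss->go_s99_running flag records a running request
 * that arrives mid-transition, letting cleanup paths resume to
 * s99_running instead of s20_idle once the hardware is back up.
 */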
3042 | ||
3043 | /* | |
3044 | * _extend_sdma_tx_descs() - helper to extend txreq | |
3045 | * | |
3046 | * This is called once the initial nominal allocation | |
3047 | * of descriptors in the sdma_txreq is exhausted. | |
3048 | * | |
3049 | * The code will bump the allocation up to the max | |
f4d26d81 NV |
3050 | * of MAX_DESC (64) descriptors. There doesn't seem to be |
3051 | * much point in an interim step. The last descriptor | |
3052 | * is reserved for coalesce buffer in order to support | |
3053 | * cases where input packet has >MAX_DESC iovecs. | |
77241056 MM |
3054 | * |
3055 | */ | |
f4d26d81 | 3056 | static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) |
77241056 MM |
3057 | { |
3058 | int i; | |
3059 | ||
f4d26d81 NV |
3060 | /* Handle last descriptor */ |
3061 | if (unlikely((tx->num_desc == (MAX_DESC - 1)))) { | |
3062 | /* if tlen is 0, it is for padding; release the last descriptor */ | |
3063 | if (!tx->tlen) { | |
3064 | tx->desc_limit = MAX_DESC; | |
3065 | } else if (!tx->coalesce_buf) { | |
3066 | /* allocate coalesce buffer with space for padding */ | |
3067 | tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32), | |
3068 | GFP_ATOMIC); | |
3069 | if (!tx->coalesce_buf) | |
a5a9e8cc | 3070 | goto enomem; |
f4d26d81 NV |
3071 | tx->coalesce_idx = 0; |
3072 | } | |
3073 | return 0; | |
3074 | } | |
3075 | ||
3076 | if (unlikely(tx->num_desc == MAX_DESC)) | |
a5a9e8cc | 3077 | goto enomem; |
f4d26d81 | 3078 | |
77241056 MM |
3079 | tx->descp = kmalloc_array( |
3080 | MAX_DESC, | |
3081 | sizeof(struct sdma_desc), | |
3082 | GFP_ATOMIC); | |
3083 | if (!tx->descp) | |
a5a9e8cc | 3084 | goto enomem; |
f4d26d81 NV |
3085 | |
3086 | /* reserve last descriptor for coalescing */ | |
3087 | tx->desc_limit = MAX_DESC - 1; | |
77241056 MM |
3088 | /* copy ones already built */ |
3089 | for (i = 0; i < tx->num_desc; i++) | |
3090 | tx->descp[i] = tx->descs[i]; | |
3091 | return 0; | |
a5a9e8cc | 3092 | enomem: |
63df8e09 | 3093 | __sdma_txclean(dd, tx); |
a5a9e8cc | 3094 | return -ENOMEM; |
77241056 MM |
3095 | } |
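/*
 * Summary sketch (illustrative, derived from the code above) of the
 * extension policy, from the perspective of a caller that has just
 * exhausted the nominal descriptor allocation:
 *
 *	num_desc == MAX_DESC - 1, tlen == 0
 *		-> no extension; the reserved slot is released for padding
 *	num_desc == MAX_DESC - 1, tlen != 0
 *		-> kmalloc a coalesce buffer (tlen + pad) and start copying
 *	num_desc < MAX_DESC - 1
 *		-> kmalloc_array a MAX_DESC descriptor array, copy the
 *		   already-built descriptors over, reserve the last slot
 *
 * On any allocation failure the txreq is cleaned with __sdma_txclean()
 * and -ENOMEM is returned.
 */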
3096 | ||
f4d26d81 NV |
3097 | /* |
3098 | * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors | |
3099 | * | |
3100 | * This is called once the initial nominal allocation of descriptors | |
3101 | * in the sdma_txreq is exhausted. | |
3102 | * | |
3103 | * This function calls _extend_sdma_tx_descs to extend or allocate a | |
3104 | * coalesce buffer. If there is an allocated coalesce buffer, it will | |
3105 | * copy the input packet data into the coalesce buffer. It also adds | |
16733b88 | 3106 | * the coalesce buffer descriptor once the whole packet is received. |
f4d26d81 NV |
3107 | * |
3108 | * Return: | |
3109 | * <0 - error | |
3110 | * 0 - coalescing, don't populate descriptor | |
3111 | * 1 - continue with populating descriptor | |
3112 | */ | |
3113 | int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx, | |
3114 | int type, void *kvaddr, struct page *page, | |
3115 | unsigned long offset, u16 len) | |
3116 | { | |
3117 | int pad_len, rval; | |
3118 | dma_addr_t addr; | |
3119 | ||
3120 | rval = _extend_sdma_tx_descs(dd, tx); | |
3121 | if (rval) { | |
63df8e09 | 3122 | __sdma_txclean(dd, tx); |
f4d26d81 NV |
3123 | return rval; |
3124 | } | |
3125 | ||
3126 | /* If coalesce buffer is allocated, copy data into it */ | |
3127 | if (tx->coalesce_buf) { | |
3128 | if (type == SDMA_MAP_NONE) { | |
63df8e09 | 3129 | __sdma_txclean(dd, tx); |
f4d26d81 NV |
3130 | return -EINVAL; |
3131 | } | |
3132 | ||
3133 | if (type == SDMA_MAP_PAGE) { | |
3134 | kvaddr = kmap(page); | |
3135 | kvaddr += offset; | |
3136 | } else if (WARN_ON(!kvaddr)) { | |
63df8e09 | 3137 | __sdma_txclean(dd, tx); |
f4d26d81 NV |
3138 | return -EINVAL; |
3139 | } | |
3140 | ||
3141 | memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len); | |
3142 | tx->coalesce_idx += len; | |
3143 | if (type == SDMA_MAP_PAGE) | |
3144 | kunmap(page); | |
3145 | ||
3146 | /* If there is more data, return */ | |
3147 | if (tx->tlen - tx->coalesce_idx) | |
3148 | return 0; | |
3149 | ||
3150 | /* Whole packet is received; add any padding */ | |
3151 | pad_len = tx->packet_len & (sizeof(u32) - 1); | |
3152 | if (pad_len) { | |
3153 | pad_len = sizeof(u32) - pad_len; | |
3154 | memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len); | |
3155 | /* padding is taken care of for coalescing case */ | |
3156 | tx->packet_len += pad_len; | |
3157 | tx->tlen += pad_len; | |
3158 | } | |
3159 | ||
3160 | /* dma map the coalesce buffer */ | |
3161 | addr = dma_map_single(&dd->pcidev->dev, | |
3162 | tx->coalesce_buf, | |
3163 | tx->tlen, | |
3164 | DMA_TO_DEVICE); | |
3165 | ||
3166 | if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) { | |
63df8e09 | 3167 | __sdma_txclean(dd, tx); |
f4d26d81 NV |
3168 | return -ENOSPC; |
3169 | } | |
3170 | ||
3171 | /* Add descriptor for coalesce buffer */ | |
3172 | tx->desc_limit = MAX_DESC; | |
3173 | return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, | |
3174 | addr, tx->tlen); | |
3175 | } | |
3176 | ||
3177 | return 1; | |
3178 | } | |
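/*
 * Illustrative caller sketch (assumption, not driver code): consuming
 * the three-way return value documented above when adding a page to a
 * txreq. "page", "offset" and "len" are hypothetical caller state.
 *
 *	rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE, NULL,
 *				      page, offset, len);
 *	if (rval <= 0)
 *		return rval;	// <0 error; 0 data was coalesced
 *
 *	// rval == 1: dma map the page and populate a descriptor as usual
 */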
3179 | ||
77241056 MM |
3180 | /* Update sdes when the lmc changes */ |
3181 | void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid) | |
3182 | { | |
3183 | struct sdma_engine *sde; | |
3184 | int i; | |
3185 | u64 sreg; | |
3186 | ||
3187 | sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) << | |
3188 | SD(CHECK_SLID_MASK_SHIFT)) | | |
3189 | (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) << | |
3190 | SD(CHECK_SLID_VALUE_SHIFT)); | |
3191 | ||
3192 | for (i = 0; i < dd->num_sdma; i++) { | |
3193 | hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x", | |
3194 | i, (u32)sreg); | |
3195 | sde = &dd->per_sdma[i]; | |
3196 | write_sde_csr(sde, SD(CHECK_SLID), sreg); | |
3197 | } | |
3198 | } | |
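/*
 * Worked example (illustrative): with LMC = 2 the caller passes
 * mask = ~0x3ull. For a base lid of 0x1230 the value field is
 * 0x1230 & ~0x3 = 0x1230, so each engine's CHECK_SLID CSR accepts
 * source LIDs 0x1230 through 0x1233 - the base LID plus its
 * 2^LMC path LIDs.
 */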
3199 | ||
3200 | /* tx not dword sized - pad */ | |
3201 | int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) | |
3202 | { | |
3203 | int rval = 0; | |
3204 | ||
f4d26d81 | 3205 | tx->num_desc++; |
77241056 MM |
3206 | if ((unlikely(tx->num_desc == tx->desc_limit))) { |
3207 | rval = _extend_sdma_tx_descs(dd, tx); | |
f4d26d81 | 3208 | if (rval) { |
63df8e09 | 3209 | __sdma_txclean(dd, tx); |
77241056 | 3210 | return rval; |
f4d26d81 | 3211 | } |
77241056 | 3212 | } |
f4d26d81 | 3213 | /* finish the one just added */ |
77241056 MM |
3214 | make_tx_sdma_desc( |
3215 | tx, | |
3216 | SDMA_MAP_NONE, | |
3217 | dd->sdma_pad_phys, | |
3218 | sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1))); | |
3219 | _sdma_close_tx(dd, tx); | |
3220 | return rval; | |
3221 | } | |
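/*
 * Worked example (illustrative): this path only runs when packet_len
 * is not a dword multiple (see the comment above), so the pad is 1-3
 * bytes. For packet_len = 7: 7 & 3 = 3 and the pad descriptor covers
 * 4 - 3 = 1 byte, sourced from the pre-mapped buffer at
 * dd->sdma_pad_phys, bringing the transfer to a 4-byte boundary.
 */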
3222 | ||
3223 | /* | |
3224 | * Add ahg to the sdma_txreq | |
3225 | * | |
3226 | * The logic will consume up to 3 | |
3227 | * descriptors at the beginning of | |
3228 | * sdma_txreq. | |
3229 | */ | |
3230 | void _sdma_txreq_ahgadd( | |
3231 | struct sdma_txreq *tx, | |
3232 | u8 num_ahg, | |
3233 | u8 ahg_entry, | |
3234 | u32 *ahg, | |
3235 | u8 ahg_hlen) | |
3236 | { | |
3237 | u32 i, shift = 0, desc = 0; | |
3238 | u8 mode; | |
3239 | ||
3240 | WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4); | |
3241 | /* compute mode */ | |
3242 | if (num_ahg == 1) | |
3243 | mode = SDMA_AHG_APPLY_UPDATE1; | |
3244 | else if (num_ahg <= 5) | |
3245 | mode = SDMA_AHG_APPLY_UPDATE2; | |
3246 | else | |
3247 | mode = SDMA_AHG_APPLY_UPDATE3; | |
3248 | tx->num_desc++; | |
3249 | /* initialize the consumed descriptors to zero */ | |
3250 | switch (mode) { | |
3251 | case SDMA_AHG_APPLY_UPDATE3: | |
3252 | tx->num_desc++; | |
3253 | tx->descs[2].qw[0] = 0; | |
3254 | tx->descs[2].qw[1] = 0; | |
3255 | /* FALLTHROUGH */ | |
3256 | case SDMA_AHG_APPLY_UPDATE2: | |
3257 | tx->num_desc++; | |
3258 | tx->descs[1].qw[0] = 0; | |
3259 | tx->descs[1].qw[1] = 0; | |
3260 | break; | |
3261 | } | |
3262 | ahg_hlen >>= 2; | |
3263 | tx->descs[0].qw[1] |= | |
3264 | (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK) | |
3265 | << SDMA_DESC1_HEADER_INDEX_SHIFT) | | |
3266 | (((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK) | |
3267 | << SDMA_DESC1_HEADER_DWS_SHIFT) | | |
3268 | (((u64)mode & SDMA_DESC1_HEADER_MODE_MASK) | |
3269 | << SDMA_DESC1_HEADER_MODE_SHIFT) | | |
3270 | (((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK) | |
3271 | << SDMA_DESC1_HEADER_UPDATE1_SHIFT); | |
3272 | for (i = 0; i < (num_ahg - 1); i++) { | |
3273 | if (!shift && !(i & 2)) | |
3274 | desc++; | |
3275 | tx->descs[desc].qw[!!(i & 2)] |= | |
3276 | (((u64)ahg[i + 1]) | |
3277 | << shift); | |
3278 | shift = (shift + 32) & 63; | |
3279 | } | |
3280 | } | |
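/*
 * Illustrative summary (derived from the code above) of how the AHG
 * update words are packed:
 *
 *	num_ahg == 1     -> SDMA_AHG_APPLY_UPDATE1, 1 descriptor
 *	num_ahg in 2..5  -> SDMA_AHG_APPLY_UPDATE2, 2 descriptors
 *	num_ahg in 6..9  -> SDMA_AHG_APPLY_UPDATE3, 3 descriptors
 *
 * ahg[0] is folded into descs[0].qw[1] along with the entry, header
 * length and mode fields; ahg[1..8] are packed two per qword into
 * descs[1] and descs[2], which is why the shift alternates between
 * 0 and 32.
 */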
3281 | ||
3282 | /** | |
3283 | * sdma_ahg_alloc - allocate an AHG entry | |
3284 | * @sde: engine to allocate from | |
3285 | * | |
3286 | * Return: | |
3287 | * 0-31 when successful, -EINVAL if the engine is invalid, | |
3288 | * -EOPNOTSUPP if AHG is not enabled, -ENOSPC if an entry is not available | |
3289 | */ | |
3290 | int sdma_ahg_alloc(struct sdma_engine *sde) | |
3291 | { | |
3292 | int nr; | |
3293 | int oldbit; | |
3294 | ||
3295 | if (!sde) { | |
3296 | trace_hfi1_ahg_allocate(sde, -EINVAL); | |
3297 | return -EINVAL; | |
3298 | } | |
3299 | while (1) { | |
6aa7de05 | 3300 | nr = ffz(READ_ONCE(sde->ahg_bits)); |
77241056 MM |
3301 | if (nr > 31) { |
3302 | trace_hfi1_ahg_allocate(sde, -ENOSPC); | |
3303 | return -ENOSPC; | |
3304 | } | |
3305 | oldbit = test_and_set_bit(nr, &sde->ahg_bits); | |
3306 | if (!oldbit) | |
3307 | break; | |
3308 | cpu_relax(); | |
3309 | } | |
3310 | trace_hfi1_ahg_allocate(sde, nr); | |
3311 | return nr; | |
3312 | } | |
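/*
 * Illustrative usage sketch (assumption, not driver code): pairing the
 * allocator above with sdma_ahg_free() below. The ffz() plus
 * test_and_set_bit() retry loop makes allocation safe against
 * concurrent callers without taking a lock.
 *
 *	int idx = sdma_ahg_alloc(sde);
 *
 *	if (idx < 0)
 *		return idx;	// no engine, or no free entry
 *	// ... program AHG entry "idx" and post descriptors ...
 *	sdma_ahg_free(sde, idx);
 */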
3313 | ||
3314 | /** | |
3315 | * sdma_ahg_free - free an AHG entry | |
3316 | * @sde: engine to return AHG entry | |
3317 | * @ahg_index: index to free | |
3318 | * | |
3319 | * This routine frees the indicated AHG entry. | |
3320 | */ | |
3321 | void sdma_ahg_free(struct sdma_engine *sde, int ahg_index) | |
3322 | { | |
3323 | if (!sde) | |
3324 | return; | |
3325 | trace_hfi1_ahg_deallocate(sde, ahg_index); | |
3326 | if (ahg_index < 0 || ahg_index > 31) | |
3327 | return; | |
3328 | clear_bit(ahg_index, &sde->ahg_bits); | |
3329 | } | |
3330 | ||
3331 | /* | |
3332 | * SPC freeze handling for SDMA engines. Called when the driver knows | |
3333 | * the SPC is going into a freeze but before the freeze is fully | |
3334 | * settled. This is generally called from an error interrupt. | |
3335 | * | |
3336 | * This event will pull the engine out of running so no more entries can be | |
3337 | * added to the engine's queue. | |
3338 | */ | |
3339 | void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down) | |
3340 | { | |
3341 | int i; | |
3342 | enum sdma_events event = link_down ? sdma_event_e85_link_down : | |
3343 | sdma_event_e80_hw_freeze; | |
3344 | ||
3345 | /* set up the wait but do not wait here */ | |
3346 | atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma); | |
3347 | ||
3348 | /* tell all engines to stop running and wait */ | |
3349 | for (i = 0; i < dd->num_sdma; i++) | |
3350 | sdma_process_event(&dd->per_sdma[i], event); | |
3351 | ||
3352 | /* sdma_freeze() will wait for all engines to have stopped */ | |
3353 | } | |
3354 | ||
3355 | /* | |
3356 | * SPC freeze handling for SDMA engines. Called when the driver knows | |
3357 | * the SPC is fully frozen. | |
3358 | */ | |
3359 | void sdma_freeze(struct hfi1_devdata *dd) | |
3360 | { | |
3361 | int i; | |
3362 | int ret; | |
3363 | ||
3364 | /* | |
3365 | * Make sure all engines have moved out of the running state before | |
3366 | * continuing. | |
3367 | */ | |
3368 | ret = wait_event_interruptible(dd->sdma_unfreeze_wq, | |
17fb4f29 JJ |
3369 | atomic_read(&dd->sdma_unfreeze_count) <= |
3370 | 0); | |
77241056 MM |
3371 | /* interrupted, or count went negative (unloading) - just exit */ | |
3372 | if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0) | |
3373 | return; | |
3374 | ||
3375 | /* set up the count for the next wait */ | |
3376 | atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma); | |
3377 | ||
3378 | /* tell all engines that the SPC is frozen, they can start cleaning */ | |
3379 | for (i = 0; i < dd->num_sdma; i++) | |
3380 | sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen); | |
3381 | ||
3382 | /* | |
3383 | * Wait for everyone to finish software clean before exiting. The | |
3384 | * software clean will read engine CSRs, so must be completed before | |
3385 | * the next step, which will clear the engine CSRs. | |
3386 | */ | |
50e5dcbe | 3387 | (void)wait_event_interruptible(dd->sdma_unfreeze_wq, |
77241056 MM |
3388 | atomic_read(&dd->sdma_unfreeze_count) <= 0); |
3389 | /* no need to check results - done no matter what */ | |
3390 | } | |
3391 | ||
3392 | /* | |
3393 | * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen. | |
3394 | * | |
3395 | * The SPC freeze acts like a SDMA halt and a hardware clean combined. All | |
3396 | * that is left is a software clean. We could do it after the SPC is fully | |
3397 | * frozen, but then we'd have to add another state to wait for the unfreeze. | |
3398 | * Instead, just defer the software clean until the unfreeze step. | |
3399 | */ | |
3400 | void sdma_unfreeze(struct hfi1_devdata *dd) | |
3401 | { | |
3402 | int i; | |
3403 | ||
3404 | /* tell all engines to start freeze clean up */ | |
3405 | for (i = 0; i < dd->num_sdma; i++) | |
3406 | sdma_process_event(&dd->per_sdma[i], | |
17fb4f29 | 3407 | sdma_event_e82_hw_unfreeze); |
77241056 MM |
3408 | } |
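/*
 * Illustrative call-order sketch (assumption, not driver code) for the
 * three SPC freeze helpers above:
 *
 *	sdma_freeze_notify(dd, link_down); // e80/e85: engines stop
 *	// ... SPC settles into the freeze ...
 *	sdma_freeze(dd);                   // wait, then e81: hw frozen
 *	// ... SPC is unfrozen ...
 *	sdma_unfreeze(dd);                 // e82: restart engines
 */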
3409 | ||
3410 | /** | |
3411 | * _sdma_engine_progress_schedule() - schedule progress on engine | |
3412 | * @sde: sdma_engine to schedule progress | |
3413 | * | |
3414 | */ | |
3415 | void _sdma_engine_progress_schedule( | |
3416 | struct sdma_engine *sde) | |
3417 | { | |
3418 | trace_hfi1_sdma_engine_progress(sde, sde->progress_mask); | |
3419 | /* assume we have selected a good cpu */ | |
3420 | write_csr(sde->dd, | |
17fb4f29 JJ |
3421 | CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)), |
3422 | sde->progress_mask); | |
77241056 | 3423 | } |
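/*
 * Worked note (illustrative): the CCE interrupt-force CSRs are 8 bytes
 * apart and each covers 64 interrupt sources, so source IS_SDMA_START
 * lands in register (IS_SDMA_START / 64). Writing sde->progress_mask
 * sets this engine's bit there, re-firing its SDMA interrupt so that
 * progress runs in the interrupt handler on the engine's own CPU.
 */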