IB/hfi1: Re-order IRQ cleanup to address driver cleanup race
drivers/infiniband/hw/hfi1/sdma.c
1/*
2 * Copyright(c) 2015, 2016 Intel Corporation.
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * BSD LICENSE
19 *
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
22 * are met:
23 *
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
29 * distribution.
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
33 *
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45 *
46 */
47
48#include <linux/spinlock.h>
49#include <linux/seqlock.h>
50#include <linux/netdevice.h>
51#include <linux/moduleparam.h>
52#include <linux/bitops.h>
53#include <linux/timer.h>
54#include <linux/vmalloc.h>
55#include <linux/highmem.h>
56
57#include "hfi.h"
58#include "common.h"
59#include "qp.h"
60#include "sdma.h"
61#include "iowait.h"
62#include "trace.h"
63
64/* must be a power of 2, >= 64 and <= 32768 */
65#define SDMA_DESCQ_CNT 2048
66#define SDMA_DESC_INTR 64
67#define INVALID_TAIL 0xffff
68
69static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
70module_param(sdma_descq_cnt, uint, S_IRUGO);
71MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
72
73static uint sdma_idle_cnt = 250;
74module_param(sdma_idle_cnt, uint, S_IRUGO);
75MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns, default 250)");
76
77uint mod_num_sdma;
78module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
79MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use");
80
81static uint sdma_desct_intr = SDMA_DESC_INTR;
82module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
83MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before interrupt");
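/*
 * These parameters are read once at driver load. A rough, illustrative
 * example (assuming the module is loaded under the name "hfi1"):
 *
 *   modprobe hfi1 num_sdma=8 sdma_descq_cnt=4096 desct_intr=128
 *
 * sdma_descq_cnt is only honored when it is a power of 2 in [64, 32768];
 * otherwise sdma_get_descq_cnt() below falls back to the 2048 default.
 */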
84
85#define SDMA_WAIT_BATCH_SIZE 20
86/* max wait time for a SDMA engine to indicate it has halted */
87#define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
88/* all SDMA engine errors that cause a halt */
89
90#define SD(name) SEND_DMA_##name
91#define ALL_SDMA_ENG_HALT_ERRS \
92 (SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
93 | SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
94 | SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
95 | SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
96 | SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
97 | SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
98 | SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
99 | SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
100 | SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
101 | SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
102 | SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
103 | SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
104 | SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
105 | SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
106 | SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
107 | SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
108 | SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
109 | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
110
111/* sdma_sendctrl operations */
112#define SDMA_SENDCTRL_OP_ENABLE BIT(0)
113#define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
114#define SDMA_SENDCTRL_OP_HALT BIT(2)
115#define SDMA_SENDCTRL_OP_CLEANUP BIT(3)
116
117/* handle long defines */
118#define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
119SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
120#define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
121SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
122
123static const char * const sdma_state_names[] = {
124 [sdma_state_s00_hw_down] = "s00_HwDown",
125 [sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait",
126 [sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
127 [sdma_state_s20_idle] = "s20_Idle",
128 [sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
129 [sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
130 [sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
131 [sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait",
132 [sdma_state_s80_hw_freeze] = "s80_HwFreeze",
133 [sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean",
134 [sdma_state_s99_running] = "s99_Running",
135};
136
137#ifdef CONFIG_SDMA_VERBOSITY
138static const char * const sdma_event_names[] = {
139 [sdma_event_e00_go_hw_down] = "e00_GoHwDown",
140 [sdma_event_e10_go_hw_start] = "e10_GoHwStart",
141 [sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
142 [sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
143 [sdma_event_e30_go_running] = "e30_GoRunning",
144 [sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
145 [sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
146 [sdma_event_e60_hw_halted] = "e60_HwHalted",
147 [sdma_event_e70_go_idle] = "e70_GoIdle",
148 [sdma_event_e80_hw_freeze] = "e80_HwFreeze",
149 [sdma_event_e81_hw_frozen] = "e81_HwFrozen",
150 [sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze",
151 [sdma_event_e85_link_down] = "e85_LinkDown",
152 [sdma_event_e90_sw_halted] = "e90_SwHalted",
153};
154#endif
155
156static const struct sdma_set_state_action sdma_action_table[] = {
157 [sdma_state_s00_hw_down] = {
158 .go_s99_running_tofalse = 1,
159 .op_enable = 0,
160 .op_intenable = 0,
161 .op_halt = 0,
162 .op_cleanup = 0,
163 },
164 [sdma_state_s10_hw_start_up_halt_wait] = {
165 .op_enable = 0,
166 .op_intenable = 0,
167 .op_halt = 1,
168 .op_cleanup = 0,
169 },
170 [sdma_state_s15_hw_start_up_clean_wait] = {
171 .op_enable = 0,
172 .op_intenable = 1,
173 .op_halt = 0,
174 .op_cleanup = 1,
175 },
176 [sdma_state_s20_idle] = {
177 .op_enable = 0,
178 .op_intenable = 1,
179 .op_halt = 0,
180 .op_cleanup = 0,
181 },
182 [sdma_state_s30_sw_clean_up_wait] = {
183 .op_enable = 0,
184 .op_intenable = 0,
185 .op_halt = 0,
186 .op_cleanup = 0,
187 },
188 [sdma_state_s40_hw_clean_up_wait] = {
189 .op_enable = 0,
190 .op_intenable = 0,
191 .op_halt = 0,
192 .op_cleanup = 1,
193 },
194 [sdma_state_s50_hw_halt_wait] = {
195 .op_enable = 0,
196 .op_intenable = 0,
197 .op_halt = 0,
198 .op_cleanup = 0,
199 },
200 [sdma_state_s60_idle_halt_wait] = {
201 .go_s99_running_tofalse = 1,
202 .op_enable = 0,
203 .op_intenable = 0,
204 .op_halt = 1,
205 .op_cleanup = 0,
206 },
207 [sdma_state_s80_hw_freeze] = {
208 .op_enable = 0,
209 .op_intenable = 0,
210 .op_halt = 0,
211 .op_cleanup = 0,
212 },
213 [sdma_state_s82_freeze_sw_clean] = {
214 .op_enable = 0,
215 .op_intenable = 0,
216 .op_halt = 0,
217 .op_cleanup = 0,
218 },
219 [sdma_state_s99_running] = {
220 .op_enable = 1,
221 .op_intenable = 1,
222 .op_halt = 0,
223 .op_cleanup = 0,
224 .go_s99_running_totrue = 1,
225 },
226};
227
228#define SDMA_TAIL_UPDATE_THRESH 0x1F
229
230/* declare all statics here rather than keep sorting */
231static void sdma_complete(struct kref *);
232static void sdma_finalput(struct sdma_state *);
233static void sdma_get(struct sdma_state *);
234static void sdma_hw_clean_up_task(unsigned long);
235static void sdma_put(struct sdma_state *);
236static void sdma_set_state(struct sdma_engine *, enum sdma_states);
237static void sdma_start_hw_clean_up(struct sdma_engine *);
238static void sdma_sw_clean_up_task(unsigned long);
239static void sdma_sendctrl(struct sdma_engine *, unsigned);
240static void init_sdma_regs(struct sdma_engine *, u32, uint);
241static void sdma_process_event(
242 struct sdma_engine *sde,
243 enum sdma_events event);
244static void __sdma_process_event(
245 struct sdma_engine *sde,
246 enum sdma_events event);
247static void dump_sdma_state(struct sdma_engine *sde);
248static void sdma_make_progress(struct sdma_engine *sde, u64 status);
249static void sdma_desc_avail(struct sdma_engine *sde, uint avail);
250static void sdma_flush_descq(struct sdma_engine *sde);
251
252/**
253 * sdma_state_name() - return state string from enum
254 * @state: state
255 */
256static const char *sdma_state_name(enum sdma_states state)
257{
258 return sdma_state_names[state];
259}
260
261static void sdma_get(struct sdma_state *ss)
262{
263 kref_get(&ss->kref);
264}
265
266static void sdma_complete(struct kref *kref)
267{
268 struct sdma_state *ss =
269 container_of(kref, struct sdma_state, kref);
270
271 complete(&ss->comp);
272}
273
274static void sdma_put(struct sdma_state *ss)
275{
276 kref_put(&ss->kref, sdma_complete);
277}
278
279static void sdma_finalput(struct sdma_state *ss)
280{
281 sdma_put(ss);
282 wait_for_completion(&ss->comp);
283}
284
285static inline void write_sde_csr(
286 struct sdma_engine *sde,
287 u32 offset0,
288 u64 value)
289{
290 write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
291}
292
293static inline u64 read_sde_csr(
294 struct sdma_engine *sde,
295 u32 offset0)
296{
297 return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
298}
299
300/*
301 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
302 * sdma engine 'sde' to drop to 0.
303 */
304static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
305 int pause)
306{
307 u64 off = 8 * sde->this_idx;
308 struct hfi1_devdata *dd = sde->dd;
309 int lcnt = 0;
310 u64 reg_prev;
311 u64 reg = 0;
312
313 while (1) {
314 reg_prev = reg;
315 reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);
316
317 reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
318 reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
319 if (reg == 0)
320 break;
321 /* counter is reset if occupancy count changes */
322 if (reg != reg_prev)
323 lcnt = 0;
324 if (lcnt++ > 500) {
325 /* timed out - bounce the link */
326 dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
327 __func__, sde->this_idx, (u32)reg);
328 queue_work(dd->pport->link_wq,
329 &dd->pport->link_bounce_work);
330 break;
331 }
332 udelay(1);
333 }
334}
335
336/*
337 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
338 * and pause for credit return.
339 */
340void sdma_wait(struct hfi1_devdata *dd)
341{
342 int i;
343
344 for (i = 0; i < dd->num_sdma; i++) {
345 struct sdma_engine *sde = &dd->per_sdma[i];
346
347 sdma_wait_for_packet_egress(sde, 0);
348 }
349}
350
351static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
352{
353 u64 reg;
354
355 if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
356 return;
357 reg = cnt;
358 reg &= SD(DESC_CNT_CNT_MASK);
359 reg <<= SD(DESC_CNT_CNT_SHIFT);
360 write_sde_csr(sde, SD(DESC_CNT), reg);
361}
362
363static inline void complete_tx(struct sdma_engine *sde,
364 struct sdma_txreq *tx,
365 int res)
366{
367 /* protect against complete modifying */
368 struct iowait *wait = tx->wait;
369 callback_t complete = tx->complete;
370
371#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
372 trace_hfi1_sdma_out_sn(sde, tx->sn);
373 if (WARN_ON_ONCE(sde->head_sn != tx->sn))
374 dd_dev_err(sde->dd, "expected %llu got %llu\n",
375 sde->head_sn, tx->sn);
376 sde->head_sn++;
377#endif
378 __sdma_txclean(sde->dd, tx);
379 if (complete)
380 (*complete)(tx, res);
381 if (wait && iowait_sdma_dec(wait))
382 iowait_drain_wakeup(wait);
383}
384
385/*
386 * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
387 *
388 * Depending on timing there can be txreqs in two places:
389 * - in the descq ring
390 * - in the flush list
391 *
392 * To avoid ordering issues the descq ring needs to be flushed
393 * first followed by the flush list.
394 *
395 * This routine is called from two places
396 * - From a work queue item
397 * - Directly from the state machine just before setting the
398 * state to running
399 *
400 * Must be called with head_lock held
401 *
402 */
403static void sdma_flush(struct sdma_engine *sde)
404{
405 struct sdma_txreq *txp, *txp_next;
406 LIST_HEAD(flushlist);
407 unsigned long flags;
408
409 /* flush from head to tail */
410 sdma_flush_descq(sde);
411 spin_lock_irqsave(&sde->flushlist_lock, flags);
412 /* copy flush list */
413 list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
414 list_del_init(&txp->list);
415 list_add_tail(&txp->list, &flushlist);
416 }
417 spin_unlock_irqrestore(&sde->flushlist_lock, flags);
418 /* flush from flush list */
419 list_for_each_entry_safe(txp, txp_next, &flushlist, list)
420 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
421}
422
423/*
424 * Fields a work request for flushing the descq ring
425 * and the flush list
426 *
427 * If the engine has been brought to running during
428 * the scheduling delay, the flush is ignored, assuming
429 * that the process of bringing the engine to running
430 * would have done this flush prior to going to running.
431 *
432 */
433static void sdma_field_flush(struct work_struct *work)
434{
435 unsigned long flags;
436 struct sdma_engine *sde =
437 container_of(work, struct sdma_engine, flush_worker);
438
439 write_seqlock_irqsave(&sde->head_lock, flags);
440 if (!__sdma_running(sde))
441 sdma_flush(sde);
442 write_sequnlock_irqrestore(&sde->head_lock, flags);
443}
444
445static void sdma_err_halt_wait(struct work_struct *work)
446{
447 struct sdma_engine *sde = container_of(work, struct sdma_engine,
448 err_halt_worker);
449 u64 statuscsr;
450 unsigned long timeout;
451
452 timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
453 while (1) {
454 statuscsr = read_sde_csr(sde, SD(STATUS));
455 statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
456 if (statuscsr)
457 break;
458 if (time_after(jiffies, timeout)) {
459 dd_dev_err(sde->dd,
460 "SDMA engine %d - timeout waiting for engine to halt\n",
461 sde->this_idx);
462 /*
463 * Continue anyway. This could happen if there was
464 * an uncorrectable error in the wrong spot.
465 */
466 break;
467 }
468 usleep_range(80, 120);
469 }
470
471 sdma_process_event(sde, sdma_event_e15_hw_halt_done);
472}
473
474static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
475{
476 if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
477 unsigned index;
478 struct hfi1_devdata *dd = sde->dd;
479
480 for (index = 0; index < dd->num_sdma; index++) {
481 struct sdma_engine *curr_sdma = &dd->per_sdma[index];
482
483 if (curr_sdma != sde)
484 curr_sdma->progress_check_head =
485 curr_sdma->descq_head;
486 }
487 dd_dev_err(sde->dd,
488 "SDMA engine %d - check scheduled\n",
489 sde->this_idx);
490 mod_timer(&sde->err_progress_check_timer, jiffies + 10);
491 }
492}
493
494static void sdma_err_progress_check(struct timer_list *t)
495{
496 unsigned index;
497 struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer);
498
499 dd_dev_err(sde->dd, "SDE progress check event\n");
500 for (index = 0; index < sde->dd->num_sdma; index++) {
501 struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
502 unsigned long flags;
503
504 /* check progress on each engine except the current one */
505 if (curr_sde == sde)
506 continue;
507 /*
508 * We must lock interrupts when acquiring sde->lock,
509 * to avoid a deadlock if interrupt triggers and spins on
510 * the same lock on same CPU
511 */
512 spin_lock_irqsave(&curr_sde->tail_lock, flags);
513 write_seqlock(&curr_sde->head_lock);
514
515 /* skip non-running queues */
516 if (curr_sde->state.current_state != sdma_state_s99_running) {
517 write_sequnlock(&curr_sde->head_lock);
518 spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
519 continue;
520 }
521
522 if ((curr_sde->descq_head != curr_sde->descq_tail) &&
523 (curr_sde->descq_head ==
524 curr_sde->progress_check_head))
525 __sdma_process_event(curr_sde,
526 sdma_event_e90_sw_halted);
527 write_sequnlock(&curr_sde->head_lock);
528 spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
529 }
530 schedule_work(&sde->err_halt_worker);
531}
532
533static void sdma_hw_clean_up_task(unsigned long opaque)
534{
535 struct sdma_engine *sde = (struct sdma_engine *)opaque;
536 u64 statuscsr;
537
538 while (1) {
539#ifdef CONFIG_SDMA_VERBOSITY
540 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
541 sde->this_idx, slashstrip(__FILE__), __LINE__,
542 __func__);
543#endif
544 statuscsr = read_sde_csr(sde, SD(STATUS));
545 statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
546 if (statuscsr)
547 break;
548 udelay(10);
549 }
550
551 sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
552}
553
554static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
555{
556 smp_read_barrier_depends(); /* see sdma_update_tail() */
557 return sde->tx_ring[sde->tx_head & sde->sdma_mask];
558}
559
560/*
561 * flush ring for recovery
562 */
563static void sdma_flush_descq(struct sdma_engine *sde)
564{
565 u16 head, tail;
566 int progress = 0;
567 struct sdma_txreq *txp = get_txhead(sde);
568
569 /* The reason for some of the complexity of this code is that
570 * not all descriptors have corresponding txps. So, we have to
571 * be able to skip over descs until we wander into the range of
572 * the next txp on the list.
573 */
574 head = sde->descq_head & sde->sdma_mask;
575 tail = sde->descq_tail & sde->sdma_mask;
576 while (head != tail) {
577 /* advance head, wrap if needed */
578 head = ++sde->descq_head & sde->sdma_mask;
579 /* if now past this txp's descs, do the callback */
580 if (txp && txp->next_descq_idx == head) {
581 /* remove from list */
582 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
583 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
584 trace_hfi1_sdma_progress(sde, head, tail, txp);
585 txp = get_txhead(sde);
586 }
587 progress++;
588 }
589 if (progress)
590 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
591}
592
593static void sdma_sw_clean_up_task(unsigned long opaque)
594{
595 struct sdma_engine *sde = (struct sdma_engine *)opaque;
596 unsigned long flags;
597
598 spin_lock_irqsave(&sde->tail_lock, flags);
599 write_seqlock(&sde->head_lock);
600
601 /*
602 * At this point, the following should always be true:
603 * - We are halted, so no more descriptors are getting retired.
604 * - We are not running, so no one is submitting new work.
605 * - Only we can send the e40_sw_cleaned, so we can't start
606 * running again until we say so. So, the active list and
607 * descq are ours to play with.
608 */
609
610 /*
611 * In the error clean up sequence, software clean must be called
612 * before the hardware clean so we can use the hardware head in
613 * the progress routine. A hardware clean or SPC unfreeze will
614 * reset the hardware head.
615 *
616 * Process all retired requests. The progress routine will use the
617 * latest physical hardware head - we are not running so speed does
618 * not matter.
619 */
620 sdma_make_progress(sde, 0);
621
622 sdma_flush(sde);
623
624 /*
625 * Reset our notion of head and tail.
626 * Note that the HW registers have been reset via an earlier
627 * clean up.
628 */
629 sde->descq_tail = 0;
630 sde->descq_head = 0;
631 sde->desc_avail = sdma_descq_freecnt(sde);
632 *sde->head_dma = 0;
633
634 __sdma_process_event(sde, sdma_event_e40_sw_cleaned);
635
636 write_sequnlock(&sde->head_lock);
637 spin_unlock_irqrestore(&sde->tail_lock, flags);
638}
639
640static void sdma_sw_tear_down(struct sdma_engine *sde)
641{
642 struct sdma_state *ss = &sde->state;
643
644 /* Releasing this reference means the state machine has stopped. */
645 sdma_put(ss);
646
647 /* stop waiting for all unfreeze events to complete */
648 atomic_set(&sde->dd->sdma_unfreeze_count, -1);
649 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
650}
651
652static void sdma_start_hw_clean_up(struct sdma_engine *sde)
653{
654 tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
655}
656
657static void sdma_set_state(struct sdma_engine *sde,
658 enum sdma_states next_state)
659{
660 struct sdma_state *ss = &sde->state;
661 const struct sdma_set_state_action *action = sdma_action_table;
662 unsigned op = 0;
663
664 trace_hfi1_sdma_state(
665 sde,
666 sdma_state_names[ss->current_state],
667 sdma_state_names[next_state]);
668
669 /* debugging bookkeeping */
670 ss->previous_state = ss->current_state;
671 ss->previous_op = ss->current_op;
672 ss->current_state = next_state;
673
674 if (ss->previous_state != sdma_state_s99_running &&
675 next_state == sdma_state_s99_running)
676 sdma_flush(sde);
677
678 if (action[next_state].op_enable)
679 op |= SDMA_SENDCTRL_OP_ENABLE;
680
681 if (action[next_state].op_intenable)
682 op |= SDMA_SENDCTRL_OP_INTENABLE;
683
684 if (action[next_state].op_halt)
685 op |= SDMA_SENDCTRL_OP_HALT;
686
687 if (action[next_state].op_cleanup)
688 op |= SDMA_SENDCTRL_OP_CLEANUP;
689
690 if (action[next_state].go_s99_running_tofalse)
691 ss->go_s99_running = 0;
692
693 if (action[next_state].go_s99_running_totrue)
694 ss->go_s99_running = 1;
695
696 ss->current_op = op;
697 sdma_sendctrl(sde, ss->current_op);
698}
699
700/**
701 * sdma_get_descq_cnt() - called when device probed
702 *
703 * Return a validated descq count.
704 *
705 * This is currently only used in the verbs initialization to build the tx
706 * list.
707 *
708 * This will probably be deleted in favor of a more scalable approach to
709 * alloc tx's.
710 *
711 */
712u16 sdma_get_descq_cnt(void)
713{
714 u16 count = sdma_descq_cnt;
715
716 if (!count)
717 return SDMA_DESCQ_CNT;
718 /* count must be a power of 2 in the range [64, 32768].
719 * Otherwise return default.
720 */
721 if (!is_power_of_2(count))
722 return SDMA_DESCQ_CNT;
723 if (count < 64 || count > 32768)
724 return SDMA_DESCQ_CNT;
725 return count;
726}
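/*
 * Worked example: with the default of 2048 entries, each engine's
 * descriptor FIFO later costs descq_cnt * sizeof(u64[2]) = 2048 * 16
 * bytes = 32 KiB of coherent DMA memory (see the allocation in
 * sdma_init()).
 */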
727
728/**
729 * sdma_engine_get_vl() - return vl for a given sdma engine
730 * @sde: sdma engine
731 *
732 * This function returns the vl mapped to a given engine, or an error if
733 * the mapping can't be found. The mapping fields are protected by RCU.
734 */
735int sdma_engine_get_vl(struct sdma_engine *sde)
736{
737 struct hfi1_devdata *dd = sde->dd;
738 struct sdma_vl_map *m;
739 u8 vl;
740
741 if (sde->this_idx >= TXE_NUM_SDMA_ENGINES)
742 return -EINVAL;
743
744 rcu_read_lock();
745 m = rcu_dereference(dd->sdma_map);
746 if (unlikely(!m)) {
747 rcu_read_unlock();
748 return -EINVAL;
749 }
750 vl = m->engine_to_vl[sde->this_idx];
751 rcu_read_unlock();
752
753 return vl;
754}
755
756/**
757 * sdma_select_engine_vl() - select sdma engine
758 * @dd: devdata
759 * @selector: a spreading factor
760 * @vl: this vl
761 *
762 *
763 * This function returns an engine based on the selector and a vl. The
764 * mapping fields are protected by RCU.
765 */
766struct sdma_engine *sdma_select_engine_vl(
767 struct hfi1_devdata *dd,
768 u32 selector,
769 u8 vl)
770{
771 struct sdma_vl_map *m;
772 struct sdma_map_elem *e;
773 struct sdma_engine *rval;
774
775 /* NOTE This should only happen if SC->VL changed after the initial
776 * checks on the QP/AH
777 * Default will return engine 0 below
778 */
779 if (vl >= num_vls) {
780 rval = NULL;
781 goto done;
782 }
783
784 rcu_read_lock();
785 m = rcu_dereference(dd->sdma_map);
786 if (unlikely(!m)) {
787 rcu_read_unlock();
788 return &dd->per_sdma[0];
789 }
790 e = m->map[vl & m->mask];
791 rval = e->sde[selector & e->mask];
792 rcu_read_unlock();
793
794done:
795 rval = !rval ? &dd->per_sdma[0] : rval;
796 trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
797 return rval;
798}
799
800/**
801 * sdma_select_engine_sc() - select sdma engine
802 * @dd: devdata
803 * @selector: a spreading factor
804 * @sc5: the 5 bit sc
805 *
806 *
807 * This function returns an engine based on the selector and an sc.
808 */
809struct sdma_engine *sdma_select_engine_sc(
810 struct hfi1_devdata *dd,
811 u32 selector,
812 u8 sc5)
813{
814 u8 vl = sc_to_vlt(dd, sc5);
815
816 return sdma_select_engine_vl(dd, selector, vl);
817}
818
819struct sdma_rht_map_elem {
820 u32 mask;
821 u8 ctr;
822 struct sdma_engine *sde[0];
823};
824
825struct sdma_rht_node {
826 unsigned long cpu_id;
827 struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED];
828 struct rhash_head node;
829};
830
831#define NR_CPUS_HINT 192
832
833static const struct rhashtable_params sdma_rht_params = {
834 .nelem_hint = NR_CPUS_HINT,
835 .head_offset = offsetof(struct sdma_rht_node, node),
836 .key_offset = offsetof(struct sdma_rht_node, cpu_id),
837 .key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id),
838 .max_size = NR_CPUS,
839 .min_size = 8,
840 .automatic_shrinking = true,
841};
842
843/*
844 * sdma_select_user_engine() - select sdma engine based on user setup
845 * @dd: devdata
846 * @selector: a spreading factor
847 * @vl: this vl
848 *
849 * This function returns an sdma engine for a user sdma request.
850 * User defined sdma engine affinity setting is honored when applicable,
851 * otherwise system default sdma engine mapping is used. To ensure correct
852 * ordering, the mapping from <selector, vl> to sde must remain unchanged.
853 */
854struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd,
855 u32 selector, u8 vl)
856{
857 struct sdma_rht_node *rht_node;
858 struct sdma_engine *sde = NULL;
859 const struct cpumask *current_mask = &current->cpus_allowed;
860 unsigned long cpu_id;
861
862 /*
863 * To ensure that always the same sdma engine(s) will be
864 * selected make sure the process is pinned to this CPU only.
865 */
866 if (cpumask_weight(current_mask) != 1)
867 goto out;
868
869 cpu_id = smp_processor_id();
870 rcu_read_lock();
871 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu_id,
872 sdma_rht_params);
873
874 if (rht_node && rht_node->map[vl]) {
875 struct sdma_rht_map_elem *map = rht_node->map[vl];
876
877 sde = map->sde[selector & map->mask];
878 }
879 rcu_read_unlock();
880
881 if (sde)
882 return sde;
883
884out:
885 return sdma_select_engine_vl(dd, selector, vl);
886}
887
888static void sdma_populate_sde_map(struct sdma_rht_map_elem *map)
889{
890 int i;
891
892 for (i = 0; i < roundup_pow_of_two(map->ctr ? : 1) - map->ctr; i++)
893 map->sde[map->ctr + i] = map->sde[i];
894}
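/*
 * Example: with map->ctr == 3, roundup_pow_of_two(3) == 4, so the loop
 * above copies sde[0] into the single padding slot sde[3]; lookups can
 * then use "selector & (4 - 1)" instead of a modulo.
 */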
895
896static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map,
897 struct sdma_engine *sde)
898{
899 unsigned int i, pow;
900
901 /* only need to check the first ctr entries for a match */
902 for (i = 0; i < map->ctr; i++) {
903 if (map->sde[i] == sde) {
904 memmove(&map->sde[i], &map->sde[i + 1],
905 (map->ctr - i - 1) * sizeof(map->sde[0]));
906 map->ctr--;
907 pow = roundup_pow_of_two(map->ctr ? : 1);
908 map->mask = pow - 1;
909 sdma_populate_sde_map(map);
910 break;
911 }
912 }
913}
914
915/*
916 * Prevents concurrent reads and writes of the sdma engine cpu_mask
917 */
918static DEFINE_MUTEX(process_to_sde_mutex);
919
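/*
 * Usage sketch (the exact sysfs path is illustrative only): user space
 * writes a cpulist-format string such as "0-3,8" to a per-engine
 * cpu_list attribute; cpulist_parse() below turns it into the cpumask
 * that drives the per-cpu rhashtable updates.
 */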
920ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf,
921 size_t count)
922{
923 struct hfi1_devdata *dd = sde->dd;
924 cpumask_var_t mask, new_mask;
925 unsigned long cpu;
926 int ret, vl, sz;
927
928 vl = sdma_engine_get_vl(sde);
929 if (unlikely(vl < 0))
930 return -EINVAL;
931
932 ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
933 if (!ret)
934 return -ENOMEM;
935
936 ret = zalloc_cpumask_var(&new_mask, GFP_KERNEL);
937 if (!ret) {
938 free_cpumask_var(mask);
939 return -ENOMEM;
940 }
941 ret = cpulist_parse(buf, mask);
942 if (ret)
943 goto out_free;
944
945 if (!cpumask_subset(mask, cpu_online_mask)) {
946 dd_dev_warn(sde->dd, "Invalid CPU mask\n");
947 ret = -EINVAL;
948 goto out_free;
949 }
950
951 sz = sizeof(struct sdma_rht_map_elem) +
952 (TXE_NUM_SDMA_ENGINES * sizeof(struct sdma_engine *));
953
954 mutex_lock(&process_to_sde_mutex);
955
956 for_each_cpu(cpu, mask) {
957 struct sdma_rht_node *rht_node;
958
959 /* Check if we have this already mapped */
960 if (cpumask_test_cpu(cpu, &sde->cpu_mask)) {
961 cpumask_set_cpu(cpu, new_mask);
962 continue;
963 }
964
965 if (vl >= ARRAY_SIZE(rht_node->map)) {
966 ret = -EINVAL;
967 goto out;
968 }
969
970 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
971 sdma_rht_params);
972 if (!rht_node) {
973 rht_node = kzalloc(sizeof(*rht_node), GFP_KERNEL);
974 if (!rht_node) {
975 ret = -ENOMEM;
976 goto out;
977 }
978
979 rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
980 if (!rht_node->map[vl]) {
981 kfree(rht_node);
982 ret = -ENOMEM;
983 goto out;
984 }
985 rht_node->cpu_id = cpu;
986 rht_node->map[vl]->mask = 0;
987 rht_node->map[vl]->ctr = 1;
988 rht_node->map[vl]->sde[0] = sde;
989
990 ret = rhashtable_insert_fast(dd->sdma_rht,
991 &rht_node->node,
992 sdma_rht_params);
993 if (ret) {
994 kfree(rht_node->map[vl]);
995 kfree(rht_node);
996 dd_dev_err(sde->dd, "Failed to set process to sde affinity for cpu %lu\n",
997 cpu);
998 goto out;
999 }
1000
1001 } else {
1002 int ctr, pow;
1003
1004 /* Add new user mappings */
1005 if (!rht_node->map[vl])
1006 rht_node->map[vl] = kzalloc(sz, GFP_KERNEL);
1007
1008 if (!rht_node->map[vl]) {
1009 ret = -ENOMEM;
1010 goto out;
1011 }
1012
1013 rht_node->map[vl]->ctr++;
1014 ctr = rht_node->map[vl]->ctr;
1015 rht_node->map[vl]->sde[ctr - 1] = sde;
1016 pow = roundup_pow_of_two(ctr);
1017 rht_node->map[vl]->mask = pow - 1;
1018
1019 /* Populate the sde map table */
1020 sdma_populate_sde_map(rht_node->map[vl]);
1021 }
1022 cpumask_set_cpu(cpu, new_mask);
1023 }
1024
1025 /* Clean up old mappings */
1026 for_each_cpu(cpu, cpu_online_mask) {
1027 struct sdma_rht_node *rht_node;
1028
1029 /* Don't cleanup sdes that are set in the new mask */
1030 if (cpumask_test_cpu(cpu, mask))
1031 continue;
1032
1033 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpu,
1034 sdma_rht_params);
1035 if (rht_node) {
1036 bool empty = true;
1037 int i;
1038
1039 /* Remove mappings for old sde */
1040 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
1041 if (rht_node->map[i])
1042 sdma_cleanup_sde_map(rht_node->map[i],
1043 sde);
1044
1045 /* Free empty hash table entries */
1046 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
1047 if (!rht_node->map[i])
1048 continue;
1049
1050 if (rht_node->map[i]->ctr) {
1051 empty = false;
1052 break;
1053 }
1054 }
1055
1056 if (empty) {
1057 ret = rhashtable_remove_fast(dd->sdma_rht,
1058 &rht_node->node,
1059 sdma_rht_params);
1060 WARN_ON(ret);
1061
1062 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
1063 kfree(rht_node->map[i]);
1064
1065 kfree(rht_node);
1066 }
1067 }
1068 }
1069
1070 cpumask_copy(&sde->cpu_mask, new_mask);
1071out:
1072 mutex_unlock(&process_to_sde_mutex);
1073out_free:
1074 free_cpumask_var(mask);
1075 free_cpumask_var(new_mask);
1076 return ret ? : strnlen(buf, PAGE_SIZE);
1077}
1078
1079ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf)
1080{
1081 mutex_lock(&process_to_sde_mutex);
1082 if (cpumask_empty(&sde->cpu_mask))
1083 snprintf(buf, PAGE_SIZE, "%s\n", "empty");
1084 else
1085 cpumap_print_to_pagebuf(true, buf, &sde->cpu_mask);
1086 mutex_unlock(&process_to_sde_mutex);
1087 return strnlen(buf, PAGE_SIZE);
1088}
1089
1090static void sdma_rht_free(void *ptr, void *arg)
1091{
1092 struct sdma_rht_node *rht_node = ptr;
1093 int i;
1094
1095 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++)
1096 kfree(rht_node->map[i]);
1097
1098 kfree(rht_node);
1099}
1100
1101/**
1102 * sdma_seqfile_dump_cpu_list() - debugfs dump the cpu to sdma mappings
1103 * @s: seq file
1104 * @dd: hfi1_devdata
1105 * @cpuid: cpu id
1106 *
1107 * This routine dumps the process to sde mappings per cpu
1108 */
1109void sdma_seqfile_dump_cpu_list(struct seq_file *s,
1110 struct hfi1_devdata *dd,
1111 unsigned long cpuid)
1112{
1113 struct sdma_rht_node *rht_node;
1114 int i, j;
1115
1116 rht_node = rhashtable_lookup_fast(dd->sdma_rht, &cpuid,
1117 sdma_rht_params);
1118 if (!rht_node)
1119 return;
1120
1121 seq_printf(s, "cpu%3lu: ", cpuid);
1122 for (i = 0; i < HFI1_MAX_VLS_SUPPORTED; i++) {
1123 if (!rht_node->map[i] || !rht_node->map[i]->ctr)
1124 continue;
1125
1126 seq_printf(s, " vl%d: [", i);
1127
1128 for (j = 0; j < rht_node->map[i]->ctr; j++) {
1129 if (!rht_node->map[i]->sde[j])
1130 continue;
1131
1132 if (j > 0)
1133 seq_puts(s, ",");
1134
1135 seq_printf(s, " sdma%2d",
1136 rht_node->map[i]->sde[j]->this_idx);
1137 }
1138 seq_puts(s, " ]");
1139 }
1140
1141 seq_puts(s, "\n");
1142}
1143
1144/*
1145 * Free the indicated map struct
1146 */
1147static void sdma_map_free(struct sdma_vl_map *m)
1148{
1149 int i;
1150
1151 for (i = 0; m && i < m->actual_vls; i++)
1152 kfree(m->map[i]);
1153 kfree(m);
1154}
1155
1156/*
1157 * Handle RCU callback
1158 */
1159static void sdma_map_rcu_callback(struct rcu_head *list)
1160{
1161 struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);
1162
1163 sdma_map_free(m);
1164}
1165
1166/**
1167 * sdma_map_init - called when # vls change
1168 * @dd: hfi1_devdata
1169 * @port: port number
1170 * @num_vls: number of vls
1171 * @vl_engines: per vl engine mapping (optional)
1172 *
1173 * This routine changes the mapping based on the number of vls.
1174 *
1175 * vl_engines is used to specify a non-uniform vl/engine loading. NULL
1176 * implies auto computing the loading and giving each VLs a uniform
1177 * distribution of engines per VL.
1178 *
1179 * The auto algorithm computes the sde_per_vl and the number of extra
1180 * engines. Any extra engines are added from the last VL on down.
1181 *
1182 * rcu locking is used here to control access to the mapping fields.
1183 *
1184 * If either the num_vls or num_sdma are non-power of 2, the array sizes
1185 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
1186 * up to the next highest power of 2 and the first entry is reused
1187 * in a round robin fashion.
1188 *
1189 * If an error occurs the map change is not done and the mapping is
1190 * not changed.
1191 *
1192 */
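/*
 * Worked example of the auto algorithm described above: with 16 engines
 * and 5 VLs, sde_per_vl = 3 and extra = 1, so vl_engines = { 3, 3, 3, 3, 4 }
 * (the extra engine lands on the last VL). The 5 VLs round up to 8 map
 * entries, so map[5..7] reuse map[0..2], and each per-VL element of size
 * roundup_pow_of_two(3) = 4 wraps back to its first engine for the
 * padding slot.
 */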
1193int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
1194{
1195 int i, j;
1196 int extra, sde_per_vl;
1197 int engine = 0;
1198 u8 lvl_engines[OPA_MAX_VLS];
1199 struct sdma_vl_map *oldmap, *newmap;
1200
1201 if (!(dd->flags & HFI1_HAS_SEND_DMA))
1202 return 0;
1203
1204 if (!vl_engines) {
1205 /* truncate divide */
1206 sde_per_vl = dd->num_sdma / num_vls;
1207 /* extras */
1208 extra = dd->num_sdma % num_vls;
1209 vl_engines = lvl_engines;
1210 /* add extras from last vl down */
1211 for (i = num_vls - 1; i >= 0; i--, extra--)
1212 vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
1213 }
1214 /* build new map */
1215 newmap = kzalloc(
1216 sizeof(struct sdma_vl_map) +
1217 roundup_pow_of_two(num_vls) *
1218 sizeof(struct sdma_map_elem *),
1219 GFP_KERNEL);
1220 if (!newmap)
1221 goto bail;
1222 newmap->actual_vls = num_vls;
1223 newmap->vls = roundup_pow_of_two(num_vls);
1224 newmap->mask = (1 << ilog2(newmap->vls)) - 1;
1225 /* initialize back-map */
1226 for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++)
1227 newmap->engine_to_vl[i] = -1;
1228 for (i = 0; i < newmap->vls; i++) {
1229 /* save for wrap around */
1230 int first_engine = engine;
1231
1232 if (i < newmap->actual_vls) {
1233 int sz = roundup_pow_of_two(vl_engines[i]);
1234
1235 /* only allocate once */
1236 newmap->map[i] = kzalloc(
1237 sizeof(struct sdma_map_elem) +
1238 sz * sizeof(struct sdma_engine *),
1239 GFP_KERNEL);
1240 if (!newmap->map[i])
1241 goto bail;
1242 newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
1243 /* assign engines */
1244 for (j = 0; j < sz; j++) {
1245 newmap->map[i]->sde[j] =
1246 &dd->per_sdma[engine];
1247 if (++engine >= first_engine + vl_engines[i])
1248 /* wrap back to first engine */
1249 engine = first_engine;
1250 }
1251 /* assign back-map */
1252 for (j = 0; j < vl_engines[i]; j++)
1253 newmap->engine_to_vl[first_engine + j] = i;
1254 } else {
1255 /* just re-use entry without allocating */
1256 newmap->map[i] = newmap->map[i % num_vls];
1257 }
1258 engine = first_engine + vl_engines[i];
1259 }
1260 /* newmap in hand, save old map */
1261 spin_lock_irq(&dd->sde_map_lock);
1262 oldmap = rcu_dereference_protected(dd->sdma_map,
1263 lockdep_is_held(&dd->sde_map_lock));
1264
1265 /* publish newmap */
1266 rcu_assign_pointer(dd->sdma_map, newmap);
1267
1268 spin_unlock_irq(&dd->sde_map_lock);
1269 /* success, free any old map after grace period */
1270 if (oldmap)
1271 call_rcu(&oldmap->list, sdma_map_rcu_callback);
1272 return 0;
1273bail:
1274 /* free any partial allocation */
1275 sdma_map_free(newmap);
1276 return -ENOMEM;
1277}
1278
1279/*
1280 * Clean up allocated memory.
1281 *
1282 * This routine can be called regardless of the success of sdma_init()
1283 *
1284 */
1285static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
1286{
1287 size_t i;
1288 struct sdma_engine *sde;
1289
1290 if (dd->sdma_pad_dma) {
1291 dma_free_coherent(&dd->pcidev->dev, 4,
1292 (void *)dd->sdma_pad_dma,
1293 dd->sdma_pad_phys);
1294 dd->sdma_pad_dma = NULL;
1295 dd->sdma_pad_phys = 0;
1296 }
1297 if (dd->sdma_heads_dma) {
1298 dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
1299 (void *)dd->sdma_heads_dma,
1300 dd->sdma_heads_phys);
1301 dd->sdma_heads_dma = NULL;
1302 dd->sdma_heads_phys = 0;
1303 }
1304 for (i = 0; dd->per_sdma && i < num_engines; ++i) {
1305 sde = &dd->per_sdma[i];
1306
1307 sde->head_dma = NULL;
1308 sde->head_phys = 0;
1309
1310 if (sde->descq) {
1311 dma_free_coherent(
1312 &dd->pcidev->dev,
1313 sde->descq_cnt * sizeof(u64[2]),
1314 sde->descq,
1315 sde->descq_phys
1316 );
1317 sde->descq = NULL;
1318 sde->descq_phys = 0;
1319 }
1320 kvfree(sde->tx_ring);
1321 sde->tx_ring = NULL;
1322 }
1323 spin_lock_irq(&dd->sde_map_lock);
1324 sdma_map_free(rcu_access_pointer(dd->sdma_map));
1325 RCU_INIT_POINTER(dd->sdma_map, NULL);
1326 spin_unlock_irq(&dd->sde_map_lock);
1327 synchronize_rcu();
1328 kfree(dd->per_sdma);
1329 dd->per_sdma = NULL;
1330
1331 if (dd->sdma_rht) {
1332 rhashtable_free_and_destroy(dd->sdma_rht, sdma_rht_free, NULL);
1333 kfree(dd->sdma_rht);
1334 dd->sdma_rht = NULL;
1335 }
1336}
1337
1338/**
1339 * sdma_init() - called when device probed
1340 * @dd: hfi1_devdata
1341 * @port: port number (currently only zero)
1342 *
1343 * Initializes each sde and its csrs.
1344 * Interrupts are not required to be enabled.
1345 *
1346 * Returns:
1347 * 0 - success, -errno on failure
1348 */
1349int sdma_init(struct hfi1_devdata *dd, u8 port)
1350{
1351 unsigned this_idx;
1352 struct sdma_engine *sde;
1353 struct rhashtable *tmp_sdma_rht;
1354 u16 descq_cnt;
1355 void *curr_head;
1356 struct hfi1_pportdata *ppd = dd->pport + port;
1357 u32 per_sdma_credits;
1358 uint idle_cnt = sdma_idle_cnt;
1359 size_t num_engines = dd->chip_sdma_engines;
1360 int ret = -ENOMEM;
1361
1362 if (!HFI1_CAP_IS_KSET(SDMA)) {
1363 HFI1_CAP_CLEAR(SDMA_AHG);
1364 return 0;
1365 }
1366 if (mod_num_sdma &&
1367 /* can't exceed chip support */
1368 mod_num_sdma <= dd->chip_sdma_engines &&
1369 /* count must be >= vls */
1370 mod_num_sdma >= num_vls)
1371 num_engines = mod_num_sdma;
1372
1373 dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
1374 dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
1375 dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
1376 dd->chip_sdma_mem_size);
1377
1378 per_sdma_credits =
1379 dd->chip_sdma_mem_size / (num_engines * SDMA_BLOCK_SIZE);
1380
1381 /* set up freeze waitqueue */
1382 init_waitqueue_head(&dd->sdma_unfreeze_wq);
1383 atomic_set(&dd->sdma_unfreeze_count, 0);
1384
1385 descq_cnt = sdma_get_descq_cnt();
1386 dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
1387 num_engines, descq_cnt);
1388
1389 /* alloc memory for array of send engines */
1390 dd->per_sdma = kcalloc(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL);
1391 if (!dd->per_sdma)
1392 return ret;
1393
1394 idle_cnt = ns_to_cclock(dd, idle_cnt);
1395 if (idle_cnt)
1396 dd->default_desc1 =
1397 SDMA_DESC1_HEAD_TO_HOST_FLAG;
1398 else
1399 dd->default_desc1 =
1400 SDMA_DESC1_INT_REQ_FLAG;
1401
1402 if (!sdma_desct_intr)
1403 sdma_desct_intr = SDMA_DESC_INTR;
1404
1405 /* Allocate memory for SendDMA descriptor FIFOs */
1406 for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1407 sde = &dd->per_sdma[this_idx];
1408 sde->dd = dd;
1409 sde->ppd = ppd;
1410 sde->this_idx = this_idx;
1411 sde->descq_cnt = descq_cnt;
1412 sde->desc_avail = sdma_descq_freecnt(sde);
1413 sde->sdma_shift = ilog2(descq_cnt);
1414 sde->sdma_mask = (1 << sde->sdma_shift) - 1;
1415
1416 /* Create a mask specifically for each interrupt source */
1417 sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES +
1418 this_idx);
1419 sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES +
1420 this_idx);
1421 sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES +
1422 this_idx);
1423 /* Create a combined mask to cover all 3 interrupt sources */
1424 sde->imask = sde->int_mask | sde->progress_mask |
1425 sde->idle_mask;
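 /*
  * Illustration (assuming TXE_NUM_SDMA_ENGINES is 16): engine 2 would
  * own bit 2 (int), bit 18 (progress) and bit 34 (idle), and imask is
  * simply the OR of those three bits.
  */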
1426
1427 spin_lock_init(&sde->tail_lock);
1428 seqlock_init(&sde->head_lock);
1429 spin_lock_init(&sde->senddmactrl_lock);
1430 spin_lock_init(&sde->flushlist_lock);
1431 /* ensure there is always a zero bit */
1432 sde->ahg_bits = 0xfffffffe00000000ULL;
1433
1434 sdma_set_state(sde, sdma_state_s00_hw_down);
1435
1436 /* set up reference counting */
1437 kref_init(&sde->state.kref);
1438 init_completion(&sde->state.comp);
1439
1440 INIT_LIST_HEAD(&sde->flushlist);
1441 INIT_LIST_HEAD(&sde->dmawait);
1442
1443 sde->tail_csr =
1444 get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
1445
1446 tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
1447 (unsigned long)sde);
1448
1449 tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
1450 (unsigned long)sde);
1451 INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
1452 INIT_WORK(&sde->flush_worker, sdma_field_flush);
1453
1454 sde->progress_check_head = 0;
1455
1456 timer_setup(&sde->err_progress_check_timer,
1457 sdma_err_progress_check, 0);
1458
1459 sde->descq = dma_zalloc_coherent(
1460 &dd->pcidev->dev,
1461 descq_cnt * sizeof(u64[2]),
1462 &sde->descq_phys,
1463 GFP_KERNEL
1464 );
1465 if (!sde->descq)
1466 goto bail;
1467 sde->tx_ring =
1468 kvzalloc_node(sizeof(struct sdma_txreq *) * descq_cnt,
1469 GFP_KERNEL, dd->node);
1470 if (!sde->tx_ring)
1471 goto bail;
1472 }
1473
1474 dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
1475 /* Allocate memory for DMA of head registers to memory */
1476 dd->sdma_heads_dma = dma_zalloc_coherent(
1477 &dd->pcidev->dev,
1478 dd->sdma_heads_size,
1479 &dd->sdma_heads_phys,
1480 GFP_KERNEL
1481 );
1482 if (!dd->sdma_heads_dma) {
1483 dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
1484 goto bail;
1485 }
1486
1487 /* Allocate memory for pad */
1488 dd->sdma_pad_dma = dma_zalloc_coherent(
1489 &dd->pcidev->dev,
1490 sizeof(u32),
1491 &dd->sdma_pad_phys,
1492 GFP_KERNEL
1493 );
1494 if (!dd->sdma_pad_dma) {
1495 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
1496 goto bail;
1497 }
1498
1499 /* assign each engine to different cacheline and init registers */
1500 curr_head = (void *)dd->sdma_heads_dma;
1501 for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1502 unsigned long phys_offset;
1503
1504 sde = &dd->per_sdma[this_idx];
1505
1506 sde->head_dma = curr_head;
1507 curr_head += L1_CACHE_BYTES;
1508 phys_offset = (unsigned long)sde->head_dma -
1509 (unsigned long)dd->sdma_heads_dma;
1510 sde->head_phys = dd->sdma_heads_phys + phys_offset;
1511 init_sdma_regs(sde, per_sdma_credits, idle_cnt);
1512 }
1513 dd->flags |= HFI1_HAS_SEND_DMA;
1514 dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
1515 dd->num_sdma = num_engines;
1516 ret = sdma_map_init(dd, port, ppd->vls_operational, NULL);
1517 if (ret < 0)
1518 goto bail;
1519
1520 tmp_sdma_rht = kzalloc(sizeof(*tmp_sdma_rht), GFP_KERNEL);
1521 if (!tmp_sdma_rht) {
1522 ret = -ENOMEM;
1523 goto bail;
1524 }
1525
1526 ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
1527 if (ret < 0)
1528 goto bail;
1529 dd->sdma_rht = tmp_sdma_rht;
1530
1531 dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
1532 return 0;
1533
1534bail:
1535 sdma_clean(dd, num_engines);
1536 return ret;
1537}
1538
1539/**
1540 * sdma_all_running() - called when the link goes up
1541 * @dd: hfi1_devdata
1542 *
1543 * This routine moves all engines to the running state.
1544 */
1545void sdma_all_running(struct hfi1_devdata *dd)
1546{
1547 struct sdma_engine *sde;
1548 unsigned int i;
1549
1550 /* move all engines to running */
1551 for (i = 0; i < dd->num_sdma; ++i) {
1552 sde = &dd->per_sdma[i];
1553 sdma_process_event(sde, sdma_event_e30_go_running);
1554 }
1555}
1556
1557/**
1558 * sdma_all_idle() - called when the link goes down
1559 * @dd: hfi1_devdata
1560 *
1561 * This routine moves all engines to the idle state.
1562 */
1563void sdma_all_idle(struct hfi1_devdata *dd)
1564{
1565 struct sdma_engine *sde;
1566 unsigned int i;
1567
1568 /* idle all engines */
1569 for (i = 0; i < dd->num_sdma; ++i) {
1570 sde = &dd->per_sdma[i];
1571 sdma_process_event(sde, sdma_event_e70_go_idle);
1572 }
1573}
1574
1575/**
1576 * sdma_start() - called to kick off state processing for all engines
1577 * @dd: hfi1_devdata
1578 *
1579 * This routine is for kicking off the state processing for all required
1580 * sdma engines. Interrupts need to be working at this point.
1581 *
1582 */
1583void sdma_start(struct hfi1_devdata *dd)
1584{
1585 unsigned i;
1586 struct sdma_engine *sde;
1587
1588 /* kick off the engines state processing */
1589 for (i = 0; i < dd->num_sdma; ++i) {
1590 sde = &dd->per_sdma[i];
1591 sdma_process_event(sde, sdma_event_e10_go_hw_start);
1592 }
1593}
1594
1595/**
1596 * sdma_exit() - used when module is removed
1597 * @dd: hfi1_devdata
1598 */
1599void sdma_exit(struct hfi1_devdata *dd)
1600{
1601 unsigned this_idx;
1602 struct sdma_engine *sde;
1603
1604 for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
1605 ++this_idx) {
1606 sde = &dd->per_sdma[this_idx];
1607 if (!list_empty(&sde->dmawait))
1608 dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
1609 sde->this_idx);
1610 sdma_process_event(sde, sdma_event_e00_go_hw_down);
1611
1612 del_timer_sync(&sde->err_progress_check_timer);
1613
1614 /*
1615 * This waits for the state machine to exit so it is not
1616 * necessary to kill the sdma_sw_clean_up_task to make sure
1617 * it is not running.
1618 */
1619 sdma_finalput(&sde->state);
1620 }
1621 sdma_clean(dd, dd->num_sdma);
1622}
1623
1624/*
1625 * unmap the indicated descriptor
1626 */
1627static inline void sdma_unmap_desc(
1628 struct hfi1_devdata *dd,
1629 struct sdma_desc *descp)
1630{
1631 switch (sdma_mapping_type(descp)) {
1632 case SDMA_MAP_SINGLE:
1633 dma_unmap_single(
1634 &dd->pcidev->dev,
1635 sdma_mapping_addr(descp),
1636 sdma_mapping_len(descp),
1637 DMA_TO_DEVICE);
1638 break;
1639 case SDMA_MAP_PAGE:
1640 dma_unmap_page(
1641 &dd->pcidev->dev,
1642 sdma_mapping_addr(descp),
1643 sdma_mapping_len(descp),
1644 DMA_TO_DEVICE);
1645 break;
1646 }
1647}
1648
1649/*
1650 * return the mode as indicated by the first
1651 * descriptor in the tx.
1652 */
1653static inline u8 ahg_mode(struct sdma_txreq *tx)
1654{
1655 return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
1656 >> SDMA_DESC1_HEADER_MODE_SHIFT;
1657}
1658
1659/**
1660 * __sdma_txclean() - clean tx of mappings, descp *kmalloc's
1661 * @dd: hfi1_devdata for unmapping
1662 * @tx: tx request to clean
1663 *
1664 * This is used in the progress routine to clean the tx or
1665 * by the ULP to toss an in-process tx build.
1666 *
1667 * The code can be called multiple times without issue.
1668 *
1669 */
1670void __sdma_txclean(
1671 struct hfi1_devdata *dd,
1672 struct sdma_txreq *tx)
1673{
1674 u16 i;
1675
1676 if (tx->num_desc) {
1677 u8 skip = 0, mode = ahg_mode(tx);
1678
1679 /* unmap first */
1680 sdma_unmap_desc(dd, &tx->descp[0]);
1681 /* determine number of AHG descriptors to skip */
1682 if (mode > SDMA_AHG_APPLY_UPDATE1)
1683 skip = mode >> 1;
1684 for (i = 1 + skip; i < tx->num_desc; i++)
1685 sdma_unmap_desc(dd, &tx->descp[i]);
1686 tx->num_desc = 0;
1687 }
1688 kfree(tx->coalesce_buf);
1689 tx->coalesce_buf = NULL;
1690 /* kmalloc'ed descp */
1691 if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
1692 tx->desc_limit = ARRAY_SIZE(tx->descs);
1693 kfree(tx->descp);
1694 }
1695}
1696
1697static inline u16 sdma_gethead(struct sdma_engine *sde)
1698{
1699 struct hfi1_devdata *dd = sde->dd;
1700 int use_dmahead;
1701 u16 hwhead;
1702
1703#ifdef CONFIG_SDMA_VERBOSITY
1704 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1705 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1706#endif
1707
1708retry:
1709 use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
1710 (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
1711 hwhead = use_dmahead ?
1712 (u16)le64_to_cpu(*sde->head_dma) :
1713 (u16)read_sde_csr(sde, SD(HEAD));
1714
1715 if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
1716 u16 cnt;
1717 u16 swtail;
1718 u16 swhead;
1719 int sane;
1720
1721 swhead = sde->descq_head & sde->sdma_mask;
1722 /* this code is really bad for cache line trading */
1723 swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
1724 cnt = sde->descq_cnt;
1725
1726 if (swhead < swtail)
1727 /* not wrapped */
1728 sane = (hwhead >= swhead) & (hwhead <= swtail);
1729 else if (swhead > swtail)
1730 /* wrapped around */
1731 sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
1732 (hwhead <= swtail);
1733 else
1734 /* empty */
1735 sane = (hwhead == swhead);
1736
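 /*
  * Example of the wrapped case above: with swhead = 2000, swtail = 5 and
  * cnt = 2048, hwhead is sane only if it lies in [2000, 2047] or is
  * <= 5; any other value is treated as bogus below.
  */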
1737 if (unlikely(!sane)) {
1738 dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
1739 sde->this_idx,
1740 use_dmahead ? "dma" : "kreg",
1741 hwhead, swhead, swtail, cnt);
1742 if (use_dmahead) {
1743 /* try one more time, using csr */
1744 use_dmahead = 0;
1745 goto retry;
1746 }
1747 /* proceed as if no progress */
1748 hwhead = swhead;
1749 }
1750 }
1751 return hwhead;
1752}
1753
1754/*
1755 * This is called when there are send DMA descriptors that might be
1756 * available.
1757 *
1758 * This is called with head_lock held.
1759 */
1760static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
1761{
1762 struct iowait *wait, *nw;
1763 struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
1764 uint i, n = 0, seq, max_idx = 0;
1765 struct sdma_txreq *stx;
1766 struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
1767 u8 max_starved_cnt = 0;
1768
1769#ifdef CONFIG_SDMA_VERBOSITY
1770 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
1771 slashstrip(__FILE__), __LINE__, __func__);
1772 dd_dev_err(sde->dd, "avail: %u\n", avail);
1773#endif
1774
1775 do {
1776 seq = read_seqbegin(&dev->iowait_lock);
1777 if (!list_empty(&sde->dmawait)) {
1778 /* at least one item */
1779 write_seqlock(&dev->iowait_lock);
1780 /* Harvest waiters wanting DMA descriptors */
1781 list_for_each_entry_safe(
1782 wait,
1783 nw,
1784 &sde->dmawait,
1785 list) {
1786 u16 num_desc = 0;
1787
1788 if (!wait->wakeup)
1789 continue;
1790 if (n == ARRAY_SIZE(waits))
1791 break;
1792 if (!list_empty(&wait->tx_head)) {
1793 stx = list_first_entry(
1794 &wait->tx_head,
1795 struct sdma_txreq,
1796 list);
1797 num_desc = stx->num_desc;
1798 }
1799 if (num_desc > avail)
1800 break;
1801 avail -= num_desc;
1802 /* Find the most starved wait member */
1803 iowait_starve_find_max(wait, &max_starved_cnt,
1804 n, &max_idx);
1805 list_del_init(&wait->list);
1806 waits[n++] = wait;
1807 }
1808 write_sequnlock(&dev->iowait_lock);
1809 break;
1810 }
1811 } while (read_seqretry(&dev->iowait_lock, seq));
1812
1813 /* Schedule the most starved one first */
1814 if (n)
1815 waits[max_idx]->wakeup(waits[max_idx], SDMA_AVAIL_REASON);
1816
1817 for (i = 0; i < n; i++)
1818 if (i != max_idx)
1819 waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
1820}
1821
1822/* head_lock must be held */
1823static void sdma_make_progress(struct sdma_engine *sde, u64 status)
1824{
1825 struct sdma_txreq *txp = NULL;
1826 int progress = 0;
1827 u16 hwhead, swhead;
1828 int idle_check_done = 0;
1829
1830 hwhead = sdma_gethead(sde);
1831
1832 /* The reason for some of the complexity of this code is that
1833 * not all descriptors have corresponding txps. So, we have to
1834 * be able to skip over descs until we wander into the range of
1835 * the next txp on the list.
1836 */
1837
1838retry:
1839 txp = get_txhead(sde);
1840 swhead = sde->descq_head & sde->sdma_mask;
1841 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1842 while (swhead != hwhead) {
1843 /* advance head, wrap if needed */
1844 swhead = ++sde->descq_head & sde->sdma_mask;
1845
1846 /* if now past this txp's descs, do the callback */
1847 if (txp && txp->next_descq_idx == swhead) {
1848 /* remove from list */
1849 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
1850 complete_tx(sde, txp, SDMA_TXREQ_S_OK);
1851 /* see if there is another txp */
1852 txp = get_txhead(sde);
1853 }
1854 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1855 progress++;
1856 }
1857
1858 /*
1859 * The SDMA idle interrupt is not guaranteed to be ordered with respect
1860 * to updates to the dma_head location in host memory. The head
1861 * value read might not be fully up to date. If there are pending
1862 * descriptors and the SDMA idle interrupt fired then read from the
1863 * CSR SDMA head instead to get the latest value from the hardware.
1864 * The hardware SDMA head should be read at most once in this invocation
1865 * of sdma_make_progress(..), which is ensured by the idle_check_done flag.
1866 */
1867 if ((status & sde->idle_mask) && !idle_check_done) {
1868 u16 swtail;
1869
1870 swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
1871 if (swtail != hwhead) {
1872 hwhead = (u16)read_sde_csr(sde, SD(HEAD));
1873 idle_check_done = 1;
1874 goto retry;
1875 }
1876 }
1877
1878 sde->last_status = status;
1879 if (progress)
1880 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
1881}
1882
1883/*
1884 * sdma_engine_interrupt() - interrupt handler for engine
1885 * @sde: sdma engine
1886 * @status: sdma interrupt reason
1887 *
1888 * Status is a mask of the 3 possible interrupts for this engine. It will
1889 * contain bits _only_ for this SDMA engine. It will contain at least one
 1890 * bit; it may contain more.
1891 */
1892void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
1893{
1894 trace_hfi1_sdma_engine_interrupt(sde, status);
1895 write_seqlock(&sde->head_lock);
ee947859 1896 sdma_set_desc_cnt(sde, sdma_desct_intr);
a699c6c2
VM
1897 if (status & sde->idle_mask)
1898 sde->idle_int_cnt++;
1899 else if (status & sde->progress_mask)
1900 sde->progress_int_cnt++;
1901 else if (status & sde->int_mask)
1902 sde->sdma_int_cnt++;
77241056
MM
1903 sdma_make_progress(sde, status);
1904 write_sequnlock(&sde->head_lock);
1905}
1906
1907/**
1908 * sdma_engine_error() - error handler for engine
1909 * @sde: sdma engine
1910 * @status: sdma interrupt reason
1911 */
1912void sdma_engine_error(struct sdma_engine *sde, u64 status)
1913{
1914 unsigned long flags;
1915
1916#ifdef CONFIG_SDMA_VERBOSITY
1917 dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
1918 sde->this_idx,
1919 (unsigned long long)status,
1920 sdma_state_names[sde->state.current_state]);
1921#endif
1922 spin_lock_irqsave(&sde->tail_lock, flags);
1923 write_seqlock(&sde->head_lock);
1924 if (status & ALL_SDMA_ENG_HALT_ERRS)
1925 __sdma_process_event(sde, sdma_event_e60_hw_halted);
1926 if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
1927 dd_dev_err(sde->dd,
17fb4f29
JJ
1928 "SDMA (%u) engine error: 0x%llx state %s\n",
1929 sde->this_idx,
1930 (unsigned long long)status,
1931 sdma_state_names[sde->state.current_state]);
77241056
MM
1932 dump_sdma_state(sde);
1933 }
1934 write_sequnlock(&sde->head_lock);
1935 spin_unlock_irqrestore(&sde->tail_lock, flags);
1936}
1937
1938static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
1939{
1940 u64 set_senddmactrl = 0;
1941 u64 clr_senddmactrl = 0;
1942 unsigned long flags;
1943
1944#ifdef CONFIG_SDMA_VERBOSITY
1945 dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
1946 sde->this_idx,
1947 (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
1948 (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
1949 (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
1950 (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
1951#endif
1952
1953 if (op & SDMA_SENDCTRL_OP_ENABLE)
1954 set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1955 else
1956 clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1957
1958 if (op & SDMA_SENDCTRL_OP_INTENABLE)
1959 set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1960 else
1961 clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1962
1963 if (op & SDMA_SENDCTRL_OP_HALT)
1964 set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1965 else
1966 clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1967
1968 spin_lock_irqsave(&sde->senddmactrl_lock, flags);
1969
1970 sde->p_senddmactrl |= set_senddmactrl;
1971 sde->p_senddmactrl &= ~clr_senddmactrl;
1972
1973 if (op & SDMA_SENDCTRL_OP_CLEANUP)
1974 write_sde_csr(sde, SD(CTRL),
17fb4f29
JJ
1975 sde->p_senddmactrl |
1976 SD(CTRL_SDMA_CLEANUP_SMASK));
77241056
MM
1977 else
1978 write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);
1979
1980 spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);
1981
1982#ifdef CONFIG_SDMA_VERBOSITY
1983 sdma_dumpstate(sde);
1984#endif
1985}
1986
1987static void sdma_setlengen(struct sdma_engine *sde)
1988{
1989#ifdef CONFIG_SDMA_VERBOSITY
1990 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1991 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1992#endif
1993
1994 /*
1995 * Set SendDmaLenGen and clear-then-set the MSB of the generation
1996 * count to enable generation checking and load the internal
1997 * generation counter.
1998 */
1999 write_sde_csr(sde, SD(LEN_GEN),
17fb4f29 2000 (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT));
77241056 2001 write_sde_csr(sde, SD(LEN_GEN),
17fb4f29
JJ
2002 ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) |
2003 (4ULL << SD(LEN_GEN_GENERATION_SHIFT)));
77241056
MM
2004}
2005
2006static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
2007{
2008 /* Commit writes to memory and advance the tail on the chip */
2009 smp_wmb(); /* see get_txhead() */
2010 writeq(tail, sde->tail_csr);
2011}
2012
2013/*
2014 * This is called when changing to state s10_hw_start_up_halt_wait as
2015 * a result of send buffer errors or send DMA descriptor errors.
2016 */
2017static void sdma_hw_start_up(struct sdma_engine *sde)
2018{
2019 u64 reg;
2020
2021#ifdef CONFIG_SDMA_VERBOSITY
2022 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
2023 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
2024#endif
2025
2026 sdma_setlengen(sde);
2027 sdma_update_tail(sde, 0); /* Set SendDmaTail */
2028 *sde->head_dma = 0;
2029
2030 reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
2031 SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
2032 write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
2033}
2034
77241056
MM
2035/*
2036 * set_sdma_integrity
2037 *
2038 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
2039 */
2040static void set_sdma_integrity(struct sdma_engine *sde)
2041{
2042 struct hfi1_devdata *dd = sde->dd;
77241056 2043
d9ac4555
JP
2044 write_sde_csr(sde, SD(CHECK_ENABLE),
2045 hfi1_pkt_base_sdma_integrity(dd));
77241056
MM
2046}
2047
77241056
MM
2048static void init_sdma_regs(
2049 struct sdma_engine *sde,
2050 u32 credits,
2051 uint idle_cnt)
2052{
2053 u8 opval, opmask;
2054#ifdef CONFIG_SDMA_VERBOSITY
2055 struct hfi1_devdata *dd = sde->dd;
2056
2057 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
2058 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
2059#endif
2060
2061 write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
2062 sdma_setlengen(sde);
2063 sdma_update_tail(sde, 0); /* Set SendDmaTail */
2064 write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
2065 write_sde_csr(sde, SD(DESC_CNT), 0);
2066 write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
2067 write_sde_csr(sde, SD(MEMORY),
17fb4f29
JJ
2068 ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
2069 ((u64)(credits * sde->this_idx) <<
2070 SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
77241056
MM
2071 write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
2072 set_sdma_integrity(sde);
2073 opmask = OPCODE_CHECK_MASK_DISABLED;
2074 opval = OPCODE_CHECK_VAL_DISABLED;
2075 write_sde_csr(sde, SD(CHECK_OPCODE),
17fb4f29
JJ
2076 (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
2077 (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
77241056
MM
2078}
2079
2080#ifdef CONFIG_SDMA_VERBOSITY
2081
2082#define sdma_dumpstate_helper0(reg) do { \
2083 csr = read_csr(sde->dd, reg); \
2084 dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
2085 } while (0)
2086
2087#define sdma_dumpstate_helper(reg) do { \
2088 csr = read_sde_csr(sde, reg); \
2089 dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
2090 #reg, sde->this_idx, csr); \
2091 } while (0)
2092
2093#define sdma_dumpstate_helper2(reg) do { \
2094 csr = read_csr(sde->dd, reg + (8 * i)); \
2095 dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
2096 #reg, i, csr); \
2097 } while (0)
2098
2099void sdma_dumpstate(struct sdma_engine *sde)
2100{
2101 u64 csr;
2102 unsigned i;
2103
2104 sdma_dumpstate_helper(SD(CTRL));
2105 sdma_dumpstate_helper(SD(STATUS));
2106 sdma_dumpstate_helper0(SD(ERR_STATUS));
2107 sdma_dumpstate_helper0(SD(ERR_MASK));
2108 sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
2109 sdma_dumpstate_helper(SD(ENG_ERR_MASK));
2110
2111 for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
6fd8edab 2112 sdma_dumpstate_helper2(CCE_INT_STATUS);
77241056
MM
2113 sdma_dumpstate_helper2(CCE_INT_MASK);
2114 sdma_dumpstate_helper2(CCE_INT_BLOCKED);
2115 }
2116
2117 sdma_dumpstate_helper(SD(TAIL));
2118 sdma_dumpstate_helper(SD(HEAD));
2119 sdma_dumpstate_helper(SD(PRIORITY_THLD));
6fd8edab 2120 sdma_dumpstate_helper(SD(IDLE_CNT));
77241056
MM
2121 sdma_dumpstate_helper(SD(RELOAD_CNT));
2122 sdma_dumpstate_helper(SD(DESC_CNT));
2123 sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
2124 sdma_dumpstate_helper(SD(MEMORY));
2125 sdma_dumpstate_helper0(SD(ENGINES));
2126 sdma_dumpstate_helper0(SD(MEM_SIZE));
2127 /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
2128 sdma_dumpstate_helper(SD(BASE_ADDR));
2129 sdma_dumpstate_helper(SD(LEN_GEN));
2130 sdma_dumpstate_helper(SD(HEAD_ADDR));
2131 sdma_dumpstate_helper(SD(CHECK_ENABLE));
2132 sdma_dumpstate_helper(SD(CHECK_VL));
2133 sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
2134 sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
2135 sdma_dumpstate_helper(SD(CHECK_SLID));
2136 sdma_dumpstate_helper(SD(CHECK_OPCODE));
2137}
2138#endif
2139
2140static void dump_sdma_state(struct sdma_engine *sde)
2141{
77241056
MM
2142 struct hw_sdma_desc *descqp;
2143 u64 desc[2];
2144 u64 addr;
2145 u8 gen;
2146 u16 len;
2147 u16 head, tail, cnt;
2148
2149 head = sde->descq_head & sde->sdma_mask;
2150 tail = sde->descq_tail & sde->sdma_mask;
2151 cnt = sdma_descq_freecnt(sde);
77241056
MM
2152
2153 dd_dev_err(sde->dd,
17fb4f29
JJ
2154 "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
2155 sde->this_idx, head, tail, cnt,
2156 !list_empty(&sde->flushlist));
77241056
MM
2157
2158 /* print info for each entry in the descriptor queue */
2159 while (head != tail) {
2160 char flags[6] = { 'x', 'x', 'x', 'x', 0 };
2161
2162 descqp = &sde->descq[head];
2163 desc[0] = le64_to_cpu(descqp->qw[0]);
2164 desc[1] = le64_to_cpu(descqp->qw[1]);
2165 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
2166 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
2167 'H' : '-';
2168 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
2169 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
2170 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
2171 & SDMA_DESC0_PHY_ADDR_MASK;
2172 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
2173 & SDMA_DESC1_GENERATION_MASK;
2174 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
2175 & SDMA_DESC0_BYTE_COUNT_MASK;
2176 dd_dev_err(sde->dd,
17fb4f29
JJ
2177 "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
2178 head, flags, addr, gen, len);
77241056 2179 dd_dev_err(sde->dd,
17fb4f29
JJ
2180 "\tdesc0:0x%016llx desc1 0x%016llx\n",
2181 desc[0], desc[1]);
77241056
MM
2182 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
2183 dd_dev_err(sde->dd,
17fb4f29
JJ
2184 "\taidx: %u amode: %u alen: %u\n",
2185 (u8)((desc[1] &
2186 SDMA_DESC1_HEADER_INDEX_SMASK) >>
2187 SDMA_DESC1_HEADER_INDEX_SHIFT),
2188 (u8)((desc[1] &
2189 SDMA_DESC1_HEADER_MODE_SMASK) >>
2190 SDMA_DESC1_HEADER_MODE_SHIFT),
2191 (u8)((desc[1] &
2192 SDMA_DESC1_HEADER_DWS_SMASK) >>
2193 SDMA_DESC1_HEADER_DWS_SHIFT));
77241056
MM
2194 head++;
2195 head &= sde->sdma_mask;
2196 }
2197}
2198
2199#define SDE_FMT \
0a226edd 2200 "SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
77241056
MM
2201/**
2202 * sdma_seqfile_dump_sde() - debugfs dump of sde
2203 * @s: seq file
2204 * @sde: send dma engine to dump
2205 *
2206 * This routine dumps the sde to the indicated seq file.
2207 */
2208void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
2209{
2210 u16 head, tail;
2211 struct hw_sdma_desc *descqp;
2212 u64 desc[2];
2213 u64 addr;
2214 u8 gen;
2215 u16 len;
2216
2217 head = sde->descq_head & sde->sdma_mask;
6aa7de05 2218 tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask;
77241056 2219 seq_printf(s, SDE_FMT, sde->this_idx,
17fb4f29
JJ
2220 sde->cpu,
2221 sdma_state_name(sde->state.current_state),
2222 (unsigned long long)read_sde_csr(sde, SD(CTRL)),
2223 (unsigned long long)read_sde_csr(sde, SD(STATUS)),
2224 (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)),
2225 (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail,
2226 (unsigned long long)read_sde_csr(sde, SD(HEAD)), head,
2227 (unsigned long long)le64_to_cpu(*sde->head_dma),
2228 (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
2229 (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
2230 (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
2231 (unsigned long long)sde->last_status,
2232 (unsigned long long)sde->ahg_bits,
2233 sde->tx_tail,
2234 sde->tx_head,
2235 sde->descq_tail,
2236 sde->descq_head,
77241056 2237 !list_empty(&sde->flushlist),
17fb4f29
JJ
2238 sde->descq_full_count,
2239 (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));
77241056
MM
2240
2241 /* print info for each entry in the descriptor queue */
2242 while (head != tail) {
2243 char flags[6] = { 'x', 'x', 'x', 'x', 0 };
2244
2245 descqp = &sde->descq[head];
2246 desc[0] = le64_to_cpu(descqp->qw[0]);
2247 desc[1] = le64_to_cpu(descqp->qw[1]);
2248 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
2249 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
2250 'H' : '-';
2251 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
2252 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
2253 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
2254 & SDMA_DESC0_PHY_ADDR_MASK;
2255 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
2256 & SDMA_DESC1_GENERATION_MASK;
2257 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
2258 & SDMA_DESC0_BYTE_COUNT_MASK;
2259 seq_printf(s,
17fb4f29
JJ
2260 "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
2261 head, flags, addr, gen, len);
77241056
MM
2262 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
2263 seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
17fb4f29
JJ
2264 (u8)((desc[1] &
2265 SDMA_DESC1_HEADER_INDEX_SMASK) >>
2266 SDMA_DESC1_HEADER_INDEX_SHIFT),
2267 (u8)((desc[1] &
2268 SDMA_DESC1_HEADER_MODE_SMASK) >>
2269 SDMA_DESC1_HEADER_MODE_SHIFT));
77241056
MM
2270 head = (head + 1) & sde->sdma_mask;
2271 }
2272}
2273
2274/*
2275 * add the generation number into
2276 * the qw1 and return
2277 */
2278static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
2279{
2280 u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;
2281
2282 qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
2283 qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
2284 << SDMA_DESC1_GENERATION_SHIFT;
2285 return qw1;
2286}
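/*
 * Hedged worked example (not driver code) of the generation arithmetic
 * used by add_gen() above.  It assumes sdma_shift is log2 of the ring
 * size, e.g. 11 for the default 2048-entry descriptor queue, so tails
 * 0..2047 map to generation 0, 2048..4095 to 1, 4096..6143 to 2,
 * 6144..8191 to 3, and the 2-bit value then wraps back to 0.
 */
static inline u8 example_gen_for_tail(u16 descq_tail, unsigned int sdma_shift)
{
	return (descq_tail >> sdma_shift) & 3;
}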
2287
2288/*
2289 * This routine submits the indicated tx
2290 *
2291 * Space has already been guaranteed and
2292 * tail side of ring is locked.
2293 *
2294 * The hardware tail update is done
2295 * in the caller and that is facilitated
2296 * by returning the new tail.
2297 *
2298 * There is special case logic for ahg
2299 * to not add the generation number for
2300 * up to 2 descriptors that follow the
2301 * first descriptor.
2302 *
2303 */
2304static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
2305{
2306 int i;
2307 u16 tail;
2308 struct sdma_desc *descp = tx->descp;
2309 u8 skip = 0, mode = ahg_mode(tx);
2310
2311 tail = sde->descq_tail & sde->sdma_mask;
2312 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
2313 sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
2314 trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
2315 tail, &sde->descq[tail]);
2316 tail = ++sde->descq_tail & sde->sdma_mask;
2317 descp++;
2318 if (mode > SDMA_AHG_APPLY_UPDATE1)
2319 skip = mode >> 1;
2320 for (i = 1; i < tx->num_desc; i++, descp++) {
2321 u64 qw1;
2322
2323 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
2324 if (skip) {
2325 /* edits don't have generation */
2326 qw1 = descp->qw[1];
2327 skip--;
2328 } else {
2329 /* replace generation with real one for non-edits */
2330 qw1 = add_gen(sde, descp->qw[1]);
2331 }
2332 sde->descq[tail].qw[1] = cpu_to_le64(qw1);
2333 trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
2334 tail, &sde->descq[tail]);
2335 tail = ++sde->descq_tail & sde->sdma_mask;
2336 }
2337 tx->next_descq_idx = tail;
2338#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2339 tx->sn = sde->tail_sn++;
2340 trace_hfi1_sdma_in_sn(sde, tx->sn);
2341 WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
2342#endif
2343 sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
2344 sde->desc_avail -= tx->num_desc;
2345 return tail;
2346}
2347
2348/*
2349 * Check for progress
2350 */
2351static int sdma_check_progress(
2352 struct sdma_engine *sde,
2353 struct iowait *wait,
bcad2913
KW
2354 struct sdma_txreq *tx,
2355 bool pkts_sent)
77241056
MM
2356{
2357 int ret;
2358
2359 sde->desc_avail = sdma_descq_freecnt(sde);
2360 if (tx->num_desc <= sde->desc_avail)
2361 return -EAGAIN;
2362 /* pulse the head_lock */
2363 if (wait && wait->sleep) {
2364 unsigned seq;
2365
2366 seq = raw_seqcount_begin(
2367 (const seqcount_t *)&sde->head_lock.seqcount);
bcad2913 2368 ret = wait->sleep(sde, wait, tx, seq, pkts_sent);
77241056
MM
2369 if (ret == -EAGAIN)
2370 sde->desc_avail = sdma_descq_freecnt(sde);
e490974e 2371 } else {
77241056 2372 ret = -EBUSY;
e490974e 2373 }
77241056
MM
2374 return ret;
2375}
2376
2377/**
2378 * sdma_send_txreq() - submit a tx req to ring
2379 * @sde: sdma engine to use
2380 * @wait: wait structure to use when full (may be NULL)
2381 * @tx: sdma_txreq to submit
bcad2913 2382 * @pkts_sent: has any packet been sent yet?
77241056
MM
2383 *
 2384 * The call submits the tx into the ring. If an iowait structure is non-NULL,
2385 * the packet will be queued to the list in wait.
2386 *
2387 * Return:
2388 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
2389 * ring (wait == NULL)
2390 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
2391 */
2392int sdma_send_txreq(struct sdma_engine *sde,
2393 struct iowait *wait,
bcad2913
KW
2394 struct sdma_txreq *tx,
2395 bool pkts_sent)
77241056
MM
2396{
2397 int ret = 0;
2398 u16 tail;
2399 unsigned long flags;
2400
2401 /* user should have supplied entire packet */
2402 if (unlikely(tx->tlen))
2403 return -EINVAL;
2404 tx->wait = wait;
2405 spin_lock_irqsave(&sde->tail_lock, flags);
2406retry:
2407 if (unlikely(!__sdma_running(sde)))
2408 goto unlock_noconn;
2409 if (unlikely(tx->num_desc > sde->desc_avail))
2410 goto nodesc;
2411 tail = submit_tx(sde, tx);
2412 if (wait)
14553ca1 2413 iowait_sdma_inc(wait);
77241056
MM
2414 sdma_update_tail(sde, tail);
2415unlock:
2416 spin_unlock_irqrestore(&sde->tail_lock, flags);
2417 return ret;
2418unlock_noconn:
2419 if (wait)
14553ca1 2420 iowait_sdma_inc(wait);
77241056
MM
2421 tx->next_descq_idx = 0;
2422#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2423 tx->sn = sde->tail_sn++;
2424 trace_hfi1_sdma_in_sn(sde, tx->sn);
2425#endif
f4f30031 2426 spin_lock(&sde->flushlist_lock);
77241056 2427 list_add_tail(&tx->list, &sde->flushlist);
f4f30031 2428 spin_unlock(&sde->flushlist_lock);
77241056
MM
2429 if (wait) {
2430 wait->tx_count++;
2431 wait->count += tx->num_desc;
2432 }
2433 schedule_work(&sde->flush_worker);
2434 ret = -ECOMM;
2435 goto unlock;
2436nodesc:
bcad2913 2437 ret = sdma_check_progress(sde, wait, tx, pkts_sent);
77241056
MM
2438 if (ret == -EAGAIN) {
2439 ret = 0;
2440 goto retry;
2441 }
2442 sde->descq_full_count++;
2443 goto unlock;
2444}
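/*
 * Hedged caller sketch (illustrative, not part of the driver): acting on
 * the sdma_send_txreq() return codes documented above.  The tx is assumed
 * to be fully built (tx->tlen == 0) before this is called.
 */
static int example_submit_one(struct sdma_engine *sde, struct iowait *wait,
			      struct sdma_txreq *tx, bool pkts_sent)
{
	int ret = sdma_send_txreq(sde, wait, tx, pkts_sent);

	if (ret == -EIOCBQUEUED)
		return 0;	/* queued on @wait; progress resumes via the iowait wakeup */
	if (ret == -ECOMM)
		return ret;	/* engine not running; tx was placed on the flushlist */
	return ret;		/* 0 on success, -EINVAL or -EBUSY otherwise */
}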
2445
2446/**
2447 * sdma_send_txlist() - submit a list of tx req to ring
2448 * @sde: sdma engine to use
2449 * @wait: wait structure to use when full (may be NULL)
2450 * @tx_list: list of sdma_txreqs to submit
0b115ef1
HC
 2451 * @count_out: pointer to a u32 which, after return, will contain the total number of
2452 * sdma_txreqs removed from the tx_list. This will include sdma_txreqs
2453 * whose SDMA descriptors are submitted to the ring and the sdma_txreqs
2454 * which are added to SDMA engine flush list if the SDMA engine state is
2455 * not running.
77241056
MM
2456 *
2457 * The call submits the list into the ring.
2458 *
 2459 * If the iowait structure is non-NULL and not equal to the iowait list,
2460 * the unprocessed part of the list will be appended to the list in wait.
2461 *
2462 * In all cases, the tx_list will be updated so the head of the tx_list is
2463 * the list of descriptors that have yet to be transmitted.
2464 *
2465 * The intent of this call is to provide a more efficient
2466 * way of submitting multiple packets to SDMA while holding the tail
 2467 * side lock.
2468 *
2469 * Return:
0b115ef1 2470 * 0 - Success,
c7cbf2fa 2471 * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
77241056
MM
2472 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
2473 */
17fb4f29 2474int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
0b115ef1 2475 struct list_head *tx_list, u32 *count_out)
77241056
MM
2476{
2477 struct sdma_txreq *tx, *tx_next;
2478 int ret = 0;
2479 unsigned long flags;
2480 u16 tail = INVALID_TAIL;
0b115ef1 2481 u32 submit_count = 0, flush_count = 0, total_count;
77241056
MM
2482
2483 spin_lock_irqsave(&sde->tail_lock, flags);
2484retry:
2485 list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2486 tx->wait = wait;
2487 if (unlikely(!__sdma_running(sde)))
2488 goto unlock_noconn;
2489 if (unlikely(tx->num_desc > sde->desc_avail))
2490 goto nodesc;
2491 if (unlikely(tx->tlen)) {
2492 ret = -EINVAL;
2493 goto update_tail;
2494 }
2495 list_del_init(&tx->list);
2496 tail = submit_tx(sde, tx);
0b115ef1 2497 submit_count++;
77241056 2498 if (tail != INVALID_TAIL &&
0b115ef1 2499 (submit_count & SDMA_TAIL_UPDATE_THRESH) == 0) {
77241056
MM
2500 sdma_update_tail(sde, tail);
2501 tail = INVALID_TAIL;
2502 }
2503 }
2504update_tail:
0b115ef1 2505 total_count = submit_count + flush_count;
bcad2913 2506 if (wait) {
0b115ef1 2507 iowait_sdma_add(wait, total_count);
bcad2913
KW
2508 iowait_starve_clear(submit_count > 0, wait);
2509 }
77241056
MM
2510 if (tail != INVALID_TAIL)
2511 sdma_update_tail(sde, tail);
2512 spin_unlock_irqrestore(&sde->tail_lock, flags);
0b115ef1
HC
2513 *count_out = total_count;
2514 return ret;
77241056
MM
2515unlock_noconn:
2516 spin_lock(&sde->flushlist_lock);
2517 list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2518 tx->wait = wait;
2519 list_del_init(&tx->list);
77241056
MM
2520 tx->next_descq_idx = 0;
2521#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2522 tx->sn = sde->tail_sn++;
2523 trace_hfi1_sdma_in_sn(sde, tx->sn);
2524#endif
2525 list_add_tail(&tx->list, &sde->flushlist);
0b115ef1 2526 flush_count++;
77241056
MM
2527 if (wait) {
2528 wait->tx_count++;
2529 wait->count += tx->num_desc;
2530 }
2531 }
2532 spin_unlock(&sde->flushlist_lock);
2533 schedule_work(&sde->flush_worker);
2534 ret = -ECOMM;
2535 goto update_tail;
2536nodesc:
bcad2913 2537 ret = sdma_check_progress(sde, wait, tx, submit_count > 0);
77241056
MM
2538 if (ret == -EAGAIN) {
2539 ret = 0;
2540 goto retry;
2541 }
2542 sde->descq_full_count++;
2543 goto update_tail;
2544}
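/*
 * Hedged caller sketch (illustrative only): batching several pre-built
 * txreqs through sdma_send_txlist() so the tail-side lock is taken once
 * and the ring tail is written periodically rather than per packet.
 */
static int example_submit_batch(struct sdma_engine *sde, struct iowait *wait,
				struct list_head *tx_list)
{
	u32 count = 0;
	int ret = sdma_send_txlist(sde, wait, tx_list, &count);

	/* count covers both ring submissions and flush-list additions */
	pr_debug("sdma: %u txreqs consumed, ret %d\n", count, ret);
	return ret;
}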
2545
17fb4f29 2546static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event)
77241056
MM
2547{
2548 unsigned long flags;
2549
2550 spin_lock_irqsave(&sde->tail_lock, flags);
2551 write_seqlock(&sde->head_lock);
2552
2553 __sdma_process_event(sde, event);
2554
2555 if (sde->state.current_state == sdma_state_s99_running)
2556 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
2557
2558 write_sequnlock(&sde->head_lock);
2559 spin_unlock_irqrestore(&sde->tail_lock, flags);
2560}
2561
2562static void __sdma_process_event(struct sdma_engine *sde,
17fb4f29 2563 enum sdma_events event)
77241056
MM
2564{
2565 struct sdma_state *ss = &sde->state;
2566 int need_progress = 0;
2567
2568 /* CONFIG SDMA temporary */
2569#ifdef CONFIG_SDMA_VERBOSITY
2570 dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
2571 sdma_state_names[ss->current_state],
2572 sdma_event_names[event]);
2573#endif
2574
2575 switch (ss->current_state) {
2576 case sdma_state_s00_hw_down:
2577 switch (event) {
2578 case sdma_event_e00_go_hw_down:
2579 break;
2580 case sdma_event_e30_go_running:
2581 /*
2582 * If down, but running requested (usually result
2583 * of link up, then we need to start up.
2584 * This can happen when hw down is requested while
2585 * bringing the link up with traffic active on
4d114fdd
JJ
2586 * 7220, e.g.
2587 */
77241056 2588 ss->go_s99_running = 1;
6ffeb21f 2589 /* fall through -- and start dma engine */
77241056
MM
2590 case sdma_event_e10_go_hw_start:
2591 /* This reference means the state machine is started */
2592 sdma_get(&sde->state);
2593 sdma_set_state(sde,
17fb4f29 2594 sdma_state_s10_hw_start_up_halt_wait);
77241056
MM
2595 break;
2596 case sdma_event_e15_hw_halt_done:
2597 break;
2598 case sdma_event_e25_hw_clean_up_done:
2599 break;
2600 case sdma_event_e40_sw_cleaned:
2601 sdma_sw_tear_down(sde);
2602 break;
2603 case sdma_event_e50_hw_cleaned:
2604 break;
2605 case sdma_event_e60_hw_halted:
2606 break;
2607 case sdma_event_e70_go_idle:
2608 break;
2609 case sdma_event_e80_hw_freeze:
2610 break;
2611 case sdma_event_e81_hw_frozen:
2612 break;
2613 case sdma_event_e82_hw_unfreeze:
2614 break;
2615 case sdma_event_e85_link_down:
2616 break;
2617 case sdma_event_e90_sw_halted:
2618 break;
2619 }
2620 break;
2621
2622 case sdma_state_s10_hw_start_up_halt_wait:
2623 switch (event) {
2624 case sdma_event_e00_go_hw_down:
2625 sdma_set_state(sde, sdma_state_s00_hw_down);
2626 sdma_sw_tear_down(sde);
2627 break;
2628 case sdma_event_e10_go_hw_start:
2629 break;
2630 case sdma_event_e15_hw_halt_done:
2631 sdma_set_state(sde,
17fb4f29 2632 sdma_state_s15_hw_start_up_clean_wait);
77241056
MM
2633 sdma_start_hw_clean_up(sde);
2634 break;
2635 case sdma_event_e25_hw_clean_up_done:
2636 break;
2637 case sdma_event_e30_go_running:
2638 ss->go_s99_running = 1;
2639 break;
2640 case sdma_event_e40_sw_cleaned:
2641 break;
2642 case sdma_event_e50_hw_cleaned:
2643 break;
2644 case sdma_event_e60_hw_halted:
8edf7502 2645 schedule_work(&sde->err_halt_worker);
77241056
MM
2646 break;
2647 case sdma_event_e70_go_idle:
2648 ss->go_s99_running = 0;
2649 break;
2650 case sdma_event_e80_hw_freeze:
2651 break;
2652 case sdma_event_e81_hw_frozen:
2653 break;
2654 case sdma_event_e82_hw_unfreeze:
2655 break;
2656 case sdma_event_e85_link_down:
2657 break;
2658 case sdma_event_e90_sw_halted:
2659 break;
2660 }
2661 break;
2662
2663 case sdma_state_s15_hw_start_up_clean_wait:
2664 switch (event) {
2665 case sdma_event_e00_go_hw_down:
2666 sdma_set_state(sde, sdma_state_s00_hw_down);
2667 sdma_sw_tear_down(sde);
2668 break;
2669 case sdma_event_e10_go_hw_start:
2670 break;
2671 case sdma_event_e15_hw_halt_done:
2672 break;
2673 case sdma_event_e25_hw_clean_up_done:
2674 sdma_hw_start_up(sde);
2675 sdma_set_state(sde, ss->go_s99_running ?
2676 sdma_state_s99_running :
2677 sdma_state_s20_idle);
2678 break;
2679 case sdma_event_e30_go_running:
2680 ss->go_s99_running = 1;
2681 break;
2682 case sdma_event_e40_sw_cleaned:
2683 break;
2684 case sdma_event_e50_hw_cleaned:
2685 break;
2686 case sdma_event_e60_hw_halted:
2687 break;
2688 case sdma_event_e70_go_idle:
2689 ss->go_s99_running = 0;
2690 break;
2691 case sdma_event_e80_hw_freeze:
2692 break;
2693 case sdma_event_e81_hw_frozen:
2694 break;
2695 case sdma_event_e82_hw_unfreeze:
2696 break;
2697 case sdma_event_e85_link_down:
2698 break;
2699 case sdma_event_e90_sw_halted:
2700 break;
2701 }
2702 break;
2703
2704 case sdma_state_s20_idle:
2705 switch (event) {
2706 case sdma_event_e00_go_hw_down:
2707 sdma_set_state(sde, sdma_state_s00_hw_down);
2708 sdma_sw_tear_down(sde);
2709 break;
2710 case sdma_event_e10_go_hw_start:
2711 break;
2712 case sdma_event_e15_hw_halt_done:
2713 break;
2714 case sdma_event_e25_hw_clean_up_done:
2715 break;
2716 case sdma_event_e30_go_running:
2717 sdma_set_state(sde, sdma_state_s99_running);
2718 ss->go_s99_running = 1;
2719 break;
2720 case sdma_event_e40_sw_cleaned:
2721 break;
2722 case sdma_event_e50_hw_cleaned:
2723 break;
2724 case sdma_event_e60_hw_halted:
2725 sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
8edf7502 2726 schedule_work(&sde->err_halt_worker);
77241056
MM
2727 break;
2728 case sdma_event_e70_go_idle:
2729 break;
2730 case sdma_event_e85_link_down:
2731 /* fall through */
2732 case sdma_event_e80_hw_freeze:
2733 sdma_set_state(sde, sdma_state_s80_hw_freeze);
2734 atomic_dec(&sde->dd->sdma_unfreeze_count);
2735 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2736 break;
2737 case sdma_event_e81_hw_frozen:
2738 break;
2739 case sdma_event_e82_hw_unfreeze:
2740 break;
2741 case sdma_event_e90_sw_halted:
2742 break;
2743 }
2744 break;
2745
2746 case sdma_state_s30_sw_clean_up_wait:
2747 switch (event) {
2748 case sdma_event_e00_go_hw_down:
2749 sdma_set_state(sde, sdma_state_s00_hw_down);
2750 break;
2751 case sdma_event_e10_go_hw_start:
2752 break;
2753 case sdma_event_e15_hw_halt_done:
2754 break;
2755 case sdma_event_e25_hw_clean_up_done:
2756 break;
2757 case sdma_event_e30_go_running:
2758 ss->go_s99_running = 1;
2759 break;
2760 case sdma_event_e40_sw_cleaned:
2761 sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
2762 sdma_start_hw_clean_up(sde);
2763 break;
2764 case sdma_event_e50_hw_cleaned:
2765 break;
2766 case sdma_event_e60_hw_halted:
2767 break;
2768 case sdma_event_e70_go_idle:
2769 ss->go_s99_running = 0;
2770 break;
2771 case sdma_event_e80_hw_freeze:
2772 break;
2773 case sdma_event_e81_hw_frozen:
2774 break;
2775 case sdma_event_e82_hw_unfreeze:
2776 break;
2777 case sdma_event_e85_link_down:
2778 ss->go_s99_running = 0;
2779 break;
2780 case sdma_event_e90_sw_halted:
2781 break;
2782 }
2783 break;
2784
2785 case sdma_state_s40_hw_clean_up_wait:
2786 switch (event) {
2787 case sdma_event_e00_go_hw_down:
2788 sdma_set_state(sde, sdma_state_s00_hw_down);
8edf7502 2789 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
77241056
MM
2790 break;
2791 case sdma_event_e10_go_hw_start:
2792 break;
2793 case sdma_event_e15_hw_halt_done:
2794 break;
2795 case sdma_event_e25_hw_clean_up_done:
2796 sdma_hw_start_up(sde);
2797 sdma_set_state(sde, ss->go_s99_running ?
2798 sdma_state_s99_running :
2799 sdma_state_s20_idle);
2800 break;
2801 case sdma_event_e30_go_running:
2802 ss->go_s99_running = 1;
2803 break;
2804 case sdma_event_e40_sw_cleaned:
2805 break;
2806 case sdma_event_e50_hw_cleaned:
2807 break;
2808 case sdma_event_e60_hw_halted:
2809 break;
2810 case sdma_event_e70_go_idle:
2811 ss->go_s99_running = 0;
2812 break;
2813 case sdma_event_e80_hw_freeze:
2814 break;
2815 case sdma_event_e81_hw_frozen:
2816 break;
2817 case sdma_event_e82_hw_unfreeze:
2818 break;
2819 case sdma_event_e85_link_down:
2820 ss->go_s99_running = 0;
2821 break;
2822 case sdma_event_e90_sw_halted:
2823 break;
2824 }
2825 break;
2826
2827 case sdma_state_s50_hw_halt_wait:
2828 switch (event) {
2829 case sdma_event_e00_go_hw_down:
2830 sdma_set_state(sde, sdma_state_s00_hw_down);
8edf7502 2831 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
77241056
MM
2832 break;
2833 case sdma_event_e10_go_hw_start:
2834 break;
2835 case sdma_event_e15_hw_halt_done:
2836 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
8edf7502 2837 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
77241056
MM
2838 break;
2839 case sdma_event_e25_hw_clean_up_done:
2840 break;
2841 case sdma_event_e30_go_running:
2842 ss->go_s99_running = 1;
2843 break;
2844 case sdma_event_e40_sw_cleaned:
2845 break;
2846 case sdma_event_e50_hw_cleaned:
2847 break;
2848 case sdma_event_e60_hw_halted:
8edf7502 2849 schedule_work(&sde->err_halt_worker);
77241056
MM
2850 break;
2851 case sdma_event_e70_go_idle:
2852 ss->go_s99_running = 0;
2853 break;
2854 case sdma_event_e80_hw_freeze:
2855 break;
2856 case sdma_event_e81_hw_frozen:
2857 break;
2858 case sdma_event_e82_hw_unfreeze:
2859 break;
2860 case sdma_event_e85_link_down:
2861 ss->go_s99_running = 0;
2862 break;
2863 case sdma_event_e90_sw_halted:
2864 break;
2865 }
2866 break;
2867
2868 case sdma_state_s60_idle_halt_wait:
2869 switch (event) {
2870 case sdma_event_e00_go_hw_down:
2871 sdma_set_state(sde, sdma_state_s00_hw_down);
8edf7502 2872 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
77241056
MM
2873 break;
2874 case sdma_event_e10_go_hw_start:
2875 break;
2876 case sdma_event_e15_hw_halt_done:
2877 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
8edf7502 2878 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
77241056
MM
2879 break;
2880 case sdma_event_e25_hw_clean_up_done:
2881 break;
2882 case sdma_event_e30_go_running:
2883 ss->go_s99_running = 1;
2884 break;
2885 case sdma_event_e40_sw_cleaned:
2886 break;
2887 case sdma_event_e50_hw_cleaned:
2888 break;
2889 case sdma_event_e60_hw_halted:
8edf7502 2890 schedule_work(&sde->err_halt_worker);
77241056
MM
2891 break;
2892 case sdma_event_e70_go_idle:
2893 ss->go_s99_running = 0;
2894 break;
2895 case sdma_event_e80_hw_freeze:
2896 break;
2897 case sdma_event_e81_hw_frozen:
2898 break;
2899 case sdma_event_e82_hw_unfreeze:
2900 break;
2901 case sdma_event_e85_link_down:
2902 break;
2903 case sdma_event_e90_sw_halted:
2904 break;
2905 }
2906 break;
2907
2908 case sdma_state_s80_hw_freeze:
2909 switch (event) {
2910 case sdma_event_e00_go_hw_down:
2911 sdma_set_state(sde, sdma_state_s00_hw_down);
8edf7502 2912 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
77241056
MM
2913 break;
2914 case sdma_event_e10_go_hw_start:
2915 break;
2916 case sdma_event_e15_hw_halt_done:
2917 break;
2918 case sdma_event_e25_hw_clean_up_done:
2919 break;
2920 case sdma_event_e30_go_running:
2921 ss->go_s99_running = 1;
2922 break;
2923 case sdma_event_e40_sw_cleaned:
2924 break;
2925 case sdma_event_e50_hw_cleaned:
2926 break;
2927 case sdma_event_e60_hw_halted:
2928 break;
2929 case sdma_event_e70_go_idle:
2930 ss->go_s99_running = 0;
2931 break;
2932 case sdma_event_e80_hw_freeze:
2933 break;
2934 case sdma_event_e81_hw_frozen:
2935 sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
8edf7502 2936 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
77241056
MM
2937 break;
2938 case sdma_event_e82_hw_unfreeze:
2939 break;
2940 case sdma_event_e85_link_down:
2941 break;
2942 case sdma_event_e90_sw_halted:
2943 break;
2944 }
2945 break;
2946
2947 case sdma_state_s82_freeze_sw_clean:
2948 switch (event) {
2949 case sdma_event_e00_go_hw_down:
2950 sdma_set_state(sde, sdma_state_s00_hw_down);
8edf7502 2951 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
77241056
MM
2952 break;
2953 case sdma_event_e10_go_hw_start:
2954 break;
2955 case sdma_event_e15_hw_halt_done:
2956 break;
2957 case sdma_event_e25_hw_clean_up_done:
2958 break;
2959 case sdma_event_e30_go_running:
2960 ss->go_s99_running = 1;
2961 break;
2962 case sdma_event_e40_sw_cleaned:
2963 /* notify caller this engine is done cleaning */
2964 atomic_dec(&sde->dd->sdma_unfreeze_count);
2965 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2966 break;
2967 case sdma_event_e50_hw_cleaned:
2968 break;
2969 case sdma_event_e60_hw_halted:
2970 break;
2971 case sdma_event_e70_go_idle:
2972 ss->go_s99_running = 0;
2973 break;
2974 case sdma_event_e80_hw_freeze:
2975 break;
2976 case sdma_event_e81_hw_frozen:
2977 break;
2978 case sdma_event_e82_hw_unfreeze:
2979 sdma_hw_start_up(sde);
2980 sdma_set_state(sde, ss->go_s99_running ?
2981 sdma_state_s99_running :
2982 sdma_state_s20_idle);
2983 break;
2984 case sdma_event_e85_link_down:
2985 break;
2986 case sdma_event_e90_sw_halted:
2987 break;
2988 }
2989 break;
2990
2991 case sdma_state_s99_running:
2992 switch (event) {
2993 case sdma_event_e00_go_hw_down:
2994 sdma_set_state(sde, sdma_state_s00_hw_down);
8edf7502 2995 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
77241056
MM
2996 break;
2997 case sdma_event_e10_go_hw_start:
2998 break;
2999 case sdma_event_e15_hw_halt_done:
3000 break;
3001 case sdma_event_e25_hw_clean_up_done:
3002 break;
3003 case sdma_event_e30_go_running:
3004 break;
3005 case sdma_event_e40_sw_cleaned:
3006 break;
3007 case sdma_event_e50_hw_cleaned:
3008 break;
3009 case sdma_event_e60_hw_halted:
3010 need_progress = 1;
3011 sdma_err_progress_check_schedule(sde);
6ffeb21f 3012 /* fall through */
77241056
MM
3013 case sdma_event_e90_sw_halted:
3014 /*
3015 * SW initiated halt does not perform engines
3016 * progress check
3017 */
3018 sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
8edf7502 3019 schedule_work(&sde->err_halt_worker);
77241056
MM
3020 break;
3021 case sdma_event_e70_go_idle:
3022 sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
3023 break;
3024 case sdma_event_e85_link_down:
3025 ss->go_s99_running = 0;
3026 /* fall through */
3027 case sdma_event_e80_hw_freeze:
3028 sdma_set_state(sde, sdma_state_s80_hw_freeze);
3029 atomic_dec(&sde->dd->sdma_unfreeze_count);
3030 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
3031 break;
3032 case sdma_event_e81_hw_frozen:
3033 break;
3034 case sdma_event_e82_hw_unfreeze:
3035 break;
3036 }
3037 break;
3038 }
3039
3040 ss->last_event = event;
3041 if (need_progress)
3042 sdma_make_progress(sde, 0);
3043}
3044
3045/*
3046 * _extend_sdma_tx_descs() - helper to extend txreq
3047 *
3048 * This is called once the initial nominal allocation
3049 * of descriptors in the sdma_txreq is exhausted.
3050 *
3051 * The code will bump the allocation up to the max
f4d26d81
NV
 3052 * of MAX_DESC (64) descriptors. There doesn't seem to be
3053 * much point in an interim step. The last descriptor
3054 * is reserved for coalesce buffer in order to support
3055 * cases where input packet has >MAX_DESC iovecs.
77241056
MM
3056 *
3057 */
f4d26d81 3058static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
77241056
MM
3059{
3060 int i;
3061
f4d26d81
NV
3062 /* Handle last descriptor */
3063 if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
3064 /* if tlen is 0, it is for padding, release last descriptor */
3065 if (!tx->tlen) {
3066 tx->desc_limit = MAX_DESC;
3067 } else if (!tx->coalesce_buf) {
3068 /* allocate coalesce buffer with space for padding */
3069 tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
3070 GFP_ATOMIC);
3071 if (!tx->coalesce_buf)
a5a9e8cc 3072 goto enomem;
f4d26d81
NV
3073 tx->coalesce_idx = 0;
3074 }
3075 return 0;
3076 }
3077
3078 if (unlikely(tx->num_desc == MAX_DESC))
a5a9e8cc 3079 goto enomem;
f4d26d81 3080
77241056
MM
3081 tx->descp = kmalloc_array(
3082 MAX_DESC,
3083 sizeof(struct sdma_desc),
3084 GFP_ATOMIC);
3085 if (!tx->descp)
a5a9e8cc 3086 goto enomem;
f4d26d81
NV
3087
3088 /* reserve last descriptor for coalescing */
3089 tx->desc_limit = MAX_DESC - 1;
77241056
MM
3090 /* copy ones already built */
3091 for (i = 0; i < tx->num_desc; i++)
3092 tx->descp[i] = tx->descs[i];
3093 return 0;
a5a9e8cc 3094enomem:
63df8e09 3095 __sdma_txclean(dd, tx);
a5a9e8cc 3096 return -ENOMEM;
77241056
MM
3097}
3098
f4d26d81
NV
3099/*
3100 * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
3101 *
3102 * This is called once the initial nominal allocation of descriptors
3103 * in the sdma_txreq is exhausted.
3104 *
3105 * This function calls _extend_sdma_tx_descs to extend or allocate
 3106 * coalesce buffer. If there is an allocated coalesce buffer, it will
3107 * copy the input packet data into the coalesce buffer. It also adds
16733b88 3108 * the coalesce buffer descriptor once the whole packet is received.
f4d26d81
NV
3109 *
3110 * Return:
3111 * <0 - error
3112 * 0 - coalescing, don't populate descriptor
3113 * 1 - continue with populating descriptor
3114 */
3115int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
3116 int type, void *kvaddr, struct page *page,
3117 unsigned long offset, u16 len)
3118{
3119 int pad_len, rval;
3120 dma_addr_t addr;
3121
3122 rval = _extend_sdma_tx_descs(dd, tx);
3123 if (rval) {
63df8e09 3124 __sdma_txclean(dd, tx);
f4d26d81
NV
3125 return rval;
3126 }
3127
3128 /* If coalesce buffer is allocated, copy data into it */
3129 if (tx->coalesce_buf) {
3130 if (type == SDMA_MAP_NONE) {
63df8e09 3131 __sdma_txclean(dd, tx);
f4d26d81
NV
3132 return -EINVAL;
3133 }
3134
3135 if (type == SDMA_MAP_PAGE) {
3136 kvaddr = kmap(page);
3137 kvaddr += offset;
3138 } else if (WARN_ON(!kvaddr)) {
63df8e09 3139 __sdma_txclean(dd, tx);
f4d26d81
NV
3140 return -EINVAL;
3141 }
3142
3143 memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
3144 tx->coalesce_idx += len;
3145 if (type == SDMA_MAP_PAGE)
3146 kunmap(page);
3147
3148 /* If there is more data, return */
3149 if (tx->tlen - tx->coalesce_idx)
3150 return 0;
3151
3152 /* Whole packet is received; add any padding */
3153 pad_len = tx->packet_len & (sizeof(u32) - 1);
3154 if (pad_len) {
3155 pad_len = sizeof(u32) - pad_len;
3156 memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len);
3157 /* padding is taken care of for coalescing case */
3158 tx->packet_len += pad_len;
3159 tx->tlen += pad_len;
3160 }
3161
3162 /* dma map the coalesce buffer */
3163 addr = dma_map_single(&dd->pcidev->dev,
3164 tx->coalesce_buf,
3165 tx->tlen,
3166 DMA_TO_DEVICE);
3167
3168 if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
63df8e09 3169 __sdma_txclean(dd, tx);
f4d26d81
NV
3170 return -ENOSPC;
3171 }
3172
3173 /* Add descriptor for coalesce buffer */
3174 tx->desc_limit = MAX_DESC;
3175 return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
3176 addr, tx->tlen);
3177 }
3178
3179 return 1;
3180}
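/*
 * Hedged caller sketch (not one of the real txadd helpers): branching on
 * the three documented return values of ext_coal_sdma_tx_descs() for a
 * kernel-virtual buffer.  SDMA_MAP_SINGLE is the type the coalesce path
 * itself uses for its buffer; the helper name here is illustrative.
 */
static int example_handle_ext_coal(struct hfi1_devdata *dd,
				   struct sdma_txreq *tx,
				   void *kvaddr, u16 len)
{
	int rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE, kvaddr,
					  NULL, 0, len);

	if (rval <= 0)
		return rval;	/* < 0: error (tx cleaned); 0: data coalesced */

	/* rval == 1: the caller maps kvaddr and adds a real descriptor */
	return 1;
}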
3181
77241056
MM
3182/* Update sdes when the lmc changes */
3183void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
3184{
3185 struct sdma_engine *sde;
3186 int i;
3187 u64 sreg;
3188
3189 sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
3190 SD(CHECK_SLID_MASK_SHIFT)) |
3191 (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
3192 SD(CHECK_SLID_VALUE_SHIFT));
3193
3194 for (i = 0; i < dd->num_sdma; i++) {
3195 hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
3196 i, (u32)sreg);
3197 sde = &dd->per_sdma[i];
3198 write_sde_csr(sde, SD(CHECK_SLID), sreg);
3199 }
3200}
3201
3202/* tx not dword sized - pad */
3203int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
3204{
3205 int rval = 0;
3206
f4d26d81 3207 tx->num_desc++;
77241056
MM
3208 if ((unlikely(tx->num_desc == tx->desc_limit))) {
3209 rval = _extend_sdma_tx_descs(dd, tx);
f4d26d81 3210 if (rval) {
63df8e09 3211 __sdma_txclean(dd, tx);
77241056 3212 return rval;
f4d26d81 3213 }
77241056 3214 }
f4d26d81 3215 /* finish the one just added */
77241056
MM
3216 make_tx_sdma_desc(
3217 tx,
3218 SDMA_MAP_NONE,
3219 dd->sdma_pad_phys,
3220 sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
3221 _sdma_close_tx(dd, tx);
3222 return rval;
3223}
3224
3225/*
3226 * Add ahg to the sdma_txreq
3227 *
3228 * The logic will consume up to 3
3229 * descriptors at the beginning of
3230 * sdma_txreq.
3231 */
3232void _sdma_txreq_ahgadd(
3233 struct sdma_txreq *tx,
3234 u8 num_ahg,
3235 u8 ahg_entry,
3236 u32 *ahg,
3237 u8 ahg_hlen)
3238{
3239 u32 i, shift = 0, desc = 0;
3240 u8 mode;
3241
3242 WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
3243 /* compute mode */
3244 if (num_ahg == 1)
3245 mode = SDMA_AHG_APPLY_UPDATE1;
3246 else if (num_ahg <= 5)
3247 mode = SDMA_AHG_APPLY_UPDATE2;
3248 else
3249 mode = SDMA_AHG_APPLY_UPDATE3;
3250 tx->num_desc++;
 3251 /* initialize the consumed descriptors to zero */
3252 switch (mode) {
3253 case SDMA_AHG_APPLY_UPDATE3:
3254 tx->num_desc++;
3255 tx->descs[2].qw[0] = 0;
3256 tx->descs[2].qw[1] = 0;
3257 /* FALLTHROUGH */
3258 case SDMA_AHG_APPLY_UPDATE2:
3259 tx->num_desc++;
3260 tx->descs[1].qw[0] = 0;
3261 tx->descs[1].qw[1] = 0;
3262 break;
3263 }
3264 ahg_hlen >>= 2;
3265 tx->descs[0].qw[1] |=
3266 (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
3267 << SDMA_DESC1_HEADER_INDEX_SHIFT) |
3268 (((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
3269 << SDMA_DESC1_HEADER_DWS_SHIFT) |
3270 (((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
3271 << SDMA_DESC1_HEADER_MODE_SHIFT) |
3272 (((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
3273 << SDMA_DESC1_HEADER_UPDATE1_SHIFT);
3274 for (i = 0; i < (num_ahg - 1); i++) {
3275 if (!shift && !(i & 2))
3276 desc++;
3277 tx->descs[desc].qw[!!(i & 2)] |=
3278 (((u64)ahg[i + 1])
3279 << shift);
3280 shift = (shift + 32) & 63;
3281 }
3282}
3283
3284/**
3285 * sdma_ahg_alloc - allocate an AHG entry
3286 * @sde: engine to allocate from
3287 *
3288 * Return:
3289 * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled,
3290 * -ENOSPC if an entry is not available
3291 */
3292int sdma_ahg_alloc(struct sdma_engine *sde)
3293{
3294 int nr;
3295 int oldbit;
3296
3297 if (!sde) {
3298 trace_hfi1_ahg_allocate(sde, -EINVAL);
3299 return -EINVAL;
3300 }
3301 while (1) {
6aa7de05 3302 nr = ffz(READ_ONCE(sde->ahg_bits));
77241056
MM
3303 if (nr > 31) {
3304 trace_hfi1_ahg_allocate(sde, -ENOSPC);
3305 return -ENOSPC;
3306 }
3307 oldbit = test_and_set_bit(nr, &sde->ahg_bits);
3308 if (!oldbit)
3309 break;
3310 cpu_relax();
3311 }
3312 trace_hfi1_ahg_allocate(sde, nr);
3313 return nr;
3314}
3315
3316/**
3317 * sdma_ahg_free - free an AHG entry
3318 * @sde: engine to return AHG entry
3319 * @ahg_index: index to free
3320 *
 3321 * This routine frees the indicated AHG entry.
3322 */
3323void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
3324{
3325 if (!sde)
3326 return;
3327 trace_hfi1_ahg_deallocate(sde, ahg_index);
3328 if (ahg_index < 0 || ahg_index > 31)
3329 return;
3330 clear_bit(ahg_index, &sde->ahg_bits);
3331}
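/*
 * Hedged usage sketch (not driver code): the allocate/use/free pattern
 * for AHG entries described above.  Programming the header template and
 * attaching it to txreqs via _sdma_txreq_ahgadd() is outside this snippet.
 */
static int example_use_ahg(struct sdma_engine *sde)
{
	int ahg_index = sdma_ahg_alloc(sde);

	if (ahg_index < 0)
		return ahg_index;	/* -EINVAL, -EOPNOTSUPP or -ENOSPC */

	/* ... build and submit txreqs that reference ahg_index ... */

	sdma_ahg_free(sde, ahg_index);
	return 0;
}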
3332
3333/*
3334 * SPC freeze handling for SDMA engines. Called when the driver knows
3335 * the SPC is going into a freeze but before the freeze is fully
3336 * settled. Generally an error interrupt.
3337 *
3338 * This event will pull the engine out of running so no more entries can be
3339 * added to the engine's queue.
3340 */
3341void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
3342{
3343 int i;
3344 enum sdma_events event = link_down ? sdma_event_e85_link_down :
3345 sdma_event_e80_hw_freeze;
3346
3347 /* set up the wait but do not wait here */
3348 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
3349
3350 /* tell all engines to stop running and wait */
3351 for (i = 0; i < dd->num_sdma; i++)
3352 sdma_process_event(&dd->per_sdma[i], event);
3353
3354 /* sdma_freeze() will wait for all engines to have stopped */
3355}
3356
3357/*
3358 * SPC freeze handling for SDMA engines. Called when the driver knows
3359 * the SPC is fully frozen.
3360 */
3361void sdma_freeze(struct hfi1_devdata *dd)
3362{
3363 int i;
3364 int ret;
3365
3366 /*
3367 * Make sure all engines have moved out of the running state before
3368 * continuing.
3369 */
3370 ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
17fb4f29
JJ
3371 atomic_read(&dd->sdma_unfreeze_count) <=
3372 0);
77241056
MM
3373 /* interrupted or count is negative, then unloading - just exit */
3374 if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
3375 return;
3376
3377 /* set up the count for the next wait */
3378 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
3379
 3380 /* tell all engines that the SPC is frozen so they can start cleaning */
3381 for (i = 0; i < dd->num_sdma; i++)
3382 sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);
3383
3384 /*
3385 * Wait for everyone to finish software clean before exiting. The
3386 * software clean will read engine CSRs, so must be completed before
3387 * the next step, which will clear the engine CSRs.
3388 */
50e5dcbe 3389 (void)wait_event_interruptible(dd->sdma_unfreeze_wq,
77241056
MM
3390 atomic_read(&dd->sdma_unfreeze_count) <= 0);
3391 /* no need to check results - done no matter what */
3392}
3393
3394/*
3395 * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen.
3396 *
 3397 * The SPC freeze acts like an SDMA halt and a hardware clean combined. All
3398 * that is left is a software clean. We could do it after the SPC is fully
3399 * frozen, but then we'd have to add another state to wait for the unfreeze.
3400 * Instead, just defer the software clean until the unfreeze step.
3401 */
3402void sdma_unfreeze(struct hfi1_devdata *dd)
3403{
3404 int i;
3405
 3406 /* tell all engines to start freeze clean up */
3407 for (i = 0; i < dd->num_sdma; i++)
3408 sdma_process_event(&dd->per_sdma[i],
17fb4f29 3409 sdma_event_e82_hw_unfreeze);
77241056
MM
3410}
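/*
 * Hedged sketch of the SPC freeze sequence implied by the comments on the
 * three routines above; the SPC-side freeze/unfreeze steps are handled
 * elsewhere in the driver and are only hinted at here.
 */
static void example_spc_freeze_flow(struct hfi1_devdata *dd, int link_down)
{
	sdma_freeze_notify(dd, link_down);	/* engines leave the running state */
	/* ... wait for the SPC itself to be fully frozen ... */
	sdma_freeze(dd);			/* wait for halt, then engine clean-up */
	/* ... unfreeze the SPC ... */
	sdma_unfreeze(dd);			/* engines restart via e82_hw_unfreeze */
}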
3411
3412/**
3413 * _sdma_engine_progress_schedule() - schedule progress on engine
3414 * @sde: sdma_engine to schedule progress
3415 *
3416 */
3417void _sdma_engine_progress_schedule(
3418 struct sdma_engine *sde)
3419{
3420 trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
3421 /* assume we have selected a good cpu */
3422 write_csr(sde->dd,
17fb4f29
JJ
3423 CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
3424 sde->progress_mask);
77241056 3425}