2 * Copyright(c) 2015, 2016 Intel Corporation.
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 #include <linux/spinlock.h>
49 #include <linux/seqlock.h>
50 #include <linux/netdevice.h>
51 #include <linux/moduleparam.h>
52 #include <linux/bitops.h>
53 #include <linux/timer.h>
54 #include <linux/vmalloc.h>
55 #include <linux/highmem.h>
64 /* must be a power of 2, >= 64 and <= 32768 */
65 #define SDMA_DESCQ_CNT 2048
66 #define SDMA_DESC_INTR 64
67 #define INVALID_TAIL 0xffff
69 static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
70 module_param(sdma_descq_cnt, uint, S_IRUGO);
71 MODULE_PARM_DESC(sdma_descq_cnt, "Number of SDMA descq entries");
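/*
 * Usage note (illustrative, assuming the driver is loaded as the hfi1
 * module): the ring size can be overridden at load time, e.g.
 * "modprobe hfi1 sdma_descq_cnt=4096"; the value is validated later by
 * sdma_get_descq_cnt(), which falls back to SDMA_DESCQ_CNT for anything
 * that is not a power of 2 in the range 64..32768.
 */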
73 static uint sdma_idle_cnt = 250;
74 module_param(sdma_idle_cnt, uint, S_IRUGO);
75 MODULE_PARM_DESC(sdma_idle_cnt, "sdma interrupt idle delay (ns, default 250)");
78 module_param_named(num_sdma, mod_num_sdma, uint, S_IRUGO);
79 MODULE_PARM_DESC(num_sdma, "Set max number SDMA engines to use");
81 static uint sdma_desct_intr = SDMA_DESC_INTR;
82 module_param_named(desct_intr, sdma_desct_intr, uint, S_IRUGO | S_IWUSR);
83 MODULE_PARM_DESC(desct_intr, "Number of SDMA descriptors before interrupt");
85 #define SDMA_WAIT_BATCH_SIZE 20
86 /* max wait time for a SDMA engine to indicate it has halted */
87 #define SDMA_ERR_HALT_TIMEOUT 10 /* ms */
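/*
 * sdma_err_halt_wait() polls SD(STATUS) for the engine-halted bit,
 * sleeping 80-120us between reads, and gives up after this timeout.
 */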
88 /* all SDMA engine errors that cause a halt */
90 #define SD(name) SEND_DMA_##name
91 #define ALL_SDMA_ENG_HALT_ERRS \
92 (SD(ENG_ERR_STATUS_SDMA_WRONG_DW_ERR_SMASK) \
93 | SD(ENG_ERR_STATUS_SDMA_GEN_MISMATCH_ERR_SMASK) \
94 | SD(ENG_ERR_STATUS_SDMA_TOO_LONG_ERR_SMASK) \
95 | SD(ENG_ERR_STATUS_SDMA_TAIL_OUT_OF_BOUNDS_ERR_SMASK) \
96 | SD(ENG_ERR_STATUS_SDMA_FIRST_DESC_ERR_SMASK) \
97 | SD(ENG_ERR_STATUS_SDMA_MEM_READ_ERR_SMASK) \
98 | SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK) \
99 | SD(ENG_ERR_STATUS_SDMA_LENGTH_MISMATCH_ERR_SMASK) \
100 | SD(ENG_ERR_STATUS_SDMA_PACKET_DESC_OVERFLOW_ERR_SMASK) \
101 | SD(ENG_ERR_STATUS_SDMA_HEADER_SELECT_ERR_SMASK) \
102 | SD(ENG_ERR_STATUS_SDMA_HEADER_ADDRESS_ERR_SMASK) \
103 | SD(ENG_ERR_STATUS_SDMA_HEADER_LENGTH_ERR_SMASK) \
104 | SD(ENG_ERR_STATUS_SDMA_TIMEOUT_ERR_SMASK) \
105 | SD(ENG_ERR_STATUS_SDMA_DESC_TABLE_UNC_ERR_SMASK) \
106 | SD(ENG_ERR_STATUS_SDMA_ASSEMBLY_UNC_ERR_SMASK) \
107 | SD(ENG_ERR_STATUS_SDMA_PACKET_TRACKING_UNC_ERR_SMASK) \
108 | SD(ENG_ERR_STATUS_SDMA_HEADER_STORAGE_UNC_ERR_SMASK) \
109 | SD(ENG_ERR_STATUS_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SMASK))
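/*
 * ALL_SDMA_ENG_HALT_ERRS is used by sdma_engine_error() to decide whether
 * a reported error status requires sending the engine through the halted
 * path of the state machine (sdma_event_e60_hw_halted).
 */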
111 /* sdma_sendctrl operations */
112 #define SDMA_SENDCTRL_OP_ENABLE BIT(0)
113 #define SDMA_SENDCTRL_OP_INTENABLE BIT(1)
114 #define SDMA_SENDCTRL_OP_HALT BIT(2)
115 #define SDMA_SENDCTRL_OP_CLEANUP BIT(3)
117 /* handle long defines */
118 #define SDMA_EGRESS_PACKET_OCCUPANCY_SMASK \
119 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SMASK
120 #define SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT \
121 SEND_EGRESS_SEND_DMA_STATUS_SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT
123 static const char * const sdma_state_names[] = {
124 [sdma_state_s00_hw_down] = "s00_HwDown",
125 [sdma_state_s10_hw_start_up_halt_wait] = "s10_HwStartUpHaltWait",
126 [sdma_state_s15_hw_start_up_clean_wait] = "s15_HwStartUpCleanWait",
127 [sdma_state_s20_idle] = "s20_Idle",
128 [sdma_state_s30_sw_clean_up_wait] = "s30_SwCleanUpWait",
129 [sdma_state_s40_hw_clean_up_wait] = "s40_HwCleanUpWait",
130 [sdma_state_s50_hw_halt_wait] = "s50_HwHaltWait",
131 [sdma_state_s60_idle_halt_wait] = "s60_IdleHaltWait",
132 [sdma_state_s80_hw_freeze] = "s80_HwFreeze",
133 [sdma_state_s82_freeze_sw_clean] = "s82_FreezeSwClean",
134 [sdma_state_s99_running] = "s99_Running",
137 static const char * const sdma_event_names[] = {
138 [sdma_event_e00_go_hw_down] = "e00_GoHwDown",
139 [sdma_event_e10_go_hw_start] = "e10_GoHwStart",
140 [sdma_event_e15_hw_halt_done] = "e15_HwHaltDone",
141 [sdma_event_e25_hw_clean_up_done] = "e25_HwCleanUpDone",
142 [sdma_event_e30_go_running] = "e30_GoRunning",
143 [sdma_event_e40_sw_cleaned] = "e40_SwCleaned",
144 [sdma_event_e50_hw_cleaned] = "e50_HwCleaned",
145 [sdma_event_e60_hw_halted] = "e60_HwHalted",
146 [sdma_event_e70_go_idle] = "e70_GoIdle",
147 [sdma_event_e80_hw_freeze] = "e80_HwFreeze",
148 [sdma_event_e81_hw_frozen] = "e81_HwFrozen",
149 [sdma_event_e82_hw_unfreeze] = "e82_HwUnfreeze",
150 [sdma_event_e85_link_down] = "e85_LinkDown",
151 [sdma_event_e90_sw_halted] = "e90_SwHalted",
154 static const struct sdma_set_state_action sdma_action_table[] = {
155 [sdma_state_s00_hw_down] = {
156 .go_s99_running_tofalse = 1,
162 [sdma_state_s10_hw_start_up_halt_wait] = {
168 [sdma_state_s15_hw_start_up_clean_wait] = {
174 [sdma_state_s20_idle] = {
180 [sdma_state_s30_sw_clean_up_wait] = {
186 [sdma_state_s40_hw_clean_up_wait] = {
192 [sdma_state_s50_hw_halt_wait] = {
198 [sdma_state_s60_idle_halt_wait] = {
199 .go_s99_running_tofalse = 1,
205 [sdma_state_s80_hw_freeze] = {
211 [sdma_state_s82_freeze_sw_clean] = {
217 [sdma_state_s99_running] = {
222 .go_s99_running_totrue = 1,
226 #define SDMA_TAIL_UPDATE_THRESH 0x1F
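/*
 * SDMA_TAIL_UPDATE_THRESH: sdma_send_txlist() only writes the hardware
 * tail when (count & SDMA_TAIL_UPDATE_THRESH) == 0, i.e. at most once per
 * 32 submitted requests, batching the MMIO tail updates.
 */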
228 /* declare all statics here rather than keep sorting */
229 static void sdma_complete(struct kref *);
230 static void sdma_finalput(struct sdma_state *);
231 static void sdma_get(struct sdma_state *);
232 static void sdma_hw_clean_up_task(unsigned long);
233 static void sdma_put(struct sdma_state *);
234 static void sdma_set_state(struct sdma_engine *, enum sdma_states);
235 static void sdma_start_hw_clean_up(struct sdma_engine *);
236 static void sdma_sw_clean_up_task(unsigned long);
237 static void sdma_sendctrl(struct sdma_engine *, unsigned);
238 static void init_sdma_regs(struct sdma_engine *, u32, uint);
239 static void sdma_process_event(
240 struct sdma_engine *sde,
241 enum sdma_events event);
242 static void __sdma_process_event(
243 struct sdma_engine *sde,
244 enum sdma_events event);
245 static void dump_sdma_state(struct sdma_engine *sde);
246 static void sdma_make_progress(struct sdma_engine *sde, u64 status);
247 static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail);
248 static void sdma_flush_descq(struct sdma_engine *sde);
251 * sdma_state_name() - return state string from enum
254 static const char *sdma_state_name(enum sdma_states state)
256 return sdma_state_names[state];
259 static void sdma_get(struct sdma_state *ss)
264 static void sdma_complete(struct kref *kref)
266 struct sdma_state *ss =
267 container_of(kref, struct sdma_state, kref);
272 static void sdma_put(struct sdma_state *ss)
274 kref_put(&ss->kref, sdma_complete);
277 static void sdma_finalput(struct sdma_state *ss)
280 wait_for_completion(&ss->comp);
283 static inline void write_sde_csr(
284 struct sdma_engine *sde,
288 write_kctxt_csr(sde->dd, sde->this_idx, offset0, value);
291 static inline u64 read_sde_csr(
292 struct sdma_engine *sde,
295 return read_kctxt_csr(sde->dd, sde->this_idx, offset0);
299 * sdma_wait_for_packet_egress() - wait for the VL FIFO occupancy for
300 * sdma engine 'sde' to drop to 0.
302 static void sdma_wait_for_packet_egress(struct sdma_engine *sde,
305 u64 off = 8 * sde->this_idx;
306 struct hfi1_devdata *dd = sde->dd;
313 reg = read_csr(dd, off + SEND_EGRESS_SEND_DMA_STATUS);
315 reg &= SDMA_EGRESS_PACKET_OCCUPANCY_SMASK;
316 reg >>= SDMA_EGRESS_PACKET_OCCUPANCY_SHIFT;
319 /* counter is reset if occupancy count changes */
323 /* timed out - bounce the link */
324 dd_dev_err(dd, "%s: engine %u timeout waiting for packets to egress, remaining count %u, bouncing link\n",
325 __func__, sde->this_idx, (u32)reg);
326 queue_work(dd->pport->hfi1_wq,
327 &dd->pport->link_bounce_work);
335 * sdma_wait() - wait for packet egress to complete for all SDMA engines,
336 * and pause for credit return.
338 void sdma_wait(struct hfi1_devdata *dd)
342 for (i = 0; i < dd->num_sdma; i++) {
343 struct sdma_engine *sde = &dd->per_sdma[i];
345 sdma_wait_for_packet_egress(sde, 0);
349 static inline void sdma_set_desc_cnt(struct sdma_engine *sde, unsigned cnt)
353 if (!(sde->dd->flags & HFI1_HAS_SDMA_TIMEOUT))
356 reg &= SD(DESC_CNT_CNT_MASK);
357 reg <<= SD(DESC_CNT_CNT_SHIFT);
358 write_sde_csr(sde, SD(DESC_CNT), reg);
361 static inline void complete_tx(struct sdma_engine *sde,
362 struct sdma_txreq *tx,
365 /* protect against complete modifying */
366 struct iowait *wait = tx->wait;
367 callback_t complete = tx->complete;
369 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
370 trace_hfi1_sdma_out_sn(sde, tx->sn);
371 if (WARN_ON_ONCE(sde->head_sn != tx->sn))
372 dd_dev_err(sde->dd, "expected %llu got %llu\n",
373 sde->head_sn, tx->sn);
376 sdma_txclean(sde->dd, tx);
378 (*complete)(tx, res);
379 if (wait && iowait_sdma_dec(wait))
380 iowait_drain_wakeup(wait);
384 * Complete all the sdma requests with a SDMA_TXREQ_S_ABORTED status
386 * Depending on timing there can be txreqs in two places:
387 * - in the descq ring
388 * - in the flush list
390 * To avoid ordering issues the descq ring needs to be flushed
391 * first followed by the flush list.
393 * This routine is called from two places
394 * - From a work queue item
395 * - Directly from the state machine just before setting the
398 * Must be called with head_lock held
401 static void sdma_flush(struct sdma_engine *sde)
403 struct sdma_txreq *txp, *txp_next;
404 LIST_HEAD(flushlist);
407 /* flush from head to tail */
408 sdma_flush_descq(sde);
409 spin_lock_irqsave(&sde->flushlist_lock, flags);
410 /* copy flush list */
411 list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
412 list_del_init(&txp->list);
413 list_add_tail(&txp->list, &flushlist);
415 spin_unlock_irqrestore(&sde->flushlist_lock, flags);
416 /* flush from flush list */
417 list_for_each_entry_safe(txp, txp_next, &flushlist, list)
418 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
422 * Fields a work request for flushing the descq ring
425 * If the engine has been brought to running during
426 * the scheduling delay, the flush is ignored, assuming
427 * that the process of bringing the engine to running
428 * would have done this flush prior to going to running.
431 static void sdma_field_flush(struct work_struct *work)
434 struct sdma_engine *sde =
435 container_of(work, struct sdma_engine, flush_worker);
437 write_seqlock_irqsave(&sde->head_lock, flags);
438 if (!__sdma_running(sde))
440 write_sequnlock_irqrestore(&sde->head_lock, flags);
443 static void sdma_err_halt_wait(struct work_struct *work)
445 struct sdma_engine *sde = container_of(work, struct sdma_engine,
448 unsigned long timeout;
450 timeout = jiffies + msecs_to_jiffies(SDMA_ERR_HALT_TIMEOUT);
452 statuscsr = read_sde_csr(sde, SD(STATUS));
453 statuscsr &= SD(STATUS_ENG_HALTED_SMASK);
456 if (time_after(jiffies, timeout)) {
458 "SDMA engine %d - timeout waiting for engine to halt\n",
461 * Continue anyway. This could happen if there was
462 * an uncorrectable error in the wrong spot.
466 usleep_range(80, 120);
469 sdma_process_event(sde, sdma_event_e15_hw_halt_done);
472 static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
474 if (!is_bx(sde->dd) && HFI1_CAP_IS_KSET(SDMA_AHG)) {
476 struct hfi1_devdata *dd = sde->dd;
478 for (index = 0; index < dd->num_sdma; index++) {
479 struct sdma_engine *curr_sdma = &dd->per_sdma[index];
481 if (curr_sdma != sde)
482 curr_sdma->progress_check_head =
483 curr_sdma->descq_head;
486 "SDMA engine %d - check scheduled\n",
488 mod_timer(&sde->err_progress_check_timer, jiffies + 10);
492 static void sdma_err_progress_check(unsigned long data)
495 struct sdma_engine *sde = (struct sdma_engine *)data;
497 dd_dev_err(sde->dd, "SDE progress check event\n");
498 for (index = 0; index < sde->dd->num_sdma; index++) {
499 struct sdma_engine *curr_sde = &sde->dd->per_sdma[index];
502 /* check progress on each engine except the current one */
506 * We must lock interrupts when acquiring sde->lock,
507 * to avoid a deadlock if an interrupt triggers and spins on
508 * the same lock on the same CPU
510 spin_lock_irqsave(&curr_sde->tail_lock, flags);
511 write_seqlock(&curr_sde->head_lock);
513 /* skip non-running queues */
514 if (curr_sde->state.current_state != sdma_state_s99_running) {
515 write_sequnlock(&curr_sde->head_lock);
516 spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
520 if ((curr_sde->descq_head != curr_sde->descq_tail) &&
521 (curr_sde->descq_head ==
522 curr_sde->progress_check_head))
523 __sdma_process_event(curr_sde,
524 sdma_event_e90_sw_halted);
525 write_sequnlock(&curr_sde->head_lock);
526 spin_unlock_irqrestore(&curr_sde->tail_lock, flags);
528 schedule_work(&sde->err_halt_worker);
531 static void sdma_hw_clean_up_task(unsigned long opaque)
533 struct sdma_engine *sde = (struct sdma_engine *)opaque;
537 #ifdef CONFIG_SDMA_VERBOSITY
538 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
539 sde->this_idx, slashstrip(__FILE__), __LINE__,
542 statuscsr = read_sde_csr(sde, SD(STATUS));
543 statuscsr &= SD(STATUS_ENG_CLEANED_UP_SMASK);
549 sdma_process_event(sde, sdma_event_e25_hw_clean_up_done);
552 static inline struct sdma_txreq *get_txhead(struct sdma_engine *sde)
554 smp_read_barrier_depends(); /* see sdma_update_tail() */
555 return sde->tx_ring[sde->tx_head & sde->sdma_mask];
559 * flush ring for recovery
561 static void sdma_flush_descq(struct sdma_engine *sde)
565 struct sdma_txreq *txp = get_txhead(sde);
567 /* The reason for some of the complexity of this code is that
568 * not all descriptors have corresponding txps. So, we have to
569 * be able to skip over descs until we wander into the range of
570 * the next txp on the list.
572 head = sde->descq_head & sde->sdma_mask;
573 tail = sde->descq_tail & sde->sdma_mask;
574 while (head != tail) {
575 /* advance head, wrap if needed */
576 head = ++sde->descq_head & sde->sdma_mask;
577 /* if now past this txp's descs, do the callback */
578 if (txp && txp->next_descq_idx == head) {
579 /* remove from list */
580 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
581 complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
582 trace_hfi1_sdma_progress(sde, head, tail, txp);
583 txp = get_txhead(sde);
588 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
591 static void sdma_sw_clean_up_task(unsigned long opaque)
593 struct sdma_engine *sde = (struct sdma_engine *)opaque;
596 spin_lock_irqsave(&sde->tail_lock, flags);
597 write_seqlock(&sde->head_lock);
600 * At this point, the following should always be true:
601 * - We are halted, so no more descriptors are getting retired.
602 * - We are not running, so no one is submitting new work.
603 * - Only we can send the e40_sw_cleaned, so we can't start
604 * running again until we say so. So, the active list and
605 * descq are ours to play with.
609 * In the error clean up sequence, software clean must be called
610 * before the hardware clean so we can use the hardware head in
611 * the progress routine. A hardware clean or SPC unfreeze will
612 * reset the hardware head.
614 * Process all retired requests. The progress routine will use the
615 * latest physical hardware head - we are not running so speed does
618 sdma_make_progress(sde, 0);
623 * Reset our notion of head and tail.
624 * Note that the HW registers have been reset via an earlier
629 sde->desc_avail = sdma_descq_freecnt(sde);
632 __sdma_process_event(sde, sdma_event_e40_sw_cleaned);
634 write_sequnlock(&sde->head_lock);
635 spin_unlock_irqrestore(&sde->tail_lock, flags);
638 static void sdma_sw_tear_down(struct sdma_engine *sde)
640 struct sdma_state *ss = &sde->state;
642 /* Releasing this reference means the state machine has stopped. */
645 /* stop waiting for all unfreeze events to complete */
646 atomic_set(&sde->dd->sdma_unfreeze_count, -1);
647 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
650 static void sdma_start_hw_clean_up(struct sdma_engine *sde)
652 tasklet_hi_schedule(&sde->sdma_hw_clean_up_task);
655 static void sdma_set_state(struct sdma_engine *sde,
656 enum sdma_states next_state)
658 struct sdma_state *ss = &sde->state;
659 const struct sdma_set_state_action *action = sdma_action_table;
662 trace_hfi1_sdma_state(
664 sdma_state_names[ss->current_state],
665 sdma_state_names[next_state]);
667 /* debugging bookkeeping */
668 ss->previous_state = ss->current_state;
669 ss->previous_op = ss->current_op;
670 ss->current_state = next_state;
672 if (ss->previous_state != sdma_state_s99_running &&
673 next_state == sdma_state_s99_running)
676 if (action[next_state].op_enable)
677 op |= SDMA_SENDCTRL_OP_ENABLE;
679 if (action[next_state].op_intenable)
680 op |= SDMA_SENDCTRL_OP_INTENABLE;
682 if (action[next_state].op_halt)
683 op |= SDMA_SENDCTRL_OP_HALT;
685 if (action[next_state].op_cleanup)
686 op |= SDMA_SENDCTRL_OP_CLEANUP;
688 if (action[next_state].go_s99_running_tofalse)
689 ss->go_s99_running = 0;
691 if (action[next_state].go_s99_running_totrue)
692 ss->go_s99_running = 1;
695 sdma_sendctrl(sde, ss->current_op);
699 * sdma_get_descq_cnt() - called when device probed
701 * Return a validated descq count.
703 * This is currently only used in the verbs initialization to build the tx
706 * This will probably be deleted in favor of a more scalable approach to
710 u16 sdma_get_descq_cnt(void)
712 u16 count = sdma_descq_cnt;
715 return SDMA_DESCQ_CNT;
716 /* count must be a power of 2 in the range 64 to 32768,
717 * inclusive. Otherwise return default.
719 if (!is_power_of_2(count))
720 return SDMA_DESCQ_CNT;
721 if (count < 64 || count > 32768)
722 return SDMA_DESCQ_CNT;
727 * sdma_select_engine_vl() - select sdma engine
729 * @selector: a spreading factor
733 * This function returns an engine based on the selector and a vl. The
734 * mapping fields are protected by RCU.
736 struct sdma_engine *sdma_select_engine_vl(
737 struct hfi1_devdata *dd,
741 struct sdma_vl_map *m;
742 struct sdma_map_elem *e;
743 struct sdma_engine *rval;
745 /* NOTE: This should only happen if SC->VL changed after the initial
746 * checks on the QP/AH.
747 * The default will return engine 0 below.
755 m = rcu_dereference(dd->sdma_map);
758 return &dd->per_sdma[0];
760 e = m->map[vl & m->mask];
761 rval = e->sde[selector & e->mask];
765 rval = !rval ? &dd->per_sdma[0] : rval;
766 trace_hfi1_sdma_engine_select(dd, selector, vl, rval->this_idx);
771 * sdma_select_engine_sc() - select sdma engine
773 * @selector: a spreading factor
777 * This function returns an engine based on the selector and an sc.
779 struct sdma_engine *sdma_select_engine_sc(
780 struct hfi1_devdata *dd,
784 u8 vl = sc_to_vlt(dd, sc5);
786 return sdma_select_engine_vl(dd, selector, vl);
790 * Free the indicated map struct
792 static void sdma_map_free(struct sdma_vl_map *m)
796 for (i = 0; m && i < m->actual_vls; i++)
802 * Handle RCU callback
804 static void sdma_map_rcu_callback(struct rcu_head *list)
806 struct sdma_vl_map *m = container_of(list, struct sdma_vl_map, list);
812 * sdma_map_init - called when # vls change
815 * @num_vls: number of vls
816 * @vl_engines: per vl engine mapping (optional)
818 * This routine changes the mapping based on the number of vls.
820 * vl_engines is used to specify a non-uniform vl/engine loading. NULL
821 * implies auto computing the loading and giving each VL a uniform
822 * distribution of engines per VL.
824 * The auto algorithm computes the sde_per_vl and the number of extra
825 * engines. Any extra engines are added from the last VL on down.
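 *
 * A worked example (illustrative numbers only): with dd->num_sdma == 16
 * and num_vls == 5, sde_per_vl is 3 with one extra engine, so the per-VL
 * engine counts become {3, 3, 3, 3, 4}, the extra going to the last VL.
 * num_vls is then rounded up to 8 map entries, and entries 5..7 simply
 * reuse entries 0..2 (map[i % num_vls]).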
827 * rcu locking is used here to control access to the mapping fields.
829 * If either the num_vls or num_sdma are non-power of 2, the array sizes
830 * in the struct sdma_vl_map and the struct sdma_map_elem are rounded
831 * up to the next highest power of 2 and the first entry is reused
832 * in a round robin fashion.
834 * If an error occurs the map change is not done and the mapping is
838 int sdma_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_engines)
841 int extra, sde_per_vl;
843 u8 lvl_engines[OPA_MAX_VLS];
844 struct sdma_vl_map *oldmap, *newmap;
846 if (!(dd->flags & HFI1_HAS_SEND_DMA))
850 /* truncate divide */
851 sde_per_vl = dd->num_sdma / num_vls;
853 extra = dd->num_sdma % num_vls;
854 vl_engines = lvl_engines;
855 /* add extras from last vl down */
856 for (i = num_vls - 1; i >= 0; i--, extra--)
857 vl_engines[i] = sde_per_vl + (extra > 0 ? 1 : 0);
861 sizeof(struct sdma_vl_map) +
862 roundup_pow_of_two(num_vls) *
863 sizeof(struct sdma_map_elem *),
867 newmap->actual_vls = num_vls;
868 newmap->vls = roundup_pow_of_two(num_vls);
869 newmap->mask = (1 << ilog2(newmap->vls)) - 1;
870 /* initialize back-map */
871 for (i = 0; i < TXE_NUM_SDMA_ENGINES; i++)
872 newmap->engine_to_vl[i] = -1;
873 for (i = 0; i < newmap->vls; i++) {
874 /* save for wrap around */
875 int first_engine = engine;
877 if (i < newmap->actual_vls) {
878 int sz = roundup_pow_of_two(vl_engines[i]);
880 /* only allocate once */
881 newmap->map[i] = kzalloc(
882 sizeof(struct sdma_map_elem) +
883 sz * sizeof(struct sdma_engine *),
887 newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
889 for (j = 0; j < sz; j++) {
890 newmap->map[i]->sde[j] =
891 &dd->per_sdma[engine];
892 if (++engine >= first_engine + vl_engines[i])
893 /* wrap back to first engine */
894 engine = first_engine;
896 /* assign back-map */
897 for (j = 0; j < vl_engines[i]; j++)
898 newmap->engine_to_vl[first_engine + j] = i;
900 /* just re-use entry without allocating */
901 newmap->map[i] = newmap->map[i % num_vls];
903 engine = first_engine + vl_engines[i];
905 /* newmap in hand, save old map */
906 spin_lock_irq(&dd->sde_map_lock);
907 oldmap = rcu_dereference_protected(dd->sdma_map,
908 lockdep_is_held(&dd->sde_map_lock));
911 rcu_assign_pointer(dd->sdma_map, newmap);
913 spin_unlock_irq(&dd->sde_map_lock);
914 /* success, free any old map after grace period */
916 call_rcu(&oldmap->list, sdma_map_rcu_callback);
919 /* free any partial allocation */
920 sdma_map_free(newmap);
925 * Clean up allocated memory.
927 * This routine can be called regardless of the success of sdma_init()
930 static void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
933 struct sdma_engine *sde;
935 if (dd->sdma_pad_dma) {
936 dma_free_coherent(&dd->pcidev->dev, 4,
937 (void *)dd->sdma_pad_dma,
939 dd->sdma_pad_dma = NULL;
940 dd->sdma_pad_phys = 0;
942 if (dd->sdma_heads_dma) {
943 dma_free_coherent(&dd->pcidev->dev, dd->sdma_heads_size,
944 (void *)dd->sdma_heads_dma,
945 dd->sdma_heads_phys);
946 dd->sdma_heads_dma = NULL;
947 dd->sdma_heads_phys = 0;
949 for (i = 0; dd->per_sdma && i < num_engines; ++i) {
950 sde = &dd->per_sdma[i];
952 sde->head_dma = NULL;
958 sde->descq_cnt * sizeof(u64[2]),
965 kvfree(sde->tx_ring);
968 spin_lock_irq(&dd->sde_map_lock);
969 sdma_map_free(rcu_access_pointer(dd->sdma_map));
970 RCU_INIT_POINTER(dd->sdma_map, NULL);
971 spin_unlock_irq(&dd->sde_map_lock);
978 * sdma_init() - called when device probed
980 * @port: port number (currently only zero)
982 * sdma_init initializes the specified number of engines.
984 * The code initializes each sde and its csrs. Interrupts
985 * are not required to be enabled.
988 * 0 - success, -errno on failure
990 int sdma_init(struct hfi1_devdata *dd, u8 port)
993 struct sdma_engine *sde;
996 struct hfi1_pportdata *ppd = dd->pport + port;
997 u32 per_sdma_credits;
998 uint idle_cnt = sdma_idle_cnt;
999 size_t num_engines = dd->chip_sdma_engines;
1001 if (!HFI1_CAP_IS_KSET(SDMA)) {
1002 HFI1_CAP_CLEAR(SDMA_AHG);
1006 /* can't exceed chip support */
1007 mod_num_sdma <= dd->chip_sdma_engines &&
1008 /* count must be >= vls */
1009 mod_num_sdma >= num_vls)
1010 num_engines = mod_num_sdma;
1012 dd_dev_info(dd, "SDMA mod_num_sdma: %u\n", mod_num_sdma);
1013 dd_dev_info(dd, "SDMA chip_sdma_engines: %u\n", dd->chip_sdma_engines);
1014 dd_dev_info(dd, "SDMA chip_sdma_mem_size: %u\n",
1015 dd->chip_sdma_mem_size);
1018 dd->chip_sdma_mem_size / (num_engines * SDMA_BLOCK_SIZE);
1020 /* set up freeze waitqueue */
1021 init_waitqueue_head(&dd->sdma_unfreeze_wq);
1022 atomic_set(&dd->sdma_unfreeze_count, 0);
1024 descq_cnt = sdma_get_descq_cnt();
1025 dd_dev_info(dd, "SDMA engines %zu descq_cnt %u\n",
1026 num_engines, descq_cnt);
1028 /* alloc memory for array of send engines */
1029 dd->per_sdma = kcalloc(num_engines, sizeof(*dd->per_sdma), GFP_KERNEL);
1033 idle_cnt = ns_to_cclock(dd, idle_cnt);
1034 if (!sdma_desct_intr)
1035 sdma_desct_intr = SDMA_DESC_INTR;
1037 /* Allocate memory for SendDMA descriptor FIFOs */
1038 for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1039 sde = &dd->per_sdma[this_idx];
1042 sde->this_idx = this_idx;
1043 sde->descq_cnt = descq_cnt;
1044 sde->desc_avail = sdma_descq_freecnt(sde);
1045 sde->sdma_shift = ilog2(descq_cnt);
1046 sde->sdma_mask = (1 << sde->sdma_shift) - 1;
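/*
 * Note: descq_head/descq_tail and tx_head/tx_tail are free-running
 * counters; "& sde->sdma_mask" converts them into ring indices, which is
 * why the ring size must be a power of 2.
 */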
1048 /* Create a mask specifically for each interrupt source */
1049 sde->int_mask = (u64)1 << (0 * TXE_NUM_SDMA_ENGINES +
1051 sde->progress_mask = (u64)1 << (1 * TXE_NUM_SDMA_ENGINES +
1053 sde->idle_mask = (u64)1 << (2 * TXE_NUM_SDMA_ENGINES +
1055 /* Create a combined mask to cover all 3 interrupt sources */
1056 sde->imask = sde->int_mask | sde->progress_mask |
1059 spin_lock_init(&sde->tail_lock);
1060 seqlock_init(&sde->head_lock);
1061 spin_lock_init(&sde->senddmactrl_lock);
1062 spin_lock_init(&sde->flushlist_lock);
1063 /* ensure there is always a zero bit */
1064 sde->ahg_bits = 0xfffffffe00000000ULL;
1066 sdma_set_state(sde, sdma_state_s00_hw_down);
1068 /* set up reference counting */
1069 kref_init(&sde->state.kref);
1070 init_completion(&sde->state.comp);
1072 INIT_LIST_HEAD(&sde->flushlist);
1073 INIT_LIST_HEAD(&sde->dmawait);
1076 get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
1080 SDMA_DESC1_HEAD_TO_HOST_FLAG;
1083 SDMA_DESC1_INT_REQ_FLAG;
1085 tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
1086 (unsigned long)sde);
1088 tasklet_init(&sde->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
1089 (unsigned long)sde);
1090 INIT_WORK(&sde->err_halt_worker, sdma_err_halt_wait);
1091 INIT_WORK(&sde->flush_worker, sdma_field_flush);
1093 sde->progress_check_head = 0;
1095 setup_timer(&sde->err_progress_check_timer,
1096 sdma_err_progress_check, (unsigned long)sde);
1098 sde->descq = dma_zalloc_coherent(
1100 descq_cnt * sizeof(u64[2]),
1107 kcalloc(descq_cnt, sizeof(struct sdma_txreq *),
1112 sizeof(struct sdma_txreq *) *
1118 dd->sdma_heads_size = L1_CACHE_BYTES * num_engines;
1119 /* Allocate memory for DMA of head registers to memory */
1120 dd->sdma_heads_dma = dma_zalloc_coherent(
1122 dd->sdma_heads_size,
1123 &dd->sdma_heads_phys,
1126 if (!dd->sdma_heads_dma) {
1127 dd_dev_err(dd, "failed to allocate SendDMA head memory\n");
1131 /* Allocate memory for pad */
1132 dd->sdma_pad_dma = dma_zalloc_coherent(
1138 if (!dd->sdma_pad_dma) {
1139 dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
1143 /* assign each engine to different cacheline and init registers */
1144 curr_head = (void *)dd->sdma_heads_dma;
1145 for (this_idx = 0; this_idx < num_engines; ++this_idx) {
1146 unsigned long phys_offset;
1148 sde = &dd->per_sdma[this_idx];
1150 sde->head_dma = curr_head;
1151 curr_head += L1_CACHE_BYTES;
1152 phys_offset = (unsigned long)sde->head_dma -
1153 (unsigned long)dd->sdma_heads_dma;
1154 sde->head_phys = dd->sdma_heads_phys + phys_offset;
1155 init_sdma_regs(sde, per_sdma_credits, idle_cnt);
1157 dd->flags |= HFI1_HAS_SEND_DMA;
1158 dd->flags |= idle_cnt ? HFI1_HAS_SDMA_TIMEOUT : 0;
1159 dd->num_sdma = num_engines;
1160 if (sdma_map_init(dd, port, ppd->vls_operational, NULL))
1162 dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
1166 sdma_clean(dd, num_engines);
1171 * sdma_all_running() - called when the link goes up
1174 * This routine moves all engines to the running state.
1176 void sdma_all_running(struct hfi1_devdata *dd)
1178 struct sdma_engine *sde;
1181 /* move all engines to running */
1182 for (i = 0; i < dd->num_sdma; ++i) {
1183 sde = &dd->per_sdma[i];
1184 sdma_process_event(sde, sdma_event_e30_go_running);
1189 * sdma_all_idle() - called when the link goes down
1192 * This routine moves all engines to the idle state.
1194 void sdma_all_idle(struct hfi1_devdata *dd)
1196 struct sdma_engine *sde;
1199 /* idle all engines */
1200 for (i = 0; i < dd->num_sdma; ++i) {
1201 sde = &dd->per_sdma[i];
1202 sdma_process_event(sde, sdma_event_e70_go_idle);
1207 * sdma_start() - called to kick off state processing for all engines
1210 * This routine is for kicking off the state processing for all required
1211 * sdma engines. Interrupts need to be working at this point.
1214 void sdma_start(struct hfi1_devdata *dd)
1217 struct sdma_engine *sde;
1219 /* kick off the engines state processing */
1220 for (i = 0; i < dd->num_sdma; ++i) {
1221 sde = &dd->per_sdma[i];
1222 sdma_process_event(sde, sdma_event_e10_go_hw_start);
1227 * sdma_exit() - used when module is removed
1230 void sdma_exit(struct hfi1_devdata *dd)
1233 struct sdma_engine *sde;
1235 for (this_idx = 0; dd->per_sdma && this_idx < dd->num_sdma;
1237 sde = &dd->per_sdma[this_idx];
1238 if (!list_empty(&sde->dmawait))
1239 dd_dev_err(dd, "sde %u: dmawait list not empty!\n",
1241 sdma_process_event(sde, sdma_event_e00_go_hw_down);
1243 del_timer_sync(&sde->err_progress_check_timer);
1246 * This waits for the state machine to exit so it is not
1247 * necessary to kill the sdma_sw_clean_up_task to make sure
1248 * it is not running.
1250 sdma_finalput(&sde->state);
1252 sdma_clean(dd, dd->num_sdma);
1256 * unmap the indicated descriptor
1258 static inline void sdma_unmap_desc(
1259 struct hfi1_devdata *dd,
1260 struct sdma_desc *descp)
1262 switch (sdma_mapping_type(descp)) {
1263 case SDMA_MAP_SINGLE:
1266 sdma_mapping_addr(descp),
1267 sdma_mapping_len(descp),
1273 sdma_mapping_addr(descp),
1274 sdma_mapping_len(descp),
1281 * return the mode as indicated by the first
1282 * descriptor in the tx.
1284 static inline u8 ahg_mode(struct sdma_txreq *tx)
1286 return (tx->descp[0].qw[1] & SDMA_DESC1_HEADER_MODE_SMASK)
1287 >> SDMA_DESC1_HEADER_MODE_SHIFT;
1291 * sdma_txclean() - clean tx of mappings, descp *kmalloc's
1292 * @dd: hfi1_devdata for unmapping
1293 * @tx: tx request to clean
1295 * This is used in the progress routine to clean the tx or
1296 * by the ULP to toss an in-process tx build.
1298 * The code can be called multiple times without issue.
1302 struct hfi1_devdata *dd,
1303 struct sdma_txreq *tx)
1308 u8 skip = 0, mode = ahg_mode(tx);
1311 sdma_unmap_desc(dd, &tx->descp[0]);
1312 /* determine number of AHG descriptors to skip */
1313 if (mode > SDMA_AHG_APPLY_UPDATE1)
1315 for (i = 1 + skip; i < tx->num_desc; i++)
1316 sdma_unmap_desc(dd, &tx->descp[i]);
1319 kfree(tx->coalesce_buf);
1320 tx->coalesce_buf = NULL;
1321 /* kmalloc'ed descp */
1322 if (unlikely(tx->desc_limit > ARRAY_SIZE(tx->descs))) {
1323 tx->desc_limit = ARRAY_SIZE(tx->descs);
1328 static inline u16 sdma_gethead(struct sdma_engine *sde)
1330 struct hfi1_devdata *dd = sde->dd;
1334 #ifdef CONFIG_SDMA_VERBOSITY
1335 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1336 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1340 use_dmahead = HFI1_CAP_IS_KSET(USE_SDMA_HEAD) && __sdma_running(sde) &&
1341 (dd->flags & HFI1_HAS_SDMA_TIMEOUT);
1342 hwhead = use_dmahead ?
1343 (u16)le64_to_cpu(*sde->head_dma) :
1344 (u16)read_sde_csr(sde, SD(HEAD));
1346 if (unlikely(HFI1_CAP_IS_KSET(SDMA_HEAD_CHECK))) {
1352 swhead = sde->descq_head & sde->sdma_mask;
1353 /* this code is really bad for cache line trading */
1354 swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
1355 cnt = sde->descq_cnt;
1357 if (swhead < swtail)
1359 sane = (hwhead >= swhead) & (hwhead <= swtail);
1360 else if (swhead > swtail)
1361 /* wrapped around */
1362 sane = ((hwhead >= swhead) && (hwhead < cnt)) ||
1366 sane = (hwhead == swhead);
1368 if (unlikely(!sane)) {
1369 dd_dev_err(dd, "SDMA(%u) bad head (%s) hwhd=%hu swhd=%hu swtl=%hu cnt=%hu\n",
1371 use_dmahead ? "dma" : "kreg",
1372 hwhead, swhead, swtail, cnt);
1374 /* try one more time, using csr */
1378 /* proceed as if no progress */
1386 * This is called when there are send DMA descriptors that might be
1389 * This is called with head_lock held.
1391 static void sdma_desc_avail(struct sdma_engine *sde, unsigned avail)
1393 struct iowait *wait, *nw;
1394 struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
1395 unsigned i, n = 0, seq;
1396 struct sdma_txreq *stx;
1397 struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
1399 #ifdef CONFIG_SDMA_VERBOSITY
1400 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
1401 slashstrip(__FILE__), __LINE__, __func__);
1402 dd_dev_err(sde->dd, "avail: %u\n", avail);
1406 seq = read_seqbegin(&dev->iowait_lock);
1407 if (!list_empty(&sde->dmawait)) {
1408 /* at least one item */
1409 write_seqlock(&dev->iowait_lock);
1410 /* Harvest waiters wanting DMA descriptors */
1411 list_for_each_entry_safe(
1420 if (n == ARRAY_SIZE(waits))
1422 if (!list_empty(&wait->tx_head)) {
1423 stx = list_first_entry(
1427 num_desc = stx->num_desc;
1429 if (num_desc > avail)
1432 list_del_init(&wait->list);
1435 write_sequnlock(&dev->iowait_lock);
1438 } while (read_seqretry(&dev->iowait_lock, seq));
1440 for (i = 0; i < n; i++)
1441 waits[i]->wakeup(waits[i], SDMA_AVAIL_REASON);
1444 /* head_lock must be held */
1445 static void sdma_make_progress(struct sdma_engine *sde, u64 status)
1447 struct sdma_txreq *txp = NULL;
1450 int idle_check_done = 0;
1452 hwhead = sdma_gethead(sde);
1454 /* The reason for some of the complexity of this code is that
1455 * not all descriptors have corresponding txps. So, we have to
1456 * be able to skip over descs until we wander into the range of
1457 * the next txp on the list.
1461 txp = get_txhead(sde);
1462 swhead = sde->descq_head & sde->sdma_mask;
1463 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1464 while (swhead != hwhead) {
1465 /* advance head, wrap if needed */
1466 swhead = ++sde->descq_head & sde->sdma_mask;
1468 /* if now past this txp's descs, do the callback */
1469 if (txp && txp->next_descq_idx == swhead) {
1470 /* remove from list */
1471 sde->tx_ring[sde->tx_head++ & sde->sdma_mask] = NULL;
1472 complete_tx(sde, txp, SDMA_TXREQ_S_OK);
1473 /* see if there is another txp */
1474 txp = get_txhead(sde);
1476 trace_hfi1_sdma_progress(sde, hwhead, swhead, txp);
1481 * The SDMA idle interrupt is not guaranteed to be ordered with respect
1482 * to updates to the dma_head location in host memory. The head
1483 * value read might not be fully up to date. If there are pending
1484 * descriptors and the SDMA idle interrupt fired then read from the
1485 * CSR SDMA head instead to get the latest value from the hardware.
1486 * The hardware SDMA head should be read at most once in this invocation
1487 * of sdma_make_progress(..), which is ensured by the idle_check_done flag
1489 if ((status & sde->idle_mask) && !idle_check_done) {
1492 swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
1493 if (swtail != hwhead) {
1494 hwhead = (u16)read_sde_csr(sde, SD(HEAD));
1495 idle_check_done = 1;
1500 sde->last_status = status;
1502 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
1506 * sdma_engine_interrupt() - interrupt handler for engine
1508 * @status: sdma interrupt reason
1510 * Status is a mask of the 3 possible interrupts for this engine. It will
1511 * contain bits _only_ for this SDMA engine. It will contain at least one
1512 * bit, it may contain more.
1514 void sdma_engine_interrupt(struct sdma_engine *sde, u64 status)
1516 trace_hfi1_sdma_engine_interrupt(sde, status);
1517 write_seqlock(&sde->head_lock);
1518 sdma_set_desc_cnt(sde, sdma_desct_intr);
1519 if (status & sde->idle_mask)
1520 sde->idle_int_cnt++;
1521 else if (status & sde->progress_mask)
1522 sde->progress_int_cnt++;
1523 else if (status & sde->int_mask)
1524 sde->sdma_int_cnt++;
1525 sdma_make_progress(sde, status);
1526 write_sequnlock(&sde->head_lock);
1530 * sdma_engine_error() - error handler for engine
1532 * @status: sdma interrupt reason
1534 void sdma_engine_error(struct sdma_engine *sde, u64 status)
1536 unsigned long flags;
1538 #ifdef CONFIG_SDMA_VERBOSITY
1539 dd_dev_err(sde->dd, "CONFIG SDMA(%u) error status 0x%llx state %s\n",
1541 (unsigned long long)status,
1542 sdma_state_names[sde->state.current_state]);
1544 spin_lock_irqsave(&sde->tail_lock, flags);
1545 write_seqlock(&sde->head_lock);
1546 if (status & ALL_SDMA_ENG_HALT_ERRS)
1547 __sdma_process_event(sde, sdma_event_e60_hw_halted);
1548 if (status & ~SD(ENG_ERR_STATUS_SDMA_HALT_ERR_SMASK)) {
1550 "SDMA (%u) engine error: 0x%llx state %s\n",
1552 (unsigned long long)status,
1553 sdma_state_names[sde->state.current_state]);
1554 dump_sdma_state(sde);
1556 write_sequnlock(&sde->head_lock);
1557 spin_unlock_irqrestore(&sde->tail_lock, flags);
1560 static void sdma_sendctrl(struct sdma_engine *sde, unsigned op)
1562 u64 set_senddmactrl = 0;
1563 u64 clr_senddmactrl = 0;
1564 unsigned long flags;
1566 #ifdef CONFIG_SDMA_VERBOSITY
1567 dd_dev_err(sde->dd, "CONFIG SDMA(%u) senddmactrl E=%d I=%d H=%d C=%d\n",
1569 (op & SDMA_SENDCTRL_OP_ENABLE) ? 1 : 0,
1570 (op & SDMA_SENDCTRL_OP_INTENABLE) ? 1 : 0,
1571 (op & SDMA_SENDCTRL_OP_HALT) ? 1 : 0,
1572 (op & SDMA_SENDCTRL_OP_CLEANUP) ? 1 : 0);
1575 if (op & SDMA_SENDCTRL_OP_ENABLE)
1576 set_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1578 clr_senddmactrl |= SD(CTRL_SDMA_ENABLE_SMASK);
1580 if (op & SDMA_SENDCTRL_OP_INTENABLE)
1581 set_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1583 clr_senddmactrl |= SD(CTRL_SDMA_INT_ENABLE_SMASK);
1585 if (op & SDMA_SENDCTRL_OP_HALT)
1586 set_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1588 clr_senddmactrl |= SD(CTRL_SDMA_HALT_SMASK);
1590 spin_lock_irqsave(&sde->senddmactrl_lock, flags);
1592 sde->p_senddmactrl |= set_senddmactrl;
1593 sde->p_senddmactrl &= ~clr_senddmactrl;
1595 if (op & SDMA_SENDCTRL_OP_CLEANUP)
1596 write_sde_csr(sde, SD(CTRL),
1597 sde->p_senddmactrl |
1598 SD(CTRL_SDMA_CLEANUP_SMASK));
1600 write_sde_csr(sde, SD(CTRL), sde->p_senddmactrl);
1602 spin_unlock_irqrestore(&sde->senddmactrl_lock, flags);
1604 #ifdef CONFIG_SDMA_VERBOSITY
1605 sdma_dumpstate(sde);
1609 static void sdma_setlengen(struct sdma_engine *sde)
1611 #ifdef CONFIG_SDMA_VERBOSITY
1612 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1613 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1617 * Set SendDmaLenGen and clear-then-set the MSB of the generation
1618 * count to enable generation checking and load the internal
1619 * generation counter.
1621 write_sde_csr(sde, SD(LEN_GEN),
1622 (sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT));
1623 write_sde_csr(sde, SD(LEN_GEN),
1624 ((sde->descq_cnt / 64) << SD(LEN_GEN_LENGTH_SHIFT)) |
1625 (4ULL << SD(LEN_GEN_GENERATION_SHIFT)));
1628 static inline void sdma_update_tail(struct sdma_engine *sde, u16 tail)
1630 /* Commit writes to memory and advance the tail on the chip */
1631 smp_wmb(); /* see get_txhead() */
1632 writeq(tail, sde->tail_csr);
1636 * This is called when changing to state s10_hw_start_up_halt_wait as
1637 * a result of send buffer errors or send DMA descriptor errors.
1639 static void sdma_hw_start_up(struct sdma_engine *sde)
1643 #ifdef CONFIG_SDMA_VERBOSITY
1644 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1645 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1648 sdma_setlengen(sde);
1649 sdma_update_tail(sde, 0); /* Set SendDmaTail */
1652 reg = SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_MASK) <<
1653 SD(ENG_ERR_CLEAR_SDMA_HEADER_REQUEST_FIFO_UNC_ERR_SHIFT);
1654 write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg);
1657 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
1658 (r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
1660 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
1661 (r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
1663 * set_sdma_integrity
1665 * Set the SEND_DMA_CHECK_ENABLE register for send DMA engine 'sde'.
1667 static void set_sdma_integrity(struct sdma_engine *sde)
1669 struct hfi1_devdata *dd = sde->dd;
1672 if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY)))
1675 reg = hfi1_pkt_base_sdma_integrity(dd);
1677 if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL))
1678 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
1680 SET_STATIC_RATE_CONTROL_SMASK(reg);
1682 write_sde_csr(sde, SD(CHECK_ENABLE), reg);
1685 static void init_sdma_regs(
1686 struct sdma_engine *sde,
1691 #ifdef CONFIG_SDMA_VERBOSITY
1692 struct hfi1_devdata *dd = sde->dd;
1694 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n",
1695 sde->this_idx, slashstrip(__FILE__), __LINE__, __func__);
1698 write_sde_csr(sde, SD(BASE_ADDR), sde->descq_phys);
1699 sdma_setlengen(sde);
1700 sdma_update_tail(sde, 0); /* Set SendDmaTail */
1701 write_sde_csr(sde, SD(RELOAD_CNT), idle_cnt);
1702 write_sde_csr(sde, SD(DESC_CNT), 0);
1703 write_sde_csr(sde, SD(HEAD_ADDR), sde->head_phys);
1704 write_sde_csr(sde, SD(MEMORY),
1705 ((u64)credits << SD(MEMORY_SDMA_MEMORY_CNT_SHIFT)) |
1706 ((u64)(credits * sde->this_idx) <<
1707 SD(MEMORY_SDMA_MEMORY_INDEX_SHIFT)));
1708 write_sde_csr(sde, SD(ENG_ERR_MASK), ~0ull);
1709 set_sdma_integrity(sde);
1710 opmask = OPCODE_CHECK_MASK_DISABLED;
1711 opval = OPCODE_CHECK_VAL_DISABLED;
1712 write_sde_csr(sde, SD(CHECK_OPCODE),
1713 (opmask << SEND_CTXT_CHECK_OPCODE_MASK_SHIFT) |
1714 (opval << SEND_CTXT_CHECK_OPCODE_VALUE_SHIFT));
1717 #ifdef CONFIG_SDMA_VERBOSITY
1719 #define sdma_dumpstate_helper0(reg) do { \
1720 csr = read_csr(sde->dd, reg); \
1721 dd_dev_err(sde->dd, "%36s 0x%016llx\n", #reg, csr); \
1724 #define sdma_dumpstate_helper(reg) do { \
1725 csr = read_sde_csr(sde, reg); \
1726 dd_dev_err(sde->dd, "%36s[%02u] 0x%016llx\n", \
1727 #reg, sde->this_idx, csr); \
1730 #define sdma_dumpstate_helper2(reg) do { \
1731 csr = read_csr(sde->dd, reg + (8 * i)); \
1732 dd_dev_err(sde->dd, "%33s_%02u 0x%016llx\n", \
1736 void sdma_dumpstate(struct sdma_engine *sde)
1741 sdma_dumpstate_helper(SD(CTRL));
1742 sdma_dumpstate_helper(SD(STATUS));
1743 sdma_dumpstate_helper0(SD(ERR_STATUS));
1744 sdma_dumpstate_helper0(SD(ERR_MASK));
1745 sdma_dumpstate_helper(SD(ENG_ERR_STATUS));
1746 sdma_dumpstate_helper(SD(ENG_ERR_MASK));
1748 for (i = 0; i < CCE_NUM_INT_CSRS; ++i) {
1749 sdma_dumpstate_helper2(CCE_INT_STATUS);
1750 sdma_dumpstate_helper2(CCE_INT_MASK);
1751 sdma_dumpstate_helper2(CCE_INT_BLOCKED);
1754 sdma_dumpstate_helper(SD(TAIL));
1755 sdma_dumpstate_helper(SD(HEAD));
1756 sdma_dumpstate_helper(SD(PRIORITY_THLD));
1757 sdma_dumpstate_helper(SD(IDLE_CNT));
1758 sdma_dumpstate_helper(SD(RELOAD_CNT));
1759 sdma_dumpstate_helper(SD(DESC_CNT));
1760 sdma_dumpstate_helper(SD(DESC_FETCHED_CNT));
1761 sdma_dumpstate_helper(SD(MEMORY));
1762 sdma_dumpstate_helper0(SD(ENGINES));
1763 sdma_dumpstate_helper0(SD(MEM_SIZE));
1764 /* sdma_dumpstate_helper(SEND_EGRESS_SEND_DMA_STATUS); */
1765 sdma_dumpstate_helper(SD(BASE_ADDR));
1766 sdma_dumpstate_helper(SD(LEN_GEN));
1767 sdma_dumpstate_helper(SD(HEAD_ADDR));
1768 sdma_dumpstate_helper(SD(CHECK_ENABLE));
1769 sdma_dumpstate_helper(SD(CHECK_VL));
1770 sdma_dumpstate_helper(SD(CHECK_JOB_KEY));
1771 sdma_dumpstate_helper(SD(CHECK_PARTITION_KEY));
1772 sdma_dumpstate_helper(SD(CHECK_SLID));
1773 sdma_dumpstate_helper(SD(CHECK_OPCODE));
1777 static void dump_sdma_state(struct sdma_engine *sde)
1779 struct hw_sdma_desc *descq;
1780 struct hw_sdma_desc *descqp;
1785 u16 head, tail, cnt;
1787 head = sde->descq_head & sde->sdma_mask;
1788 tail = sde->descq_tail & sde->sdma_mask;
1789 cnt = sdma_descq_freecnt(sde);
1793 "SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
1794 sde->this_idx, head, tail, cnt,
1795 !list_empty(&sde->flushlist));
1797 /* print info for each entry in the descriptor queue */
1798 while (head != tail) {
1799 char flags[6] = { 'x', 'x', 'x', 'x', 0 };
1801 descqp = &sde->descq[head];
1802 desc[0] = le64_to_cpu(descqp->qw[0]);
1803 desc[1] = le64_to_cpu(descqp->qw[1]);
1804 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
1805 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
1807 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
1808 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
1809 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
1810 & SDMA_DESC0_PHY_ADDR_MASK;
1811 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
1812 & SDMA_DESC1_GENERATION_MASK;
1813 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
1814 & SDMA_DESC0_BYTE_COUNT_MASK;
1816 "SDMA sdmadesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
1817 head, flags, addr, gen, len);
1819 "\tdesc0:0x%016llx desc1 0x%016llx\n",
1821 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
1823 "\taidx: %u amode: %u alen: %u\n",
1825 SDMA_DESC1_HEADER_INDEX_SMASK) >>
1826 SDMA_DESC1_HEADER_INDEX_SHIFT),
1828 SDMA_DESC1_HEADER_MODE_SMASK) >>
1829 SDMA_DESC1_HEADER_MODE_SHIFT),
1831 SDMA_DESC1_HEADER_DWS_SMASK) >>
1832 SDMA_DESC1_HEADER_DWS_SHIFT));
1834 head &= sde->sdma_mask;
1839 "SDE %u CPU %d STE %s C 0x%llx S 0x%016llx E 0x%llx T(HW) 0x%llx T(SW) 0x%x H(HW) 0x%llx H(SW) 0x%x H(D) 0x%llx DM 0x%llx GL 0x%llx R 0x%llx LIS 0x%llx AHGI 0x%llx TXT %u TXH %u DT %u DH %u FLNE %d DQF %u SLC 0x%llx\n"
1841 * sdma_seqfile_dump_sde() - debugfs dump of sde
1843 * @sde: send dma engine to dump
1845 * This routine dumps the sde to the indicated seq file.
1847 void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
1850 struct hw_sdma_desc *descqp;
1856 head = sde->descq_head & sde->sdma_mask;
1857 tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask;
1858 seq_printf(s, SDE_FMT, sde->this_idx,
1860 sdma_state_name(sde->state.current_state),
1861 (unsigned long long)read_sde_csr(sde, SD(CTRL)),
1862 (unsigned long long)read_sde_csr(sde, SD(STATUS)),
1863 (unsigned long long)read_sde_csr(sde, SD(ENG_ERR_STATUS)),
1864 (unsigned long long)read_sde_csr(sde, SD(TAIL)), tail,
1865 (unsigned long long)read_sde_csr(sde, SD(HEAD)), head,
1866 (unsigned long long)le64_to_cpu(*sde->head_dma),
1867 (unsigned long long)read_sde_csr(sde, SD(MEMORY)),
1868 (unsigned long long)read_sde_csr(sde, SD(LEN_GEN)),
1869 (unsigned long long)read_sde_csr(sde, SD(RELOAD_CNT)),
1870 (unsigned long long)sde->last_status,
1871 (unsigned long long)sde->ahg_bits,
1876 !list_empty(&sde->flushlist),
1877 sde->descq_full_count,
1878 (unsigned long long)read_sde_csr(sde, SEND_DMA_CHECK_SLID));
1880 /* print info for each entry in the descriptor queue */
1881 while (head != tail) {
1882 char flags[6] = { 'x', 'x', 'x', 'x', 0 };
1884 descqp = &sde->descq[head];
1885 desc[0] = le64_to_cpu(descqp->qw[0]);
1886 desc[1] = le64_to_cpu(descqp->qw[1]);
1887 flags[0] = (desc[1] & SDMA_DESC1_INT_REQ_FLAG) ? 'I' : '-';
1888 flags[1] = (desc[1] & SDMA_DESC1_HEAD_TO_HOST_FLAG) ?
1890 flags[2] = (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG) ? 'F' : '-';
1891 flags[3] = (desc[0] & SDMA_DESC0_LAST_DESC_FLAG) ? 'L' : '-';
1892 addr = (desc[0] >> SDMA_DESC0_PHY_ADDR_SHIFT)
1893 & SDMA_DESC0_PHY_ADDR_MASK;
1894 gen = (desc[1] >> SDMA_DESC1_GENERATION_SHIFT)
1895 & SDMA_DESC1_GENERATION_MASK;
1896 len = (desc[0] >> SDMA_DESC0_BYTE_COUNT_SHIFT)
1897 & SDMA_DESC0_BYTE_COUNT_MASK;
1899 "\tdesc[%u]: flags:%s addr:0x%016llx gen:%u len:%u bytes\n",
1900 head, flags, addr, gen, len);
1901 if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
1902 seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
1904 SDMA_DESC1_HEADER_INDEX_SMASK) >>
1905 SDMA_DESC1_HEADER_INDEX_SHIFT),
1907 SDMA_DESC1_HEADER_MODE_SMASK) >>
1908 SDMA_DESC1_HEADER_MODE_SHIFT));
1909 head = (head + 1) & sde->sdma_mask;
1914 * add the generation number into
1915 * the qw1 and return
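 *
 * The generation is a 2-bit counter derived from the free-running tail
 * ((descq_tail >> sdma_shift) & 3), so it advances each time the tail
 * wraps the descriptor ring; the hardware's generation checking, enabled
 * in sdma_setlengen(), relies on this value.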
1917 static inline u64 add_gen(struct sdma_engine *sde, u64 qw1)
1919 u8 generation = (sde->descq_tail >> sde->sdma_shift) & 3;
1921 qw1 &= ~SDMA_DESC1_GENERATION_SMASK;
1922 qw1 |= ((u64)generation & SDMA_DESC1_GENERATION_MASK)
1923 << SDMA_DESC1_GENERATION_SHIFT;
1928 * This routine submits the indicated tx
1930 * Space has already been guaranteed and
1931 * tail side of ring is locked.
1933 * The hardware tail update is done
1934 * in the caller and that is facilitated
1935 * by returning the new tail.
1937 * There is special case logic for ahg
1938 * to not add the generation number for
1939 * up to 2 descriptors that follow the
1943 static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
1947 struct sdma_desc *descp = tx->descp;
1948 u8 skip = 0, mode = ahg_mode(tx);
1950 tail = sde->descq_tail & sde->sdma_mask;
1951 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
1952 sde->descq[tail].qw[1] = cpu_to_le64(add_gen(sde, descp->qw[1]));
1953 trace_hfi1_sdma_descriptor(sde, descp->qw[0], descp->qw[1],
1954 tail, &sde->descq[tail]);
1955 tail = ++sde->descq_tail & sde->sdma_mask;
1957 if (mode > SDMA_AHG_APPLY_UPDATE1)
1959 for (i = 1; i < tx->num_desc; i++, descp++) {
1962 sde->descq[tail].qw[0] = cpu_to_le64(descp->qw[0]);
1964 /* edits don't have generation */
1968 /* replace generation with real one for non-edits */
1969 qw1 = add_gen(sde, descp->qw[1]);
1971 sde->descq[tail].qw[1] = cpu_to_le64(qw1);
1972 trace_hfi1_sdma_descriptor(sde, descp->qw[0], qw1,
1973 tail, &sde->descq[tail]);
1974 tail = ++sde->descq_tail & sde->sdma_mask;
1976 tx->next_descq_idx = tail;
1977 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
1978 tx->sn = sde->tail_sn++;
1979 trace_hfi1_sdma_in_sn(sde, tx->sn);
1980 WARN_ON_ONCE(sde->tx_ring[sde->tx_tail & sde->sdma_mask]);
1982 sde->tx_ring[sde->tx_tail++ & sde->sdma_mask] = tx;
1983 sde->desc_avail -= tx->num_desc;
1988 * Check for progress
1990 static int sdma_check_progress(
1991 struct sdma_engine *sde,
1992 struct iowait *wait,
1993 struct sdma_txreq *tx)
1997 sde->desc_avail = sdma_descq_freecnt(sde);
1998 if (tx->num_desc <= sde->desc_avail)
2000 /* pulse the head_lock */
2001 if (wait && wait->sleep) {
2004 seq = raw_seqcount_begin(
2005 (const seqcount_t *)&sde->head_lock.seqcount);
2006 ret = wait->sleep(sde, wait, tx, seq);
2008 sde->desc_avail = sdma_descq_freecnt(sde);
2016 * sdma_send_txreq() - submit a tx req to ring
2017 * @sde: sdma engine to use
2018 * @wait: wait structure to use when full (may be NULL)
2019 * @tx: sdma_txreq to submit
2021 * The call submits the tx into the ring. If an iowait structure is non-NULL
2022 * the packet will be queued to the list in wait.
2025 * 0 - Success, -EINVAL - sdma_txreq incomplete, -EBUSY - no space in
2026 * ring (wait == NULL)
2027 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
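 *
 * Illustrative caller sketch (hypothetical caller and names, not from
 * this file):
 *
 *   ret = sdma_send_txreq(sde, &priv->s_iowait, &tx->txreq);
 *   if (ret == -EIOCBQUEUED)
 *       return 0;
 *   if (ret)
 *       goto bail_tx;
 *
 * On -EIOCBQUEUED the tx sits on the iowait and the wait's wakeup()
 * callback is invoked from sdma_desc_avail() once descriptors free up.
 * When the engine is not running (-ECOMM above), the tx is parked on the
 * flushlist and completed with SDMA_TXREQ_S_ABORTED by the flush worker.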
2029 int sdma_send_txreq(struct sdma_engine *sde,
2030 struct iowait *wait,
2031 struct sdma_txreq *tx)
2035 unsigned long flags;
2037 /* user should have supplied entire packet */
2038 if (unlikely(tx->tlen))
2041 spin_lock_irqsave(&sde->tail_lock, flags);
2043 if (unlikely(!__sdma_running(sde)))
2045 if (unlikely(tx->num_desc > sde->desc_avail))
2047 tail = submit_tx(sde, tx);
2049 iowait_sdma_inc(wait);
2050 sdma_update_tail(sde, tail);
2052 spin_unlock_irqrestore(&sde->tail_lock, flags);
2056 iowait_sdma_inc(wait);
2057 tx->next_descq_idx = 0;
2058 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2059 tx->sn = sde->tail_sn++;
2060 trace_hfi1_sdma_in_sn(sde, tx->sn);
2062 spin_lock(&sde->flushlist_lock);
2063 list_add_tail(&tx->list, &sde->flushlist);
2064 spin_unlock(&sde->flushlist_lock);
2067 wait->count += tx->num_desc;
2069 schedule_work(&sde->flush_worker);
2073 ret = sdma_check_progress(sde, wait, tx);
2074 if (ret == -EAGAIN) {
2078 sde->descq_full_count++;
2083 * sdma_send_txlist() - submit a list of tx req to ring
2084 * @sde: sdma engine to use
2085 * @wait: wait structure to use when full (may be NULL)
2086 * @tx_list: list of sdma_txreqs to submit
2088 * The call submits the list into the ring.
2090 * If the iowait structure is non-NULL and not equal to the iowait list
2091 * the unprocessed part of the list will be appended to the list in wait.
2093 * In all cases, the tx_list will be updated so the head of the tx_list is
2094 * the list of descriptors that have yet to be transmitted.
2096 * The intent of this call is to provide a more efficient
2097 * way of submitting multiple packets to SDMA while holding the tail
2101 * > 0 - Success (value is number of sdma_txreq's submitted),
2102 * -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
2103 * -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
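 *
 * Illustrative use (hypothetical caller): build several sdma_txreqs on a
 * local list_head and call sdma_send_txlist() once, so the tail CSR is
 * written at most every SDMA_TAIL_UPDATE_THRESH + 1 requests rather than
 * per packet; on return, any requests still on tx_list were not accepted
 * and remain queued for a later retry.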
2105 int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
2106 struct list_head *tx_list)
2108 struct sdma_txreq *tx, *tx_next;
2110 unsigned long flags;
2111 u16 tail = INVALID_TAIL;
2114 spin_lock_irqsave(&sde->tail_lock, flags);
2116 list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2118 if (unlikely(!__sdma_running(sde)))
2120 if (unlikely(tx->num_desc > sde->desc_avail))
2122 if (unlikely(tx->tlen)) {
2126 list_del_init(&tx->list);
2127 tail = submit_tx(sde, tx);
2129 if (tail != INVALID_TAIL &&
2130 (count & SDMA_TAIL_UPDATE_THRESH) == 0) {
2131 sdma_update_tail(sde, tail);
2132 tail = INVALID_TAIL;
2137 iowait_sdma_add(wait, count);
2138 if (tail != INVALID_TAIL)
2139 sdma_update_tail(sde, tail);
2140 spin_unlock_irqrestore(&sde->tail_lock, flags);
2141 return ret == 0 ? count : ret;
2143 spin_lock(&sde->flushlist_lock);
2144 list_for_each_entry_safe(tx, tx_next, tx_list, list) {
2146 list_del_init(&tx->list);
2148 iowait_sdma_inc(wait);
2149 tx->next_descq_idx = 0;
2150 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
2151 tx->sn = sde->tail_sn++;
2152 trace_hfi1_sdma_in_sn(sde, tx->sn);
2154 list_add_tail(&tx->list, &sde->flushlist);
2157 wait->count += tx->num_desc;
2160 spin_unlock(&sde->flushlist_lock);
2161 schedule_work(&sde->flush_worker);
2165 ret = sdma_check_progress(sde, wait, tx);
2166 if (ret == -EAGAIN) {
2170 sde->descq_full_count++;
2174 static void sdma_process_event(struct sdma_engine *sde, enum sdma_events event)
2176 unsigned long flags;
2178 spin_lock_irqsave(&sde->tail_lock, flags);
2179 write_seqlock(&sde->head_lock);
2181 __sdma_process_event(sde, event);
2183 if (sde->state.current_state == sdma_state_s99_running)
2184 sdma_desc_avail(sde, sdma_descq_freecnt(sde));
2186 write_sequnlock(&sde->head_lock);
2187 spin_unlock_irqrestore(&sde->tail_lock, flags);
2190 static void __sdma_process_event(struct sdma_engine *sde,
2191 enum sdma_events event)
2193 struct sdma_state *ss = &sde->state;
2194 int need_progress = 0;
2196 /* CONFIG SDMA temporary */
2197 #ifdef CONFIG_SDMA_VERBOSITY
2198 dd_dev_err(sde->dd, "CONFIG SDMA(%u) [%s] %s\n", sde->this_idx,
2199 sdma_state_names[ss->current_state],
2200 sdma_event_names[event]);
2203 switch (ss->current_state) {
2204 case sdma_state_s00_hw_down:
2206 case sdma_event_e00_go_hw_down:
2208 case sdma_event_e30_go_running:
2210 * If down, but running requested (usually the result
2211 * of a link up), then we need to start up.
2212 * This can happen when hw down is requested while
2213 * bringing the link up with traffic active on
2216 ss->go_s99_running = 1;
2217 /* fall through and start dma engine */
2218 case sdma_event_e10_go_hw_start:
2219 /* This reference means the state machine is started */
2220 sdma_get(&sde->state);
2222 sdma_state_s10_hw_start_up_halt_wait);
2224 case sdma_event_e15_hw_halt_done:
2226 case sdma_event_e25_hw_clean_up_done:
2228 case sdma_event_e40_sw_cleaned:
2229 sdma_sw_tear_down(sde);
2231 case sdma_event_e50_hw_cleaned:
2233 case sdma_event_e60_hw_halted:
2235 case sdma_event_e70_go_idle:
2237 case sdma_event_e80_hw_freeze:
2239 case sdma_event_e81_hw_frozen:
2241 case sdma_event_e82_hw_unfreeze:
2243 case sdma_event_e85_link_down:
2245 case sdma_event_e90_sw_halted:
2250 case sdma_state_s10_hw_start_up_halt_wait:
2252 case sdma_event_e00_go_hw_down:
2253 sdma_set_state(sde, sdma_state_s00_hw_down);
2254 sdma_sw_tear_down(sde);
2256 case sdma_event_e10_go_hw_start:
2258 case sdma_event_e15_hw_halt_done:
			sdma_set_state(sde,
				       sdma_state_s15_hw_start_up_clean_wait);
2261 sdma_start_hw_clean_up(sde);
2263 case sdma_event_e25_hw_clean_up_done:
2265 case sdma_event_e30_go_running:
2266 ss->go_s99_running = 1;
2268 case sdma_event_e40_sw_cleaned:
2270 case sdma_event_e50_hw_cleaned:
2272 case sdma_event_e60_hw_halted:
2273 schedule_work(&sde->err_halt_worker);
2275 case sdma_event_e70_go_idle:
2276 ss->go_s99_running = 0;
2278 case sdma_event_e80_hw_freeze:
2280 case sdma_event_e81_hw_frozen:
2282 case sdma_event_e82_hw_unfreeze:
2284 case sdma_event_e85_link_down:
2286 case sdma_event_e90_sw_halted:
2291 case sdma_state_s15_hw_start_up_clean_wait:
2293 case sdma_event_e00_go_hw_down:
2294 sdma_set_state(sde, sdma_state_s00_hw_down);
2295 sdma_sw_tear_down(sde);
2297 case sdma_event_e10_go_hw_start:
2299 case sdma_event_e15_hw_halt_done:
2301 case sdma_event_e25_hw_clean_up_done:
2302 sdma_hw_start_up(sde);
2303 sdma_set_state(sde, ss->go_s99_running ?
2304 sdma_state_s99_running :
2305 sdma_state_s20_idle);
2307 case sdma_event_e30_go_running:
2308 ss->go_s99_running = 1;
2310 case sdma_event_e40_sw_cleaned:
2312 case sdma_event_e50_hw_cleaned:
2314 case sdma_event_e60_hw_halted:
2316 case sdma_event_e70_go_idle:
2317 ss->go_s99_running = 0;
2319 case sdma_event_e80_hw_freeze:
2321 case sdma_event_e81_hw_frozen:
2323 case sdma_event_e82_hw_unfreeze:
2325 case sdma_event_e85_link_down:
2327 case sdma_event_e90_sw_halted:
2332 case sdma_state_s20_idle:
2334 case sdma_event_e00_go_hw_down:
2335 sdma_set_state(sde, sdma_state_s00_hw_down);
2336 sdma_sw_tear_down(sde);
2338 case sdma_event_e10_go_hw_start:
2340 case sdma_event_e15_hw_halt_done:
2342 case sdma_event_e25_hw_clean_up_done:
2344 case sdma_event_e30_go_running:
2345 sdma_set_state(sde, sdma_state_s99_running);
2346 ss->go_s99_running = 1;
2348 case sdma_event_e40_sw_cleaned:
2350 case sdma_event_e50_hw_cleaned:
2352 case sdma_event_e60_hw_halted:
2353 sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
2354 schedule_work(&sde->err_halt_worker);
2356 case sdma_event_e70_go_idle:
2358 case sdma_event_e85_link_down:
2360 case sdma_event_e80_hw_freeze:
2361 sdma_set_state(sde, sdma_state_s80_hw_freeze);
2362 atomic_dec(&sde->dd->sdma_unfreeze_count);
2363 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2365 case sdma_event_e81_hw_frozen:
2367 case sdma_event_e82_hw_unfreeze:
2369 case sdma_event_e90_sw_halted:
2374 case sdma_state_s30_sw_clean_up_wait:
2376 case sdma_event_e00_go_hw_down:
2377 sdma_set_state(sde, sdma_state_s00_hw_down);
2379 case sdma_event_e10_go_hw_start:
2381 case sdma_event_e15_hw_halt_done:
2383 case sdma_event_e25_hw_clean_up_done:
2385 case sdma_event_e30_go_running:
2386 ss->go_s99_running = 1;
2388 case sdma_event_e40_sw_cleaned:
2389 sdma_set_state(sde, sdma_state_s40_hw_clean_up_wait);
2390 sdma_start_hw_clean_up(sde);
2392 case sdma_event_e50_hw_cleaned:
2394 case sdma_event_e60_hw_halted:
2396 case sdma_event_e70_go_idle:
2397 ss->go_s99_running = 0;
2399 case sdma_event_e80_hw_freeze:
2401 case sdma_event_e81_hw_frozen:
2403 case sdma_event_e82_hw_unfreeze:
2405 case sdma_event_e85_link_down:
2406 ss->go_s99_running = 0;
2408 case sdma_event_e90_sw_halted:
2413 case sdma_state_s40_hw_clean_up_wait:
2415 case sdma_event_e00_go_hw_down:
2416 sdma_set_state(sde, sdma_state_s00_hw_down);
2417 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2419 case sdma_event_e10_go_hw_start:
2421 case sdma_event_e15_hw_halt_done:
2423 case sdma_event_e25_hw_clean_up_done:
2424 sdma_hw_start_up(sde);
2425 sdma_set_state(sde, ss->go_s99_running ?
2426 sdma_state_s99_running :
2427 sdma_state_s20_idle);
2429 case sdma_event_e30_go_running:
2430 ss->go_s99_running = 1;
2432 case sdma_event_e40_sw_cleaned:
2434 case sdma_event_e50_hw_cleaned:
2436 case sdma_event_e60_hw_halted:
2438 case sdma_event_e70_go_idle:
2439 ss->go_s99_running = 0;
2441 case sdma_event_e80_hw_freeze:
2443 case sdma_event_e81_hw_frozen:
2445 case sdma_event_e82_hw_unfreeze:
2447 case sdma_event_e85_link_down:
2448 ss->go_s99_running = 0;
2450 case sdma_event_e90_sw_halted:
2455 case sdma_state_s50_hw_halt_wait:
2457 case sdma_event_e00_go_hw_down:
2458 sdma_set_state(sde, sdma_state_s00_hw_down);
2459 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2461 case sdma_event_e10_go_hw_start:
2463 case sdma_event_e15_hw_halt_done:
2464 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
2465 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2467 case sdma_event_e25_hw_clean_up_done:
2469 case sdma_event_e30_go_running:
2470 ss->go_s99_running = 1;
2472 case sdma_event_e40_sw_cleaned:
2474 case sdma_event_e50_hw_cleaned:
2476 case sdma_event_e60_hw_halted:
2477 schedule_work(&sde->err_halt_worker);
2479 case sdma_event_e70_go_idle:
2480 ss->go_s99_running = 0;
2482 case sdma_event_e80_hw_freeze:
2484 case sdma_event_e81_hw_frozen:
2486 case sdma_event_e82_hw_unfreeze:
2488 case sdma_event_e85_link_down:
2489 ss->go_s99_running = 0;
2491 case sdma_event_e90_sw_halted:
2496 case sdma_state_s60_idle_halt_wait:
2498 case sdma_event_e00_go_hw_down:
2499 sdma_set_state(sde, sdma_state_s00_hw_down);
2500 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2502 case sdma_event_e10_go_hw_start:
2504 case sdma_event_e15_hw_halt_done:
2505 sdma_set_state(sde, sdma_state_s30_sw_clean_up_wait);
2506 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2508 case sdma_event_e25_hw_clean_up_done:
2510 case sdma_event_e30_go_running:
2511 ss->go_s99_running = 1;
2513 case sdma_event_e40_sw_cleaned:
2515 case sdma_event_e50_hw_cleaned:
2517 case sdma_event_e60_hw_halted:
2518 schedule_work(&sde->err_halt_worker);
2520 case sdma_event_e70_go_idle:
2521 ss->go_s99_running = 0;
2523 case sdma_event_e80_hw_freeze:
2525 case sdma_event_e81_hw_frozen:
2527 case sdma_event_e82_hw_unfreeze:
2529 case sdma_event_e85_link_down:
2531 case sdma_event_e90_sw_halted:
2536 case sdma_state_s80_hw_freeze:
2538 case sdma_event_e00_go_hw_down:
2539 sdma_set_state(sde, sdma_state_s00_hw_down);
2540 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2542 case sdma_event_e10_go_hw_start:
2544 case sdma_event_e15_hw_halt_done:
2546 case sdma_event_e25_hw_clean_up_done:
2548 case sdma_event_e30_go_running:
2549 ss->go_s99_running = 1;
2551 case sdma_event_e40_sw_cleaned:
2553 case sdma_event_e50_hw_cleaned:
2555 case sdma_event_e60_hw_halted:
2557 case sdma_event_e70_go_idle:
2558 ss->go_s99_running = 0;
2560 case sdma_event_e80_hw_freeze:
2562 case sdma_event_e81_hw_frozen:
2563 sdma_set_state(sde, sdma_state_s82_freeze_sw_clean);
2564 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2566 case sdma_event_e82_hw_unfreeze:
2568 case sdma_event_e85_link_down:
2570 case sdma_event_e90_sw_halted:
2575 case sdma_state_s82_freeze_sw_clean:
2577 case sdma_event_e00_go_hw_down:
2578 sdma_set_state(sde, sdma_state_s00_hw_down);
2579 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2581 case sdma_event_e10_go_hw_start:
2583 case sdma_event_e15_hw_halt_done:
2585 case sdma_event_e25_hw_clean_up_done:
2587 case sdma_event_e30_go_running:
2588 ss->go_s99_running = 1;
2590 case sdma_event_e40_sw_cleaned:
2591 /* notify caller this engine is done cleaning */
2592 atomic_dec(&sde->dd->sdma_unfreeze_count);
2593 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2595 case sdma_event_e50_hw_cleaned:
2597 case sdma_event_e60_hw_halted:
2599 case sdma_event_e70_go_idle:
2600 ss->go_s99_running = 0;
2602 case sdma_event_e80_hw_freeze:
2604 case sdma_event_e81_hw_frozen:
2606 case sdma_event_e82_hw_unfreeze:
2607 sdma_hw_start_up(sde);
2608 sdma_set_state(sde, ss->go_s99_running ?
2609 sdma_state_s99_running :
2610 sdma_state_s20_idle);
2612 case sdma_event_e85_link_down:
2614 case sdma_event_e90_sw_halted:
2619 case sdma_state_s99_running:
2621 case sdma_event_e00_go_hw_down:
2622 sdma_set_state(sde, sdma_state_s00_hw_down);
2623 tasklet_hi_schedule(&sde->sdma_sw_clean_up_task);
2625 case sdma_event_e10_go_hw_start:
2627 case sdma_event_e15_hw_halt_done:
2629 case sdma_event_e25_hw_clean_up_done:
2631 case sdma_event_e30_go_running:
2633 case sdma_event_e40_sw_cleaned:
2635 case sdma_event_e50_hw_cleaned:
2637 case sdma_event_e60_hw_halted:
2639 sdma_err_progress_check_schedule(sde);
2640 case sdma_event_e90_sw_halted:
			/*
			 * A SW-initiated halt does not perform the engine
			 * progress check.
			 */
2645 sdma_set_state(sde, sdma_state_s50_hw_halt_wait);
2646 schedule_work(&sde->err_halt_worker);
2648 case sdma_event_e70_go_idle:
2649 sdma_set_state(sde, sdma_state_s60_idle_halt_wait);
2651 case sdma_event_e85_link_down:
2652 ss->go_s99_running = 0;
2654 case sdma_event_e80_hw_freeze:
2655 sdma_set_state(sde, sdma_state_s80_hw_freeze);
2656 atomic_dec(&sde->dd->sdma_unfreeze_count);
2657 wake_up_interruptible(&sde->dd->sdma_unfreeze_wq);
2659 case sdma_event_e81_hw_frozen:
2661 case sdma_event_e82_hw_unfreeze:
2667 ss->last_event = event;
2669 sdma_make_progress(sde, 0);
2673 * _extend_sdma_tx_descs() - helper to extend txreq
2675 * This is called once the initial nominal allocation
2676 * of descriptors in the sdma_txreq is exhausted.
 * The code will bump the allocation up to the max
 * of MAX_DESC (64) descriptors; there doesn't seem to be
 * much point in an interim step. The last descriptor
 * is reserved for the coalesce buffer in order to support
 * cases where the input packet has more than MAX_DESC iovecs.
2685 static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
2689 /* Handle last descriptor */
2690 if (unlikely((tx->num_desc == (MAX_DESC - 1)))) {
		/* if tlen is 0, it is for padding; release the last descriptor */
		if (!tx->tlen) {
			tx->desc_limit = MAX_DESC;
2694 } else if (!tx->coalesce_buf) {
2695 /* allocate coalesce buffer with space for padding */
		tx->coalesce_buf = kmalloc(tx->tlen + sizeof(u32),
					   GFP_ATOMIC);
		if (!tx->coalesce_buf)
			goto enomem;
2700 tx->coalesce_idx = 0;
	if (unlikely(tx->num_desc == MAX_DESC))
		goto enomem;
	tx->descp = kmalloc_array(MAX_DESC, sizeof(struct sdma_desc),
				  GFP_ATOMIC);
	if (!tx->descp)
		goto enomem;
2715 /* reserve last descriptor for coalescing */
2716 tx->desc_limit = MAX_DESC - 1;
2717 /* copy ones already built */
	for (i = 0; i < tx->num_desc; i++)
		tx->descp[i] = tx->descs[i];
	return 0;
enomem:
	sdma_txclean(dd, tx);
	return -ENOMEM;
2727 * ext_coal_sdma_tx_descs() - extend or coalesce sdma tx descriptors
2729 * This is called once the initial nominal allocation of descriptors
2730 * in the sdma_txreq is exhausted.
 * This function calls _extend_sdma_tx_descs to extend the descriptor
 * array or allocate a coalesce buffer. If a coalesce buffer has been
 * allocated, the input packet data is copied into it, and a descriptor
 * for the coalesce buffer is added once the whole packet has been
 * received.
2739 * 0 - coalescing, don't populate descriptor
2740 * 1 - continue with populating descriptor
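 *
 * Sketch of how a caller is expected to consume the return value
 * (illustrative only; the real callers are the sdma_txadd_*() inlines,
 * and the trailing comment stands in for their descriptor-mapping code):
 *
 *	if (tx->num_desc == tx->desc_limit) {
 *		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
 *					      NULL, page, offset, len);
 *		if (rval <= 0)
 *			return rval;	// < 0 error, 0 data was coalesced
 *	}
 *	// rval == 1 (or no extension was needed): add the real descriptor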
2742 int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
2743 int type, void *kvaddr, struct page *page,
2744 unsigned long offset, u16 len)
	rval = _extend_sdma_tx_descs(dd, tx);
	if (rval) {
		sdma_txclean(dd, tx);
		return rval;
	}
2755 /* If coalesce buffer is allocated, copy data into it */
2756 if (tx->coalesce_buf) {
2757 if (type == SDMA_MAP_NONE) {
2758 sdma_txclean(dd, tx);
2762 if (type == SDMA_MAP_PAGE) {
			kvaddr = kmap(page);
			kvaddr += offset;
2765 } else if (WARN_ON(!kvaddr)) {
2766 sdma_txclean(dd, tx);
2770 memcpy(tx->coalesce_buf + tx->coalesce_idx, kvaddr, len);
2771 tx->coalesce_idx += len;
		if (type == SDMA_MAP_PAGE)
			kunmap(page);
2775 /* If there is more data, return */
2776 if (tx->tlen - tx->coalesce_idx)
2779 /* Whole packet is received; add any padding */
		pad_len = tx->packet_len & (sizeof(u32) - 1);
		if (pad_len)
			pad_len = sizeof(u32) - pad_len;
2783 memset(tx->coalesce_buf + tx->coalesce_idx, 0, pad_len);
2784 /* padding is taken care of for coalescing case */
2785 tx->packet_len += pad_len;
2786 tx->tlen += pad_len;
2789 /* dma map the coalesce buffer */
2790 addr = dma_map_single(&dd->pcidev->dev,
2795 if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
2796 sdma_txclean(dd, tx);
2800 /* Add descriptor for coalesce buffer */
2801 tx->desc_limit = MAX_DESC;
2802 return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx,
2809 /* Update sdes when the lmc changes */
2810 void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid)
2812 struct sdma_engine *sde;
2816 sreg = ((mask & SD(CHECK_SLID_MASK_MASK)) <<
2817 SD(CHECK_SLID_MASK_SHIFT)) |
2818 (((lid & mask) & SD(CHECK_SLID_VALUE_MASK)) <<
2819 SD(CHECK_SLID_VALUE_SHIFT));
2821 for (i = 0; i < dd->num_sdma; i++) {
		hfi1_cdbg(LINKVERB, "SendDmaEngine[%d].SLID_CHECK = 0x%x",
			  i, (u32)sreg);
2824 sde = &dd->per_sdma[i];
2825 write_sde_csr(sde, SD(CHECK_SLID), sreg);
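/*
 * Worked example for the SLID-check programming above (illustrative; it
 * assumes the caller passes mask = ~((1 << lmc) - 1), which is a caller
 * convention rather than something enforced here): with lid = 0x1010 and
 * lmc = 2, mask = 0xfffc, so an SLID passes the check only when
 * (slid & 0xfffc) == 0x1010, i.e. SLIDs 0x1010 through 0x1013.
 */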
2829 /* tx not dword sized - pad */
2830 int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
2835 if ((unlikely(tx->num_desc == tx->desc_limit))) {
2836 rval = _extend_sdma_tx_descs(dd, tx);
2838 sdma_txclean(dd, tx);
2842 /* finish the one just added */
2847 sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
2848 _sdma_close_tx(dd, tx);
2853 * Add ahg to the sdma_txreq
 * The logic will consume up to 3
 * descriptors at the beginning of the sdma_txreq.
2859 void _sdma_txreq_ahgadd(
2860 struct sdma_txreq *tx,
2866 u32 i, shift = 0, desc = 0;
2869 WARN_ON_ONCE(num_ahg > 9 || (ahg_hlen & 3) || ahg_hlen == 4);
	if (num_ahg == 1)
		mode = SDMA_AHG_APPLY_UPDATE1;
	else if (num_ahg <= 5)
		mode = SDMA_AHG_APPLY_UPDATE2;
	else
		mode = SDMA_AHG_APPLY_UPDATE3;
	/* initialize the consumed descriptors to zero */
2880 case SDMA_AHG_APPLY_UPDATE3:
2882 tx->descs[2].qw[0] = 0;
2883 tx->descs[2].qw[1] = 0;
2885 case SDMA_AHG_APPLY_UPDATE2:
2887 tx->descs[1].qw[0] = 0;
2888 tx->descs[1].qw[1] = 0;
2892 tx->descs[0].qw[1] |=
2893 (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK)
2894 << SDMA_DESC1_HEADER_INDEX_SHIFT) |
2895 (((u64)ahg_hlen & SDMA_DESC1_HEADER_DWS_MASK)
2896 << SDMA_DESC1_HEADER_DWS_SHIFT) |
2897 (((u64)mode & SDMA_DESC1_HEADER_MODE_MASK)
2898 << SDMA_DESC1_HEADER_MODE_SHIFT) |
2899 (((u64)ahg[0] & SDMA_DESC1_HEADER_UPDATE1_MASK)
2900 << SDMA_DESC1_HEADER_UPDATE1_SHIFT);
2901 for (i = 0; i < (num_ahg - 1); i++) {
		if (!shift && !(i & 2))
			desc++;
		tx->descs[desc].qw[!!(i & 2)] |=
			(((u64)ahg[i + 1]) << shift);
2907 shift = (shift + 32) & 63;
2912 * sdma_ahg_alloc - allocate an AHG entry
2913 * @sde: engine to allocate from
2916 * 0-31 when successful, -EOPNOTSUPP if AHG is not enabled,
2917 * -ENOSPC if an entry is not available
2919 int sdma_ahg_alloc(struct sdma_engine *sde)
2925 trace_hfi1_ahg_allocate(sde, -EINVAL);
2929 nr = ffz(ACCESS_ONCE(sde->ahg_bits));
2931 trace_hfi1_ahg_allocate(sde, -ENOSPC);
2934 oldbit = test_and_set_bit(nr, &sde->ahg_bits);
2939 trace_hfi1_ahg_allocate(sde, nr);
2944 * sdma_ahg_free - free an AHG entry
2945 * @sde: engine to return AHG entry
2946 * @ahg_index: index to free
 * This routine frees the indicated AHG entry.
2950 void sdma_ahg_free(struct sdma_engine *sde, int ahg_index)
2954 trace_hfi1_ahg_deallocate(sde, ahg_index);
	if (ahg_index < 0 || ahg_index > 31)
		return;
2957 clear_bit(ahg_index, &sde->ahg_bits);
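/*
 * Illustrative pairing of the two AHG helpers above (a sketch only; how
 * the index is used to build AHG-based headers is up to the caller and
 * is not shown here):
 *
 *	int ahg_index = sdma_ahg_alloc(sde);
 *
 *	if (ahg_index < 0)
 *		return ahg_index;	// not available: fall back to full headers
 *	// ... build and post packets that reference ahg_index ...
 *	sdma_ahg_free(sde, ahg_index);
 */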
2961 * SPC freeze handling for SDMA engines. Called when the driver knows
2962 * the SPC is going into a freeze but before the freeze is fully
2963 * settled. Generally an error interrupt.
2965 * This event will pull the engine out of running so no more entries can be
2966 * added to the engine's queue.
2968 void sdma_freeze_notify(struct hfi1_devdata *dd, int link_down)
2971 enum sdma_events event = link_down ? sdma_event_e85_link_down :
2972 sdma_event_e80_hw_freeze;
2974 /* set up the wait but do not wait here */
2975 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
2977 /* tell all engines to stop running and wait */
2978 for (i = 0; i < dd->num_sdma; i++)
2979 sdma_process_event(&dd->per_sdma[i], event);
2981 /* sdma_freeze() will wait for all engines to have stopped */
2985 * SPC freeze handling for SDMA engines. Called when the driver knows
2986 * the SPC is fully frozen.
2988 void sdma_freeze(struct hfi1_devdata *dd)
	/*
	 * Make sure all engines have moved out of the running state before
	 * continuing.
	 */
2997 ret = wait_event_interruptible(dd->sdma_unfreeze_wq,
				       atomic_read(&dd->sdma_unfreeze_count) <= 0);
3000 /* interrupted or count is negative, then unloading - just exit */
3001 if (ret || atomic_read(&dd->sdma_unfreeze_count) < 0)
3004 /* set up the count for the next wait */
3005 atomic_set(&dd->sdma_unfreeze_count, dd->num_sdma);
3007 /* tell all engines that the SPC is frozen, they can start cleaning */
3008 for (i = 0; i < dd->num_sdma; i++)
3009 sdma_process_event(&dd->per_sdma[i], sdma_event_e81_hw_frozen);
3012 * Wait for everyone to finish software clean before exiting. The
3013 * software clean will read engine CSRs, so must be completed before
3014 * the next step, which will clear the engine CSRs.
3016 (void)wait_event_interruptible(dd->sdma_unfreeze_wq,
3017 atomic_read(&dd->sdma_unfreeze_count) <= 0);
3018 /* no need to check results - done no matter what */
3022 * SPC freeze handling for the SDMA engines. Called after the SPC is unfrozen.
 * The SPC freeze acts like an SDMA halt and a hardware clean combined. All
3025 * that is left is a software clean. We could do it after the SPC is fully
3026 * frozen, but then we'd have to add another state to wait for the unfreeze.
3027 * Instead, just defer the software clean until the unfreeze step.
3029 void sdma_unfreeze(struct hfi1_devdata *dd)
	/* tell all engines to start freeze clean up */
3034 for (i = 0; i < dd->num_sdma; i++)
3035 sdma_process_event(&dd->per_sdma[i],
3036 sdma_event_e82_hw_unfreeze);
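/*
 * Expected ordering of the three SPC freeze hooks above, as a sketch
 * (the actual call sites live in the chip-level freeze handling, which
 * is an assumption here, not something this file defines):
 *
 *	sdma_freeze_notify(dd, link_down);	// freeze starting: stop the engines
 *	// ... wait for the SPC freeze to settle ...
 *	sdma_freeze(dd);			// SPC fully frozen: engines clean up
 *	// ... SPC is unfrozen ...
 *	sdma_unfreeze(dd);			// restart the engines
 */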
3040 * _sdma_engine_progress_schedule() - schedule progress on engine
3041 * @sde: sdma_engine to schedule progress
3044 void _sdma_engine_progress_schedule(
3045 struct sdma_engine *sde)
3047 trace_hfi1_sdma_engine_progress(sde, sde->progress_mask);
3048 /* assume we have selected a good cpu */
	write_csr(sde->dd,
		  CCE_INT_FORCE + (8 * (IS_SDMA_START / 64)),
3051 sde->progress_mask);