// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <net/xdp_sock_drv.h>
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include "ice_sriov.h"

/**
 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM in case there is no space left in the PF
 * queue bitmap.
 */
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
	unsigned int offset, i;

	mutex_lock(qs_cfg->qs_mutex);
	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
					    0, qs_cfg->q_count, 0);
	if (offset >= qs_cfg->pf_map_size) {
		mutex_unlock(qs_cfg->qs_mutex);
		return -ENOMEM;
	}

	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
	for (i = 0; i < qs_cfg->q_count; i++)
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
}
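
/* Worked example (illustrative values only, not taken from the driver): if
 * pf_map has bits 0, 1, 5 and 6 set (queues already taken) and q_count = 3,
 * bitmap_find_next_zero_area() finds the zero area at offset 2, bits 2-4 are
 * marked used, and PF queues 2, 3 and 4 are recorded in vsi_map[] starting
 * at vsi_map_offset.
 */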

/**
 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM in case there is no space left in the PF
 * queue bitmap.
 */
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
	unsigned int i, index = 0;

	mutex_lock(qs_cfg->qs_mutex);
	for (i = 0; i < qs_cfg->q_count; i++) {
		index = find_next_zero_bit(qs_cfg->pf_map,
					   qs_cfg->pf_map_size, index);
		if (index >= qs_cfg->pf_map_size)
			goto err_scatter;
		set_bit(index, qs_cfg->pf_map);
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
err_scatter:
	for (index = 0; index < i; index++) {
		clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return -ENOMEM;
}
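
/* Worked example (illustrative values only): with bits 0, 1, 3, 4 and 6 of
 * pf_map already taken and q_count = 3, the loop above picks the scattered
 * free queues 2, 5 and 7 one bit at a time, setting each bit as it goes.
 */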

/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @ena: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state. Returns -ETIMEDOUT if the queue fails to reach
 * the requested state after multiple retries; otherwise returns 0.
 */
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
	int i;

	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
			      QRX_CTRL_QENA_STAT_M))
			return 0;

		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}

/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * We allocate one q_vector and set the default ITR values associated with
 * it. If allocation fails we return -ENOMEM.
 */
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
				GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
	q_vector->tx.itr_mode = ITR_DYNAMIC;
	q_vector->rx.itr_mode = ITR_DYNAMIC;
	q_vector->tx.type = ICE_TX_CONTAINER;
	q_vector->rx.type = ICE_RX_CONTAINER;

	if (vsi->type == ICE_VSI_VF)
		goto out;
	/* only set affinity_mask if the CPU is online */
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	/* This will not be called in the driver load path because the netdev
	 * will not be created yet. All other cases will register the NAPI
	 * handler here (i.e. resume, reset/rebuild, etc.)
	 */
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);

out:
	/* tie q_vector and VSI together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}

/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (!vsi->q_vectors[v_idx]) {
		dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
		return;
	}
	q_vector = vsi->q_vectors[v_idx];

	ice_for_each_tx_ring(tx_ring, q_vector->tx)
		tx_ring->q_vector = NULL;
	ice_for_each_rx_ring(rx_ring, q_vector->rx)
		rx_ring->q_vector = NULL;

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	devm_kfree(dev, q_vector);
	vsi->q_vectors[v_idx] = NULL;
}

/**
 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
 * @hw: board specific structure
 */
static void ice_cfg_itr_gran(struct ice_hw *hw)
{
	u32 regval = rd32(hw, GLINT_CTL);

	/* no need to update global register if ITR gran is already set */
	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
	    (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
	      GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
	      GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
	      GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
	      GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
		return;

	regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
		  GLINT_CTL_ITR_GRAN_200_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
		  GLINT_CTL_ITR_GRAN_100_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
		  GLINT_CTL_ITR_GRAN_50_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
		  GLINT_CTL_ITR_GRAN_25_M);
	wr32(hw, GLINT_CTL, regval);
}

/**
 * ice_calc_txq_handle - calculate the queue handle
 * @vsi: VSI that ring belongs to
 * @ring: ring to get the absolute queue index
 * @tc: traffic class number
 */
static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
{
	WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");

	if (ring->ch)
		return ring->q_index - ring->ch->base_q;

	/* The idea here is to subtract the queue offset of the TC that the
	 * ring belongs to from the ring's absolute queue index; the result
	 * is the queue's index within that TC.
	 */
	return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
}
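
/* For instance (illustrative numbers): if TC 2 starts at qoffset 16 and the
 * ring's absolute q_index is 20, the per-TC queue handle is 20 - 16 = 4.
 */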

/**
 * ice_eswitch_calc_txq_handle - calculate the queue handle for switchdev
 * @ring: pointer to ring which unique index is needed
 *
 * With many netdevs, the ring->q_index of Tx rings on a switchdev VSI can
 * repeat. Hardware ring setup requires a unique q_index. Calculate it here
 * by finding the index of this ring in vsi->tx_rings.
 *
 * Return ICE_INVAL_Q_INDEX when the index wasn't found. This should never
 * happen, because the VSI is taken from ring->vsi, so the ring has to be
 * present in this VSI.
 */
static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	int i;

	ice_for_each_txq(vsi, i) {
		if (vsi->tx_rings[i] == ring)
			return i;
	}

	return ICE_INVAL_Q_INDEX;
}

/**
 * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 */
static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
{
	if (!ring->q_vector || !ring->netdev)
		return;

	/* We only initialize XPS once, so as not to overwrite user settings */
	if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
		return;

	netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
			    ring->q_index);
}

/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
 */
static void
ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring->dcb_tc);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type
	 * VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is VF number between 0-256
	 * for vmvf_type = VM, it is VM number between 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_LB:
	case ICE_VSI_CTRL:
	case ICE_VSI_PF:
		if (ring->ch)
			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
		else
			tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	case ICE_VSI_SWITCHDEV_CTRL:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
		break;
	default:
		return;
	}

	/* make sure the context is associated with the right VSI */
	if (ring->ch)
		tlan_ctx->src_vsi = ring->ch->vsi_num;
	else
		tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	/* Restrict Tx timestamps to the PF VSI */
	switch (vsi->type) {
	case ICE_VSI_PF:
		tlan_ctx->tsyn_ena = 1;
		break;
	default:
		break;
	}

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface:
	 * 0: Advanced Host Interface
	 * 1: Legacy Host Interface
	 */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}

/**
 * ice_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
{
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;
	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		return XDP_PACKET_HEADROOM;

	return 0;
}

/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
{
	int chain_len = ICE_MAX_CHAINED_RX_BUFS;
	struct ice_vsi *vsi = ring->vsi;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	struct ice_rlan_ctx rlan_ctx;
	struct ice_hw *hw;
	u16 pf_q;
	int err;

	hw = &vsi->back->hw;

	/* what is Rx queue number in global space of 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
	memset(&rlan_ctx, 0, sizeof(rlan_ctx));

	/* Receive Queue Base Address.
	 * Indicates the starting address of the descriptor queue defined in
	 * 128 Byte units.
	 */
	rlan_ctx.base = ring->dma >> 7;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size.
	 * The Packet Data Buffer Size is defined in 128 byte units.
	 */
	rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to host
	 * memory.
	 */
	rlan_ctx.crcstrip = !(ring->flags & ICE_RX_FLAGS_CRC_STRIP_DIS);

	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor
	 * and it needs to remain 1 for non-DVM capable configurations to not
	 * break backward compatibility for VF drivers. Setting this field to 0
	 * will cause the single/outer VLAN tag to be stripped to the L2TAG2_2ND
	 * field in the Rx descriptor. Setting it to 1 allows the VLAN tag to
	 * be stripped in L2TAG1 of the Rx descriptor, which is where VFs will
	 * check for the tag.
	 */
	if (ice_is_dvm_ena(hw)) {
		if (vsi->type == ICE_VSI_VF &&
		    ice_vf_is_port_vlan_ena(vsi->vf))
			rlan_ctx.l2tsel = 1;
		else
			rlan_ctx.l2tsel = 0;
	} else {
		rlan_ctx.l2tsel = 1;
	}

	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* This controls whether VLAN is stripped from inner headers.
	 * The VLAN in the inner L2 header is stripped to the receive
	 * descriptor if enabled by this flag.
	 */
	rlan_ctx.showiv = 0;

	/* For AF_XDP ZC, we disallow packets from spanning multiple buffers,
	 * which lets us skip that handling in the fast path.
	 */
	if (ring->xsk_pool)
		chain_len = 1;
	/* Max packet size for this queue - must not be set to a larger value
	 * than 5 x DBUF
	 */
	rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
			       chain_len * ring->rx_buf_len);

	/* Rx queue threshold in units of 64 */
	rlan_ctx.lrxqthresh = 1;

	/* Enable Flexible Descriptors in the queue context, which
	 * allows this driver to select a specific receive descriptor format,
	 * increasing context priority to pick up the profile ID; the default
	 * is 0x01; setting to 0x03 ensures the profile is programmed if the
	 * previous context is of the same priority.
	 */
	if (vsi->type != ICE_VSI_VF)
		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
	else
		ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3,
					false);

	/* Absolute queue number out of 2K needs to be passed */
	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	if (vsi->type == ICE_VSI_VF)
		return 0;

	/* configure Rx buffer alignment */
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
		ice_clear_ring_build_skb_ena(ring);
	else
		ice_set_ring_build_skb_ena(ring);

	ring->rx_offset = ice_rx_offset(ring);

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);

	return 0;
}

/**
 * ice_vsi_cfg_rxq - Configure an Rx queue
 * @ring: the ring being configured
 *
 * Return 0 on success and a negative value on error.
 */
int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
	struct device *dev = ice_pf_to_dev(ring->vsi->back);
	u16 num_bufs = ICE_DESC_UNUSED(ring);
	int err;

	ring->rx_buf_len = ring->vsi->rx_buf_len;

	if (ring->vsi->type == ICE_VSI_PF) {
		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
			/* coverity[check_return] */
			xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
					 ring->q_index,
					 ring->q_vector->napi.napi_id);

		ring->xsk_pool = ice_xsk_pool(ring);
		if (ring->xsk_pool) {
			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

			ring->rx_buf_len =
				xsk_pool_get_rx_frame_size(ring->xsk_pool);
			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_XSK_BUFF_POOL,
							 NULL);
			if (err)
				return err;
			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);

			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
				 ring->q_index);
		} else {
			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
				/* coverity[check_return] */
				xdp_rxq_info_reg(&ring->xdp_rxq,
						 ring->netdev,
						 ring->q_index,
						 ring->q_vector->napi.napi_id);

			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err)
				return err;
		}
	}

	err = ice_setup_rx_ctx(ring);
	if (err) {
		dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
			ring->q_index, err);
		return err;
	}

	if (ring->xsk_pool) {
		bool ok;

		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
				 num_bufs, ring->q_index);
			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");

			return 0;
		}

		ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
		if (!ok) {
			u16 pf_q = ring->vsi->rxq_map[ring->q_index];

			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
				 ring->q_index, pf_q);
		}

		return 0;
	}

	ice_alloc_rx_bufs(ring, num_bufs);

	return 0;
}

/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * This function first tries to find contiguous space. If it is not
 * successful, it tries the scatter approach.
 *
 * Return 0 on success and -ENOMEM in case there is no space left in the PF
 * queue bitmap.
 */
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
{
	int ret = 0;

	ret = __ice_vsi_get_qs_contig(qs_cfg);
	if (ret) {
		/* contig failed, so try with scatter approach */
		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
		qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
					qs_cfg->scatter_count);
		ret = __ice_vsi_get_qs_sc(qs_cfg);
	}
	return ret;
}
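
/* A minimal sketch of a caller, modeled on ice_vsi_get_qs() in ice_lib.c
 * (field values here are illustrative, not authoritative):
 *
 *	struct ice_qs_cfg tx_qs_cfg = {
 *		.qs_mutex = &pf->avail_q_mutex,
 *		.pf_map = pf->avail_txqs,
 *		.pf_map_size = pf->max_pf_txqs,
 *		.q_count = vsi->alloc_txq,
 *		.scatter_count = ICE_MAX_SCATTER_TXQS,
 *		.vsi_map = vsi->txq_map,
 *		.vsi_map_offset = 0,
 *		.mapping_mode = ICE_VSI_MAP_CONTIG,
 *	};
 *	int ret = __ice_vsi_get_qs(&tx_qs_cfg);
 */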

/**
 * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx ring
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 * @wait: wait or don't wait for configuration to finish in hardware
 *
 * Return 0 on success and negative on error.
 */
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 rx_reg;

	rx_reg = rd32(hw, QRX_CTRL(pf_q));

	/* Skip if the queue is already in the requested state */
	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
		return 0;

	/* turn on/off the queue */
	if (ena)
		rx_reg |= QRX_CTRL_QENA_REQ_M;
	else
		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
	wr32(hw, QRX_CTRL(pf_q), rx_reg);

	if (!wait)
		return 0;

	ice_flush(hw);
	return ice_pf_rxq_wait(pf, pf_q, ena);
}

/**
 * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
 * @vsi: the VSI being configured
 * @ena: true/false to verify Rx ring has been enabled/disabled respectively
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 *
 * This routine will wait for the given Rx queue of the VSI to reach the
 * enabled or disabled state. Returns -ETIMEDOUT if the queue fails to reach
 * the requested state after multiple retries; otherwise returns 0.
 */
int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;

	return ice_pf_rxq_wait(pf, pf_q, ena);
}

/**
 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	u16 v_idx;
	int err;

	if (vsi->q_vectors[0]) {
		dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
		return -EEXIST;
	}

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
		err = ice_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		ice_free_q_vector(vsi, v_idx);

	dev_err(dev, "Failed to allocate %d q_vectors for VSI %d, ret=%d\n",
		vsi->num_q_vectors, vsi->vsi_num, err);
	vsi->num_q_vectors = 0;
	return err;
}

/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vector as "efficiently" as possible.
 */
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
	int q_vectors = vsi->num_q_vectors;
	u16 tx_rings_rem, rx_rings_rem;
	int v_id;

	/* initially assign the VSI's queue counts to the remaining counters */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		u8 tx_rings_per_v, rx_rings_per_v;
		u16 q_id, q_base;

		/* Tx rings mapping to vector */
		tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.tx_ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.tx_ring;
			q_vector->tx.tx_ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.rx_ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.rx_ring;
			q_vector->rx.rx_ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}
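
/* Distribution example (illustrative): with 8 Tx rings and 3 vectors, the
 * DIV_ROUND_UP() split above assigns rings 0-2 to vector 0 (ceil(8/3) = 3),
 * rings 3-5 to vector 1 (ceil(5/2) = 3) and rings 6-7 to vector 2
 * (ceil(2/1) = 2).
 */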

/**
 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI having memory freed
 */
void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
	int v_idx;

	ice_for_each_q_vector(vsi, v_idx)
		ice_free_q_vector(vsi, v_idx);
}

/**
 * ice_vsi_cfg_txq - Configure a single Tx queue
 * @vsi: the VSI that queue belongs to
 * @ring: Tx ring to be configured
 * @qg_buf: queue group buffer
 *
 * Return 0 on success and a negative value on error.
 */
int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
		struct ice_aqc_add_tx_qgrp *qg_buf)
{
	u8 buf_len = struct_size(qg_buf, txqs, 1);
	struct ice_tlan_ctx tlan_ctx = { 0 };
	struct ice_aqc_add_txqs_perq *txq;
	struct ice_channel *ch = ring->ch;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int status;
	u16 pf_q;
	u8 tc;

	/* Configure XPS */
	ice_cfg_xps_tx_ring(ring);

	pf_q = ring->reg_idx;
	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
	/* copy context contents into the qg_buf */
	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
	ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	/* init queue specific tail reg. It is referred as
	 * transmit comm scheduler queue doorbell.
	 */
	ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	/* Add unique software queue handle of the Tx queue per
	 * TC into the VSI Tx ring
	 */
	if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
		ring->q_handle = ice_eswitch_calc_txq_handle(ring);

		if (ring->q_handle == ICE_INVAL_Q_INDEX)
			return -ENODEV;
	} else {
		ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
	}

	if (ch)
		status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
					 ring->q_handle, 1, qg_buf, buf_len,
					 NULL);
	else
		status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
					 ring->q_handle, 1, qg_buf, buf_len,
					 NULL);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
			status);
		return status;
	}

	/* Add Tx Queue TEID into the VSI Tx ring from the
	 * response. This will complete configuring and
	 * enabling the queue.
	 */
	txq = &qg_buf->txqs[0];
	if (pf_q == le16_to_cpu(txq->txq_id))
		ring->txq_teid = le32_to_cpu(txq->q_teid);

	return 0;
}
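
/* A minimal caller sketch, modeled on ice_vsi_cfg_txqs() in ice_lib.c
 * (names and flow are illustrative, not authoritative):
 *
 *	struct ice_aqc_add_tx_qgrp *qg_buf;
 *	int err;
 *
 *	qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
 *	if (!qg_buf)
 *		return -ENOMEM;
 *	qg_buf->num_txqs = 1;
 *	err = ice_vsi_cfg_txq(vsi, vsi->tx_rings[q_idx], qg_buf);
 *	kfree(qg_buf);
 */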

/**
 * ice_cfg_itr - configure the initial interrupt throttle values
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector that's being configured
 *
 * Configure interrupt throttling values for the ring containers that are
 * associated with the interrupt vector passed in.
 */
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	ice_cfg_itr_gran(hw);

	if (q_vector->num_ring_rx)
		ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting);

	if (q_vector->num_ring_tx)
		ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting);

	ice_write_intrl(q_vector, q_vector->intrl);
}

/**
 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
 * @vsi: the VSI being configured
 * @txq: Tx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;

	val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);

	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
	if (ice_is_xdp_ena_vsi(vsi)) {
		u32 xdp_txq = txq + vsi->num_xdp_txq;

		wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), val);
	}
	ice_flush(hw);
}

/**
 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
 * @vsi: the VSI being configured
 * @rxq: Rx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;

	val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);

	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);

	ice_flush(hw);
}

/**
 * ice_trigger_sw_intr - trigger a software interrupt
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector to trigger the software interrupt for
 */
void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
	     GLINT_DYN_CTL_SWINT_TRIG_M |
	     GLINT_DYN_CTL_INTENA_M);
}

/**
 * ice_vsi_stop_tx_ring - Disable single Tx ring
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @ring: Tx ring to be stopped
 * @txq_meta: Meta data of Tx ring to be stopped
 */
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		     u16 rel_vmvf_num, struct ice_tx_ring *ring,
		     struct ice_txq_meta *txq_meta)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	struct ice_hw *hw = &pf->hw;
	int status;
	u32 val;

	/* clear cause_ena bit for disabled queues */
	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
	val &= ~QINT_TQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_TQCTL(ring->reg_idx), val);

	/* software is expected to wait for 100 ns */
	ndelay(100);

	/* trigger a software interrupt for the vector
	 * associated to the queue to schedule NAPI handler
	 */
	q_vector = ring->q_vector;
	if (q_vector)
		ice_trigger_sw_intr(hw, q_vector);

	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
				 txq_meta->tc, 1, &txq_meta->q_handle,
				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
				 rel_vmvf_num, NULL);

	/* if the disable queue command was exercised during an
	 * active reset flow, -EBUSY is returned.
	 * This is not an error as the reset operation disables
	 * queues at the hardware level anyway.
	 */
	if (status == -EBUSY) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
	} else if (status == -ENOENT) {
		dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
	} else if (status) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
			status);
		return status;
	}

	return 0;
}

/**
 * ice_fill_txq_meta - Prepare the Tx queue's meta data
 * @vsi: VSI that ring belongs to
 * @ring: ring that txq_meta will be based on
 * @txq_meta: a helper struct that wraps Tx queue's information
 *
 * Set up a helper struct that will contain all the necessary fields that
 * are needed for stopping the Tx queue.
 */
void
ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
		  struct ice_txq_meta *txq_meta)
{
	struct ice_channel *ch = ring->ch;
	u8 tc;

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	txq_meta->q_id = ring->reg_idx;
	txq_meta->q_teid = ring->txq_teid;
	txq_meta->q_handle = ring->q_handle;
	if (ch) {
		txq_meta->vsi_idx = ch->ch_vsi->idx;
		txq_meta->tc = 0;
	} else {
		txq_meta->vsi_idx = vsi->idx;
		txq_meta->tc = tc;
	}
}