// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_base.h"
#include "ice_dcb_lib.h"

/**
 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM if there is no space left in the PF queue
 * bitmap
 */
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
	int offset, i;

	mutex_lock(qs_cfg->qs_mutex);
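	/* look for a run of q_count contiguous zero bits; the returned start
	 * offset is >= pf_map_size when no such run exists
	 */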
	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
					    0, qs_cfg->q_count, 0);
	if (offset >= qs_cfg->pf_map_size) {
		mutex_unlock(qs_cfg->qs_mutex);
		return -ENOMEM;
	}

	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
	for (i = 0; i < qs_cfg->q_count; i++)
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
}

/**
 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM if there is no space left in the PF queue
 * bitmap
 */
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
	int i, index = 0;

	mutex_lock(qs_cfg->qs_mutex);
	for (i = 0; i < qs_cfg->q_count; i++) {
		index = find_next_zero_bit(qs_cfg->pf_map,
					   qs_cfg->pf_map_size, index);
		if (index >= qs_cfg->pf_map_size)
			goto err_scatter;
		set_bit(index, qs_cfg->pf_map);
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
err_scatter:
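	/* unwind: clear every PF bitmap bit that was set in this call and
	 * reset the corresponding VSI map entries
	 */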
	for (index = 0; index < i; index++) {
		clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return -ENOMEM;
}

/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @ena: enable or disable state of the queue
 *
 * This routine waits for the given PF Rx queue to reach the enabled or
 * disabled state. Returns 0 on success, or -ETIMEDOUT if the queue fails to
 * reach the requested state after multiple retries.
 */
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
	int i;

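	/* QRX_CTRL_QENA_STAT_M reflects the queue's actual state, which
	 * trails the QENA_REQ request bit written by the driver
	 */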
	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
			      QRX_CTRL_QENA_STAT_M))
			return 0;

		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}

/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 */
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
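	/* VF VSI queues are serviced by the VF driver, so skip affinity and
	 * NAPI setup on the PF for those vectors
	 */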
	if (vsi->type == ICE_VSI_VF)
		goto out;
	/* only set affinity_mask if the CPU is online */
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	/* This will not be called in the driver load path because the netdev
	 * will not be created yet. All other cases will register the NAPI
	 * handler here (i.e. resume, reset/rebuild, etc.)
	 */
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

out:
	/* tie q_vector and VSI together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}

/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_ring *ring;

	if (!vsi->q_vectors[v_idx]) {
		dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
			v_idx);
		return;
	}
	q_vector = vsi->q_vectors[v_idx];

	ice_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;
	ice_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	devm_kfree(&pf->pdev->dev, q_vector);
	vsi->q_vectors[v_idx] = NULL;
}

/**
 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
 * @hw: board specific structure
 */
static void ice_cfg_itr_gran(struct ice_hw *hw)
{
	u32 regval = rd32(hw, GLINT_CTL);

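	/* the granularity is programmed separately for each maximum-interval
	 * range (25/50/100/200 usecs); all four fields must already hold
	 * ICE_ITR_GRAN_US for the register write to be skipped
	 */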
	/* no need to update global register if ITR gran is already set */
	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
	    (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
	      GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
	      GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
	      GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
	      GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
		return;

	regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
		  GLINT_CTL_ITR_GRAN_200_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
		  GLINT_CTL_ITR_GRAN_100_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
		  GLINT_CTL_ITR_GRAN_50_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
		  GLINT_CTL_ITR_GRAN_25_M);
	wr32(hw, GLINT_CTL, regval);
}

/**
 * ice_calc_q_handle - calculate the queue handle
 * @vsi: VSI that ring belongs to
 * @ring: ring whose queue handle is being calculated
 * @tc: traffic class number
 */
static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
{
	/* The queue's index within its TC is obtained by subtracting the
	 * queue offset of the TC that the ring belongs to from the ring's
	 * absolute queue index.
	 */
	return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
}

/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
 */
static void
ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

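	/* the context stores the ring base address right-shifted by
	 * ICE_TLAN_CTX_BASE_S; only the aligned upper address bits are kept
	 */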
	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type
	 * VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is VF number between 0-256
	 * for vmvf_type = VM, it is VM number between 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_LB:
		/* fall through */
	case ICE_VSI_PF:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	default:
		return;
	}

	/* make sure the context is associated with the right VSI */
	tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface:
	 * 0: Advanced Host Interface
	 * 1: Legacy Host Interface
	 */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}

/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
int ice_setup_rx_ctx(struct ice_ring *ring)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	struct ice_rlan_ctx rlan_ctx;
	u32 regval;
	u16 pf_q;
	int err;

	/* the Rx queue's number in the global space of 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
	memset(&rlan_ctx, 0, sizeof(rlan_ctx));

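	/* the context stores the ring base address right-shifted by 7,
	 * i.e. in 128-byte units
	 */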
	rlan_ctx.base = ring->dma >> 7;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size.
	 * The Packet Data Buffer Size is defined in 128 byte units.
	 */
	rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to host
	 * memory.
	 */
	rlan_ctx.crcstrip = 1;

	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
	rlan_ctx.l2tsel = 1;

	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* This controls whether VLAN is stripped from inner headers.
	 * The VLAN in the inner L2 header is stripped to the receive
	 * descriptor if enabled by this flag.
	 */
	rlan_ctx.showiv = 0;

	/* Max packet size for this queue - must not be set to a larger value
	 * than 5 x DBUF
	 */
	rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
			       ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);

	/* Rx queue threshold in units of 64 */
	rlan_ctx.lrxqthresh = 1;

	/* Enable Flexible Descriptors in the queue context which
	 * allows this driver to select a specific receive descriptor format
	 */
	if (vsi->type != ICE_VSI_VF) {
		regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
		regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
			  QRXFLXP_CNTXT_RXDID_IDX_M;

		/* increase context priority to pick up profile ID; default is
		 * 0x01; setting to 0x03 ensures the profile is programmed even
		 * if the previous context is of the same priority
		 */
		regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
			  QRXFLXP_CNTXT_RXDID_PRIO_M;

		wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
	}

	/* Absolute queue number out of 2K needs to be passed */
	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(&vsi->back->pdev->dev,
			"Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	if (vsi->type == ICE_VSI_VF)
		return 0;

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);
	ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));

	return 0;
}

/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * This function first tries to find contiguous space. If it is not
 * successful, it falls back to the scatter approach.
 *
 * Return 0 on success and -ENOMEM if there is no space left in the PF queue
 * bitmap
 */
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
{
	int ret = 0;

	ret = __ice_vsi_get_qs_contig(qs_cfg);
	if (ret) {
		/* contig failed, so try with scatter approach */
		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
		qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
					qs_cfg->scatter_count);
		ret = __ice_vsi_get_qs_sc(qs_cfg);
	}
	return ret;
}

/**
 * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 * @rxq_idx: Rx queue index
 */
int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int ret = 0;
	u32 rx_reg;

	rx_reg = rd32(hw, QRX_CTRL(pf_q));

	/* Skip if the queue is already in the requested state */
	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
		return 0;

	/* turn on/off the queue */
	if (ena)
		rx_reg |= QRX_CTRL_QENA_REQ_M;
	else
		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
	wr32(hw, QRX_CTRL(pf_q), rx_reg);

	/* wait for the change to finish */
	ret = ice_pf_rxq_wait(pf, pf_q, ena);
	if (ret)
		dev_err(&pf->pdev->dev,
			"VSI idx %d Rx ring %d %sable timeout\n",
			vsi->idx, pf_q, (ena ? "en" : "dis"));

	return ret;
}

/**
 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int v_idx = 0, num_q_vectors;
	int err;

	if (vsi->q_vectors[0]) {
		dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			vsi->vsi_num);
		return -EEXIST;
	}

	num_q_vectors = vsi->num_q_vectors;

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		err = ice_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		ice_free_q_vector(vsi, v_idx);

	dev_err(&pf->pdev->dev,
		"Failed to allocate %d q_vector for VSI %d, ret=%d\n",
		vsi->num_q_vectors, vsi->vsi_num, err);
	vsi->num_q_vectors = 0;
	return err;
}

/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vector as "efficiently" as possible.
 */
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
	int q_vectors = vsi->num_q_vectors;
	int tx_rings_rem, rx_rings_rem;
	int v_id;

	/* start with the VSI's full Tx and Rx queue counts remaining */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		int tx_rings_per_v, rx_rings_per_v, q_id, q_base;

		/* Tx rings mapping to vector */
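		/* DIV_ROUND_UP spreads the remaining rings evenly over the
		 * remaining vectors, so earlier vectors absorb the extra
		 * ring when the split is uneven
		 */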
		tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.ring;
			q_vector->tx.ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.ring;
			q_vector->rx.ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}

/**
 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI having memory freed
 */
void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
	int v_idx;

	ice_for_each_q_vector(vsi, v_idx)
		ice_free_q_vector(vsi, v_idx);
}

/**
 * ice_vsi_cfg_txq - Configure single Tx queue
 * @vsi: the VSI that queue belongs to
 * @ring: Tx ring to be configured
 * @qg_buf: queue group buffer
 */
int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
		struct ice_aqc_add_tx_qgrp *qg_buf)
{
	struct ice_tlan_ctx tlan_ctx = { 0 };
	struct ice_aqc_add_txqs_perq *txq;
	struct ice_pf *pf = vsi->back;
	u8 buf_len = sizeof(*qg_buf);
	enum ice_status status;
	u16 pf_q;
	u8 tc;

	pf_q = ring->reg_idx;
	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
	/* copy context contents into the qg_buf */
	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
	ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	/* init queue specific tail reg. It is referred to as
	 * transmit comm scheduler queue doorbell.
	 */
	ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	/* Add unique software queue handle of the Tx queue per
	 * TC into the VSI Tx ring
	 */
	ring->q_handle = ice_calc_q_handle(vsi, ring, tc);

	status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
				 1, qg_buf, buf_len, NULL);
	if (status) {
		dev_err(&pf->pdev->dev,
			"Failed to set LAN Tx queue context, error: %d\n",
			status);
		return -ENODEV;
	}

	/* Add Tx Queue TEID into the VSI Tx ring from the
	 * response. This will complete configuring and
	 * enabling the queue.
	 */
	txq = &qg_buf->txqs[0];
	if (pf_q == le16_to_cpu(txq->txq_id))
		ring->txq_teid = le32_to_cpu(txq->q_teid);

	return 0;
}

/**
 * ice_cfg_itr - configure the initial interrupt throttle values
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector that's being configured
 *
 * Configure interrupt throttling values for the ring containers that are
 * associated with the interrupt vector passed in.
 */
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	ice_cfg_itr_gran(hw);

	if (q_vector->num_ring_rx) {
		struct ice_ring_container *rc = &q_vector->rx;

		/* if this value is set then don't overwrite with default */
		if (!rc->itr_setting)
			rc->itr_setting = ICE_DFLT_RX_ITR;

		rc->target_itr = ITR_TO_REG(rc->itr_setting);
		rc->next_update = jiffies + 1;
		rc->current_itr = rc->target_itr;
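		/* GLINT_ITR takes the interval in units of the hardware ITR
		 * granularity (2 usecs, per ice_cfg_itr_gran() above)
		 */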
		wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
		     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
	}

	if (q_vector->num_ring_tx) {
		struct ice_ring_container *rc = &q_vector->tx;

		/* if this value is set then don't overwrite with default */
		if (!rc->itr_setting)
			rc->itr_setting = ICE_DFLT_TX_ITR;

		rc->target_itr = ITR_TO_REG(rc->itr_setting);
		rc->next_update = jiffies + 1;
		rc->current_itr = rc->target_itr;
		wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
		     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
	}
}

/**
 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
 * @vsi: the VSI being configured
 * @txq: Tx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;

	val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);

	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
}

/**
 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
 * @vsi: the VSI being configured
 * @rxq: Rx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;

	val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);

	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);

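	/* read back to flush the posted register write */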
	ice_flush(hw);
}

/**
 * ice_trigger_sw_intr - trigger a software interrupt
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector to trigger the software interrupt for
 */
void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
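	/* set the software-interrupt trigger bit with the interrupt enabled
	 * and ICE_ITR_NONE selected so the interrupt is not throttled
	 */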
	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
	     GLINT_DYN_CTL_SWINT_TRIG_M |
	     GLINT_DYN_CTL_INTENA_M);
}

/**
 * ice_vsi_stop_tx_ring - Disable single Tx ring
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @ring: Tx ring to be stopped
 * @txq_meta: Meta data of Tx ring to be stopped
 */
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		     u16 rel_vmvf_num, struct ice_ring *ring,
		     struct ice_txq_meta *txq_meta)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	u32 val;

	/* clear cause_ena bit for disabled queues */
	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
	val &= ~QINT_TQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_TQCTL(ring->reg_idx), val);

	/* software is expected to wait for 100 ns */
	ndelay(100);

	/* trigger a software interrupt for the vector associated with the
	 * queue so the NAPI handler gets scheduled
	 */
	q_vector = ring->q_vector;
	if (q_vector)
		ice_trigger_sw_intr(hw, q_vector);

	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
				 txq_meta->tc, 1, &txq_meta->q_handle,
				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
				 rel_vmvf_num, NULL);

	/* if the disable queue command was exercised during an
	 * active reset flow, ICE_ERR_RESET_ONGOING is returned.
	 * This is not an error as the reset operation disables
	 * queues at the hardware level anyway.
	 */
	if (status == ICE_ERR_RESET_ONGOING) {
		dev_dbg(&vsi->back->pdev->dev,
			"Reset in progress. LAN Tx queues already disabled\n");
	} else if (status == ICE_ERR_DOES_NOT_EXIST) {
		dev_dbg(&vsi->back->pdev->dev,
			"LAN Tx queues do not exist, nothing to disable\n");
	} else if (status) {
		dev_err(&vsi->back->pdev->dev,
			"Failed to disable LAN Tx queues, error: %d\n", status);
		return -ENODEV;
	}

	return 0;
}

/**
 * ice_fill_txq_meta - Prepare the Tx queue's meta data
 * @vsi: VSI that ring belongs to
 * @ring: ring that txq_meta will be based on
 * @txq_meta: a helper struct that wraps Tx queue's information
 *
 * Set up a helper struct that contains all the fields needed to stop
 * the Tx queue
 */
void
ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
		  struct ice_txq_meta *txq_meta)
{
	u8 tc;

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	txq_meta->q_id = ring->reg_idx;
	txq_meta->q_teid = ring->txq_teid;
	txq_meta->q_handle = ring->q_handle;
	txq_meta->vsi_idx = vsi->idx;
	txq_meta->tc = tc;
}