// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <net/xdp_sock_drv.h>
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

/**
 * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM when there is no space left in the PF
 * queue bitmap
 */
static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
{
	unsigned int offset, i;

	mutex_lock(qs_cfg->qs_mutex);
	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
					    0, qs_cfg->q_count, 0);
	if (offset >= qs_cfg->pf_map_size) {
		mutex_unlock(qs_cfg->qs_mutex);
		return -ENOMEM;
	}

	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
	for (i = 0; i < qs_cfg->q_count; i++)
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)(i + offset);
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
}

/**
 * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * Return 0 on success and -ENOMEM when there is no space left in the PF
 * queue bitmap
 */
static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
{
	unsigned int i, index = 0;

	mutex_lock(qs_cfg->qs_mutex);
	for (i = 0; i < qs_cfg->q_count; i++) {
		index = find_next_zero_bit(qs_cfg->pf_map,
					   qs_cfg->pf_map_size, index);
		if (index >= qs_cfg->pf_map_size)
			goto err_scatter;
		set_bit(index, qs_cfg->pf_map);
		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = (u16)index;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return 0;
err_scatter:
	/* unwind the assignments above, honoring the same vsi_map_offset */
	for (index = 0; index < i; index++) {
		clear_bit(qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset],
			  qs_cfg->pf_map);
		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
	}
	mutex_unlock(qs_cfg->qs_mutex);

	return -ENOMEM;
}

/**
 * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @ena: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state. Returns -ETIMEDOUT if the queue fails to
 * reach the requested state after multiple retries; otherwise returns 0.
 */
static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
	int i;

	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
			      QRX_CTRL_QENA_STAT_M))
			return 0;

		usleep_range(20, 40);
	}

	return -ETIMEDOUT;
}

/**
 * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the VSI struct
 *
 * We allocate one q_vector and set the default ITR values associated with
 * it. If allocation fails we return -ENOMEM.
 */
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*q_vector),
				GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
	q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
	q_vector->tx.itr_mode = ITR_DYNAMIC;
	q_vector->rx.itr_mode = ITR_DYNAMIC;

	if (vsi->type == ICE_VSI_VF)
		goto out;
	/* only set affinity_mask if the CPU is online */
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	/* This will not be called in the driver load path because the netdev
	 * will not be created yet. All other cases will register the NAPI
	 * handler here (i.e. resume, reset/rebuild, etc.)
	 */
	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

out:
	/* tie q_vector and VSI together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}

/**
 * ice_free_q_vector - Free memory allocated for a specific interrupt vector
 * @vsi: VSI having the memory freed
 * @v_idx: index of the vector to be freed
 */
static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
	struct ice_q_vector *q_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_ring *ring;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (!vsi->q_vectors[v_idx]) {
		dev_dbg(dev, "Queue vector at index %d not found\n", v_idx);
		return;
	}
	q_vector = vsi->q_vectors[v_idx];

	ice_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;
	ice_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI with an associated netdev is set up with NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	devm_kfree(dev, q_vector);
	vsi->q_vectors[v_idx] = NULL;
}

/**
 * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
 * @hw: board specific structure
 */
static void ice_cfg_itr_gran(struct ice_hw *hw)
{
	u32 regval = rd32(hw, GLINT_CTL);

	/* no need to update global register if ITR gran is already set */
	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
	    (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
	      GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
	      GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
	      GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
	    (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
	      GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
		return;

	regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
		  GLINT_CTL_ITR_GRAN_200_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
		  GLINT_CTL_ITR_GRAN_100_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
		  GLINT_CTL_ITR_GRAN_50_M) |
		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
		  GLINT_CTL_ITR_GRAN_25_M);
	wr32(hw, GLINT_CTL, regval);
}

/**
 * ice_calc_q_handle - calculate the queue handle
 * @vsi: VSI that ring belongs to
 * @ring: ring to get the absolute queue index
 * @tc: traffic class number
 */
static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
{
	WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");

	/* The idea behind the calculation is to subtract the queue offset of
	 * the TC that the ring belongs to from the ring's absolute queue
	 * index, which yields the queue's index within that TC.
	 */
	return ring->q_index - vsi->tc_cfg.tc_info[tc].qoffset;
}

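/* Worked example (illustrative, values assumed): with TC 0 owning queues
 * 0-7 (qoffset 0) and TC 1 owning queues 8-15 (qoffset 8), a ring with
 * absolute q_index 10 that belongs to TC 1 gets the handle 10 - 8 = 2,
 * i.e. it is the third queue within its TC.
 */
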
/**
 * ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 */
static void ice_cfg_xps_tx_ring(struct ice_ring *ring)
{
	if (!ring->q_vector || !ring->netdev)
		return;

	/* We only initialize XPS once, so as not to overwrite user settings */
	if (test_and_set_bit(ICE_TX_XPS_INIT_DONE, ring->xps_state))
		return;

	netif_set_xps_queue(ring->netdev, &ring->q_vector->affinity_mask,
			    ring->q_index);
}

/**
 * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
 * @ring: The Tx ring to configure
 * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
 * @pf_q: queue index in the PF space
 *
 * Configure the Tx descriptor ring in TLAN context.
 */
static void
ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
	struct ice_vsi *vsi = ring->vsi;
	struct ice_hw *hw = &vsi->back->hw;

	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;

	tlan_ctx->port_num = vsi->port_info->lport;

	/* Transmit Queue Length */
	tlan_ctx->qlen = ring->count;

	ice_set_cgd_num(tlan_ctx, ring);

	/* PF number */
	tlan_ctx->pf_num = hw->pf_id;

	/* queue belongs to a specific VSI type
	 * VF / VM index should be programmed per vmvf_type setting:
	 * for vmvf_type = VF, it is the VF number between 0-256
	 * for vmvf_type = VM, it is the VM number between 0-767
	 * for PF or EMP this field should be set to zero
	 */
	switch (vsi->type) {
	case ICE_VSI_LB:
	case ICE_VSI_CTRL:
	case ICE_VSI_PF:
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* Firmware expects vmvf_num to be absolute VF ID */
		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
		break;
	default:
		return;
	}

	/* make sure the context is associated with the right VSI */
	tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);

	tlan_ctx->tso_ena = ICE_TX_LEGACY;
	tlan_ctx->tso_qnum = pf_q;

	/* Legacy or Advanced Host Interface:
	 * 0: Advanced Host Interface
	 * 1: Legacy Host Interface
	 */
	tlan_ctx->legacy_int = ICE_TX_LEGACY;
}

/**
 * ice_rx_offset - Return expected offset into page to access data
 * @rx_ring: Ring we are requesting offset of
 *
 * Returns the offset value for ring into the data buffer.
 */
static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
{
	if (ice_ring_uses_build_skb(rx_ring))
		return ICE_SKB_PAD;
	else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		return XDP_PACKET_HEADROOM;

	return 0;
}

/**
 * ice_setup_rx_ctx - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in RLAN context.
 */
int ice_setup_rx_ctx(struct ice_ring *ring)
{
	struct device *dev = ice_pf_to_dev(ring->vsi->back);
	int chain_len = ICE_MAX_CHAINED_RX_BUFS;
	u16 num_bufs = ICE_DESC_UNUSED(ring);
	struct ice_vsi *vsi = ring->vsi;
	u32 rxdid = ICE_RXDID_FLEX_NIC;
	struct ice_rlan_ctx rlan_ctx;
	struct ice_hw *hw;
	u16 pf_q;
	int err;

	hw = &vsi->back->hw;

	/* what is the Rx queue number in the global space of 2K Rx queues */
	pf_q = vsi->rxq_map[ring->q_index];

	/* clear the context structure first */
	memset(&rlan_ctx, 0, sizeof(rlan_ctx));

	ring->rx_buf_len = vsi->rx_buf_len;

	if (ring->vsi->type == ICE_VSI_PF) {
		if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
			/* coverity[check_return] */
			xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
					 ring->q_index, ring->q_vector->napi.napi_id);

		ring->xsk_pool = ice_xsk_pool(ring);
		if (ring->xsk_pool) {
			xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

			ring->rx_buf_len =
				xsk_pool_get_rx_frame_size(ring->xsk_pool);
			/* For AF_XDP ZC, we disallow packets to span on
			 * multiple buffers, thus letting us skip that
			 * handling in the fast-path.
			 */
			chain_len = 1;
			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_XSK_BUFF_POOL,
							 NULL);
			if (err)
				return err;
			xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);

			dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
				 ring->q_index);
		} else {
			if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
				/* coverity[check_return] */
				xdp_rxq_info_reg(&ring->xdp_rxq,
						 ring->netdev,
						 ring->q_index, ring->q_vector->napi.napi_id);

			err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (err)
				return err;
		}
	}
	/* Receive Queue Base Address.
	 * Indicates the starting address of the descriptor queue defined in
	 * 128 Byte units.
	 */
	rlan_ctx.base = ring->dma >> 7;

	rlan_ctx.qlen = ring->count;

	/* Receive Packet Data Buffer Size.
	 * The Packet Data Buffer Size is defined in 128 byte units.
	 */
	rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;

	/* use 32 byte descriptors */
	rlan_ctx.dsize = 1;

	/* Strip the Ethernet CRC bytes before the packet is posted to host
	 * memory.
	 */
	rlan_ctx.crcstrip = 1;

	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
	rlan_ctx.l2tsel = 1;

	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;

	/* This controls whether VLAN is stripped from inner headers.
	 * The VLAN in the inner L2 header is stripped to the receive
	 * descriptor if enabled by this flag.
	 */
	rlan_ctx.showiv = 0;

	/* Max packet size for this queue - must not be set to a larger value
	 * than 5 x DBUF
	 */
	rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
			       chain_len * ring->rx_buf_len);

	/* Rx queue threshold in units of 64 */
	rlan_ctx.lrxqthresh = 1;

	/* Enable Flexible Descriptors in the queue context, which allows
	 * this driver to select a specific receive descriptor format,
	 * increasing context priority to pick up the profile ID; the default
	 * is 0x01; setting to 0x03 ensures the profile is programmed even if
	 * the previous context was of the same priority
	 */
	if (vsi->type != ICE_VSI_VF)
		ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3);
	else
		ice_write_qrxflxp_cntxt(hw, pf_q, ICE_RXDID_LEGACY_1, 0x3);

	/* Absolute queue number out of 2K needs to be passed */
	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
	if (err) {
		dev_err(dev, "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
			pf_q, err);
		return -EIO;
	}

	if (vsi->type == ICE_VSI_VF)
		return 0;

	/* configure Rx buffer alignment */
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
		ice_clear_ring_build_skb_ena(ring);
	else
		ice_set_ring_build_skb_ena(ring);

	ring->rx_offset = ice_rx_offset(ring);

	/* init queue specific tail register */
	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
	writel(0, ring->tail);

	if (ring->xsk_pool) {
		bool ok;

		if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
			dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
				 num_bufs, ring->q_index);
			dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");

			return 0;
		}

		ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
		if (!ok)
			dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
				 ring->q_index, pf_q);
		return 0;
	}

	ice_alloc_rx_bufs(ring, num_bufs);

	return 0;
}

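/* Sizing sketch (illustrative): with the common 2048 byte Rx buffers and
 * chain_len of ICE_MAX_CHAINED_RX_BUFS (the "5 x DBUF" limit above), rxmax
 * is capped at min(vsi->max_frame, 5 * 2048) = min(vsi->max_frame, 10240)
 * bytes. In the AF_XDP zero-copy path chain_len is 1, so the whole frame
 * must fit in a single buffer from the XSK pool.
 */
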
/**
 * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
 * @qs_cfg: gathered variables needed for PF->VSI queues assignment
 *
 * This function first tries to find contiguous space. If it is not successful,
 * it tries with the scatter approach.
 *
 * Return 0 on success and -ENOMEM when there is no space left in the PF
 * queue bitmap
 */
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
{
	int ret = 0;

	ret = __ice_vsi_get_qs_contig(qs_cfg);
	if (ret) {
		/* contig failed, so try with scatter approach */
		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
		qs_cfg->q_count = min_t(unsigned int, qs_cfg->q_count,
					qs_cfg->scatter_count);
		ret = __ice_vsi_get_qs_sc(qs_cfg);
	}
	return ret;
}

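/* Caller sketch (illustrative): a Tx-side caller such as ice_vsi_get_qs()
 * in ice_lib.c is expected to gather the variables roughly like this; the
 * exact field values below are assumptions, not a verbatim copy:
 *
 *	struct ice_qs_cfg tx_qs_cfg = {
 *		.qs_mutex = &pf->avail_q_mutex,
 *		.pf_map = pf->avail_txqs,
 *		.pf_map_size = pf->max_pf_txqs,
 *		.q_count = vsi->alloc_txq,
 *		.scatter_count = ICE_MAX_SCATTER_TXQS,
 *		.vsi_map = vsi->txq_map,
 *		.vsi_map_offset = 0,
 *		.mapping_mode = ICE_VSI_MAP_CONTIG
 *	};
 *
 *	ret = __ice_vsi_get_qs(&tx_qs_cfg);
 */
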
/**
 * ice_vsi_ctrl_one_rx_ring - start/stop VSI's Rx ring with no busy wait
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx ring
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 * @wait: wait or don't wait for configuration to finish in hardware
 *
 * Return 0 on success and negative on error.
 */
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 rx_reg;

	rx_reg = rd32(hw, QRX_CTRL(pf_q));

	/* Skip if the queue is already in the requested state */
	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
		return 0;

	/* turn on/off the queue */
	if (ena)
		rx_reg |= QRX_CTRL_QENA_REQ_M;
	else
		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
	wr32(hw, QRX_CTRL(pf_q), rx_reg);

	if (!wait)
		return 0;

	ice_flush(hw);
	return ice_pf_rxq_wait(pf, pf_q, ena);
}

/**
 * ice_vsi_wait_one_rx_ring - wait for a VSI's Rx ring to be stopped/started
 * @vsi: the VSI being configured
 * @ena: true/false to verify Rx ring has been enabled/disabled respectively
 * @rxq_idx: 0-based Rx queue index for the VSI passed in
 *
 * This routine will wait for the given Rx queue of the VSI to reach the
 * enabled or disabled state. Returns -ETIMEDOUT if the queue fails to reach
 * the requested state after multiple retries; otherwise returns 0.
 */
int ice_vsi_wait_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
{
	int pf_q = vsi->rxq_map[rxq_idx];
	struct ice_pf *pf = vsi->back;

	return ice_pf_rxq_wait(pf, pf_q, ena);
}

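/* Pairing sketch (illustrative, assumed flow): to stop several rings
 * without serializing on each one, a caller can toggle them all with
 * wait = false and only then poll for completion:
 *
 *	ice_for_each_rxq(vsi, i)
 *		ice_vsi_ctrl_one_rx_ring(vsi, false, i, false);
 *
 *	ice_flush(&vsi->back->hw);
 *
 *	ice_for_each_rxq(vsi, i)
 *		ice_vsi_wait_one_rx_ring(vsi, false, i);
 */
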
/**
 * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 */
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	u16 v_idx;
	int err;

	if (vsi->q_vectors[0]) {
		dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
		return -EEXIST;
	}

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
		err = ice_vsi_alloc_q_vector(vsi, v_idx);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (v_idx--)
		ice_free_q_vector(vsi, v_idx);

	dev_err(dev, "Failed to allocate %d q_vector for VSI %d, ret=%d\n",
		vsi->num_q_vectors, vsi->vsi_num, err);
	vsi->num_q_vectors = 0;
	return err;
}

/**
 * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors allotted
 * through the MSI-X enabling code. On a constrained vector budget, we map Tx
 * and Rx rings to the vectors as "efficiently" as possible.
 */
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
{
	int q_vectors = vsi->num_q_vectors;
	u16 tx_rings_rem, rx_rings_rem;
	int v_id;

	/* initially assign the remaining ring counts from the VSI's number
	 * of queues
	 */
	tx_rings_rem = vsi->num_txq;
	rx_rings_rem = vsi->num_rxq;

	for (v_id = 0; v_id < q_vectors; v_id++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
		u8 tx_rings_per_v, rx_rings_per_v;
		u16 q_id, q_base;

		/* Tx rings mapping to vector */
		tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_tx = tx_rings_per_v;
		q_vector->tx.ring = NULL;
		q_vector->tx.itr_idx = ICE_TX_ITR;
		q_base = vsi->num_txq - tx_rings_rem;

		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
			struct ice_ring *tx_ring = vsi->tx_rings[q_id];

			tx_ring->q_vector = q_vector;
			tx_ring->next = q_vector->tx.ring;
			q_vector->tx.ring = tx_ring;
		}
		tx_rings_rem -= tx_rings_per_v;

		/* Rx rings mapping to vector */
		rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
						  q_vectors - v_id);
		q_vector->num_ring_rx = rx_rings_per_v;
		q_vector->rx.ring = NULL;
		q_vector->rx.itr_idx = ICE_RX_ITR;
		q_base = vsi->num_rxq - rx_rings_rem;

		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
			struct ice_ring *rx_ring = vsi->rx_rings[q_id];

			rx_ring->q_vector = q_vector;
			rx_ring->next = q_vector->rx.ring;
			q_vector->rx.ring = rx_ring;
		}
		rx_rings_rem -= rx_rings_per_v;
	}
}

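/* Distribution example (illustrative): for num_txq = num_rxq = 10 and
 * q_vectors = 4, DIV_ROUND_UP(rings_rem, q_vectors - v_id) assigns
 * 3, 3, 2 and 2 rings per vector for both Tx and Rx, so earlier vectors
 * absorb the remainder and no vector is left without a ring.
 */
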
/**
 * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI having memory freed
 */
void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
{
	int v_idx;

	ice_for_each_q_vector(vsi, v_idx)
		ice_free_q_vector(vsi, v_idx);
}

/**
 * ice_vsi_cfg_txq - Configure a single Tx queue
 * @vsi: the VSI that queue belongs to
 * @ring: Tx ring to be configured
 * @qg_buf: queue group buffer
 */
int
ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
		struct ice_aqc_add_tx_qgrp *qg_buf)
{
	u8 buf_len = struct_size(qg_buf, txqs, 1);
	struct ice_tlan_ctx tlan_ctx = { 0 };
	struct ice_aqc_add_txqs_perq *txq;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	u16 pf_q;
	u8 tc;

	/* Configure XPS */
	ice_cfg_xps_tx_ring(ring);

	pf_q = ring->reg_idx;
	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
	/* copy context contents into the qg_buf */
	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
	ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
		    ice_tlan_ctx_info);

	/* init queue specific tail reg. It is referred to as the
	 * transmit comm scheduler queue doorbell.
	 */
	ring->tail = hw->hw_addr + QTX_COMM_DBELL(pf_q);

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	/* Add unique software queue handle of the Tx queue per
	 * TC into the VSI Tx ring
	 */
	ring->q_handle = ice_calc_q_handle(vsi, ring, tc);

	status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
				 1, qg_buf, buf_len, NULL);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
			ice_stat_str(status));
		return -ENODEV;
	}

	/* Add Tx Queue TEID into the VSI Tx ring from the
	 * response. This will complete configuring and
	 * enabling the queue.
	 */
	txq = &qg_buf->txqs[0];
	if (pf_q == le16_to_cpu(txq->txq_id))
		ring->txq_teid = le32_to_cpu(txq->q_teid);

	return 0;
}

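/* Caller sketch (illustrative, assumed flow modeled on the per-VSI ring
 * loop in ice_lib.c): the queue group buffer is allocated once and reused
 * for each ring, since only one queue is added per call:
 *
 *	struct ice_aqc_add_tx_qgrp *qg_buf;
 *	u16 q_idx;
 *
 *	qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
 *	if (!qg_buf)
 *		return -ENOMEM;
 *
 *	for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
 *		err = ice_vsi_cfg_txq(vsi, vsi->tx_rings[q_idx], qg_buf);
 *		if (err)
 *			break;
 *	}
 *	kfree(qg_buf);
 */
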
/**
 * ice_cfg_itr - configure the initial interrupt throttle values
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector that's being configured
 *
 * Configure interrupt throttling values for the ring containers that are
 * associated with the interrupt vector passed in.
 */
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	ice_cfg_itr_gran(hw);

	if (q_vector->num_ring_rx)
		ice_write_itr(&q_vector->rx, q_vector->rx.itr_setting);

	if (q_vector->num_ring_tx)
		ice_write_itr(&q_vector->tx, q_vector->tx.itr_setting);

	ice_write_intrl(q_vector, q_vector->intrl);
}

/**
 * ice_cfg_txq_interrupt - configure interrupt on Tx queue
 * @vsi: the VSI being configured
 * @txq: Tx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;

	val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);

	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
	if (ice_is_xdp_ena_vsi(vsi)) {
		u32 xdp_txq = txq + vsi->num_xdp_txq;

		wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]),
		     val);
	}
	ice_flush(hw);
}

/**
 * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
 * @vsi: the VSI being configured
 * @rxq: Rx queue being mapped to MSI-X vector
 * @msix_idx: MSI-X vector index within the function
 * @itr_idx: ITR index of the interrupt cause
 *
 * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
 * within the function space.
 */
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;

	itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;

	val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
	      ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);

	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);

	ice_flush(hw);
}

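/* Bit-layout example (illustrative): for msix_idx = 3 and itr_idx =
 * ICE_RX_ITR, val becomes
 *
 *	QINT_RQCTL_CAUSE_ENA_M |
 *	((ICE_RX_ITR << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M) |
 *	((3 << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M)
 *
 * i.e. the queue raises MSI-X vector 3 and throttles with that vector's
 * Rx ITR. ice_cfg_txq_interrupt() above composes QINT_TQCTL the same way.
 */
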
/**
 * ice_trigger_sw_intr - trigger a software interrupt
 * @hw: pointer to the HW structure
 * @q_vector: interrupt vector to trigger the software interrupt for
 */
void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
{
	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
	     GLINT_DYN_CTL_SWINT_TRIG_M |
	     GLINT_DYN_CTL_INTENA_M);
}

/**
 * ice_vsi_stop_tx_ring - Disable single Tx ring
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @ring: Tx ring to be stopped
 * @txq_meta: Metadata of the Tx ring to be stopped
 */
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		     u16 rel_vmvf_num, struct ice_ring *ring,
		     struct ice_txq_meta *txq_meta)
{
	struct ice_pf *pf = vsi->back;
	struct ice_q_vector *q_vector;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	u32 val;

	/* clear cause_ena bit for disabled queues */
	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
	val &= ~QINT_TQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_TQCTL(ring->reg_idx), val);

	/* software is expected to wait for 100 ns */
	ndelay(100);

	/* trigger a software interrupt for the vector
	 * associated to the queue to schedule NAPI handler
	 */
	q_vector = ring->q_vector;
	if (q_vector)
		ice_trigger_sw_intr(hw, q_vector);

	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
				 txq_meta->tc, 1, &txq_meta->q_handle,
				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
				 rel_vmvf_num, NULL);

	/* If the disable queue command was exercised during an
	 * active reset flow, ICE_ERR_RESET_ONGOING is returned.
	 * This is not an error as the reset operation disables
	 * queues at the hardware level anyway.
	 */
	if (status == ICE_ERR_RESET_ONGOING) {
		dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
	} else if (status == ICE_ERR_DOES_NOT_EXIST) {
		dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
	} else if (status) {
		dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %s\n",
			ice_stat_str(status));
		return -ENODEV;
	}

	return 0;
}

/**
 * ice_fill_txq_meta - Prepare the Tx queue's metadata
 * @vsi: VSI that ring belongs to
 * @ring: ring that txq_meta will be based on
 * @txq_meta: a helper struct that wraps the Tx queue's information
 *
 * Set up a helper struct that will contain all the necessary fields
 * needed for stopping the Tx queue.
 */
void
ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
		  struct ice_txq_meta *txq_meta)
{
	u8 tc;

	if (IS_ENABLED(CONFIG_DCB))
		tc = ring->dcb_tc;
	else
		tc = 0;

	txq_meta->q_id = ring->reg_idx;
	txq_meta->q_teid = ring->txq_teid;
	txq_meta->q_handle = ring->q_handle;
	txq_meta->vsi_idx = vsi->idx;
	txq_meta->tc = tc;
}
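
/* Teardown sketch (illustrative, assumed flow): a caller stopping a single
 * Tx ring outside of a VF/VM reset is expected to pair the two helpers
 * above roughly as follows:
 *
 *	struct ice_txq_meta txq_meta = { };
 *
 *	ice_fill_txq_meta(vsi, ring, &txq_meta);
 *	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, ring, &txq_meta);
 */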