/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include "en.h"

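/* Reserve room at the SQ tail for one maximally-sized WQE plus the
 * NOPs used to pad out the queue edge, so a post never wraps mid-WQE.
 */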
#define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
			    MLX5E_SQ_NOPS_ROOM)

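/* Post a NOP WQE (a single WQEBB, ds count of 1) at the current
 * producer position. When notify_hw is set, request a CQE and ring
 * the doorbell so the HW processes it immediately.
 */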
void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
{
	struct mlx5_wq_cyc *wq = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;

	memset(cseg, 0, sizeof(*cseg));

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | 0x01);

	sq->skb[pi] = NULL;
	sq->pc++;
	sq->stats.nop++;

	if (notify_hw) {
		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
		mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
	}
}

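/* Undo a single DMA mapping recorded in the SQ DMA fifo, using the
 * unmap call that matches how the buffer was mapped.
 */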
static inline void mlx5e_tx_dma_unmap(struct device *pdev,
				      struct mlx5e_sq_dma *dma)
{
	switch (dma->type) {
	case MLX5E_DMA_MAP_SINGLE:
		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	case MLX5E_DMA_MAP_PAGE:
		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
		break;
	default:
		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
	}
}

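/* Record one DMA mapping in the per-SQ fifo so it can be unmapped on
 * completion (or unwound via mlx5e_dma_unmap_wqe_err() on a mapping
 * error).
 */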
static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
				  dma_addr_t addr,
				  u32 size,
				  enum mlx5e_dma_map_type map_type)
{
	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
	sq->dma_fifo_pc++;
}

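/* Fetch the fifo entry for absolute index i; the mask wraps it into
 * the ring.
 */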
static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
{
	return &sq->dma_fifo[i & sq->dma_fifo_mask];
}

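/* Error unwind for mlx5e_sq_xmit(): walk back over the last num_dma
 * fifo entries, unmap each one and restore the fifo producer counter.
 */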
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, u8 num_dma)
{
	int i;

	for (i = 0; i < num_dma; i++) {
		struct mlx5e_sq_dma *last_pushed_dma =
			mlx5e_dma_get(sq, --sq->dma_fifo_pc);

		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
	}
}

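/* ndo_select_queue: without TCs, use the fallback hash directly. With
 * TCs, scale the hashed channel index into range and combine it with
 * the VLAN priority to index the channel/TC-to-txq map.
 */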
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
		       void *accel_priv, select_queue_fallback_t fallback)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int channel_ix = fallback(dev, skb);
	int up = 0;

	if (!netdev_get_num_tc(dev))
		return channel_ix;

	if (skb_vlan_tag_present(skb))
		up = skb->vlan_tci >> VLAN_PRIO_SHIFT;

	/* channel_ix can be larger than num_channels since
	 * dev->num_real_tx_queues = num_channels * num_tc
	 */
	if (channel_ix >= priv->params.num_channels)
		channel_ix = reciprocal_scale(channel_ix,
					      priv->params.num_channels);

	return priv->channeltc_to_txq_map[channel_ix][up];
}

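/* Decide how many header bytes to copy inline into the WQE. For a
 * BlueFlame candidate, inline the whole linear part when it (plus a
 * possible VLAN tag) fits within max_inline; otherwise inline up to
 * the network header, and at least the L2 headers.
 */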
static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
					    struct sk_buff *skb, bool bf)
{
	/* Some NIC TX decisions, e.g. loopback, are based on the packet
	 * headers and occur before the data gather.
	 * Therefore these headers must be copied into the WQE
	 */
#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)

	if (bf) {
		u16 ihs = skb_headlen(skb);

		if (skb_vlan_tag_present(skb))
			ihs += VLAN_HLEN;

		if (ihs <= sq->max_inline)
			return skb_headlen(skb);
	}

	return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}

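/* Advance the local skb data/len cursors past bytes already copied
 * inline into the WQE; the skb itself is left untouched.
 */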
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
					    unsigned int *skb_len,
					    unsigned int len)
{
	*skb_len -= len;
	*skb_data += len;
}

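/* Build the inline header with a VLAN tag inserted in software: copy
 * the two MAC addresses, splice in the 802.1Q proto/TCI from the skb
 * metadata, then copy the remaining ihs - 2 * ETH_ALEN header bytes.
 */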
static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs,
				     unsigned char **skb_data,
				     unsigned int *skb_len)
{
	struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
	int cpy1_sz = 2 * ETH_ALEN;
	int cpy2_sz = ihs - cpy1_sz;

	memcpy(vhdr, *skb_data, cpy1_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy1_sz);
	vhdr->h_vlan_proto = skb->vlan_proto;
	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
	memcpy(&vhdr->h_vlan_encapsulated_proto, *skb_data, cpy2_sz);
	mlx5e_tx_skb_pull_inline(skb_data, skb_len, cpy2_sz);
}

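/* Post one skb as a single TX WQE: set checksum/LSO flags, copy the
 * inline headers, DMA-map the linear part and the page frags as data
 * segments, then ring the doorbell unless more skbs are pending
 * (xmit_more).
 */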
static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
{
	struct mlx5_wq_cyc *wq = &sq->wq;

	u16 pi = sq->pc & wq->sz_m1;
	struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
	struct mlx5e_tx_wqe_info *wi = &sq->wqe_info[pi];

	struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
	struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
	struct mlx5_wqe_data_seg *dseg;

	unsigned char *skb_data = skb->data;
	unsigned int skb_len = skb->len;
	u8  opcode = MLX5_OPCODE_SEND;
	dma_addr_t dma_addr = 0;
	unsigned int num_bytes;
	bool bf = false;
	u16 headlen;
	u16 ds_cnt;
	u16 ihs;
	int i;

	memset(wqe, 0, sizeof(*wqe));

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
					  MLX5_ETH_WQE_L4_INNER_CSUM;
			sq->stats.csum_partial_inner++;
		} else {
			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
		}
	} else {
		sq->stats.csum_none++;
	}

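	/* Refresh the BlueFlame budget: a fresh burst of doorbell-less
	 * BF sends is allowed only once the SQ has completely drained
	 * (cc == pc); otherwise BF stays disabled until it does.
	 */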
	if (sq->cc != sq->prev_cc) {
		sq->prev_cc = sq->cc;
		sq->bf_budget = (sq->cc == sq->pc) ? MLX5E_SQ_BF_BUDGET : 0;
	}

	if (skb_is_gso(skb)) {
		eseg->mss    = cpu_to_be16(skb_shinfo(skb)->gso_size);
		opcode       = MLX5_OPCODE_LSO;

		if (skb->encapsulation) {
			ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
			sq->stats.tso_inner_packets++;
			sq->stats.tso_inner_bytes += skb->len - ihs;
		} else {
			ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
			sq->stats.tso_packets++;
			sq->stats.tso_bytes += skb->len - ihs;
		}

		num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
	} else {
		bf = sq->bf_budget &&
		     !skb->xmit_more &&
		     !skb_shinfo(skb)->nr_frags;
		ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
		num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
	}

	wi->num_bytes = num_bytes;

	if (skb_vlan_tag_present(skb)) {
		mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
				  &skb_len);
		ihs += VLAN_HLEN;
	} else {
		memcpy(eseg->inline_hdr_start, skb_data, ihs);
		mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
	}

	eseg->inline_hdr_sz = cpu_to_be16(ihs);

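	/* Count the WQE's data segments: the fixed ctrl/eth part, plus
	 * the extra segments consumed by inline headers beyond the
	 * built-in inline_hdr_start space, plus (added below) one per
	 * DMA-mapped buffer.
	 */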
	ds_cnt  = sizeof(*wqe) / MLX5_SEND_WQE_DS;
	ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
			       MLX5_SEND_WQE_DS);
	dseg    = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;

	wi->num_dma = 0;

	headlen = skb_len - skb->data_len;
	if (headlen) {
		dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
					  DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(headlen);

		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
		wi->num_dma++;

		dseg++;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int fsz = skb_frag_size(frag);

		dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
					    DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
			goto dma_unmap_wqe_err;

		dseg->addr       = cpu_to_be64(dma_addr);
		dseg->lkey       = sq->mkey_be;
		dseg->byte_count = cpu_to_be32(fsz);

		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
		wi->num_dma++;

		dseg++;
	}

	ds_cnt += wi->num_dma;

	cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
	cseg->qpn_ds           = cpu_to_be32((sq->sqn << 8) | ds_cnt);

	sq->skb[pi] = skb;

	wi->num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	sq->pc += wi->num_wqebbs;

	netdev_tx_sent_queue(sq->txq, wi->num_bytes);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM))) {
		netif_tx_stop_queue(sq->txq);
		sq->stats.stopped++;
	}

	if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
		int bf_sz = 0;

		if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state))
			bf_sz = wi->num_wqebbs << 3;

		cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
		mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
	}

	/* fill sq edge with nops to avoid wqe wrap around */
	while ((sq->pc & wq->sz_m1) > sq->edge)
		mlx5e_send_nop(sq, false);

	if (bf)
		sq->bf_budget--;

	sq->stats.packets++;
	sq->stats.bytes += num_bytes;
	return NETDEV_TX_OK;

dma_unmap_wqe_err:
	sq->stats.dropped++;
	mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);

	dev_kfree_skb_any(skb);

	return NETDEV_TX_OK;
}

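/* ndo_start_xmit entry point: map the selected TX queue to its SQ and
 * post the skb.
 */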
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sq *sq = priv->txq_to_sq_map[skb_get_queue_mapping(skb)];

	return mlx5e_sq_xmit(sq, skb);
}

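/* Drain a stopped SQ without waiting for completions: walk from cc to
 * pc, unmap every recorded DMA buffer and free the skbs; NOP slots
 * (NULL skb) are simply skipped.
 */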
void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
{
	struct mlx5e_tx_wqe_info *wi;
	struct sk_buff *skb;
	u16 ci;
	int i;

	while (sq->cc != sq->pc) {
		ci = sq->cc & sq->wq.sz_m1;
		skb = sq->skb[ci];
		wi = &sq->wqe_info[ci];

		if (!skb) { /* nop */
			sq->cc++;
			continue;
		}

		for (i = 0; i < wi->num_dma; i++) {
			struct mlx5e_sq_dma *dma =
				mlx5e_dma_get(sq, sq->dma_fifo_cc++);

			mlx5e_tx_dma_unmap(sq->pdev, dma);
		}

		dev_kfree_skb_any(skb);
		sq->cc += wi->num_wqebbs;
	}
}

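/* TX completion poll (NAPI context): consume up to
 * MLX5E_TX_CQ_POLL_BUDGET CQEs, unmap and free the skbs they cover,
 * update BQL accounting, and wake the TX queue if enough room was
 * freed. Returns true when the budget was exhausted, i.e. more work
 * may remain.
 */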
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
	struct mlx5e_sq *sq;
	u32 dma_fifo_cc;
	u32 nbytes;
	u16 npkts;
	u16 sqcc;
	int i;

	sq = container_of(cq, struct mlx5e_sq, cq);

	if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
		return false;

	npkts = 0;
	nbytes = 0;

	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
	 * otherwise a cq overrun may occur
	 */
	sqcc = sq->cc;

	/* avoid dirtying sq cache line every cqe */
	dma_fifo_cc = sq->dma_fifo_cc;

	for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
		struct mlx5_cqe64 *cqe;
		u16 wqe_counter;
		bool last_wqe;

		cqe = mlx5e_get_cqe(cq);
		if (!cqe)
			break;

		mlx5_cqwq_pop(&cq->wq);

		wqe_counter = be16_to_cpu(cqe->wqe_counter);

		do {
			struct mlx5e_tx_wqe_info *wi;
			struct sk_buff *skb;
			u16 ci;
			int j;

			last_wqe = (sqcc == wqe_counter);

			ci = sqcc & sq->wq.sz_m1;
			skb = sq->skb[ci];
			wi = &sq->wqe_info[ci];

			if (unlikely(!skb)) { /* nop */
				sqcc++;
				continue;
			}

			if (unlikely(skb_shinfo(skb)->tx_flags &
				     SKBTX_HW_TSTAMP)) {
				struct skb_shared_hwtstamps hwts = {};

				mlx5e_fill_hwstamp(sq->tstamp,
						   get_cqe_ts(cqe), &hwts);
				skb_tstamp_tx(skb, &hwts);
			}

			for (j = 0; j < wi->num_dma; j++) {
				struct mlx5e_sq_dma *dma =
					mlx5e_dma_get(sq, dma_fifo_cc++);

				mlx5e_tx_dma_unmap(sq->pdev, dma);
			}

			npkts++;
			nbytes += wi->num_bytes;
			sqcc += wi->num_wqebbs;
			napi_consume_skb(skb, napi_budget);
		} while (!last_wqe);
	}

	mlx5_cqwq_update_db_record(&cq->wq);

	/* ensure cq space is freed before enabling more cqes */
	wmb();

	sq->dma_fifo_cc = dma_fifo_cc;
	sq->cc = sqcc;

	netdev_tx_completed_queue(sq->txq, npkts, nbytes);

	if (netif_tx_queue_stopped(sq->txq) &&
	    mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
	    likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
		netif_tx_wake_queue(sq->txq);
		sq->stats.wake++;
	}

	return (i == MLX5E_TX_CQ_POLL_BUDGET);
}