/* drivers/net/ethernet/mellanox/mlx5/core/en_rx.c */

/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include "en.h"

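/* Allocate an skb for RQ slot @ix, DMA-map it for device writes, stash the
 * mapping in skb->cb for later unmap, and point the WQE at the IP-aligned
 * buffer address.
 */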
static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
                                     struct mlx5e_rx_wqe *wqe, u16 ix)
{
        struct sk_buff *skb;
        dma_addr_t dma_addr;

        skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
        if (unlikely(!skb))
                return -ENOMEM;

        dma_addr = dma_map_single(rq->pdev,
                                  /* hw start padding */
                                  skb->data,
                                  /* hw end padding */
                                  rq->wqe_sz,
                                  DMA_FROM_DEVICE);

        if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
                goto err_free_skb;

        skb_reserve(skb, MLX5E_NET_IP_ALIGN);

        *((dma_addr_t *)skb->cb) = dma_addr;
        wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);

        rq->skb[ix] = skb;

        return 0;

err_free_skb:
        dev_kfree_skb(skb);

        return -ENOMEM;
}

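/* Refill the RQ: post new receive WQEs until the work queue is full or an
 * skb allocation fails, then update the doorbell record so the device sees
 * them. Returns true if the RQ is still not full, i.e. more refilling is
 * needed.
 */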
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
        struct mlx5_wq_ll *wq = &rq->wq;

        if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
                return false;

        while (!mlx5_wq_ll_is_full(wq)) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);

                if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
                        break;

                mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
        }

        /* ensure wqes are visible to device before updating doorbell record */
        dma_wmb();

        mlx5_wq_ll_update_db_record(wq);

        return !mlx5_wq_ll_is_full(wq);
}

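/* An LRO session is aggregated by HW into a single skb; rewrite the TCP/IP
 * headers (total length, IP checksum, TTL, ACK/PSH bits, window) so the skb
 * looks to the stack like one large coalesced packet.
 */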
static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
{
        struct ethhdr *eth = (struct ethhdr *)(skb->data);
        struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
        struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
        struct tcphdr *tcp;

        u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
        int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
                       (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));

        u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;

        if (eth->h_proto == htons(ETH_P_IP)) {
                tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
                                        sizeof(struct iphdr));
                ipv6 = NULL;
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
        } else {
                tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
                                        sizeof(struct ipv6hdr));
                ipv4 = NULL;
                skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
        }

        if (get_cqe_lro_tcppsh(cqe))
                tcp->psh = 1;

        if (tcp_ack) {
                tcp->ack = 1;
                tcp->ack_seq = cqe->lro_ack_seq_num;
                tcp->window = cqe->lro_tcp_win;
        }

        if (ipv4) {
                ipv4->ttl = cqe->lro_min_ttl;
                ipv4->tot_len = cpu_to_be16(tot_len);
                ipv4->check = 0;
                ipv4->check = ip_fast_csum((unsigned char *)ipv4,
                                           ipv4->ihl);
        } else {
                ipv6->hop_limit = cqe->lro_min_ttl;
                ipv6->payload_len = cpu_to_be16(tot_len -
                                                sizeof(struct ipv6hdr));
        }
}

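/* Propagate the HW-computed RSS hash to the skb, classified as an L4 or L3
 * hash according to the CQE's RSS hash type.
 */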
static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
                                      struct sk_buff *skb)
{
        u8 cht = cqe->rss_hash_type;
        int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
                 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
                                            PKT_HASH_TYPE_NONE;
        skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}

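/* Return true if the outermost ethertype is IPv4 or IPv6; used below to
 * decide whether CHECKSUM_COMPLETE may be reported.
 */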
static inline bool is_first_ethertype_ip(struct sk_buff *skb)
{
        __be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;

        return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
}

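/* Set the skb checksum state from the CQE: CHECKSUM_UNNECESSARY when HW
 * validated the L4 checksum, CHECKSUM_COMPLETE with the HW-computed checksum
 * for other IP packets, and CHECKSUM_NONE otherwise.
 */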
static inline void mlx5e_handle_csum(struct net_device *netdev,
                                     struct mlx5_cqe64 *cqe,
                                     struct mlx5e_rq *rq,
                                     struct sk_buff *skb)
{
        if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
                goto csum_none;

        if (likely(cqe->hds_ip_ext & CQE_L4_OK)) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else if (is_first_ethertype_ip(skb)) {
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold(cqe->check_sum);
                rq->stats.csum_sw++;
        } else {
                goto csum_none;
        }

        return;

csum_none:
        skb->ip_summed = CHECKSUM_NONE;
        rq->stats.csum_none++;
}

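/* Finalize a received skb: set its length from the CQE byte count, fix up
 * LRO headers when the CQE aggregates multiple segments, and fill in the
 * checksum, protocol, RX queue, RSS hash and VLAN metadata.
 */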
static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
                                      struct mlx5e_rq *rq,
                                      struct sk_buff *skb)
{
        struct net_device *netdev = rq->netdev;
        u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
        int lro_num_seg;

        skb_put(skb, cqe_bcnt);

        lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
        if (lro_num_seg > 1) {
                mlx5e_lro_update_hdr(skb, cqe);
                skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
                rq->stats.lro_packets++;
                rq->stats.lro_bytes += cqe_bcnt;
        }

        mlx5e_handle_csum(netdev, cqe, rq, skb);

        skb->protocol = eth_type_trans(skb, netdev);

        skb_record_rx_queue(skb, rq->ix);

        if (likely(netdev->features & NETIF_F_RXHASH))
                mlx5e_skb_set_hash(cqe, skb);

        if (cqe_has_vlan(cqe))
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                       be16_to_cpu(cqe->vlan_info));
}

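/* NAPI RX poll: consume up to @budget CQEs, unmap each completed buffer and
 * hand the skb to the stack via GRO, release the corresponding WQEs, and
 * report whether more work remains.
 */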
bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
        struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
        int i;

        /* avoid accessing cq (dma coherent memory) if not needed */
        if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
                return false;

        for (i = 0; i < budget; i++) {
                struct mlx5e_rx_wqe *wqe;
                struct mlx5_cqe64 *cqe;
                struct sk_buff *skb;
                __be16 wqe_counter_be;
                u16 wqe_counter;

                cqe = mlx5e_get_cqe(cq);
                if (!cqe)
                        break;

                mlx5_cqwq_pop(&cq->wq);

                wqe_counter_be = cqe->wqe_counter;
                wqe_counter = be16_to_cpu(wqe_counter_be);
                wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
                skb = rq->skb[wqe_counter];
                prefetch(skb->data);
                rq->skb[wqe_counter] = NULL;

                dma_unmap_single(rq->pdev,
                                 *((dma_addr_t *)skb->cb),
                                 rq->wqe_sz,
                                 DMA_FROM_DEVICE);

                if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
                        rq->stats.wqe_err++;
                        dev_kfree_skb(skb);
                        goto wq_ll_pop;
                }

                mlx5e_build_rx_skb(cqe, rq, skb);
                rq->stats.packets++;
                napi_gro_receive(cq->napi, skb);

wq_ll_pop:
                mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
                               &wqe->next.next_wqe_index);
        }

        mlx5_cqwq_update_db_record(&cq->wq);

        /* ensure cq space is freed before enabling more cqes */
        wmb();

        if (i == budget) {
                set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
                return true;
        }

        return false;
}