drivers/net/hyperv/netvsc_drv.c (linux-2.6-block.git)
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

struct net_device_context {
	/* point back to our device context */
	struct hv_device *device_ctx;
	struct delayed_work dwork;
	struct work_struct work;
};

#define RING_SIZE_MIN 64
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
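
/*
 * Usage note (editor's illustration, not part of the original source):
 * the ring size is counted in pages and can be set at module load time,
 * e.g. "modprobe hv_netvsc ring_size=256"; netvsc_drv_init() raises any
 * value below RING_SIZE_MIN to the minimum.
 */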

static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;

	nvdev = hv_get_drvdata(ndevctx->device_ctx);
	if (nvdev == NULL || nvdev->ndev == NULL)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (nvdev->ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_start_all_queues(net);

	nvdev = hv_get_drvdata(device_obj);
	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	int ret;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(device_obj);
	if (ret != 0)
		netdev_err(net, "unable to close device (ret %d).\n", ret);

	return ret;
}

static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   int pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

union sub_key {
	u64 k;
	struct {
		u8 pad[3];
		u8 kb;
		u32 ka;
	};
};

/* Toeplitz hash function
 * data: network byte order
 * return: host byte order
 */
static u32 comp_hash(u8 *key, int klen, u8 *data, int dlen)
{
	union sub_key subk;
	int k_next = 4;
	u8 dt;
	int i, j;
	u32 ret = 0;

	subk.k = 0;
	subk.ka = ntohl(*(u32 *)key);

	for (i = 0; i < dlen; i++) {
		subk.kb = key[k_next];
		k_next = (k_next + 1) % klen;
		dt = data[i];
		for (j = 0; j < 8; j++) {
			if (dt & 0x80)
				ret ^= subk.ka;
			dt <<= 1;
			subk.k <<= 1;
		}
	}

	return ret;
}
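
/*
 * Worked example (editor's note, not in the original source): for TCP over
 * IPv4, netvsc_set_hash() hashes 12 bytes starting at iphdr->saddr, i.e.
 * saddr, daddr and the 4 bytes that follow the basic IPv4 header (the TCP
 * ports when there are no IP options).  subk.ka is the current 32-bit
 * window of the key; each set input bit XORs that window into the result,
 * then the whole 64-bit window shifts left by one bit, being refilled one
 * key byte at a time through subk.kb.
 */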

static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
{
	struct iphdr *iphdr;
	int data_len;
	bool ret = false;

	if (eth_hdr(skb)->h_proto != htons(ETH_P_IP))
		return false;

	iphdr = ip_hdr(skb);

	if (iphdr->version == 4) {
		if (iphdr->protocol == IPPROTO_TCP)
			data_len = 12;
		else
			data_len = 8;
		*hash = comp_hash(netvsc_hash_key, HASH_KEYLEN,
				  (u8 *)&iphdr->saddr, data_len);
		ret = true;
	}

	return ret;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *hdev = net_device_ctx->device_ctx;
	struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
	u32 hash;
	u16 q_idx = 0;

	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
		return 0;

	if (netvsc_set_hash(&hash, skb))
		q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
			ndev->real_num_tx_queues;

	return q_idx;
}
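
/*
 * Editor's note: netvsc_select_queue() maps
 * hash -> send_table[hash % VRSS_SEND_TAB_SIZE] -> queue, with a final
 * modulo over real_num_tx_queues so an indirection-table entry larger than
 * the number of configured channels still lands on a valid transmit queue.
 */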

static void netvsc_xmit_completion(void *context)
{
	struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
	struct sk_buff *skb = (struct sk_buff *)
		(unsigned long)packet->send_completion_tid;
	u32 index = packet->send_buf_index;

	kfree(packet);

	if (skb && (index == NETVSC_INVALID_INDEX))
		dev_kfree_skb_any(skb);
}

static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring the unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					  offset_in_page(hdr),
					  len, &pb[slots_used]);

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}
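
/*
 * Worked example (editor's note, not in the original source): with 4 KiB
 * pages, a 6000-byte linear area starting 0x800 bytes into its first page
 * costs DIV_ROUND_UP(0x800 + 6000, 4096) = 2 slots, and each page fragment
 * is charged PFN_UP(offset + size) slots the same way.  netvsc_start_xmit()
 * then adds two more slots for the RNDIS header before comparing against
 * MAX_PAGE_BUFFER_COUNT.
 */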

static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
{
	u32 ret_val = TRANSPORT_INFO_NOT_IP;

	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
	    (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
		goto not_ip;
	}

	*trans_off = skb_transport_offset(skb);

	if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
		struct iphdr *iphdr = ip_hdr(skb);

		if (iphdr->protocol == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV4_TCP;
		else if (iphdr->protocol == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV4_UDP;
	} else {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV6_UDP;
	}

not_ip:
	return ret_val;
}

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	bool isvlan;
	struct rndis_per_packet_info *ppi;
	struct ndis_tcp_ip_checksum_info *csum_info;
	struct ndis_tcp_lso_info *lso_info;
	int hdr_offset;
	u32 net_trans_info;

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet.
	 */
	num_data_pgs = netvsc_get_slots(skb) + 2;
	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
		netdev_err(net, "Packet too big: %u\n", skb->len);
		dev_kfree_skb(skb);
		net->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Allocate a netvsc packet based on # of frags. */
	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
			 (num_data_pgs * sizeof(struct hv_page_buffer)) +
			 sizeof(struct rndis_message) +
			 NDIS_VLAN_PPI_SIZE +
			 NDIS_CSUM_PPI_SIZE +
			 NDIS_LSO_PPI_SIZE, GFP_ATOMIC);
	if (!packet) {
		/* out of memory, drop packet */
		netdev_err(net, "unable to allocate hv_netvsc_packet\n");

		dev_kfree_skb(skb);
		net->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	packet->vlan_tci = skb->vlan_tci;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->is_data_pkt = true;
	packet->total_data_buflen = skb->len;

	packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
				sizeof(struct hv_netvsc_packet) +
				(num_data_pgs * sizeof(struct hv_page_buffer)));
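
	/*
	 * Editor's note: the single kzalloc() above packs everything the
	 * send path needs into one atomic allocation, laid out as
	 *   struct hv_netvsc_packet | hv_page_buffer[num_data_pgs] |
	 *   rndis_message (+ room for the VLAN, checksum and LSO
	 *   per-packet-info blocks),
	 * which is why packet->rndis_msg is computed by offsetting past the
	 * packet struct and the page-buffer array.
	 */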
	/* Set the completion routine */
	packet->send_completion = netvsc_xmit_completion;
	packet->send_completion_ctx = packet;
	packet->send_completion_tid = (unsigned long)skb;

	isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;

	/* Add the rndis header */
	rndis_msg = packet->rndis_msg;
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	if (isvlan) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						      ppi->ppi_offset);
		vlan->vlanid = packet->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (packet->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	net_trans_info = get_net_transport_info(skb, &hdr_offset);
	if (net_trans_info == TRANSPORT_INFO_NOT_IP)
		goto do_send;

	/*
	 * Set up the send-side checksum offload only if this is not a
	 * GSO packet.
	 */
	if (skb_is_gso(skb))
		goto do_lso;

	if ((skb->ip_summed == CHECKSUM_NONE) ||
	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
		goto do_send;

	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
			    TCPIP_CHKSUM_PKTINFO);

	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
			ppi->ppi_offset);

	if (net_trans_info & (INFO_IPV4 << 16))
		csum_info->transmit.is_ipv4 = 1;
	else
		csum_info->transmit.is_ipv6 = 1;

	if (net_trans_info & INFO_TCP) {
		csum_info->transmit.tcp_checksum = 1;
		csum_info->transmit.tcp_header_offset = hdr_offset;
	} else if (net_trans_info & INFO_UDP) {
		/* UDP checksum offload is not supported on ws2008r2.
		 * Furthermore, on ws2012 and ws2012r2, there are some
		 * issues with udp checksum offload from Linux guests.
		 * (these are host issues).
		 * For now compute the checksum here.
		 */
		struct udphdr *uh;
		u16 udp_len;

		ret = skb_cow_head(skb, 0);
		if (ret)
			goto drop;

		uh = udp_hdr(skb);
		udp_len = ntohs(uh->len);
		uh->check = 0;
		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr,
					      udp_len, IPPROTO_UDP,
					      csum_partial(uh, udp_len, 0));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		csum_info->transmit.udp_checksum = 0;
	}
	goto do_send;

do_lso:
	rndis_msg_size += NDIS_LSO_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
			    TCP_LARGESEND_PKTINFO);

	lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
			ppi->ppi_offset);

	lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
	if (net_trans_info & (INFO_IPV4 << 16)) {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}
	lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
	lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;

do_send:
	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, &packet->page_buf[0]);

	ret = netvsc_send(net_device_ctx->device_ctx, packet);

drop:
	if (ret == 0) {
		net->stats.tx_bytes += skb->len;
		net->stats.tx_packets++;
	} else {
		kfree(packet);
		if (ret != -EAGAIN) {
			dev_kfree_skb_any(skb);
			net->stats.tx_dropped++;
		}
	}

	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				unsigned int status)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;

	net_device = hv_get_drvdata(device_obj);
	rdev = net_device->extension;

	rdev->link_state = status != 1;

	net = net_device->ndev;

	if (!net || net->reg_state != NETREG_REGISTERED)
		return;

	ndev_ctx = netdev_priv(net);
	if (status == 1) {
		schedule_delayed_work(&ndev_ctx->dwork, 0);
		schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
	} else {
		schedule_delayed_work(&ndev_ctx->dwork, 0);
	}
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
			 struct hv_netvsc_packet *packet,
			 struct ndis_tcp_ip_checksum_info *csum_info)
{
	struct net_device *net;
	struct sk_buff *skb;

	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
	if (!net || net->reg_state != NETREG_REGISTERED) {
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/*
	 * Copy to skb. This copy is needed here since the memory pointed to
	 * by hv_netvsc_packet cannot be deallocated.
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
	       packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);
	if (csum_info) {
		/* We only look at the IP checksum here.
		 * Should we be dropping the packet if checksum
		 * failed? How do we deal with other checksums - TCP/UDP?
		 */
		if (csum_info->receive.ip_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
	}

	if (packet->vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       packet->vlan_tci);

	skb_record_rx_queue(skb, packet->channel->
			    offermsg.offer.sub_channel_index);

	net->stats.rx_packets++;
	net->stats.rx_bytes += packet->total_data_buflen;

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU;

	if (mtu < 68 || mtu > limit)
		return -EINVAL;

	nvdev->start_remove = true;
	cancel_work_sync(&ndevctx->work);
	netif_tx_disable(ndev);
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	hv_set_drvdata(hdev, ndev);
	device_info.ring_size = ring_size;
	rndis_filter_device_add(hdev, &device_info);
	netif_tx_wake_all_queues(ndev);

	return 0;
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(hdev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
};

/*
 * Send a GARP packet to network peers after migrations.
 * After Quick Migration, the network is not immediately operational in the
 * current context when receiving the RNDIS_STATUS_MEDIA_CONNECT event. So,
 * defer another netif_notify_peers() to a delayed work item, otherwise the
 * GARP packet will not be sent after quick migration, causing a network
 * disconnection.
 * Also, we update the carrier status here.
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx;
	struct net_device *net;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	bool notify;

	rtnl_lock();

	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
	rdev = net_device->extension;
	net = net_device->ndev;

	if (rdev->link_state) {
		netif_carrier_off(net);
		notify = false;
	} else {
		netif_carrier_on(net);
		notify = true;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);
}

static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				num_online_cpus());
	if (!net)
		return -ENOMEM;

	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	hv_set_drvdata(dev, net);
	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	net->netdev_ops = &device_ops;

	net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
			   NETIF_F_TSO;
	net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
			NETIF_F_IP_CSUM | NETIF_F_TSO;

	SET_ETHTOOL_OPS(net, &ethtool_ops);
	SET_NETDEV_DEV(net, &dev->device);

	/* Notify the netvsc driver of the new device */
	device_info.ring_size = ring_size;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	nvdev = hv_get_drvdata(dev);
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		free_netdev(net);
	} else {
		schedule_delayed_work(&net_device_ctx->dwork, 0);
	}

	return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(dev);
	net = net_device->ndev;

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	net_device->start_remove = true;

	ndev_ctx = netdev_priv(net);
	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	rndis_filter_device_remove(dev);

	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

static void __exit netvsc_drv_exit(void)
{
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	return vmbus_driver_register(&netvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);