Commit | Line | Data |
---|---|---|
9952f691 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
fceaf24a | 2 | /* |
fceaf24a HJ |
3 | * Copyright (c) 2009, Microsoft Corporation. |
4 | * | |
fceaf24a | 5 | * Authors: |
d0e94d17 | 6 | * Haiyang Zhang <haiyangz@microsoft.com> |
fceaf24a | 7 | * Hank Janssen <hjanssen@microsoft.com> |
fceaf24a | 8 | */ |
eb335bc4 HJ |
9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
10 | ||
5654e932 | 11 | #include <linux/kernel.h> |
0c3b7b2f S |
12 | #include <linux/sched.h> |
13 | #include <linux/wait.h> | |
0ffa63b0 | 14 | #include <linux/mm.h> |
b4362c9c | 15 | #include <linux/delay.h> |
21a80820 | 16 | #include <linux/io.h> |
5a0e3ad6 | 17 | #include <linux/slab.h> |
d9871158 | 18 | #include <linux/netdevice.h> |
f157e78d | 19 | #include <linux/if_ether.h> |
d6472302 | 20 | #include <linux/vmalloc.h> |
9749fed5 | 21 | #include <linux/rtnetlink.h> |
43bf99ce | 22 | #include <linux/prefetch.h> |
1cb9d3b6 | 23 | #include <linux/filter.h> |
9749fed5 | 24 | |
c25aaf81 | 25 | #include <asm/sync_bitops.h> |
96854bbd | 26 | #include <asm/mshyperv.h> |
3f335ea2 | 27 | |
5ca7252a | 28 | #include "hyperv_net.h" |
ec966381 | 29 | #include "netvsc_trace.h" |
fceaf24a | 30 | |
/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 *
 * Sends an NVSP_MSG4_TYPE_SWITCH_DATA_PATH message to the host and waits
 * for its completion.  Returns 0 on success or a negative errno if the
 * message could not be sent (after retrying on -EAGAIN).
 *
 * Called holding RTNL (rtnl_dereference() of nvdev below requires it).
 */
int netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
	int ret, retry = 0;

	/* Block sending traffic to VF if it's about to be gone */
	if (!vf)
		net_device_ctx->data_path_is_vf = vf;

	/* Build the switch-datapath request in the shared init packet. */
	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

again:
	trace_nvsp_send(ndev, init_pkt);

	ret = vmbus_sendpacket(dev->channel, init_pkt,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	/* If failed to switch to/from VF, let data_path_is_vf stay false,
	 * so we use synthetic path to send data.
	 */
	if (ret) {
		if (ret != -EAGAIN) {
			netdev_err(ndev,
				   "Unable to send sw datapath msg, err: %d\n",
				   ret);
			return ret;
		}

		/* -EAGAIN: ring full; back off and retry a bounded number
		 * of times before giving up.
		 */
		if (retry++ < RETRY_MAX) {
			usleep_range(RETRY_US_LO, RETRY_US_HI);
			goto again;
		} else {
			netdev_err(
				ndev,
				"Retry failed to send sw datapath msg, err: %d\n",
				ret);
			return ret;
		}
	}

	/* Wait for the host's completion before declaring the new path
	 * active; only then record data_path_is_vf for the vf==true case.
	 */
	wait_for_completion(&nv_dev->channel_init_wait);
	net_device_ctx->data_path_is_vf = vf;

	return 0;
}
92 | ||
/* Worker to setup sub channels on initial setup
 * Initial hotplug event occurs in softirq context
 * and can't wait for channels.
 */
static void netvsc_subchan_work(struct work_struct *w)
{
	struct netvsc_device *nvdev =
		container_of(w, struct netvsc_device, subchan_work);
	struct rndis_device *rdev;
	int i, ret;

	/* Avoid deadlock with device removal already under RTNL */
	if (!rtnl_trylock()) {
		/* Re-queue ourselves and try again later. */
		schedule_work(w);
		return;
	}

	/* rdev is NULL if the RNDIS layer was already torn down. */
	rdev = nvdev->extension;
	if (rdev) {
		ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
		if (ret == 0) {
			netif_device_attach(rdev->ndev);
		} else {
			/* fallback to only primary channel */
			for (i = 1; i < nvdev->num_chn; i++)
				netif_napi_del(&nvdev->chan_table[i].napi);

			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	rtnl_unlock();
}
127 | ||
88098834 | 128 | static struct netvsc_device *alloc_net_device(void) |
fceaf24a | 129 | { |
85799a37 | 130 | struct netvsc_device *net_device; |
fceaf24a | 131 | |
85799a37 HZ |
132 | net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL); |
133 | if (!net_device) | |
fceaf24a HJ |
134 | return NULL; |
135 | ||
dc5cd894 | 136 | init_waitqueue_head(&net_device->wait_drain); |
c38b9c71 | 137 | net_device->destroy = false; |
f6f13c12 | 138 | net_device->tx_disable = true; |
0da6edbd | 139 | |
7c3877f2 HZ |
140 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; |
141 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; | |
8b532797 | 142 | |
fd612602 | 143 | init_completion(&net_device->channel_init_wait); |
732e4985 | 144 | init_waitqueue_head(&net_device->subchan_open); |
3ffe64f1 | 145 | INIT_WORK(&net_device->subchan_work, netvsc_subchan_work); |
7c3877f2 | 146 | |
85799a37 | 147 | return net_device; |
fceaf24a HJ |
148 | } |
149 | ||
/* RCU callback that releases all memory owned by a netvsc_device.
 * Runs after the grace period started by free_netvsc_device_rcu(),
 * so no readers can still hold a reference.
 */
static void free_netvsc_device(struct rcu_head *head)
{
	struct netvsc_device *nvdev
		= container_of(head, struct netvsc_device, rcu);
	int i;

	kfree(nvdev->extension);
	vfree(nvdev->recv_buf);
	vfree(nvdev->send_buf);
	bitmap_free(nvdev->send_section_map);

	/* Per-channel resources: XDP rxq registration, the per-channel
	 * receive staging buffer, and the receive-completion ring slots.
	 */
	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
		kfree(nvdev->chan_table[i].recv_buf);
		vfree(nvdev->chan_table[i].mrc.slots);
	}

	kfree(nvdev);
}
169 | ||
/* Schedule deferred destruction of @nvdev after an RCU grace period. */
static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}
fceaf24a | 174 | |
/* Revoke the receive buffer previously offered to the host.
 * No-op unless a SendReceiveBufferComplete was received (recv_section_cnt
 * is non-zero).  On success, recv_section_cnt is cleared so a second call
 * does nothing.
 */
static void netvsc_revoke_recv_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       VMBUS_RQST_ID_NO_RESPONSE,
				       VM_PKT_DATA_INBAND, 0);
		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return;
		}
		net_device->recv_section_cnt = 0;
	}
}
224 | ||
/* Revoke the send buffer previously offered to the host.
 * Mirror of netvsc_revoke_recv_buf(): no-op unless send_section_cnt is
 * non-zero; clears it on success so the call is idempotent.
 */
static void netvsc_revoke_send_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/* Deal with the send buffer we may have setup.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       VMBUS_RQST_ID_NO_RESPONSE,
				       VM_PKT_DATA_INBAND, 0);

		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;

		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return;
		}
		net_device->send_section_cnt = 0;
	}
}
275 | ||
7992894c | 276 | static void netvsc_teardown_recv_gpadl(struct hv_device *device, |
3f076eff MG |
277 | struct netvsc_device *net_device, |
278 | struct net_device *ndev) | |
0cf73780 | 279 | { |
0cf73780 VK |
280 | int ret; |
281 | ||
d4dccf35 | 282 | if (net_device->recv_buf_gpadl_handle.gpadl_handle) { |
0cf73780 | 283 | ret = vmbus_teardown_gpadl(device->channel, |
d4dccf35 | 284 | &net_device->recv_buf_gpadl_handle); |
0cf73780 VK |
285 | |
286 | /* If we failed here, we might as well return and have a leak | |
287 | * rather than continue and a bugchk | |
288 | */ | |
289 | if (ret != 0) { | |
290 | netdev_err(ndev, | |
291 | "unable to teardown receive buffer's gpadl\n"); | |
292 | return; | |
293 | } | |
0cf73780 | 294 | } |
7992894c MG |
295 | } |
296 | ||
297 | static void netvsc_teardown_send_gpadl(struct hv_device *device, | |
3f076eff MG |
298 | struct netvsc_device *net_device, |
299 | struct net_device *ndev) | |
7992894c | 300 | { |
7992894c | 301 | int ret; |
0cf73780 | 302 | |
d4dccf35 | 303 | if (net_device->send_buf_gpadl_handle.gpadl_handle) { |
3d541ac5 | 304 | ret = vmbus_teardown_gpadl(device->channel, |
d4dccf35 | 305 | &net_device->send_buf_gpadl_handle); |
c25aaf81 KS |
306 | |
307 | /* If we failed here, we might as well return and have a leak | |
308 | * rather than continue and a bugchk | |
309 | */ | |
310 | if (ret != 0) { | |
311 | netdev_err(ndev, | |
312 | "unable to teardown send buffer's gpadl\n"); | |
7a2a0a84 | 313 | return; |
c25aaf81 | 314 | } |
c25aaf81 | 315 | } |
ec91cd09 HZ |
316 | } |
317 | ||
7426b1a5 | 318 | int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) |
319 | { | |
320 | struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; | |
321 | int node = cpu_to_node(nvchan->channel->target_cpu); | |
322 | size_t size; | |
323 | ||
324 | size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data); | |
325 | nvchan->mrc.slots = vzalloc_node(size, node); | |
326 | if (!nvchan->mrc.slots) | |
327 | nvchan->mrc.slots = vzalloc(size); | |
328 | ||
329 | return nvchan->mrc.slots ? 0 : -ENOMEM; | |
330 | } | |
331 | ||
/* Allocate the receive and send buffers, establish their GPADLs, and
 * hand them to the host via NVSP SEND_RECV_BUF / SEND_SEND_BUF messages.
 *
 * Must run after NVSP version negotiation (reads net_device->nvsp_version).
 * On any failure, all buffers set up so far are revoked/torn down via the
 * cleanup path.  Returns 0 on success or a negative errno.
 */
static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device,
			   const struct netvsc_device_info *device_info)
{
	struct nvsp_1_message_send_receive_buffer_complete *resp;
	struct net_device *ndev = hv_get_drvdata(device);
	struct nvsp_message *init_packet;
	unsigned int buf_size;
	int i, ret = 0;

	/* Get receive buffer area. */
	buf_size = device_info->recv_sections * device_info->recv_section_size;
	buf_size = roundup(buf_size, PAGE_SIZE);

	/* Legacy hosts only allow smaller receive buffer */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		buf_size = min_t(unsigned int, buf_size,
				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);

	net_device->recv_buf = vzalloc(buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	net_device->recv_buf_size = buf_size;

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle.gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	/* Host's completion handler fills in channel_init_pkt. */
	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	if (resp->status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   resp->status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
		   resp->num_sections, resp->sections[0].sub_alloc_size,
		   resp->sections[0].num_sub_allocs);

	/* There should only be one section for the entire receive buffer */
	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

	/* Ensure buffer will not overflow */
	if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
	    (u64)net_device->recv_section_cnt > (u64)buf_size) {
		netdev_err(ndev, "invalid recv_section_size %u\n",
			   net_device->recv_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Per-channel staging buffer, one receive section in size. */
	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
		if (nvchan->recv_buf == NULL) {
			ret = -ENOMEM;
			goto cleanup;
		}
	}

	/* Setup receive completion ring.
	 * Add 1 to the recv_section_cnt because at least one entry in a
	 * ring buffer has to be empty.
	 */
	net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
	if (ret)
		goto cleanup;

	/* Now setup the send buffer. */
	buf_size = device_info->send_sections * device_info->send_section_size;
	buf_size = round_up(buf_size, PAGE_SIZE);

	net_device->send_buf = vzalloc(buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}
	net_device->send_buf_size = buf_size;

	/* Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle.gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
		v1_msg.send_send_buf_complete.section_size;
	if (net_device->send_section_size < NETVSC_MTU_MIN) {
		netdev_err(ndev, "invalid send_section_size %u\n",
			   net_device->send_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt = buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
						     GFP_KERNEL);
	if (!net_device->send_section_map) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	netvsc_revoke_recv_buf(device, net_device, ndev);
	netvsc_revoke_send_buf(device, net_device, ndev);
	netvsc_teardown_recv_gpadl(device, net_device, ndev);
	netvsc_teardown_send_gpadl(device, net_device, ndev);

exit:
	return ret;
}
549 | ||
/* Negotiate NVSP protocol version.
 *
 * Offers exactly @nvsp_ver (min == max) to the host and waits for its
 * reply.  For NVSPv2 and later, also sends the NDIS configuration
 * (MTU, 802.1q, and version-gated SR-IOV/teaming/RSC capabilities).
 *
 * Returns 0 if the host accepted @nvsp_ver, -EINVAL if it rejected it,
 * or the vmbus_sendpacket() error.
 */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	/* Completion handler copies the host's reply into init_packet. */
	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		/* SR-IOV is not advertised on isolated (e.g. SNP) guests. */
		if (hv_is_isolation_supported())
			netdev_info(ndev, "SR-IOV not advertised by guests on the host supporting isolation\n");
		else
			init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
		init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;

	trace_nvsp_send(ndev, init_packet);

	/* NDIS config needs no completion; fire and forget. */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       VMBUS_RQST_ID_NO_RESPONSE,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}
612 | ||
/* Connect to the host's NetVSP: negotiate the highest mutually supported
 * NVSP version, send the NDIS version, then set up the data buffers via
 * netvsc_init_buf().  Returns 0 on success or a negative errno.
 */
static int netvsc_connect_vsp(struct hv_device *device,
			      struct netvsc_device *net_device,
			      const struct netvsc_device_info *device_info)
{
	struct net_device *ndev = hv_get_drvdata(device);
	/* Versions in ascending order; negotiation walks it backwards. */
	static const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
		NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
	};
	struct nvsp_message *init_packet;
	int ndis_version, i, ret;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	/* Loop fell through without agreeing on any version. */
	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	/* Isolated guests require NVSP 6.1 or newer. */
	if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
		netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
			   net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
				(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
				ndis_version & 0xFFFF;

	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       VMBUS_RQST_ID_NO_RESPONSE,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;


	ret = netvsc_init_buf(device, net_device, device_info);

cleanup:
	return ret;
}
682 | ||
/*
 * netvsc_device_remove - Callback when the root bus device is removed
 *
 * Teardown ordering matters: buffers are revoked before the channel is
 * closed, but GPADL teardown happens before vmbus_close() only on
 * pre-Win2016 hosts and after it on Win2016+ hosts.
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rtnl_dereference(net_device_ctx->nvdev);
	int i;

	/*
	 * Revoke receive buffer. If host is pre-Win2016 then tear down
	 * receive buffer GPADL. Do the same for send buffer.
	 */
	netvsc_revoke_recv_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_recv_gpadl(device, net_device, ndev);

	netvsc_revoke_send_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_send_gpadl(device, net_device, ndev);

	/* Detach nvdev so new readers see NULL; freeing is RCU-deferred. */
	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

	/* Disable NAPI and disassociate its context from the device. */
	for (i = 0; i < net_device->num_chn; i++) {
		/* See also vmbus_reset_channel_cb(). */
		napi_disable(&net_device->chan_table[i].napi);
		netif_napi_del(&net_device->chan_table[i].napi);
	}

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/*
	 * If host is Win2016 or higher then we do the GPADL tear down
	 * here after VMBus is closed.
	 */
	if (vmbus_proto_version >= VERSION_WIN10) {
		netvsc_teardown_recv_gpadl(device, net_device, ndev);
		netvsc_teardown_send_gpadl(device, net_device, ndev);
	}

	/* Release all resources */
	free_netvsc_device_rcu(net_device);
}
736 | ||
33be96e4 HZ |
737 | #define RING_AVAIL_PERCENT_HIWATER 20 |
738 | #define RING_AVAIL_PERCENT_LOWATER 10 | |
739 | ||
/* Return send-buffer section @index to the free pool.
 * sync_change_bit() makes the bitmap update safe against the
 * test_and_set side in the send path.
 */
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}
745 | ||
/* Handle a transmit-completion packet from the host.
 *
 * Looks up the originating skb from the VMBus transaction id, frees the
 * send-buffer slot, accounts tx stats, releases the skb, and wakes the
 * netdev tx queue (or the drain waiter during teardown) as appropriate.
 * Runs in NAPI context (@budget is passed to napi_consume_skb()).
 */
static void netvsc_send_tx_complete(struct net_device *ndev,
				    struct netvsc_device *net_device,
				    struct vmbus_channel *channel,
				    const struct vmpacket_descriptor *desc,
				    int budget)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct sk_buff *skb;
	u16 q_idx = 0;
	int queue_sends;
	u64 cmd_rqst;

	/* Translate the host's transaction id back to our request cookie. */
	cmd_rqst = channel->request_addr_callback(channel, desc->trans_id);
	if (cmd_rqst == VMBUS_RQST_ERROR) {
		netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
		return;
	}

	skb = (struct sk_buff *)(unsigned long)cmd_rqst;

	/* Notify the layer above us */
	if (likely(skb)) {
		/* Packet metadata was stashed in skb->cb by the send path. */
		struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats_tx *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
		napi_consume_skb(skb, budget);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (unlikely(net_device->destroy)) {
		/* Teardown in progress: wake the drain waiter once the
		 * last in-flight send on this queue completes.
		 */
		if (queue_sends == 0)
			wake_up(&net_device->wait_drain);
	} else {
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);

		/* Restart a stopped queue once enough ring space is free
		 * (above the high watermark) or the queue has drained.
		 */
		if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
		    (hv_get_avail_to_write_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
		}
	}
}
805 | ||
/* Dispatch a completion packet received from the host.
 *
 * Zero-length completions correspond to raw VMBUS requests (e.g. the
 * SWITCH_DATA_PATH message). Completions with a payload are validated
 * for minimum length per message type; RNDIS packet completions are
 * forwarded to netvsc_send_tx_complete(), while channel-setup
 * completions are copied into channel_init_pkt and signalled to the
 * waiter in channel_init_wait.
 */
static void netvsc_send_completion(struct net_device *ndev,
				   struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   const struct vmpacket_descriptor *desc,
				   int budget)
{
	const struct nvsp_message *nvsp_packet;
	u32 msglen = hv_pkt_datalen(desc);
	struct nvsp_message *pkt_rqst;
	u64 cmd_rqst;
	u32 status;

	/* First check if this is a VMBUS completion without data payload */
	if (!msglen) {
		cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
								   desc->trans_id);
		if (cmd_rqst == VMBUS_RQST_ERROR) {
			netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
			return;
		}

		pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
		switch (pkt_rqst->hdr.msg_type) {
		case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
			complete(&net_device->channel_init_wait);
			break;

		default:
			netdev_err(ndev, "Unexpected VMBUS completion!!\n");
		}
		return;
	}

	/* Ensure packet is big enough to read header fields */
	if (msglen < sizeof(struct nvsp_message_header)) {
		netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
		return;
	}

	nvsp_packet = hv_pkt_data(desc);
	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_message_init_complete)) {
			netdev_err(ndev, "nvsp_msg length too small: %u\n",
				   msglen);
			return;
		}
		break;

	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
				   msglen);
			return;
		}
		break;

	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
				   msglen);
			return;
		}
		break;

	case NVSP_MSG5_TYPE_SUBCHANNEL:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_5_subchannel_complete)) {
			netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
				   msglen);
			return;
		}
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
		    sizeof(struct nvsp_1_message_send_rndis_packet_complete)) {
			if (net_ratelimit())
				netdev_err(ndev, "nvsp_rndis_pkt_complete length too small: %u\n",
					   msglen);
			return;
		}

		/* If status indicates an error, output a message so we know
		 * there's a problem. But process the completion anyway so the
		 * resources are released.
		 */
		status = nvsp_packet->msg.v1_msg.send_rndis_pkt_complete.status;
		if (status != NVSP_STAT_SUCCESS && net_ratelimit())
			netdev_err(ndev, "nvsp_rndis_pkt_complete error status: %x\n",
				   status);

		netvsc_send_tx_complete(ndev, net_device, incoming_channel,
					desc, budget);
		return;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
		return;
	}

	/* Copy the response back */
	memcpy(&net_device->channel_init_pkt, nvsp_packet,
	       sizeof(struct nvsp_message));
	complete(&net_device->channel_init_wait);
}
917 | ||
c25aaf81 KS |
918 | static u32 netvsc_get_next_send_section(struct netvsc_device *net_device) |
919 | { | |
b58a1858 | 920 | unsigned long *map_addr = net_device->send_section_map; |
921 | unsigned int i; | |
922 | ||
fdfb70d2 | 923 | for_each_clear_bit(i, map_addr, net_device->send_section_cnt) { |
b58a1858 | 924 | if (sync_test_and_set_bit(i, map_addr) == 0) |
925 | return i; | |
c25aaf81 | 926 | } |
b58a1858 | 927 | |
928 | return NETVSC_INVALID_INDEX; | |
c25aaf81 KS |
929 | } |
930 | ||
26a11262 SH |
931 | static void netvsc_copy_to_send_buf(struct netvsc_device *net_device, |
932 | unsigned int section_index, | |
933 | u32 pend_size, | |
934 | struct hv_netvsc_packet *packet, | |
935 | struct rndis_message *rndis_msg, | |
936 | struct hv_page_buffer *pb, | |
cfd8afd9 | 937 | bool xmit_more) |
c25aaf81 KS |
938 | { |
939 | char *start = net_device->send_buf; | |
7c3877f2 HZ |
940 | char *dest = start + (section_index * net_device->send_section_size) |
941 | + pend_size; | |
c25aaf81 | 942 | int i; |
7c3877f2 | 943 | u32 padding = 0; |
aa0a34be HZ |
944 | u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt : |
945 | packet->page_buf_cnt; | |
b85e06f7 | 946 | u32 remain; |
7c3877f2 HZ |
947 | |
948 | /* Add padding */ | |
b85e06f7 | 949 | remain = packet->total_data_buflen & (net_device->pkt_align - 1); |
cfd8afd9 | 950 | if (xmit_more && remain) { |
7c3877f2 | 951 | padding = net_device->pkt_align - remain; |
24476760 | 952 | rndis_msg->msg_len += padding; |
7c3877f2 HZ |
953 | packet->total_data_buflen += padding; |
954 | } | |
c25aaf81 | 955 | |
aa0a34be | 956 | for (i = 0; i < page_count; i++) { |
11d8620e | 957 | char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT); |
02b6de01 | 958 | u32 offset = pb[i].offset; |
959 | u32 len = pb[i].len; | |
c25aaf81 KS |
960 | |
961 | memcpy(dest, (src + offset), len); | |
c25aaf81 KS |
962 | dest += len; |
963 | } | |
7c3877f2 | 964 | |
26a11262 | 965 | if (padding) |
7c3877f2 | 966 | memset(dest, 0, padding); |
c25aaf81 KS |
967 | } |
968 | ||
846da38d TL |
969 | void netvsc_dma_unmap(struct hv_device *hv_dev, |
970 | struct hv_netvsc_packet *packet) | |
971 | { | |
846da38d TL |
972 | int i; |
973 | ||
974 | if (!hv_is_isolation_supported()) | |
975 | return; | |
976 | ||
977 | if (!packet->dma_range) | |
978 | return; | |
979 | ||
99f1c460 | 980 | for (i = 0; i < packet->page_buf_cnt; i++) |
846da38d TL |
981 | dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma, |
982 | packet->dma_range[i].mapping_size, | |
983 | DMA_TO_DEVICE); | |
984 | ||
985 | kfree(packet->dma_range); | |
986 | } | |
987 | ||
988 | /* netvsc_dma_map - Map swiotlb bounce buffer with data page of | |
989 | * packet sent by vmbus_sendpacket_pagebuffer() in the Isolation | |
990 | * VM. | |
991 | * | |
992 | * In isolation VM, netvsc send buffer has been marked visible to | |
993 | * host and so the data copied to send buffer doesn't need to use | |
994 | * bounce buffer. The data pages handled by vmbus_sendpacket_pagebuffer() | |
995 | * may not be copied to send buffer and so these pages need to be | |
996 | * mapped with swiotlb bounce buffer. netvsc_dma_map() is to do | |
997 | * that. The pfns in the struct hv_page_buffer need to be converted | |
998 | * to bounce buffer's pfn. The loop here is necessary because the | |
999 | * entries in the page buffer array are not necessarily full | |
1000 | * pages of data. Each entry in the array has a separate offset and | |
1001 | * len that may be non-zero, even for entries in the middle of the | |
1002 | * array. And the entries are not physically contiguous. So each | |
1003 | * entry must be individually mapped rather than as a contiguous unit. | |
1004 | * So not use dma_map_sg() here. | |
1005 | */ | |
1006 | static int netvsc_dma_map(struct hv_device *hv_dev, | |
1007 | struct hv_netvsc_packet *packet, | |
1008 | struct hv_page_buffer *pb) | |
1009 | { | |
99f1c460 | 1010 | u32 page_count = packet->page_buf_cnt; |
846da38d TL |
1011 | dma_addr_t dma; |
1012 | int i; | |
1013 | ||
1014 | if (!hv_is_isolation_supported()) | |
1015 | return 0; | |
1016 | ||
1017 | packet->dma_range = kcalloc(page_count, | |
1018 | sizeof(*packet->dma_range), | |
c6aa9d3b | 1019 | GFP_ATOMIC); |
846da38d TL |
1020 | if (!packet->dma_range) |
1021 | return -ENOMEM; | |
1022 | ||
1023 | for (i = 0; i < page_count; i++) { | |
1024 | char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT) | |
1025 | + pb[i].offset); | |
1026 | u32 len = pb[i].len; | |
1027 | ||
1028 | dma = dma_map_single(&hv_dev->device, src, len, | |
1029 | DMA_TO_DEVICE); | |
1030 | if (dma_mapping_error(&hv_dev->device, dma)) { | |
1031 | kfree(packet->dma_range); | |
1032 | return -ENOMEM; | |
1033 | } | |
1034 | ||
1035 | /* pb[].offset and pb[].len are not changed during dma mapping | |
1036 | * and so not reassign. | |
1037 | */ | |
1038 | packet->dma_range[i].dma = dma; | |
1039 | packet->dma_range[i].mapping_size = len; | |
1040 | pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT; | |
1041 | } | |
1042 | ||
1043 | return 0; | |
1044 | } | |
1045 | ||
/* Build the NVSP SEND_RNDIS_PKT message for one packet and hand it to
 * the VMBus channel for the packet's queue.
 *
 * @skb is NULL for internal control messages (RMC_CONTROL); otherwise
 * the skb pointer doubles as the VMBus request id so the completion
 * handler can recover it. Data still held in guest pages (page_buf_cnt
 * != 0) is sent via vmbus_sendpacket_pagebuffer() after DMA-mapping on
 * isolated VMs; data fully copied into the send buffer goes inband.
 *
 * Flow control: the queue is stopped when the ring drops below the
 * low-water mark after a successful send, or immediately on -EAGAIN
 * (ring full). If nothing is outstanding on the channel the queue is
 * woken right away and -EAGAIN is converted to -ENOSPC, since no
 * completion will arrive to restart it.
 */
static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer *pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct nvsp_1_message_send_rndis_packet *rpkt =
		&nvmsg.msg.v1_msg.send_rndis_pkt;
	struct netvsc_channel * const nvchan =
		&net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	/* Snapshot ring space before sending; used for the low-water check. */
	u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);

	memset(&nvmsg, 0, sizeof(struct nvsp_message));
	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb)
		rpkt->channel_type = 0;		/* 0 is RMC_DATA */
	else
		rpkt->channel_type = 1;		/* 1 is RMC_CONTROL */

	rpkt->send_buf_section_index = packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		rpkt->send_buf_section_size = 0;
	else
		rpkt->send_buf_section_size = packet->total_data_buflen;

	/* The skb pointer is the request cookie recovered on completion. */
	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	trace_nvsp_send_pkt(ndev, out_channel, rpkt);

	packet->dma_range = NULL;
	if (packet->page_buf_cnt) {
		/* For a partial copy the RNDIS header fragments were already
		 * placed in the send buffer; skip past them.
		 */
		if (packet->cp_partial)
			pb += packet->rmsg_pgcnt;

		ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
		if (ret) {
			ret = -EAGAIN;
			goto exit;
		}

		ret = vmbus_sendpacket_pagebuffer(out_channel,
						  pb, packet->page_buf_cnt,
						  &nvmsg, sizeof(nvmsg),
						  req_id);

		if (ret)
			netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
	} else {
		ret = vmbus_sendpacket(out_channel,
				       &nvmsg, sizeof(nvmsg),
				       req_id, VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

exit:
	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(txq);
			ndev_ctx->eth_stats.stop_queue++;
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		ndev_ctx->eth_stats.stop_queue++;
	} else {
		netdev_err(ndev,
			   "Unable to send packet pages %u len %u, ret %d\n",
			   packet->page_buf_cnt, packet->total_data_buflen,
			   ret);
	}

	/* No outstanding sends means no completion will wake the queue;
	 * wake it here and report -ENOSPC so the caller does not retry.
	 */
	if (netif_tx_queue_stopped(txq) &&
	    atomic_read(&nvchan->queue_sends) < 1 &&
	    !net_device->tx_disable) {
		netif_tx_wake_queue(txq);
		ndev_ctx->eth_stats.wake_queue++;
		if (ret == -EAGAIN)
			ret = -ENOSPC;
	}

	return ret;
}
1140 | ||
c85e4924 HZ |
1141 | /* Move packet out of multi send data (msd), and clear msd */ |
1142 | static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send, | |
1143 | struct sk_buff **msd_skb, | |
1144 | struct multi_send_data *msdp) | |
1145 | { | |
1146 | *msd_skb = msdp->skb; | |
1147 | *msd_send = msdp->pkt; | |
1148 | msdp->skb = NULL; | |
1149 | msdp->pkt = NULL; | |
1150 | msdp->count = 0; | |
1151 | } | |
1152 | ||
/* RCU already held by caller */
/* Batching/bouncing logic is designed to attempt to optimize
 * performance.
 *
 * For small, non-LSO packets we copy the packet to a send buffer
 * which is pre-registered with the Hyper-V side. This enables the
 * hypervisor to avoid remapping the aperture to access the packet
 * descriptor and data.
 *
 * If we already started using a buffer and the netdev is transmitting
 * a burst of packets, keep on copying into the buffer until it is
 * full or we are done collecting a burst. If there is an existing
 * buffer with space for the RNDIS descriptor but not the packet, copy
 * the RNDIS descriptor to the buffer, keeping the packet in place.
 *
 * If we do batching and send more than one packet using a single
 * NetVSC message, free the SKBs of the packets copied, except for the
 * last packet. This is done to streamline the handling of the case
 * where the last packet only had the RNDIS descriptor copied to the
 * send buffer, with the data pointers included in the NetVSC message.
 */
int netvsc_send(struct net_device *ndev,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer *pb,
		struct sk_buff *skb,
		bool xdp_tx)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rcu_dereference_bh(ndev_ctx->nvdev);
	struct hv_device *device = ndev_ctx->device_ctx;
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch, xmit_more;

	/* If device is rescinded, return error and packet will get dropped. */
	if (unlikely(!net_device || net_device->destroy))
		return -ENODEV;

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send a control message or XDP packet directly without accessing
	 * msd (Multi-Send Data) field which may be changed during data packet
	 * processing.
	 */
	if (!skb || xdp_tx)
		return netvsc_send_pkt(device, packet, net_device, pb, skb);

	/* batch packets in send buffer if possible */
	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	/* Pick a send-buffer section:
	 * 1) whole packet fits after the pending batch -> reuse its section;
	 * 2) only the RNDIS descriptor fits -> partial copy (cp_partial);
	 * 3) otherwise claim a fresh section, flushing any pending batch.
	 */
	try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if (pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
			++ndev_ctx->eth_stats.tx_send_full;
		} else {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	/* Keep aggregating only if stack says more data is coming
	 * and not doing mixed modes send and not flow blocked
	 */
	xmit_more = netdev_xmit_more() &&
		!packet->cp_partial &&
		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, xmit_more);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			/* Descriptor copied; data pages still referenced. */
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			/* Everything copied; no page references needed. */
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		/* Carry the batch's aggregate stats on the head packet. */
		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

		if (xmit_more) {
			/* Defer: this packet becomes the pending batch head. */
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		/* No section available: flush pending batch, send direct. */
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	if (msd_send) {
		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
					    NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}
1299 | ||
/* Send pending recv completions */
/* Drain the channel's receive-completion ring, sending one
 * RNDIS_PKT_COMPLETE per queued slot back to the host.
 *
 * Returns 0 when the ring is emptied, or the vmbus_sendpacket()
 * error (typically -EBUSY/-EAGAIN) with the unsent entries left in
 * place so a later call can retry them.
 */
static int send_recv_completions(struct net_device *ndev,
				 struct netvsc_device *nvdev,
				 struct netvsc_channel *nvchan)
{
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_msg {
		struct nvsp_message_header hdr;
		u32 status;
	}  __packed;
	struct recv_comp_msg msg = {
		.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
	};
	int ret;

	while (mrc->first != mrc->next) {
		const struct recv_comp_data *rcd
			= mrc->slots + mrc->first;

		msg.status = rcd->status;
		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
				       rcd->tid, VM_PKT_COMP, 0);
		if (unlikely(ret)) {
			struct net_device_context *ndev_ctx = netdev_priv(ndev);

			/* Host ring busy; keep the slot and retry later. */
			++ndev_ctx->eth_stats.rx_comp_busy;
			return ret;
		}

		if (++mrc->first == nvdev->recv_completion_cnt)
			mrc->first = 0;
	}

	/* receive completion ring has been emptied */
	if (unlikely(nvdev->destroy))
		wake_up(&nvdev->wait_drain);

	return 0;
}
1339 | ||
7426b1a5 | 1340 | /* Count how many receive completions are outstanding */ |
1341 | static void recv_comp_slot_avail(const struct netvsc_device *nvdev, | |
1342 | const struct multi_recv_comp *mrc, | |
1343 | u32 *filled, u32 *avail) | |
c0b558e5 | 1344 | { |
7426b1a5 | 1345 | u32 count = nvdev->recv_completion_cnt; |
c0b558e5 | 1346 | |
7426b1a5 | 1347 | if (mrc->next >= mrc->first) |
1348 | *filled = mrc->next - mrc->first; | |
1349 | else | |
1350 | *filled = (count - mrc->first) + mrc->next; | |
c0b558e5 | 1351 | |
7426b1a5 | 1352 | *avail = count - *filled - 1; |
c0b558e5 HZ |
1353 | } |
1354 | ||
/* Add receive complete to ring to send to host. */
/* Queue one receive completion (transaction id + status) for queue
 * @q_idx. If the ring has backed up past NAPI_POLL_WEIGHT entries,
 * attempt an inline drain first; if no slot is free even then, the
 * completion is dropped with an error message.
 */
static void enq_receive_complete(struct net_device *ndev,
				 struct netvsc_device *nvdev, u16 q_idx,
				 u64 tid, u32 status)
{
	struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_data *rcd;
	u32 filled, avail;

	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);

	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
		/* Best-effort drain; recompute occupancy afterwards. */
		send_recv_completions(ndev, nvdev, nvchan);
		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
	}

	if (unlikely(!avail)) {
		netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
			   q_idx, tid);
		return;
	}

	rcd = mrc->slots + mrc->next;
	rcd->tid = tid;
	rcd->status = status;

	if (++mrc->next == nvdev->recv_completion_cnt)
		mrc->next = 0;
}
1385 | ||
/* Process one transfer-page packet from the host: validate it, then
 * hand each contained RNDIS sub-packet to rndis_filter_receive().
 *
 * Every range offset/length is checked against the receive buffer and
 * the per-section size before the data is touched, since the host is
 * untrusted on isolated VMs. A receive completion carrying the overall
 * status is queued for the host regardless of per-range failures.
 *
 * Returns the number of ranges processed (0 if the packet was rejected
 * outright), used by the caller for NAPI budget accounting.
 */
static int netvsc_receive(struct net_device *ndev,
			  struct netvsc_device *net_device,
			  struct netvsc_channel *nvchan,
			  const struct vmpacket_descriptor *desc)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct vmbus_channel *channel = nvchan->channel;
	const struct vmtransfer_page_packet_header *vmxferpage_packet
		= container_of(desc, const struct vmtransfer_page_packet_header, d);
	const struct nvsp_message *nvsp = hv_pkt_data(desc);
	u32 msglen = hv_pkt_datalen(desc);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	char *recv_buf = net_device->recv_buf;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;

	/* Ensure packet is big enough to read header fields */
	if (msglen < sizeof(struct nvsp_message_header)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "invalid nvsp header, length too small: %u\n",
			  msglen);
		return 0;
	}

	/* Make sure this is a valid nvsp packet */
	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Unknown nvsp packet type received %u\n",
			  nvsp->hdr.msg_type);
		return 0;
	}

	/* Validate xfer page pkt header */
	if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page pkt, offset too small: %u\n",
			  desc->offset8 << 3);
		return 0;
	}

	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page set id - expecting %x got %x\n",
			  NETVSC_RECEIVE_BUFFER_ID,
			  vmxferpage_packet->xfer_pageset_id);
		return 0;
	}

	count = vmxferpage_packet->range_cnt;

	/* Check count for a valid value */
	if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Range count is not valid: %d\n",
			  count);
		return 0;
	}

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < count; i++) {
		u32 offset = vmxferpage_packet->ranges[i].byte_offset;
		u32 buflen = vmxferpage_packet->ranges[i].byte_count;
		void *data;
		int ret;

		/* Reject ranges that fall outside the receive buffer. */
		if (unlikely(offset > net_device->recv_buf_size ||
			     buflen > net_device->recv_buf_size - offset)) {
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
			netif_err(net_device_ctx, rx_err, ndev,
				  "Packet offset:%u + len:%u too big\n",
				  offset, buflen);

			continue;
		}

		/* We're going to copy (sections of) the packet into nvchan->recv_buf;
		 * make sure that nvchan->recv_buf is large enough to hold the packet.
		 */
		if (unlikely(buflen > net_device->recv_section_size)) {
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
			netif_err(net_device_ctx, rx_err, ndev,
				  "Packet too big: buflen=%u recv_section_size=%u\n",
				  buflen, net_device->recv_section_size);

			continue;
		}

		data = recv_buf + offset;

		nvchan->rsc.is_last = (i == count - 1);

		trace_rndis_recv(ndev, q_idx, data);

		/* Pass it to the upper layer */
		ret = rndis_filter_receive(ndev, net_device,
					   nvchan, data, buflen);

		if (unlikely(ret != NVSP_STAT_SUCCESS)) {
			/* Drop incomplete packet */
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
		}
	}

	enq_receive_complete(ndev, net_device, q_idx,
			     vmxferpage_packet->d.trans_id, status);

	return count;
}
1498 | ||
/* Install the host-provided send indirection table that maps flow hash
 * values to host queues.
 *
 * The message is bounds-checked before use: the table must contain
 * exactly VRSS_SEND_TAB_SIZE entries and lie entirely within the
 * received payload. Older hosts (NVSP <= 6) report a wrong offset,
 * which is corrected here before the bounds check.
 */
static void netvsc_send_table(struct net_device *ndev,
			      struct netvsc_device *nvscdev,
			      const struct nvsp_message *nvmsg,
			      u32 msglen)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	u32 count, offset, *tab;
	int i;

	/* Ensure packet is big enough to read send_table fields */
	if (msglen < sizeof(struct nvsp_message_header) +
		     sizeof(struct nvsp_5_send_indirect_table)) {
		netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
		return;
	}

	count = nvmsg->msg.v5_msg.send_table.count;
	offset = nvmsg->msg.v5_msg.send_table.offset;

	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	/* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
	 * wrong due to a host bug. So fix the offset here.
	 */
	if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
	    msglen >= sizeof(struct nvsp_message_header) +
	    sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
		offset = sizeof(struct nvsp_message_header) +
			 sizeof(union nvsp_6_message_uber);

	/* Boundary check for all versions */
	if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
		netdev_err(ndev, "Received send-table offset too big:%u\n",
			   offset);
		return;
	}

	tab = (void *)nvmsg + offset;

	for (i = 0; i < count; i++)
		net_device_ctx->tx_table[i] = tab[i];
}
1544 | ||
c347b927 | 1545 | static void netvsc_send_vf(struct net_device *ndev, |
44144185 AB |
1546 | const struct nvsp_message *nvmsg, |
1547 | u32 msglen) | |
71790a27 | 1548 | { |
c347b927 SH |
1549 | struct net_device_context *net_device_ctx = netdev_priv(ndev); |
1550 | ||
44144185 AB |
1551 | /* Ensure packet is big enough to read its fields */ |
1552 | if (msglen < sizeof(struct nvsp_message_header) + | |
1553 | sizeof(struct nvsp_4_send_vf_association)) { | |
1554 | netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen); | |
1555 | return; | |
1556 | } | |
1557 | ||
f9a7da91 VK |
1558 | net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; |
1559 | net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; | |
365e1ece GK |
1560 | |
1561 | if (net_device_ctx->vf_alloc) | |
1562 | complete(&net_device_ctx->vf_add); | |
1563 | ||
00d7ddba SH |
1564 | netdev_info(ndev, "VF slot %u %s\n", |
1565 | net_device_ctx->vf_serial, | |
1566 | net_device_ctx->vf_alloc ? "added" : "removed"); | |
71790a27 HZ |
1567 | } |
1568 | ||
71f21959 | 1569 | static void netvsc_receive_inband(struct net_device *ndev, |
171c1fd9 | 1570 | struct netvsc_device *nvscdev, |
44144185 | 1571 | const struct vmpacket_descriptor *desc) |
71790a27 | 1572 | { |
44144185 AB |
1573 | const struct nvsp_message *nvmsg = hv_pkt_data(desc); |
1574 | u32 msglen = hv_pkt_datalen(desc); | |
1575 | ||
1576 | /* Ensure packet is big enough to read header fields */ | |
1577 | if (msglen < sizeof(struct nvsp_message_header)) { | |
1578 | netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen); | |
1579 | return; | |
1580 | } | |
1581 | ||
71790a27 HZ |
1582 | switch (nvmsg->hdr.msg_type) { |
1583 | case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: | |
171c1fd9 | 1584 | netvsc_send_table(ndev, nvscdev, nvmsg, msglen); |
71790a27 HZ |
1585 | break; |
1586 | ||
1587 | case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: | |
96854bbd APM |
1588 | if (hv_is_isolation_supported()) |
1589 | netdev_err(ndev, "Ignore VF_ASSOCIATION msg from the host supporting isolation\n"); | |
1590 | else | |
1591 | netvsc_send_vf(ndev, nvmsg, msglen); | |
71790a27 HZ |
1592 | break; |
1593 | } | |
1594 | } | |
1595 | ||
/* netvsc_process_raw_pkt - dispatch one raw VMBus packet from the ring.
 *
 * Returns the number of received network packets processed.  Only the
 * VM_PKT_DATA_USING_XFER_PAGES path (netvsc_receive) produces a non-zero
 * count; send completions and inband control messages return 0 so they do
 * not consume NAPI budget in the caller (netvsc_poll).
 */
static int netvsc_process_raw_pkt(struct hv_device *device,
				  struct netvsc_channel *nvchan,
				  struct netvsc_device *net_device,
				  struct net_device *ndev,
				  const struct vmpacket_descriptor *desc,
				  int budget)
{
	struct vmbus_channel *channel = nvchan->channel;
	const struct nvsp_message *nvmsg = hv_pkt_data(desc);

	trace_nvsp_recv(ndev, channel, nvmsg);

	switch (desc->type) {
	case VM_PKT_COMP:
		netvsc_send_completion(ndev, net_device, channel, desc, budget);
		break;

	case VM_PKT_DATA_USING_XFER_PAGES:
		return netvsc_receive(ndev, net_device, nvchan, desc);

	case VM_PKT_DATA_INBAND:
		netvsc_receive_inband(ndev, net_device, desc);
		break;

	default:
		/* Unknown type: log and drop; the iterator still advances. */
		netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
			   desc->type, desc->trans_id);
		break;
	}

	return 0;
}
1628 | ||
1629 | static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel) | |
1630 | { | |
1631 | struct vmbus_channel *primary = channel->primary_channel; | |
1632 | ||
1633 | return primary ? primary->device_obj : channel->device_obj; | |
1634 | } | |
1635 | ||
/* Network processing softirq
 * Process data in incoming ring buffer from host
 * Stops when ring is empty or budget is met or exceeded.
 *
 * The descriptor cursor (nvchan->desc) persists across poll invocations:
 * because one descriptor can carry multiple packets, a poll may stop
 * mid-ring and resume from the same descriptor next time.
 */
int netvsc_poll(struct napi_struct *napi, int budget)
{
	struct netvsc_channel *nvchan
		= container_of(napi, struct netvsc_channel, napi);
	struct netvsc_device *net_device = nvchan->net_device;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_device *device = netvsc_channel_to_device(channel);
	struct net_device *ndev = hv_get_drvdata(device);
	int work_done = 0;
	int ret;

	/* If starting a new interval */
	if (!nvchan->desc)
		nvchan->desc = hv_pkt_iter_first(channel);

	nvchan->xdp_flush = false;

	while (nvchan->desc && work_done < budget) {
		work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
						    ndev, nvchan->desc, budget);
		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
	}

	/* Flush any XDP redirects batched by the receive path */
	if (nvchan->xdp_flush)
		xdp_do_flush();

	/* Send any pending receive completions */
	ret = send_recv_completions(ndev, net_device, nvchan);

	/* If it did not exhaust NAPI budget this time
	 * and not doing busy poll
	 * then re-enable host interrupts
	 * and reschedule if ring is not empty
	 * or sending receive completion failed.
	 *
	 * NOTE: the short-circuit order below is load-bearing — interrupts
	 * are re-enabled by hv_end_read() only after napi_complete_done()
	 * succeeds, and the reschedule closes the race with new data that
	 * arrived in between.
	 */
	if (work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    (ret || hv_end_read(&channel->inbound)) &&
	    napi_schedule_prep(napi)) {
		hv_begin_read(&channel->inbound);
		__napi_schedule(napi);
	}

	/* Driver may overshoot since multiple packets per descriptor */
	return min(work_done, budget);
}
1686 | ||
262b7f14 | 1687 | /* Call back when data is available in host ring buffer. |
1688 | * Processing is deferred until network softirq (NAPI) | |
1689 | */ | |
5b54dac8 | 1690 | void netvsc_channel_cb(void *context) |
fceaf24a | 1691 | { |
6de38af6 | 1692 | struct netvsc_channel *nvchan = context; |
43bf99ce | 1693 | struct vmbus_channel *channel = nvchan->channel; |
1694 | struct hv_ring_buffer_info *rbi = &channel->inbound; | |
1695 | ||
1696 | /* preload first vmpacket descriptor */ | |
1697 | prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index); | |
0b307ebd | 1698 | |
f4f1c23d | 1699 | if (napi_schedule_prep(&nvchan->napi)) { |
52d3b494 | 1700 | /* disable interrupts from host */ |
43bf99ce | 1701 | hv_begin_read(rbi); |
0d6dd357 | 1702 | |
68633eda | 1703 | __napi_schedule_irqoff(&nvchan->napi); |
f4f1c23d | 1704 | } |
fceaf24a | 1705 | } |
af24ce42 | 1706 | |
b637e023 HZ |
/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 *
 * Allocates and wires up the netvsc_device: initializes all channel
 * slots (stats, XDP rxq info), registers NAPI, opens the primary VMBus
 * channel, and negotiates with the NetVSP on the host.  On success the
 * nvdev pointer is published last, which is what unblocks the transmit
 * path.  Errors unwind through the close/cleanup/cleanup2 label chain
 * in reverse order of acquisition.
 */
struct netvsc_device *netvsc_device_add(struct hv_device *device,
				const struct netvsc_device_info *device_info)
{
	int i, ret = 0;
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	net_device = alloc_net_device();
	if (!net_device)
		return ERR_PTR(-ENOMEM);

	/* Reset the tx indirection table; the host re-sends it after connect */
	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		net_device_ctx->tx_table[i] = 0;

	/* Because the device uses NAPI, all the interrupt batching and
	 * control is done via Net softirq, not the channel handling
	 */
	set_channel_read_mode(device->channel, HV_CALL_ISR);

	/* If we're reopening the device we may have multiple queues, fill the
	 * chn_table with the default channel to use it before subchannels are
	 * opened.
	 * Initialize the channel state before we open;
	 * we can be interrupted as soon as we open the channel.
	 */

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->channel = device->channel;
		nvchan->net_device = net_device;
		u64_stats_init(&nvchan->tx_stats.syncp);
		u64_stats_init(&nvchan->rx_stats.syncp);

		ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);

		if (ret) {
			netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
			goto cleanup2;
		}

		ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);

		if (ret) {
			netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
			goto cleanup2;
		}
	}

	/* Enable NAPI handler before init callbacks */
	netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);

	/* Open the channel */
	device->channel->next_request_id_callback = vmbus_next_request_id;
	device->channel->request_addr_callback = vmbus_request_addr;
	device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
	device->channel->max_pkt_size = NETVSC_MAX_PKT_SIZE;

	ret = vmbus_open(device->channel, netvsc_ring_bytes,
			 netvsc_ring_bytes, NULL, 0,
			 netvsc_channel_cb, net_device->chan_table);

	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");

	napi_enable(&net_device->chan_table[0].napi);

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device, net_device, device_info);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
	 * populated.
	 */
	rcu_assign_pointer(net_device_ctx->nvdev, net_device);

	return net_device;

close:
	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
	napi_disable(&net_device->chan_table[0].napi);

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	netif_napi_del(&net_device->chan_table[0].napi);

cleanup2:
	free_netvsc_device(&net_device->rcu);

	return ERR_PTR(ret);
}