Commit | Line | Data |
---|---|---|
9952f691 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
fceaf24a | 2 | /* |
fceaf24a HJ |
3 | * Copyright (c) 2009, Microsoft Corporation. |
4 | * | |
fceaf24a | 5 | * Authors: |
d0e94d17 | 6 | * Haiyang Zhang <haiyangz@microsoft.com> |
fceaf24a | 7 | * Hank Janssen <hjanssen@microsoft.com> |
fceaf24a | 8 | */ |
eb335bc4 HJ |
9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
10 | ||
5654e932 | 11 | #include <linux/kernel.h> |
0c3b7b2f S |
12 | #include <linux/sched.h> |
13 | #include <linux/wait.h> | |
0ffa63b0 | 14 | #include <linux/mm.h> |
b4362c9c | 15 | #include <linux/delay.h> |
21a80820 | 16 | #include <linux/io.h> |
5a0e3ad6 | 17 | #include <linux/slab.h> |
d9871158 | 18 | #include <linux/netdevice.h> |
f157e78d | 19 | #include <linux/if_ether.h> |
d6472302 | 20 | #include <linux/vmalloc.h> |
9749fed5 | 21 | #include <linux/rtnetlink.h> |
43bf99ce | 22 | #include <linux/prefetch.h> |
1cb9d3b6 | 23 | #include <linux/filter.h> |
9749fed5 | 24 | |
c25aaf81 | 25 | #include <asm/sync_bitops.h> |
96854bbd | 26 | #include <asm/mshyperv.h> |
3f335ea2 | 27 | |
5ca7252a | 28 | #include "hyperv_net.h" |
ec966381 | 29 | #include "netvsc_trace.h" |
fceaf24a | 30 | |
84bf9cef KS |
/*
 * Switch the data path from the synthetic interface to the VF
 * interface.
 *
 * Builds an NVSP_MSG4_TYPE_SWITCH_DATA_PATH request in the shared
 * channel_init_pkt, sends it on the primary channel and waits for the
 * host's completion (signalled via channel_init_wait).  On -EAGAIN the
 * send is retried up to RETRY_MAX times with a usleep_range() backoff.
 *
 * Returns 0 on success or a negative errno if the message could not be
 * sent.  net_device_ctx->data_path_is_vf is only set to true after the
 * host has acknowledged the switch; on any failure it is left false so
 * traffic keeps flowing over the synthetic path.
 *
 * NOTE(review): uses rtnl_dereference(), so presumably always called
 * under RTNL -- confirm against callers.
 */
int netvsc_switch_datapath(struct net_device *ndev, bool vf)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nv_dev = rtnl_dereference(net_device_ctx->nvdev);
	struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
	int ret, retry = 0;

	/* Block sending traffic to VF if it's about to be gone */
	if (!vf)
		net_device_ctx->data_path_is_vf = vf;

	/* Build the switch-datapath request. */
	memset(init_pkt, 0, sizeof(struct nvsp_message));
	init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
	if (vf)
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_VF;
	else
		init_pkt->msg.v4_msg.active_dp.active_datapath =
			NVSP_DATAPATH_SYNTHETIC;

again:
	trace_nvsp_send(ndev, init_pkt);

	ret = vmbus_sendpacket(dev->channel, init_pkt,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_pkt, VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	/* If failed to switch to/from VF, let data_path_is_vf stay false,
	 * so we use synthetic path to send data.
	 */
	if (ret) {
		if (ret != -EAGAIN) {
			netdev_err(ndev,
				   "Unable to send sw datapath msg, err: %d\n",
				   ret);
			return ret;
		}

		/* Transient ring-full (-EAGAIN): back off and retry. */
		if (retry++ < RETRY_MAX) {
			usleep_range(RETRY_US_LO, RETRY_US_HI);
			goto again;
		} else {
			netdev_err(
				ndev,
				"Retry failed to send sw datapath msg, err: %d\n",
				ret);
			return ret;
		}
	}

	/* Wait for the host to acknowledge before flipping the flag. */
	wait_for_completion(&nv_dev->channel_init_wait);
	net_device_ctx->data_path_is_vf = vf;

	return 0;
}
92 | ||
/* Worker to setup sub channels on initial setup
 * Initial hotplug event occurs in softirq context
 * and can't wait for channels.
 *
 * Runs in process context where it is safe to take RTNL and sleep
 * while the subchannels are brought up.  On failure the device falls
 * back to operating with only the primary channel.
 */
static void netvsc_subchan_work(struct work_struct *w)
{
	struct netvsc_device *nvdev =
		container_of(w, struct netvsc_device, subchan_work);
	struct rndis_device *rdev;
	int i, ret;

	/* Avoid deadlock with device removal already under RTNL */
	if (!rtnl_trylock()) {
		/* RTNL busy: requeue ourselves and try again later. */
		schedule_work(w);
		return;
	}

	/* nvdev->extension may already be gone if removal raced us. */
	rdev = nvdev->extension;
	if (rdev) {
		ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
		if (ret == 0) {
			netif_device_attach(rdev->ndev);
		} else {
			/* fallback to only primary channel */
			for (i = 1; i < nvdev->num_chn; i++)
				netif_napi_del(&nvdev->chan_table[i].napi);

			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	rtnl_unlock();
}
127 | ||
88098834 | 128 | static struct netvsc_device *alloc_net_device(void) |
fceaf24a | 129 | { |
85799a37 | 130 | struct netvsc_device *net_device; |
fceaf24a | 131 | |
85799a37 HZ |
132 | net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL); |
133 | if (!net_device) | |
fceaf24a HJ |
134 | return NULL; |
135 | ||
dc5cd894 | 136 | init_waitqueue_head(&net_device->wait_drain); |
c38b9c71 | 137 | net_device->destroy = false; |
f6f13c12 | 138 | net_device->tx_disable = true; |
0da6edbd | 139 | |
7c3877f2 HZ |
140 | net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; |
141 | net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; | |
8b532797 | 142 | |
fd612602 | 143 | init_completion(&net_device->channel_init_wait); |
732e4985 | 144 | init_waitqueue_head(&net_device->subchan_open); |
3ffe64f1 | 145 | INIT_WORK(&net_device->subchan_work, netvsc_subchan_work); |
7c3877f2 | 146 | |
85799a37 | 147 | return net_device; |
fceaf24a HJ |
148 | } |
149 | ||
/* RCU callback that releases everything owned by a netvsc_device.
 * Scheduled by free_netvsc_device_rcu(), so it runs only after all
 * existing RCU readers of the device have finished.
 */
static void free_netvsc_device(struct rcu_head *head)
{
	struct netvsc_device *nvdev
		= container_of(head, struct netvsc_device, rcu);
	int i;

	kfree(nvdev->extension);

	/* Only free the recv/send buffers when the gpadl handle does not
	 * carry the 'decrypted' flag.  NOTE(review): presumably the flag
	 * means the pages are still shared/decrypted with the host (e.g.
	 * after a failed GPADL teardown in a confidential VM), in which
	 * case leaking them is safer than returning them to the
	 * allocator -- confirm against vmbus_teardown_gpadl().
	 */
	if (!nvdev->recv_buf_gpadl_handle.decrypted)
		vfree(nvdev->recv_buf);
	if (!nvdev->send_buf_gpadl_handle.decrypted)
		vfree(nvdev->send_buf);
	bitmap_free(nvdev->send_section_map);

	/* Per-channel cleanup: XDP rxq state, the per-channel receive
	 * staging buffer, and the receive-completion slot ring.
	 */
	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		xdp_rxq_info_unreg(&nvdev->chan_table[i].xdp_rxq);
		kfree(nvdev->chan_table[i].recv_buf);
		vfree(nvdev->chan_table[i].mrc.slots);
	}

	kfree(nvdev);
}
172 | ||
/* Defer destruction of @nvdev to free_netvsc_device() after an RCU
 * grace period, so concurrent RCU readers remain safe.
 */
static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
{
	call_rcu(&nvdev->rcu, free_netvsc_device);
}
fceaf24a | 177 | |
/* Revoke the host's right to use the receive buffer, if it was ever
 * granted (recv_section_cnt != 0).  Clears recv_section_cnt on success
 * so a repeated call becomes a no-op.  On send failure (other than a
 * rescinded channel) it returns early, deliberately leaking rather
 * than risking a host-side bugcheck.
 */
static void netvsc_revoke_recv_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/*
	 * If we got a section count, it means we received a
	 * SendReceiveBufferComplete msg (ie sent
	 * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->recv_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
		revoke_packet->msg.v1_msg.
		revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		/* No response expected, so no completion requested. */
		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       VMBUS_RQST_ID_NO_RESPONSE,
				       VM_PKT_DATA_INBAND, 0);
		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;
		/*
		 * If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke receive buffer to netvsp\n");
			return;
		}
		net_device->recv_section_cnt = 0;
	}
}
227 | ||
/* Revoke the host's right to use the send buffer, if it was ever
 * granted (send_section_cnt != 0).  Mirror image of
 * netvsc_revoke_recv_buf(): clears send_section_cnt on success, and on
 * send failure (other than a rescinded channel) returns early,
 * deliberately leaking rather than risking a host-side bugcheck.
 */
static void netvsc_revoke_send_buf(struct hv_device *device,
				   struct netvsc_device *net_device,
				   struct net_device *ndev)
{
	struct nvsp_message *revoke_packet;
	int ret;

	/* Deal with the send buffer we may have setup.
	 * If we got a send section size, it means we received a
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
	 * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
	 * to send a revoke msg here
	 */
	if (net_device->send_section_cnt) {
		/* Send the revoke receive buffer */
		revoke_packet = &net_device->revoke_packet;
		memset(revoke_packet, 0, sizeof(struct nvsp_message));

		revoke_packet->hdr.msg_type =
			NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
		revoke_packet->msg.v1_msg.revoke_send_buf.id =
			NETVSC_SEND_BUFFER_ID;

		trace_nvsp_send(ndev, revoke_packet);

		/* No response expected, so no completion requested. */
		ret = vmbus_sendpacket(device->channel,
				       revoke_packet,
				       sizeof(struct nvsp_message),
				       VMBUS_RQST_ID_NO_RESPONSE,
				       VM_PKT_DATA_INBAND, 0);

		/* If the failure is because the channel is rescinded;
		 * ignore the failure since we cannot send on a rescinded
		 * channel. This would allow us to properly cleanup
		 * even when the channel is rescinded.
		 */
		if (device->channel->rescind)
			ret = 0;

		/* If we failed here, we might as well return and
		 * have a leak rather than continue and a bugchk
		 */
		if (ret != 0) {
			netdev_err(ndev, "unable to send "
				   "revoke send buffer to netvsp\n");
			return;
		}
		net_device->send_section_cnt = 0;
	}
}
278 | ||
7992894c | 279 | static void netvsc_teardown_recv_gpadl(struct hv_device *device, |
3f076eff MG |
280 | struct netvsc_device *net_device, |
281 | struct net_device *ndev) | |
0cf73780 | 282 | { |
0cf73780 VK |
283 | int ret; |
284 | ||
d4dccf35 | 285 | if (net_device->recv_buf_gpadl_handle.gpadl_handle) { |
0cf73780 | 286 | ret = vmbus_teardown_gpadl(device->channel, |
d4dccf35 | 287 | &net_device->recv_buf_gpadl_handle); |
0cf73780 VK |
288 | |
289 | /* If we failed here, we might as well return and have a leak | |
290 | * rather than continue and a bugchk | |
291 | */ | |
292 | if (ret != 0) { | |
293 | netdev_err(ndev, | |
294 | "unable to teardown receive buffer's gpadl\n"); | |
295 | return; | |
296 | } | |
0cf73780 | 297 | } |
7992894c MG |
298 | } |
299 | ||
300 | static void netvsc_teardown_send_gpadl(struct hv_device *device, | |
3f076eff MG |
301 | struct netvsc_device *net_device, |
302 | struct net_device *ndev) | |
7992894c | 303 | { |
7992894c | 304 | int ret; |
0cf73780 | 305 | |
d4dccf35 | 306 | if (net_device->send_buf_gpadl_handle.gpadl_handle) { |
3d541ac5 | 307 | ret = vmbus_teardown_gpadl(device->channel, |
d4dccf35 | 308 | &net_device->send_buf_gpadl_handle); |
c25aaf81 KS |
309 | |
310 | /* If we failed here, we might as well return and have a leak | |
311 | * rather than continue and a bugchk | |
312 | */ | |
313 | if (ret != 0) { | |
314 | netdev_err(ndev, | |
315 | "unable to teardown send buffer's gpadl\n"); | |
7a2a0a84 | 316 | return; |
c25aaf81 | 317 | } |
c25aaf81 | 318 | } |
ec91cd09 HZ |
319 | } |
320 | ||
7426b1a5 | 321 | int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) |
322 | { | |
323 | struct netvsc_channel *nvchan = &net_device->chan_table[q_idx]; | |
324 | int node = cpu_to_node(nvchan->channel->target_cpu); | |
325 | size_t size; | |
326 | ||
327 | size = net_device->recv_completion_cnt * sizeof(struct recv_comp_data); | |
328 | nvchan->mrc.slots = vzalloc_node(size, node); | |
329 | if (!nvchan->mrc.slots) | |
330 | nvchan->mrc.slots = vzalloc(size); | |
331 | ||
332 | return nvchan->mrc.slots ? 0 : -ENOMEM; | |
333 | } | |
334 | ||
/* Allocate and hand the receive and send buffers to the host.
 *
 * For each buffer: vzalloc() guest memory, establish a GPADL so the
 * host can access it, send the NVSP "send buffer" message, wait for
 * the host's completion, and validate the response (section sizes and
 * counts) before deriving the per-section bookkeeping.  Also allocates
 * the per-channel receive staging buffers, the receive-completion
 * ring, and the send-section bitmap.
 *
 * On any failure, jumps to 'cleanup' which revokes and tears down
 * whatever was set up so far (the revoke/teardown helpers are no-ops
 * for stages not yet reached).  Returns 0 on success, negative errno
 * on failure.
 */
static int netvsc_init_buf(struct hv_device *device,
			   struct netvsc_device *net_device,
			   const struct netvsc_device_info *device_info)
{
	struct nvsp_1_message_send_receive_buffer_complete *resp;
	struct net_device *ndev = hv_get_drvdata(device);
	struct nvsp_message *init_packet;
	unsigned int buf_size;
	int i, ret = 0;

	/* Get receive buffer area. */
	buf_size = device_info->recv_sections * device_info->recv_section_size;
	buf_size = roundup(buf_size, PAGE_SIZE);

	/* Legacy hosts only allow smaller receive buffer */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		buf_size = min_t(unsigned int, buf_size,
				 NETVSC_RECEIVE_BUFFER_SIZE_LEGACY);

	net_device->recv_buf = vzalloc(buf_size);
	if (!net_device->recv_buf) {
		netdev_err(ndev,
			   "unable to allocate receive buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}

	net_device->recv_buf_size = buf_size;

	/*
	 * Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
				    buf_size,
				    &net_device->recv_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish receive buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
	init_packet->msg.v1_msg.send_recv_buf.
		gpadl_handle = net_device->recv_buf_gpadl_handle.gpadl_handle;
	init_packet->msg.v1_msg.
		send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send receive buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	/* Host writes its reply into channel_init_pkt and signals this. */
	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	resp = &init_packet->msg.v1_msg.send_recv_buf_complete;
	if (resp->status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev,
			   "Unable to complete receive buffer initialization with NetVsp - status %d\n",
			   resp->status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	netdev_dbg(ndev, "Receive sections: %u sub_allocs: size %u count: %u\n",
		   resp->num_sections, resp->sections[0].sub_alloc_size,
		   resp->sections[0].num_sub_allocs);

	/* There should only be one section for the entire receive buffer */
	if (resp->num_sections != 1 || resp->sections[0].offset != 0) {
		ret = -EINVAL;
		goto cleanup;
	}

	net_device->recv_section_size = resp->sections[0].sub_alloc_size;
	net_device->recv_section_cnt = resp->sections[0].num_sub_allocs;

	/* Ensure buffer will not overflow: the host-supplied size/count
	 * are untrusted, so validate against what we actually allocated
	 * (64-bit math to avoid overflow in the product).
	 */
	if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size *
	    (u64)net_device->recv_section_cnt > (u64)buf_size) {
		netdev_err(ndev, "invalid recv_section_size %u\n",
			   net_device->recv_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	/* One staging buffer per potential channel, sized to a section. */
	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->recv_buf = kzalloc(net_device->recv_section_size, GFP_KERNEL);
		if (nvchan->recv_buf == NULL) {
			ret = -ENOMEM;
			goto cleanup;
		}
	}

	/* Setup receive completion ring.
	 * Add 1 to the recv_section_cnt because at least one entry in a
	 * ring buffer has to be empty.
	 */
	net_device->recv_completion_cnt = net_device->recv_section_cnt + 1;
	ret = netvsc_alloc_recv_comp_ring(net_device, 0);
	if (ret)
		goto cleanup;

	/* Now setup the send buffer. */
	buf_size = device_info->send_sections * device_info->send_section_size;
	buf_size = round_up(buf_size, PAGE_SIZE);

	net_device->send_buf = vzalloc(buf_size);
	if (!net_device->send_buf) {
		netdev_err(ndev, "unable to allocate send buffer of size %u\n",
			   buf_size);
		ret = -ENOMEM;
		goto cleanup;
	}
	net_device->send_buf_size = buf_size;

	/* Establish the gpadl handle for this buffer on this
	 * channel. Note: This call uses the vmbus connection rather
	 * than the channel to establish the gpadl handle.
	 */
	ret = vmbus_establish_gpadl(device->channel, net_device->send_buf,
				    buf_size,
				    &net_device->send_buf_gpadl_handle);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to establish send buffer's gpadl\n");
		goto cleanup;
	}

	/* Notify the NetVsp of the gpadl handle */
	init_packet = &net_device->channel_init_pkt;
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
	init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
		net_device->send_buf_gpadl_handle.gpadl_handle;
	init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;

	trace_nvsp_send(ndev, init_packet);

	/* Send the gpadl notification request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to send send buffer's gpadl to netvsp\n");
		goto cleanup;
	}

	wait_for_completion(&net_device->channel_init_wait);

	/* Check the response */
	if (init_packet->msg.v1_msg.
	    send_send_buf_complete.status != NVSP_STAT_SUCCESS) {
		netdev_err(ndev, "Unable to complete send buffer "
			   "initialization with NetVsp - status %d\n",
			   init_packet->msg.v1_msg.
			   send_send_buf_complete.status);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Parse the response */
	net_device->send_section_size = init_packet->msg.
		v1_msg.send_send_buf_complete.section_size;
	/* Host-supplied value: reject sizes too small to hold a frame. */
	if (net_device->send_section_size < NETVSC_MTU_MIN) {
		netdev_err(ndev, "invalid send_section_size %u\n",
			   net_device->send_section_size);
		ret = -EINVAL;
		goto cleanup;
	}

	/* Section count is simply the size divided by the section size. */
	net_device->send_section_cnt = buf_size / net_device->send_section_size;

	netdev_dbg(ndev, "Send section size: %d, Section count:%d\n",
		   net_device->send_section_size, net_device->send_section_cnt);

	/* Setup state for managing the send buffer. */
	net_device->send_section_map = bitmap_zalloc(net_device->send_section_cnt,
						     GFP_KERNEL);
	if (!net_device->send_section_map) {
		ret = -ENOMEM;
		goto cleanup;
	}

	goto exit;

cleanup:
	/* Helpers below are no-ops for stages that were never reached. */
	netvsc_revoke_recv_buf(device, net_device, ndev);
	netvsc_revoke_send_buf(device, net_device, ndev);
	netvsc_teardown_recv_gpadl(device, net_device, ndev);
	netvsc_teardown_send_gpadl(device, net_device, ndev);

exit:
	return ret;
}
552 | ||
f157e78d HZ |
/* Negotiate NVSP protocol version
 *
 * Offers exactly @nvsp_ver (min == max) to the host via an INIT
 * message and waits for the reply in @init_packet.  For NVSPv2 and
 * later, also sends the NDIS config message advertising MTU and
 * capabilities (802.1q always; SR-IOV and teaming from v5 unless the
 * platform is an isolated/confidential VM; RSC from v6.1).
 *
 * Returns 0 if the host accepted the version, -EINVAL if it refused,
 * or the vmbus_sendpacket() error.
 */
static int negotiate_nvsp_ver(struct hv_device *device,
			      struct netvsc_device *net_device,
			      struct nvsp_message *init_packet,
			      u32 nvsp_ver)
{
	struct net_device *ndev = hv_get_drvdata(device);
	int ret;

	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	/* min == max: offer exactly one version per attempt. */
	init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
	init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		return ret;

	/* Host's reply lands in *init_packet before this completes. */
	wait_for_completion(&net_device->channel_init_wait);

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS)
		return -EINVAL;

	if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
		return 0;

	/* NVSPv2 or later: Send NDIS config */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
	init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
	init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5) {
		if (hv_is_isolation_supported())
			netdev_info(ndev, "SR-IOV not advertised by guests on the host supporting isolation\n");
		else
			init_packet->msg.v2_msg.send_ndis_config.capability.sriov = 1;

		/* Teaming bit is needed to receive link speed updates */
		init_packet->msg.v2_msg.send_ndis_config.capability.teaming = 1;
	}

	if (nvsp_ver >= NVSP_PROTOCOL_VERSION_61)
		init_packet->msg.v2_msg.send_ndis_config.capability.rsc = 1;

	trace_nvsp_send(ndev, init_packet);

	/* Fire-and-forget: no response expected for the NDIS config. */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       VMBUS_RQST_ID_NO_RESPONSE,
			       VM_PKT_DATA_INBAND, 0);

	return ret;
}
615 | ||
/* Connect to the host's NetVSP: negotiate the highest mutually
 * supported NVSP version (trying newest first), send the NDIS version
 * message, then set up the receive/send buffers via netvsc_init_buf().
 *
 * Returns 0 on success; -EPROTO if no version could be negotiated or
 * the negotiated version is too old for an isolated (confidential) VM;
 * otherwise the error from the send or buffer setup.
 */
static int netvsc_connect_vsp(struct hv_device *device,
			      struct netvsc_device *net_device,
			      const struct netvsc_device_info *device_info)
{
	struct net_device *ndev = hv_get_drvdata(device);
	static const u32 ver_list[] = {
		NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
		NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5,
		NVSP_PROTOCOL_VERSION_6, NVSP_PROTOCOL_VERSION_61
	};
	struct nvsp_message *init_packet;
	int ndis_version, i, ret;

	init_packet = &net_device->channel_init_pkt;

	/* Negotiate the latest NVSP protocol supported */
	for (i = ARRAY_SIZE(ver_list) - 1; i >= 0; i--)
		if (negotiate_nvsp_ver(device, net_device, init_packet,
				       ver_list[i]) == 0) {
			net_device->nvsp_version = ver_list[i];
			break;
		}

	/* Loop fell through without a match: host rejected every version. */
	if (i < 0) {
		ret = -EPROTO;
		goto cleanup;
	}

	/* Isolated VMs require at least NVSP 6.1. */
	if (hv_is_isolation_supported() && net_device->nvsp_version < NVSP_PROTOCOL_VERSION_61) {
		netdev_err(ndev, "Invalid NVSP version 0x%x (expected >= 0x%x) from the host supporting isolation\n",
			   net_device->nvsp_version, NVSP_PROTOCOL_VERSION_61);
		ret = -EPROTO;
		goto cleanup;
	}

	pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);

	/* Send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	/* NDIS version advertised depends on the NVSP version in use. */
	if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
		ndis_version = 0x00060001;
	else
		ndis_version = 0x0006001e;

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
		(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
		ndis_version & 0xFFFF;

	trace_nvsp_send(ndev, init_packet);

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       VMBUS_RQST_ID_NO_RESPONSE,
			       VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;


	ret = netvsc_init_buf(device, net_device, device_info);

cleanup:
	return ret;
}
685 | ||
/*
 * netvsc_device_remove - Callback when the root bus device is removed
 *
 * Teardown order matters: revoke the host's buffer access first, then
 * detach the netvsc_device pointer, quiesce NAPI, close the channel,
 * and finally tear down GPADLs (before or after vmbus_close()
 * depending on host generation) and free the device via RCU.
 */
void netvsc_device_remove(struct hv_device *device)
{
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rtnl_dereference(net_device_ctx->nvdev);
	int i;

	/*
	 * Revoke receive buffer. If host is pre-Win2016 then tear down
	 * receive buffer GPADL. Do the same for send buffer.
	 */
	netvsc_revoke_recv_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_recv_gpadl(device, net_device, ndev);

	netvsc_revoke_send_buf(device, net_device, ndev);
	if (vmbus_proto_version < VERSION_WIN10)
		netvsc_teardown_send_gpadl(device, net_device, ndev);

	/* New lookups now fail; existing RCU readers may still run. */
	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

	/* Disable NAPI and disassociate its context from the device. */
	for (i = 0; i < net_device->num_chn; i++) {
		/* See also vmbus_reset_channel_cb(). */
		/* only disable enabled NAPI channel */
		if (i < ndev->real_num_rx_queues)
			napi_disable(&net_device->chan_table[i].napi);

		netif_napi_del(&net_device->chan_table[i].napi);
	}

	/*
	 * At this point, no one should be accessing net_device
	 * except in here
	 */
	netdev_dbg(ndev, "net device safe to remove\n");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/*
	 * If host is Win2016 or higher then we do the GPADL tear down
	 * here after VMBus is closed.
	 */
	if (vmbus_proto_version >= VERSION_WIN10) {
		netvsc_teardown_recv_gpadl(device, net_device, ndev);
		netvsc_teardown_send_gpadl(device, net_device, ndev);
	}

	/* Release all resources */
	free_netvsc_device_rcu(net_device);
}
742 | ||
33be96e4 HZ |
743 | #define RING_AVAIL_PERCENT_HIWATER 20 |
744 | #define RING_AVAIL_PERCENT_LOWATER 10 | |
745 | ||
/* Release send-buffer section @index by atomically toggling its bit in
 * the section bitmap.  NOTE(review): assumes the bit is currently set
 * (i.e. the section was allocated) -- confirm against the allocation
 * path that sets it.
 */
static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
					 u32 index)
{
	sync_change_bit(index, net_device->send_section_map);
}
751 | ||
/* Handle a transmit-completion packet from the host.
 *
 * Looks up the request (the skb pointer) from the packet's transaction
 * id, frees the send-buffer section if one was used, accounts the
 * packet/byte totals into per-queue stats, unmaps DMA and releases the
 * skb.  Then, depending on device state, either wakes a drain waiter
 * (during teardown) or restarts the stopped tx queue once the ring has
 * enough room again.
 */
static void netvsc_send_tx_complete(struct net_device *ndev,
				    struct netvsc_device *net_device,
				    struct vmbus_channel *channel,
				    const struct vmpacket_descriptor *desc,
				    int budget)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct sk_buff *skb;
	u16 q_idx = 0;
	int queue_sends;
	u64 cmd_rqst;

	/* Map the host's transaction id back to our request cookie. */
	cmd_rqst = channel->request_addr_callback(channel, desc->trans_id);
	if (cmd_rqst == VMBUS_RQST_ERROR) {
		netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
		return;
	}

	skb = (struct sk_buff *)(unsigned long)cmd_rqst;

	/* Notify the layer above us */
	if (likely(skb)) {
		/* Per-packet metadata was stashed in skb->cb on send. */
		struct hv_netvsc_packet *packet
			= (struct hv_netvsc_packet *)skb->cb;
		u32 send_index = packet->send_buf_index;
		struct netvsc_stats_tx *tx_stats;

		if (send_index != NETVSC_INVALID_INDEX)
			netvsc_free_send_slot(net_device, send_index);
		q_idx = packet->q_idx;

		tx_stats = &net_device->chan_table[q_idx].tx_stats;

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets += packet->total_packets;
		tx_stats->bytes += packet->total_bytes;
		u64_stats_update_end(&tx_stats->syncp);

		netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
		napi_consume_skb(skb, budget);
	}

	queue_sends =
		atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);

	if (unlikely(net_device->destroy)) {
		/* Teardown in progress: last completion wakes the drainer. */
		if (queue_sends == 0)
			wake_up(&net_device->wait_drain);
	} else {
		struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);

		/* Restart a stopped queue once the ring has room above
		 * the high-water mark (or no sends remain in flight).
		 */
		if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
		    (hv_get_avail_to_write_percent(&channel->outbound) >
		     RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
			netif_tx_wake_queue(txq);
			ndev_ctx->eth_stats.wake_queue++;
		}
	}
}
811 | ||
/* Dispatch a send-completion packet received from the host.
 *
 * Zero-length completions are matched back to the originating request
 * via the channel's request-address callback (currently only the
 * SWITCH_DATA_PATH request is expected this way).  For completions with
 * a payload, the message length is validated against the specific NVSP
 * message type before use; RNDIS packet completions are handed to
 * netvsc_send_tx_complete(), while init/buffer/subchannel completions
 * are copied into channel_init_pkt and the init waiter is woken.
 */
static void netvsc_send_completion(struct net_device *ndev,
				   struct netvsc_device *net_device,
				   struct vmbus_channel *incoming_channel,
				   const struct vmpacket_descriptor *desc,
				   int budget)
{
	const struct nvsp_message *nvsp_packet;
	u32 msglen = hv_pkt_datalen(desc);
	struct nvsp_message *pkt_rqst;
	u64 cmd_rqst;
	u32 status;

	/* First check if this is a VMBUS completion without data payload */
	if (!msglen) {
		cmd_rqst = incoming_channel->request_addr_callback(incoming_channel,
								   desc->trans_id);
		if (cmd_rqst == VMBUS_RQST_ERROR) {
			netdev_err(ndev, "Invalid transaction ID %llx\n", desc->trans_id);
			return;
		}

		pkt_rqst = (struct nvsp_message *)(uintptr_t)cmd_rqst;
		switch (pkt_rqst->hdr.msg_type) {
		case NVSP_MSG4_TYPE_SWITCH_DATA_PATH:
			complete(&net_device->channel_init_wait);
			break;

		default:
			netdev_err(ndev, "Unexpected VMBUS completion!!\n");
		}
		return;
	}

	/* Ensure packet is big enough to read header fields */
	if (msglen < sizeof(struct nvsp_message_header)) {
		netdev_err(ndev, "nvsp_message length too small: %u\n", msglen);
		return;
	}

	nvsp_packet = hv_pkt_data(desc);
	switch (nvsp_packet->hdr.msg_type) {
	case NVSP_MSG_TYPE_INIT_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_message_init_complete)) {
			netdev_err(ndev, "nvsp_msg length too small: %u\n",
				   msglen);
			return;
		}
		break;

	case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_1_message_send_receive_buffer_complete)) {
			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
				   msglen);
			return;
		}
		break;

	case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_1_message_send_send_buffer_complete)) {
			netdev_err(ndev, "nvsp_msg1 length too small: %u\n",
				   msglen);
			return;
		}
		break;

	case NVSP_MSG5_TYPE_SUBCHANNEL:
		if (msglen < sizeof(struct nvsp_message_header) +
				sizeof(struct nvsp_5_subchannel_complete)) {
			netdev_err(ndev, "nvsp_msg5 length too small: %u\n",
				   msglen);
			return;
		}
		break;

	case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
		if (msglen < sizeof(struct nvsp_message_header) +
		    sizeof(struct nvsp_1_message_send_rndis_packet_complete)) {
			if (net_ratelimit())
				netdev_err(ndev, "nvsp_rndis_pkt_complete length too small: %u\n",
					   msglen);
			return;
		}

		/* If status indicates an error, output a message so we know
		 * there's a problem. But process the completion anyway so the
		 * resources are released.
		 */
		status = nvsp_packet->msg.v1_msg.send_rndis_pkt_complete.status;
		if (status != NVSP_STAT_SUCCESS && net_ratelimit())
			netdev_err(ndev, "nvsp_rndis_pkt_complete error status: %x\n",
				   status);

		netvsc_send_tx_complete(ndev, net_device, incoming_channel,
					desc, budget);
		return;

	default:
		netdev_err(ndev,
			   "Unknown send completion type %d received!!\n",
			   nvsp_packet->hdr.msg_type);
		return;
	}

	/* Copy the response back */
	memcpy(&net_device->channel_init_pkt, nvsp_packet,
	       sizeof(struct nvsp_message));
	complete(&net_device->channel_init_wait);
}
923 | ||
c25aaf81 KS |
924 | static u32 netvsc_get_next_send_section(struct netvsc_device *net_device) |
925 | { | |
b58a1858 | 926 | unsigned long *map_addr = net_device->send_section_map; |
927 | unsigned int i; | |
928 | ||
fdfb70d2 | 929 | for_each_clear_bit(i, map_addr, net_device->send_section_cnt) { |
b58a1858 | 930 | if (sync_test_and_set_bit(i, map_addr) == 0) |
931 | return i; | |
c25aaf81 | 932 | } |
b58a1858 | 933 | |
934 | return NETVSC_INVALID_INDEX; | |
c25aaf81 KS |
935 | } |
936 | ||
26a11262 SH |
937 | static void netvsc_copy_to_send_buf(struct netvsc_device *net_device, |
938 | unsigned int section_index, | |
939 | u32 pend_size, | |
940 | struct hv_netvsc_packet *packet, | |
941 | struct rndis_message *rndis_msg, | |
942 | struct hv_page_buffer *pb, | |
cfd8afd9 | 943 | bool xmit_more) |
c25aaf81 KS |
944 | { |
945 | char *start = net_device->send_buf; | |
7c3877f2 HZ |
946 | char *dest = start + (section_index * net_device->send_section_size) |
947 | + pend_size; | |
c25aaf81 | 948 | int i; |
7c3877f2 | 949 | u32 padding = 0; |
aa0a34be HZ |
950 | u32 page_count = packet->cp_partial ? packet->rmsg_pgcnt : |
951 | packet->page_buf_cnt; | |
b85e06f7 | 952 | u32 remain; |
7c3877f2 HZ |
953 | |
954 | /* Add padding */ | |
b85e06f7 | 955 | remain = packet->total_data_buflen & (net_device->pkt_align - 1); |
cfd8afd9 | 956 | if (xmit_more && remain) { |
7c3877f2 | 957 | padding = net_device->pkt_align - remain; |
24476760 | 958 | rndis_msg->msg_len += padding; |
7c3877f2 HZ |
959 | packet->total_data_buflen += padding; |
960 | } | |
c25aaf81 | 961 | |
aa0a34be | 962 | for (i = 0; i < page_count; i++) { |
11d8620e | 963 | char *src = phys_to_virt(pb[i].pfn << HV_HYP_PAGE_SHIFT); |
02b6de01 | 964 | u32 offset = pb[i].offset; |
965 | u32 len = pb[i].len; | |
c25aaf81 KS |
966 | |
967 | memcpy(dest, (src + offset), len); | |
c25aaf81 KS |
968 | dest += len; |
969 | } | |
7c3877f2 | 970 | |
26a11262 | 971 | if (padding) |
7c3877f2 | 972 | memset(dest, 0, padding); |
c25aaf81 KS |
973 | } |
974 | ||
846da38d TL |
975 | void netvsc_dma_unmap(struct hv_device *hv_dev, |
976 | struct hv_netvsc_packet *packet) | |
977 | { | |
846da38d TL |
978 | int i; |
979 | ||
980 | if (!hv_is_isolation_supported()) | |
981 | return; | |
982 | ||
983 | if (!packet->dma_range) | |
984 | return; | |
985 | ||
99f1c460 | 986 | for (i = 0; i < packet->page_buf_cnt; i++) |
846da38d TL |
987 | dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma, |
988 | packet->dma_range[i].mapping_size, | |
989 | DMA_TO_DEVICE); | |
990 | ||
991 | kfree(packet->dma_range); | |
992 | } | |
993 | ||
994 | /* netvsc_dma_map - Map swiotlb bounce buffer with data page of | |
995 | * packet sent by vmbus_sendpacket_pagebuffer() in the Isolation | |
996 | * VM. | |
997 | * | |
998 | * In isolation VM, netvsc send buffer has been marked visible to | |
999 | * host and so the data copied to send buffer doesn't need to use | |
1000 | * bounce buffer. The data pages handled by vmbus_sendpacket_pagebuffer() | |
1001 | * may not be copied to send buffer and so these pages need to be | |
1002 | * mapped with swiotlb bounce buffer. netvsc_dma_map() is to do | |
1003 | * that. The pfns in the struct hv_page_buffer need to be converted | |
1004 | * to bounce buffer's pfn. The loop here is necessary because the | |
1005 | * entries in the page buffer array are not necessarily full | |
1006 | * pages of data. Each entry in the array has a separate offset and | |
1007 | * len that may be non-zero, even for entries in the middle of the | |
1008 | * array. And the entries are not physically contiguous. So each | |
1009 | * entry must be individually mapped rather than as a contiguous unit. | |
1010 | * So not use dma_map_sg() here. | |
1011 | */ | |
1012 | static int netvsc_dma_map(struct hv_device *hv_dev, | |
1013 | struct hv_netvsc_packet *packet, | |
1014 | struct hv_page_buffer *pb) | |
1015 | { | |
99f1c460 | 1016 | u32 page_count = packet->page_buf_cnt; |
846da38d TL |
1017 | dma_addr_t dma; |
1018 | int i; | |
1019 | ||
1020 | if (!hv_is_isolation_supported()) | |
1021 | return 0; | |
1022 | ||
1023 | packet->dma_range = kcalloc(page_count, | |
1024 | sizeof(*packet->dma_range), | |
c6aa9d3b | 1025 | GFP_ATOMIC); |
846da38d TL |
1026 | if (!packet->dma_range) |
1027 | return -ENOMEM; | |
1028 | ||
1029 | for (i = 0; i < page_count; i++) { | |
1030 | char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT) | |
1031 | + pb[i].offset); | |
1032 | u32 len = pb[i].len; | |
1033 | ||
1034 | dma = dma_map_single(&hv_dev->device, src, len, | |
1035 | DMA_TO_DEVICE); | |
1036 | if (dma_mapping_error(&hv_dev->device, dma)) { | |
1037 | kfree(packet->dma_range); | |
1038 | return -ENOMEM; | |
1039 | } | |
1040 | ||
1041 | /* pb[].offset and pb[].len are not changed during dma mapping | |
1042 | * and so not reassign. | |
1043 | */ | |
1044 | packet->dma_range[i].dma = dma; | |
1045 | packet->dma_range[i].mapping_size = len; | |
1046 | pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT; | |
1047 | } | |
1048 | ||
1049 | return 0; | |
1050 | } | |
1051 | ||
/* Build one NVSP SEND_RNDIS_PKT message and hand it to VMBus.
 *
 * @skb is NULL for control packets (RMC_CONTROL) and non-NULL for data
 * (RMC_DATA); its pointer doubles as the VMBus request ID so the
 * completion path can recover it.  Packets with page fragments are DMA
 * mapped (isolation VMs) and sent via the page-buffer variant; others
 * go inband.  Flow control: the ring availability is sampled *before*
 * sending, and the netdev queue is stopped when it drops below the low
 * watermark, or on -EAGAIN (ring full / mapping failure).
 */
static inline int netvsc_send_pkt(
	struct hv_device *device,
	struct hv_netvsc_packet *packet,
	struct netvsc_device *net_device,
	struct hv_page_buffer *pb,
	struct sk_buff *skb)
{
	struct nvsp_message nvmsg;
	struct nvsp_1_message_send_rndis_packet *rpkt =
		&nvmsg.msg.v1_msg.send_rndis_pkt;
	struct netvsc_channel * const nvchan =
		&net_device->chan_table[packet->q_idx];
	struct vmbus_channel *out_channel = nvchan->channel;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
	u64 req_id;
	int ret;
	u32 ring_avail = hv_get_avail_to_write_percent(&out_channel->outbound);

	memset(&nvmsg, 0, sizeof(struct nvsp_message));
	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
	if (skb)
		rpkt->channel_type = 0;		/* 0 is RMC_DATA */
	else
		rpkt->channel_type = 1;		/* 1 is RMC_CONTROL */

	rpkt->send_buf_section_index = packet->send_buf_index;
	if (packet->send_buf_index == NETVSC_INVALID_INDEX)
		rpkt->send_buf_section_size = 0;
	else
		rpkt->send_buf_section_size = packet->total_data_buflen;

	/* The skb pointer is the request ID; the completion handler casts
	 * it back (NULL identifies a control packet there).
	 */
	req_id = (ulong)skb;

	if (out_channel->rescind)
		return -ENODEV;

	trace_nvsp_send_pkt(ndev, out_channel, rpkt);

	packet->dma_range = NULL;
	if (packet->page_buf_cnt) {
		/* For partial copies the RNDIS descriptor fragments were
		 * already copied to the send buffer; skip past them.
		 */
		if (packet->cp_partial)
			pb += packet->rmsg_pgcnt;

		ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
		if (ret) {
			ret = -EAGAIN;
			goto exit;
		}

		ret = vmbus_sendpacket_pagebuffer(out_channel,
						  pb, packet->page_buf_cnt,
						  &nvmsg, sizeof(nvmsg),
						  req_id);

		if (ret)
			netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
	} else {
		ret = vmbus_sendpacket(out_channel,
				       &nvmsg, sizeof(nvmsg),
				       req_id, VM_PKT_DATA_INBAND,
				       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}

exit:
	if (ret == 0) {
		atomic_inc_return(&nvchan->queue_sends);

		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
			netif_tx_stop_queue(txq);
			ndev_ctx->eth_stats.stop_queue++;
		}
	} else if (ret == -EAGAIN) {
		netif_tx_stop_queue(txq);
		ndev_ctx->eth_stats.stop_queue++;
	} else {
		netdev_err(ndev,
			   "Unable to send packet pages %u len %u, ret %d\n",
			   packet->page_buf_cnt, packet->total_data_buflen,
			   ret);
	}

	/* If we stopped the queue but nothing is in flight, no completion
	 * will ever wake it; wake it here and report -ENOSPC instead of
	 * -EAGAIN so the caller does not requeue forever.
	 */
	if (netif_tx_queue_stopped(txq) &&
	    atomic_read(&nvchan->queue_sends) < 1 &&
	    !net_device->tx_disable) {
		netif_tx_wake_queue(txq);
		ndev_ctx->eth_stats.wake_queue++;
		if (ret == -EAGAIN)
			ret = -ENOSPC;
	}

	return ret;
}
1146 | ||
c85e4924 HZ |
1147 | /* Move packet out of multi send data (msd), and clear msd */ |
1148 | static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send, | |
1149 | struct sk_buff **msd_skb, | |
1150 | struct multi_send_data *msdp) | |
1151 | { | |
1152 | *msd_skb = msdp->skb; | |
1153 | *msd_send = msdp->pkt; | |
1154 | msdp->skb = NULL; | |
1155 | msdp->pkt = NULL; | |
1156 | msdp->count = 0; | |
1157 | } | |
1158 | ||
/* RCU already held by caller */
/* Batching/bouncing logic is designed to attempt to optimize
 * performance.
 *
 * For small, non-LSO packets we copy the packet to a send buffer
 * which is pre-registered with the Hyper-V side. This enables the
 * hypervisor to avoid remapping the aperture to access the packet
 * descriptor and data.
 *
 * If we already started using a buffer and the netdev is transmitting
 * a burst of packets, keep on copying into the buffer until it is
 * full or we are done collecting a burst. If there is an existing
 * buffer with space for the RNDIS descriptor but not the packet, copy
 * the RNDIS descriptor to the buffer, keeping the packet in place.
 *
 * If we do batching and send more than one packet using a single
 * NetVSC message, free the SKBs of the packets copied, except for the
 * last packet. This is done to streamline the handling of the case
 * where the last packet only had the RNDIS descriptor copied to the
 * send buffer, with the data pointers included in the NetVSC message.
 */
int netvsc_send(struct net_device *ndev,
		struct hv_netvsc_packet *packet,
		struct rndis_message *rndis_msg,
		struct hv_page_buffer *pb,
		struct sk_buff *skb,
		bool xdp_tx)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_device *net_device
		= rcu_dereference_bh(ndev_ctx->nvdev);
	struct hv_device *device = ndev_ctx->device_ctx;
	int ret = 0;
	struct netvsc_channel *nvchan;
	u32 pktlen = packet->total_data_buflen, msd_len = 0;
	unsigned int section_index = NETVSC_INVALID_INDEX;
	struct multi_send_data *msdp;
	struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
	struct sk_buff *msd_skb = NULL;
	bool try_batch, xmit_more;

	/* If device is rescinded, return error and packet will get dropped. */
	if (unlikely(!net_device || net_device->destroy))
		return -ENODEV;

	nvchan = &net_device->chan_table[packet->q_idx];
	packet->send_buf_index = NETVSC_INVALID_INDEX;
	packet->cp_partial = false;

	/* Send a control message or XDP packet directly without accessing
	 * msd (Multi-Send Data) field which may be changed during data packet
	 * processing.
	 */
	if (!skb || xdp_tx)
		return netvsc_send_pkt(device, packet, net_device, pb, skb);

	/* batch packets in send buffer if possible */
	msdp = &nvchan->msd;
	if (msdp->pkt)
		msd_len = msdp->pkt->total_data_buflen;

	/* Three cases: (1) whole packet fits after the pending batch;
	 * (2) only the RNDIS descriptor fits (partial copy); (3) no
	 * pending batch but the packet fits in a fresh section.
	 */
	try_batch = msd_len > 0 && msdp->count < net_device->max_pkt;
	if (try_batch && msd_len + pktlen + net_device->pkt_align <
	    net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;

	} else if (try_batch && msd_len + packet->rmsg_size <
		   net_device->send_section_size) {
		section_index = msdp->pkt->send_buf_index;
		packet->cp_partial = true;

	} else if (pktlen + net_device->pkt_align <
		   net_device->send_section_size) {
		section_index = netvsc_get_next_send_section(net_device);
		if (unlikely(section_index == NETVSC_INVALID_INDEX)) {
			++ndev_ctx->eth_stats.tx_send_full;
		} else {
			move_pkt_msd(&msd_send, &msd_skb, msdp);
			msd_len = 0;
		}
	}

	/* Keep aggregating only if stack says more data is coming
	 * and not doing mixed modes send and not flow blocked
	 */
	xmit_more = netdev_xmit_more() &&
		!packet->cp_partial &&
		!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));

	if (section_index != NETVSC_INVALID_INDEX) {
		netvsc_copy_to_send_buf(net_device,
					section_index, msd_len,
					packet, rndis_msg, pb, xmit_more);

		packet->send_buf_index = section_index;

		if (packet->cp_partial) {
			/* Descriptor went into the send buffer; data pages
			 * are still referenced via the page buffer array.
			 */
			packet->page_buf_cnt -= packet->rmsg_pgcnt;
			packet->total_data_buflen = msd_len + packet->rmsg_size;
		} else {
			packet->page_buf_cnt = 0;
			packet->total_data_buflen += msd_len;
		}

		if (msdp->pkt) {
			packet->total_packets += msdp->pkt->total_packets;
			packet->total_bytes += msdp->pkt->total_bytes;
		}

		if (msdp->skb)
			dev_consume_skb_any(msdp->skb);

		if (xmit_more) {
			/* Defer sending; this packet becomes the batch head. */
			msdp->skb = skb;
			msdp->pkt = packet;
			msdp->count++;
		} else {
			cur_send = packet;
			msdp->skb = NULL;
			msdp->pkt = NULL;
			msdp->count = 0;
		}
	} else {
		move_pkt_msd(&msd_send, &msd_skb, msdp);
		cur_send = packet;
	}

	/* Flush a previously pending batch that could not absorb us. */
	if (msd_send) {
		int m_ret = netvsc_send_pkt(device, msd_send, net_device,
					    NULL, msd_skb);

		if (m_ret != 0) {
			netvsc_free_send_slot(net_device,
					      msd_send->send_buf_index);
			dev_kfree_skb_any(msd_skb);
		}
	}

	if (cur_send)
		ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);

	if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
		netvsc_free_send_slot(net_device, section_index);

	return ret;
}
1305 | ||
/* Send pending recv completions
 *
 * Drains the per-channel receive-completion ring (mrc), sending one
 * RNDIS_PKT_COMPLETE per slot back to the host.  Stops early and
 * returns the error (counting rx_comp_busy) if the VMBus ring is full,
 * leaving the remaining slots for a later retry.  Wakes the drain
 * waiter when the ring is emptied during device teardown.
 */
static int send_recv_completions(struct net_device *ndev,
				 struct netvsc_device *nvdev,
				 struct netvsc_channel *nvchan)
{
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_msg {
		struct nvsp_message_header hdr;
		u32 status;
	} __packed;
	struct recv_comp_msg msg = {
		.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
	};
	int ret;

	while (mrc->first != mrc->next) {
		const struct recv_comp_data *rcd
			= mrc->slots + mrc->first;

		msg.status = rcd->status;
		ret = vmbus_sendpacket(nvchan->channel, &msg, sizeof(msg),
				       rcd->tid, VM_PKT_COMP, 0);
		if (unlikely(ret)) {
			struct net_device_context *ndev_ctx = netdev_priv(ndev);

			++ndev_ctx->eth_stats.rx_comp_busy;
			return ret;
		}

		/* Advance with wrap-around. */
		if (++mrc->first == nvdev->recv_completion_cnt)
			mrc->first = 0;
	}

	/* receive completion ring has been emptied */
	if (unlikely(nvdev->destroy))
		wake_up(&nvdev->wait_drain);

	return 0;
}
1345 | ||
7426b1a5 | 1346 | /* Count how many receive completions are outstanding */ |
1347 | static void recv_comp_slot_avail(const struct netvsc_device *nvdev, | |
1348 | const struct multi_recv_comp *mrc, | |
1349 | u32 *filled, u32 *avail) | |
c0b558e5 | 1350 | { |
7426b1a5 | 1351 | u32 count = nvdev->recv_completion_cnt; |
c0b558e5 | 1352 | |
7426b1a5 | 1353 | if (mrc->next >= mrc->first) |
1354 | *filled = mrc->next - mrc->first; | |
1355 | else | |
1356 | *filled = (count - mrc->first) + mrc->next; | |
c0b558e5 | 1357 | |
7426b1a5 | 1358 | *avail = count - *filled - 1; |
c0b558e5 HZ |
1359 | } |
1360 | ||
/* Add receive complete to ring to send to host.
 *
 * Queues (tid, status) into the per-queue completion ring.  If the ring
 * has accumulated more than NAPI_POLL_WEIGHT entries, an immediate
 * drain is attempted first.  A completion that still cannot fit is
 * logged and dropped.
 */
static void enq_receive_complete(struct net_device *ndev,
				 struct netvsc_device *nvdev, u16 q_idx,
				 u64 tid, u32 status)
{
	struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];
	struct multi_recv_comp *mrc = &nvchan->mrc;
	struct recv_comp_data *rcd;
	u32 filled, avail;

	recv_comp_slot_avail(nvdev, mrc, &filled, &avail);

	if (unlikely(filled > NAPI_POLL_WEIGHT)) {
		send_recv_completions(ndev, nvdev, nvchan);
		recv_comp_slot_avail(nvdev, mrc, &filled, &avail);
	}

	if (unlikely(!avail)) {
		netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
			   q_idx, tid);
		return;
	}

	rcd = mrc->slots + mrc->next;
	rcd->tid = tid;
	rcd->status = status;

	/* Advance the producer index with wrap-around. */
	if (++mrc->next == nvdev->recv_completion_cnt)
		mrc->next = 0;
}
1391 | ||
/* Process one transfer-page packet (a batch of received RNDIS frames).
 *
 * Validates the NVSP header, the xfer-page packet header, and every
 * per-range offset/length against the receive buffer before handing
 * each frame to rndis_filter_receive().  A bad range or a receive
 * failure marks the whole batch NVSP_STAT_FAIL (and resets the partial
 * RSC state) but processing continues for the remaining ranges.  A
 * single completion covering the batch is then queued for the host.
 * Returns the number of ranges processed (0 on validation failure).
 */
static int netvsc_receive(struct net_device *ndev,
			  struct netvsc_device *net_device,
			  struct netvsc_channel *nvchan,
			  const struct vmpacket_descriptor *desc)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct vmbus_channel *channel = nvchan->channel;
	const struct vmtransfer_page_packet_header *vmxferpage_packet
		= container_of(desc, const struct vmtransfer_page_packet_header, d);
	const struct nvsp_message *nvsp = hv_pkt_data(desc);
	u32 msglen = hv_pkt_datalen(desc);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	char *recv_buf = net_device->recv_buf;
	u32 status = NVSP_STAT_SUCCESS;
	int i;
	int count = 0;

	/* Ensure packet is big enough to read header fields */
	if (msglen < sizeof(struct nvsp_message_header)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "invalid nvsp header, length too small: %u\n",
			  msglen);
		return 0;
	}

	/* Make sure this is a valid nvsp packet */
	if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Unknown nvsp packet type received %u\n",
			  nvsp->hdr.msg_type);
		return 0;
	}

	/* Validate xfer page pkt header */
	if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page pkt, offset too small: %u\n",
			  desc->offset8 << 3);
		return 0;
	}

	if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Invalid xfer page set id - expecting %x got %x\n",
			  NETVSC_RECEIVE_BUFFER_ID,
			  vmxferpage_packet->xfer_pageset_id);
		return 0;
	}

	count = vmxferpage_packet->range_cnt;

	/* Check count for a valid value */
	if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) {
		netif_err(net_device_ctx, rx_err, ndev,
			  "Range count is not valid: %d\n",
			  count);
		return 0;
	}

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < count; i++) {
		u32 offset = vmxferpage_packet->ranges[i].byte_offset;
		u32 buflen = vmxferpage_packet->ranges[i].byte_count;
		void *data;
		int ret;

		/* Host-supplied offsets/lengths are untrusted; keep the
		 * range inside the receive buffer (overflow-safe check).
		 */
		if (unlikely(offset > net_device->recv_buf_size ||
			     buflen > net_device->recv_buf_size - offset)) {
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
			netif_err(net_device_ctx, rx_err, ndev,
				  "Packet offset:%u + len:%u too big\n",
				  offset, buflen);

			continue;
		}

		/* We're going to copy (sections of) the packet into nvchan->recv_buf;
		 * make sure that nvchan->recv_buf is large enough to hold the packet.
		 */
		if (unlikely(buflen > net_device->recv_section_size)) {
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
			netif_err(net_device_ctx, rx_err, ndev,
				  "Packet too big: buflen=%u recv_section_size=%u\n",
				  buflen, net_device->recv_section_size);

			continue;
		}

		data = recv_buf + offset;

		nvchan->rsc.is_last = (i == count - 1);

		trace_rndis_recv(ndev, q_idx, data);

		/* Pass it to the upper layer */
		ret = rndis_filter_receive(ndev, net_device,
					   nvchan, data, buflen);

		if (unlikely(ret != NVSP_STAT_SUCCESS)) {
			/* Drop incomplete packet */
			nvchan->rsc.cnt = 0;
			status = NVSP_STAT_FAIL;
		}
	}

	enq_receive_complete(ndev, net_device, q_idx,
			     vmxferpage_packet->d.trans_id, status);

	return count;
}
1504 | ||
/* Install the host-provided RSS send-indirection table.
 *
 * Validates the message length and the table's count/offset (the
 * offset is untrusted and bounds-checked against the message), then
 * copies the VRSS_SEND_TAB_SIZE entries into tx_table.  Hosts speaking
 * NVSP <= v6 report a bogus offset, which is corrected here.
 */
static void netvsc_send_table(struct net_device *ndev,
			      struct netvsc_device *nvscdev,
			      const struct nvsp_message *nvmsg,
			      u32 msglen)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	u32 count, offset, *tab;
	int i;

	/* Ensure packet is big enough to read send_table fields */
	if (msglen < sizeof(struct nvsp_message_header) +
		     sizeof(struct nvsp_5_send_indirect_table)) {
		netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen);
		return;
	}

	count = nvmsg->msg.v5_msg.send_table.count;
	offset = nvmsg->msg.v5_msg.send_table.offset;

	if (count != VRSS_SEND_TAB_SIZE) {
		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
		return;
	}

	/* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
	 * wrong due to a host bug. So fix the offset here.
	 */
	if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
	    msglen >= sizeof(struct nvsp_message_header) +
	    sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
		offset = sizeof(struct nvsp_message_header) +
			 sizeof(union nvsp_6_message_uber);

	/* Boundary check for all versions */
	if (msglen < count * sizeof(u32) || offset > msglen - count * sizeof(u32)) {
		netdev_err(ndev, "Received send-table offset too big:%u\n",
			   offset);
		return;
	}

	tab = (void *)nvmsg + offset;

	for (i = 0; i < count; i++)
		net_device_ctx->tx_table[i] = tab[i];
}
1550 | ||
c347b927 | 1551 | static void netvsc_send_vf(struct net_device *ndev, |
44144185 AB |
1552 | const struct nvsp_message *nvmsg, |
1553 | u32 msglen) | |
71790a27 | 1554 | { |
c347b927 SH |
1555 | struct net_device_context *net_device_ctx = netdev_priv(ndev); |
1556 | ||
44144185 AB |
1557 | /* Ensure packet is big enough to read its fields */ |
1558 | if (msglen < sizeof(struct nvsp_message_header) + | |
1559 | sizeof(struct nvsp_4_send_vf_association)) { | |
1560 | netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen); | |
1561 | return; | |
1562 | } | |
1563 | ||
f9a7da91 VK |
1564 | net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; |
1565 | net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; | |
365e1ece GK |
1566 | |
1567 | if (net_device_ctx->vf_alloc) | |
1568 | complete(&net_device_ctx->vf_add); | |
1569 | ||
00d7ddba SH |
1570 | netdev_info(ndev, "VF slot %u %s\n", |
1571 | net_device_ctx->vf_serial, | |
1572 | net_device_ctx->vf_alloc ? "added" : "removed"); | |
71790a27 HZ |
1573 | } |
1574 | ||
71f21959 | 1575 | static void netvsc_receive_inband(struct net_device *ndev, |
171c1fd9 | 1576 | struct netvsc_device *nvscdev, |
44144185 | 1577 | const struct vmpacket_descriptor *desc) |
71790a27 | 1578 | { |
44144185 AB |
1579 | const struct nvsp_message *nvmsg = hv_pkt_data(desc); |
1580 | u32 msglen = hv_pkt_datalen(desc); | |
1581 | ||
1582 | /* Ensure packet is big enough to read header fields */ | |
1583 | if (msglen < sizeof(struct nvsp_message_header)) { | |
1584 | netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen); | |
1585 | return; | |
1586 | } | |
1587 | ||
71790a27 HZ |
1588 | switch (nvmsg->hdr.msg_type) { |
1589 | case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: | |
171c1fd9 | 1590 | netvsc_send_table(ndev, nvscdev, nvmsg, msglen); |
71790a27 HZ |
1591 | break; |
1592 | ||
1593 | case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: | |
96854bbd APM |
1594 | if (hv_is_isolation_supported()) |
1595 | netdev_err(ndev, "Ignore VF_ASSOCIATION msg from the host supporting isolation\n"); | |
1596 | else | |
1597 | netvsc_send_vf(ndev, nvmsg, msglen); | |
71790a27 HZ |
1598 | break; |
1599 | } | |
1600 | } | |
1601 | ||
15a863bf | 1602 | static int netvsc_process_raw_pkt(struct hv_device *device, |
c8e4eff4 | 1603 | struct netvsc_channel *nvchan, |
15a863bf | 1604 | struct netvsc_device *net_device, |
1605 | struct net_device *ndev, | |
f9645430 | 1606 | const struct vmpacket_descriptor *desc, |
1607 | int budget) | |
99a50bb1 | 1608 | { |
c8e4eff4 | 1609 | struct vmbus_channel *channel = nvchan->channel; |
c347b927 | 1610 | const struct nvsp_message *nvmsg = hv_pkt_data(desc); |
99a50bb1 | 1611 | |
ec966381 SH |
1612 | trace_nvsp_recv(ndev, channel, nvmsg); |
1613 | ||
99a50bb1 S |
1614 | switch (desc->type) { |
1615 | case VM_PKT_COMP: | |
44144185 | 1616 | netvsc_send_completion(ndev, net_device, channel, desc, budget); |
99a50bb1 S |
1617 | break; |
1618 | ||
1619 | case VM_PKT_DATA_USING_XFER_PAGES: | |
44144185 | 1620 | return netvsc_receive(ndev, net_device, nvchan, desc); |
99a50bb1 S |
1621 | |
1622 | case VM_PKT_DATA_INBAND: | |
44144185 | 1623 | netvsc_receive_inband(ndev, net_device, desc); |
99a50bb1 S |
1624 | break; |
1625 | ||
1626 | default: | |
1627 | netdev_err(ndev, "unhandled packet type %d, tid %llx\n", | |
f4f1c23d | 1628 | desc->type, desc->trans_id); |
99a50bb1 S |
1629 | break; |
1630 | } | |
15a863bf | 1631 | |
1632 | return 0; | |
1633 | } | |
1634 | ||
1635 | static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel) | |
1636 | { | |
1637 | struct vmbus_channel *primary = channel->primary_channel; | |
1638 | ||
1639 | return primary ? primary->device_obj : channel->device_obj; | |
1640 | } | |
1641 | ||
/* Network processing softirq
 * Process data in incoming ring buffer from host
 * Stops when ring is empty or budget is met or exceeded.
 *
 * Returns the amount of budget consumed (clamped to @budget, since one
 * descriptor may expand into multiple packets and overshoot).
 */
int netvsc_poll(struct napi_struct *napi, int budget)
{
	struct netvsc_channel *nvchan
		= container_of(napi, struct netvsc_channel, napi);
	struct netvsc_device *net_device = nvchan->net_device;
	struct vmbus_channel *channel = nvchan->channel;
	struct hv_device *device = netvsc_channel_to_device(channel);
	struct net_device *ndev = hv_get_drvdata(device);
	int work_done = 0;
	int ret;

	/* If starting a new interval; otherwise resume from the descriptor
	 * left over when the previous poll ran out of budget.
	 */
	if (!nvchan->desc)
		nvchan->desc = hv_pkt_iter_first(channel);

	/* Set by the receive path when an XDP redirect needs flushing */
	nvchan->xdp_flush = false;

	while (nvchan->desc && work_done < budget) {
		work_done += netvsc_process_raw_pkt(device, nvchan, net_device,
						    ndev, nvchan->desc, budget);
		nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
	}

	if (nvchan->xdp_flush)
		xdp_do_flush();

	/* Send any pending receive completions */
	ret = send_recv_completions(ndev, net_device, nvchan);

	/* If it did not exhaust NAPI budget this time
	 * and not doing busy poll
	 * then re-enable host interrupts
	 * and reschedule if ring is not empty
	 * or sending receive completion failed.
	 * NOTE: the order here (complete, end-read, schedule-prep) closes the
	 * race where the host posts data after the ring was seen empty.
	 */
	if (work_done < budget &&
	    napi_complete_done(napi, work_done) &&
	    (ret || hv_end_read(&channel->inbound)) &&
	    napi_schedule_prep(napi)) {
		hv_begin_read(&channel->inbound);
		__napi_schedule(napi);
	}

	/* Driver may overshoot since multiple packets per descriptor */
	return min(work_done, budget);
}
1692 | ||
262b7f14 | 1693 | /* Call back when data is available in host ring buffer. |
1694 | * Processing is deferred until network softirq (NAPI) | |
1695 | */ | |
5b54dac8 | 1696 | void netvsc_channel_cb(void *context) |
fceaf24a | 1697 | { |
6de38af6 | 1698 | struct netvsc_channel *nvchan = context; |
43bf99ce | 1699 | struct vmbus_channel *channel = nvchan->channel; |
1700 | struct hv_ring_buffer_info *rbi = &channel->inbound; | |
1701 | ||
1702 | /* preload first vmpacket descriptor */ | |
1703 | prefetch(hv_get_ring_buffer(rbi) + rbi->priv_read_index); | |
0b307ebd | 1704 | |
f4f1c23d | 1705 | if (napi_schedule_prep(&nvchan->napi)) { |
52d3b494 | 1706 | /* disable interrupts from host */ |
43bf99ce | 1707 | hv_begin_read(rbi); |
0d6dd357 | 1708 | |
68633eda | 1709 | __napi_schedule_irqoff(&nvchan->napi); |
f4f1c23d | 1710 | } |
fceaf24a | 1711 | } |
af24ce42 | 1712 | |
b637e023 HZ |
/*
 * netvsc_device_add - Callback when the device belonging to this
 * driver is added
 *
 * Allocates the netvsc device, registers per-channel XDP state, opens the
 * primary vmbus channel, and connects to the NetVSP protocol on the host.
 *
 * Returns the new netvsc_device on success, or ERR_PTR(-errno) on failure.
 * On failure, everything set up so far is unwound via the goto labels in
 * reverse order of construction (close -> cleanup -> cleanup2).
 */
struct netvsc_device *netvsc_device_add(struct hv_device *device,
				const struct netvsc_device_info *device_info)
{
	int i, ret = 0;
	struct netvsc_device *net_device;
	struct net_device *ndev = hv_get_drvdata(device);
	struct net_device_context *net_device_ctx = netdev_priv(ndev);

	net_device = alloc_net_device();
	if (!net_device)
		return ERR_PTR(-ENOMEM);

	/* Start with an empty send indirection table; the host sends a real
	 * one later via NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE.
	 */
	for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
		net_device_ctx->tx_table[i] = 0;

	/* Because the device uses NAPI, all the interrupt batching and
	 * control is done via Net softirq, not the channel handling
	 */
	set_channel_read_mode(device->channel, HV_CALL_ISR);

	/* If we're reopening the device we may have multiple queues, fill the
	 * chn_table with the default channel to use it before subchannels are
	 * opened.
	 * Initialize the channel state before we open;
	 * we can be interrupted as soon as we open the channel.
	 */

	for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
		struct netvsc_channel *nvchan = &net_device->chan_table[i];

		nvchan->channel = device->channel;
		nvchan->net_device = net_device;
		u64_stats_init(&nvchan->tx_stats.syncp);
		u64_stats_init(&nvchan->rx_stats.syncp);

		/* Register XDP RX queue info for this (future) queue */
		ret = xdp_rxq_info_reg(&nvchan->xdp_rxq, ndev, i, 0);

		if (ret) {
			netdev_err(ndev, "xdp_rxq_info_reg fail: %d\n", ret);
			goto cleanup2;
		}

		ret = xdp_rxq_info_reg_mem_model(&nvchan->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);

		if (ret) {
			netdev_err(ndev, "xdp reg_mem_model fail: %d\n", ret);
			goto cleanup2;
		}
	}

	/* Enable NAPI handler before init callbacks */
	netif_napi_add(ndev, &net_device->chan_table[0].napi, netvsc_poll);

	/* Open the channel: request-id/addr callbacks and sizes must be set
	 * before vmbus_open() since the channel may interrupt immediately.
	 */
	device->channel->next_request_id_callback = vmbus_next_request_id;
	device->channel->request_addr_callback = vmbus_request_addr;
	device->channel->rqstor_size = netvsc_rqstor_size(netvsc_ring_bytes);
	device->channel->max_pkt_size = NETVSC_MAX_PKT_SIZE;

	ret = vmbus_open(device->channel, netvsc_ring_bytes,
			 netvsc_ring_bytes, NULL, 0,
			 netvsc_channel_cb, net_device->chan_table);

	if (ret != 0) {
		netdev_err(ndev, "unable to open channel: %d\n", ret);
		goto cleanup;
	}

	/* Channel is opened */
	netdev_dbg(ndev, "hv_netvsc channel opened successfully\n");

	napi_enable(&net_device->chan_table[0].napi);

	/* Connect with the NetVsp */
	ret = netvsc_connect_vsp(device, net_device, device_info);
	if (ret != 0) {
		netdev_err(ndev,
			   "unable to connect to NetVSP - %d\n", ret);
		goto close;
	}

	/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
	 * populated.
	 */
	rcu_assign_pointer(net_device_ctx->nvdev, net_device);

	return net_device;

close:
	RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
	napi_disable(&net_device->chan_table[0].napi);

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

cleanup:
	netif_napi_del(&net_device->chan_table[0].napi);

cleanup2:
	free_netvsc_device(&net_device->rcu);

	return ERR_PTR(ret);
}