i40evf: virtual channel interface
[linux-2.6-block.git] drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40evf.h"
#include "i40e_prototype.h"

/* busy wait delay in msec */
#define I40EVF_BUSY_WAIT_DELAY 10
#define I40EVF_BUSY_WAIT_COUNT 50

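/* A caller that busy-waits on the PF with these values polls at most
 * I40EVF_BUSY_WAIT_COUNT times with I40EVF_BUSY_WAIT_DELAY msec between
 * tries, i.e. roughly 50 * 10 = 500 msec total.
 */
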
/**
 * i40evf_send_pf_msg
 * @adapter: adapter structure
 * @op: virtual channel opcode
 * @msg: pointer to message buffer
 * @len: message length
 *
 * Send message to the PF and print an error if the send fails.
 **/
static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
			      enum i40e_virtchnl_ops op, u8 *msg, u16 len)
{
	struct i40e_hw *hw = &adapter->hw;
	i40e_status err;

	err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
	if (err)
		dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
			op, err, hw->aq.asq_last_status);
	return err;
}

/**
 * i40evf_send_api_ver
 * @adapter: adapter structure
 *
 * Send API version admin queue message to the PF. The reply is not checked
 * in this function. Returns 0 if the message was successfully
 * sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int i40evf_send_api_ver(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_version_info vvi;

	vvi.major = I40E_VIRTCHNL_VERSION_MAJOR;
	vvi.minor = I40E_VIRTCHNL_VERSION_MINOR;

	return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_VERSION, (u8 *)&vvi,
				  sizeof(vvi));
}

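/* The PF's reply to I40E_VIRTCHNL_OP_VERSION is not consumed here; the init
 * path is expected to give the PF time to respond and then check the reply
 * with i40evf_verify_api_ver() below. A minimal sketch of that sequence
 * (error handling and the retry/delay between the two calls elided):
 *
 *	err = i40evf_send_api_ver(adapter);
 *	...wait for the PF to respond...
 *	err = i40evf_verify_api_ver(adapter);
 */
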
/**
 * i40evf_verify_api_ver
 * @adapter: adapter structure
 *
 * Compare API versions with the PF. Must be called after admin queue is
 * initialized. Returns 0 if API versions match, -EIO if
 * they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
 **/
int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_version_info *pf_vvi;
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	i40e_status err;

	event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = i40evf_clean_arq_element(hw, &event, NULL);
	if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
		goto out_alloc;

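	/* For virtchnl messages the PF puts its return code in the
	 * descriptor's cookie_low field and echoes the opcode in
	 * cookie_high, so both are checked before touching the payload.
	 */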
	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
	if (err) {
		err = -EIO;
		goto out_alloc;
	}

	if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
	    I40E_VIRTCHNL_OP_VERSION) {
		err = -EIO;
		goto out_alloc;
	}

	pf_vvi = (struct i40e_virtchnl_version_info *)event.msg_buf;
	if ((pf_vvi->major != I40E_VIRTCHNL_VERSION_MAJOR) ||
	    (pf_vvi->minor != I40E_VIRTCHNL_VERSION_MINOR))
		err = -EIO;

out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}

/**
 * i40evf_send_vf_config_msg
 * @adapter: adapter structure
 *
 * Send VF configuration request admin queue message to the PF. The reply
 * is not checked in this function. Returns 0 if the message was
 * successfully sent, or one of the I40E_ADMIN_QUEUE_ERROR_ statuses if not.
 **/
int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
{
	return i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
				  NULL, 0);
}

/**
 * i40evf_get_vf_config
 * @adapter: adapter structure
 *
 * Get VF configuration from PF and populate hw structure. Must be called
 * after admin queue is initialized. Returns I40E_ERR_ADMIN_QUEUE_NO_WORK if
 * no response is pending (the caller is expected to retry); otherwise the
 * response from the PF is left in adapter->vf_res for further processing.
 **/
int i40evf_get_vf_config(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	u16 len;
	i40e_status err;

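	/* Size the reply buffer for the worst case: the vf_resource header
	 * plus one vsi_resource entry for each of up to I40E_MAX_VF_VSI VSIs.
	 */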
	len = sizeof(struct i40e_virtchnl_vf_resource) +
	      I40E_MAX_VF_VSI * sizeof(struct i40e_virtchnl_vsi_resource);
	event.msg_size = len;
	event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
	if (!event.msg_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = i40evf_clean_arq_element(hw, &event, NULL);
	if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
		goto out_alloc;

	err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"%s: Error returned from PF, %d, %d\n", __func__,
			le32_to_cpu(event.desc.cookie_high),
			le32_to_cpu(event.desc.cookie_low));
		err = -EIO;
		goto out_alloc;
	}

	if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
	    I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
		dev_err(&adapter->pdev->dev,
			"%s: Invalid response from PF, %d, %d\n", __func__,
			le32_to_cpu(event.desc.cookie_high),
			le32_to_cpu(event.desc.cookie_low));
		err = -EIO;
		goto out_alloc;
	}
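	/* i40evf_clean_arq_element() may shrink event.msg_size to the length
	 * the PF actually sent, so min() keeps the copy within both bounds.
	 */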
	memcpy(adapter->vf_res, event.msg_buf, min(event.msg_size, len));

	i40e_vf_parse_hw_config(hw, adapter->vf_res);
out_alloc:
	kfree(event.msg_buf);
out:
	return err;
}

/**
 * i40evf_configure_queues
 * @adapter: adapter structure
 *
 * Request that the PF set up our (previously allocated) queues.
 **/
void i40evf_configure_queues(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_vsi_queue_config_info *vqci;
	struct i40e_virtchnl_queue_pair_info *vqpi;
	int pairs = adapter->vsi_res->num_queue_pairs;
	int i, len;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES;
	len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
	      (sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
	vqci = kzalloc(len, GFP_ATOMIC);
	if (!vqci) {
		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
			__func__);
		return;
	}
	vqci->vsi_id = adapter->vsi_res->vsi_id;
	vqci->num_queue_pairs = pairs;
	vqpi = vqci->qpair;
	/* Size check is not needed here - HW max is 16 queue pairs, and we
	 * can fit info for 31 of them into the AQ buffer before it overflows.
	 */
	for (i = 0; i < pairs; i++) {
		vqpi->txq.vsi_id = vqci->vsi_id;
		vqpi->txq.queue_id = i;
		vqpi->txq.ring_len = adapter->tx_rings[i]->count;
		vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;

		vqpi->rxq.vsi_id = vqci->vsi_id;
		vqpi->rxq.queue_id = i;
		vqpi->rxq.ring_len = adapter->rx_rings[i]->count;
		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma;
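		/* largest frame we can receive: MTU plus the Ethernet
		 * header, one VLAN tag, and the frame check sequence
		 */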
		vqpi->rxq.max_pkt_size = adapter->netdev->mtu
					+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
		vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len;
		vqpi++;
	}

	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
			   (u8 *)vqci, len);
	kfree(vqci);
	adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
}

/**
 * i40evf_enable_queues
 * @adapter: adapter structure
 *
 * Request that the PF enable all of our queues.
 **/
void i40evf_enable_queues(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_queue_select vqs;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_ENABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
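	/* (1 << n) - 1 builds a bitmap with the low n bits set, i.e. one bit
	 * per queue pair, selecting every queue this VF owns.
	 */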
	vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
	vqs.rx_queues = vqs.tx_queues;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
	adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
}

/**
 * i40evf_disable_queues
 * @adapter: adapter structure
 *
 * Request that the PF disable all of our queues.
 **/
void i40evf_disable_queues(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_queue_select vqs;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_DISABLE_QUEUES;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
	vqs.rx_queues = vqs.tx_queues;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
			   (u8 *)&vqs, sizeof(vqs));
	adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
}

/**
 * i40evf_map_queues
 * @adapter: adapter structure
 *
 * Request that the PF map queues to interrupt vectors. Misc causes, including
 * admin queue, are always mapped to vector 0.
 **/
void i40evf_map_queues(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_irq_map_info *vimi;
	int v_idx, q_vectors, len;
	struct i40e_q_vector *q_vector;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

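	/* q_vectors counts only the data-path interrupts; the NONQ_VECS
	 * remainder (the misc/AdminQ vector) gets the final vecmap slot,
	 * filled in after the loop below.
	 */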
	len = sizeof(struct i40e_virtchnl_irq_map_info) +
	      (adapter->num_msix_vectors *
	       sizeof(struct i40e_virtchnl_vector_map));
	vimi = kzalloc(len, GFP_ATOMIC);
	if (!vimi) {
		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
			__func__);
		return;
	}

	vimi->num_vectors = adapter->num_msix_vectors;
	/* Queue vectors first */
	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
		q_vector = adapter->q_vector[v_idx];
		vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
		vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS;
		vimi->vecmap[v_idx].txq_map = q_vector->ring_mask;
		vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask;
	}
	/* Misc vector last - this is only for AdminQ messages */
	vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
	vimi->vecmap[v_idx].vector_id = 0;
	vimi->vecmap[v_idx].txq_map = 0;
	vimi->vecmap[v_idx].rxq_map = 0;

	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
			   (u8 *)vimi, len);
	kfree(vimi);
	adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
}

/**
 * i40evf_add_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF add one or more addresses to our filters.
 **/
void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_ether_addr_list *veal;
	int len, i = 0, count = 0;
	struct i40evf_mac_filter *f;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS;

	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
	      (count * sizeof(struct i40e_virtchnl_ether_addr));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
			 __func__);
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
			sizeof(struct i40e_virtchnl_ether_addr);
		len = I40EVF_MAX_AQ_BUF_SIZE;
	}

	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
			__func__);
		return;
	}
	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->add) {
			memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
			i++;
			f->add = false;
			/* don't run past the buffer if count was clamped */
			if (i == count)
				break;
		}
	}
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
			   (u8 *)veal, len);
	kfree(veal);
	adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
}

/**
 * i40evf_del_ether_addrs
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more addresses from our filters.
 **/
void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_ether_addr_list *veal;
	struct i40evf_mac_filter *f, *ftmp;
	int len, i = 0, count = 0;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS;

	len = sizeof(struct i40e_virtchnl_ether_addr_list) +
	      (count * sizeof(struct i40e_virtchnl_ether_addr));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
			 __func__);
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct i40e_virtchnl_ether_addr_list)) /
			sizeof(struct i40e_virtchnl_ether_addr);
		len = I40EVF_MAX_AQ_BUF_SIZE;
	}
	veal = kzalloc(len, GFP_ATOMIC);
	if (!veal) {
		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
			__func__);
		return;
	}
	veal->vsi_id = adapter->vsi_res->vsi_id;
	veal->num_elements = count;
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (f->remove) {
			memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
			i++;
			list_del(&f->list);
			kfree(f);
			/* don't run past the buffer if count was clamped */
			if (i == count)
				break;
		}
	}
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
			   (u8 *)veal, len);
	kfree(veal);
	adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
}

/**
 * i40evf_add_vlans
 * @adapter: adapter structure
 *
 * Request that the PF add one or more VLAN filters to our VSI.
 **/
void i40evf_add_vlans(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_vlan_filter_list *vvfl;
	int len, i = 0, count = 0;
	struct i40evf_vlan_filter *f;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_ADD_VLAN;

	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
			 __func__);
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = I40EVF_MAX_AQ_BUF_SIZE;
	}
	vvfl = kzalloc(len, GFP_ATOMIC);
	if (!vvfl) {
		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
			__func__);
		return;
	}
	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->add) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			f->add = false;
			/* don't run past the buffer if count was clamped */
			if (i == count)
				break;
		}
	}
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
	adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
}

/**
 * i40evf_del_vlans
 * @adapter: adapter structure
 *
 * Request that the PF remove one or more VLAN filters from our VSI.
 **/
void i40evf_del_vlans(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_vlan_filter_list *vvfl;
	struct i40evf_vlan_filter *f, *ftmp;
	int len, i = 0, count = 0;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (f->remove)
			count++;
	}
	if (!count) {
		adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_DEL_VLAN;

	len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
	      (count * sizeof(u16));
	if (len > I40EVF_MAX_AQ_BUF_SIZE) {
		dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
			 __func__);
		count = (I40EVF_MAX_AQ_BUF_SIZE -
			 sizeof(struct i40e_virtchnl_vlan_filter_list)) /
			sizeof(u16);
		len = I40EVF_MAX_AQ_BUF_SIZE;
	}
	vvfl = kzalloc(len, GFP_ATOMIC);
	if (!vvfl) {
		dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
			__func__);
		return;
	}
	vvfl->vsi_id = adapter->vsi_res->vsi_id;
	vvfl->num_elements = count;
	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		if (f->remove) {
			vvfl->vlan_id[i] = f->vlan;
			i++;
			list_del(&f->list);
			kfree(f);
			/* don't run past the buffer if count was clamped */
			if (i == count)
				break;
		}
	}
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
	kfree(vvfl);
	adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
	adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
}

/**
 * i40evf_set_promiscuous
 * @adapter: adapter structure
 * @flags: bitmask to control unicast/multicast promiscuous.
 *
 * Request that the PF enable promiscuous mode for our VSI.
 **/
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
{
	struct i40e_virtchnl_promisc_info vpi;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "%s: command %d pending\n",
			__func__, adapter->current_op);
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
	vpi.vsi_id = adapter->vsi_res->vsi_id;
	vpi.flags = flags;
	i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
			   (u8 *)&vpi, sizeof(vpi));
}

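/* A minimal usage sketch, assuming the promiscuous-mode bits defined in
 * i40e_virtchnl.h (I40E_FLAG_VF_UNICAST_PROMISC and
 * I40E_FLAG_VF_MULTICAST_PROMISC); passing 0 would turn promiscuous mode off:
 *
 *	i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC);
 */
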
/**
 * i40evf_request_stats
 * @adapter: adapter structure
 *
 * Request VSI statistics from PF.
 **/
void i40evf_request_stats(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_queue_select vqs;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* no error message, this isn't crucial */
		return;
	}
	adapter->current_op = I40E_VIRTCHNL_OP_GET_STATS;
	vqs.vsi_id = adapter->vsi_res->vsi_id;
	/* queue maps are ignored for this message - only the vsi is used */
	if (i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_STATS,
			       (u8 *)&vqs, sizeof(vqs)))
		/* if the request failed, don't lock out others */
		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}

/**
 * i40evf_virtchnl_completion
 * @adapter: adapter structure
 * @v_opcode: opcode sent by PF
 * @v_retval: retval sent by PF
 * @msg: message sent by PF
 * @msglen: message length
 *
 * Asynchronous completion function for admin queue messages. Rather than busy
 * wait, we fire off our requests and assume that no errors will be returned.
 * This function handles the reply messages.
 **/
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
				enum i40e_virtchnl_ops v_opcode,
				i40e_status v_retval,
				u8 *msg, u16 msglen)
{
	struct net_device *netdev = adapter->netdev;

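	/* Events are unsolicited messages from the PF, not replies to
	 * anything this VF sent, so they are handled before the
	 * current_op bookkeeping below and never clear it.
	 */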
	if (v_opcode == I40E_VIRTCHNL_OP_EVENT) {
		struct i40e_virtchnl_pf_event *vpe =
			(struct i40e_virtchnl_pf_event *)msg;
		switch (vpe->event) {
		case I40E_VIRTCHNL_EVENT_LINK_CHANGE:
			adapter->link_up =
				vpe->event_data.link_event.link_status;
			if (adapter->link_up && !netif_carrier_ok(netdev)) {
				dev_info(&adapter->pdev->dev, "NIC Link is Up\n");
				netif_carrier_on(netdev);
				netif_tx_wake_all_queues(netdev);
			} else if (!adapter->link_up) {
				dev_info(&adapter->pdev->dev, "NIC Link is Down\n");
				netif_carrier_off(netdev);
				netif_tx_stop_all_queues(netdev);
			}
			break;
		case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
			adapter->state = __I40EVF_RESETTING;
			schedule_work(&adapter->reset_task);
			dev_info(&adapter->pdev->dev,
				 "%s: hardware reset pending\n", __func__);
			break;
		default:
			dev_err(&adapter->pdev->dev,
				"%s: Unknown event %d from PF\n",
				__func__, vpe->event);
			break;
		}
		return;
	}
	if (v_opcode != adapter->current_op) {
		dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d.\n",
			__func__, adapter->current_op, v_opcode);
		/* We're probably completely screwed at this point, but clear
		 * the current op and try to carry on....
		 */
		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
		return;
	}
	if (v_retval) {
		dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d!\n",
			__func__, v_retval, v_opcode);
	}
	switch (v_opcode) {
	case I40E_VIRTCHNL_OP_GET_STATS: {
		struct i40e_eth_stats *stats =
			(struct i40e_eth_stats *)msg;
		adapter->net_stats.rx_packets = stats->rx_unicast +
						stats->rx_multicast +
						stats->rx_broadcast;
		adapter->net_stats.tx_packets = stats->tx_unicast +
						stats->tx_multicast +
						stats->tx_broadcast;
		adapter->net_stats.rx_bytes = stats->rx_bytes;
		adapter->net_stats.tx_bytes = stats->tx_bytes;
		adapter->net_stats.rx_errors = stats->rx_errors;
		adapter->net_stats.tx_errors = stats->tx_errors;
		adapter->net_stats.rx_dropped = stats->rx_missed;
		adapter->net_stats.tx_dropped = stats->tx_discards;
		adapter->current_stats = *stats;
		}
		break;
	case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_MAC_FILTER);
		break;
	case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_MAC_FILTER);
		break;
	case I40E_VIRTCHNL_OP_ADD_VLAN:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ADD_VLAN_FILTER);
		break;
	case I40E_VIRTCHNL_OP_DEL_VLAN:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DEL_VLAN_FILTER);
		break;
	case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_ENABLE_QUEUES);
		/* enable transmits */
		i40evf_irq_enable(adapter, true);
		netif_tx_start_all_queues(adapter->netdev);
		netif_carrier_on(adapter->netdev);
		break;
	case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_DISABLE_QUEUES);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_CONFIGURE_QUEUES);
		break;
	case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
		adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
		break;
	default:
		dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF.\n",
			 __func__, v_opcode);
		break;
	} /* switch v_opcode */
	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}