// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/* IBM System i and System p Virtual NIC Device Driver */
/* Copyright (C) 2014 IBM Corp. */
/* Santiago Leon (santi_leon@yahoo.com) */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
/* John Allen (jallen@linux.vnet.ibm.com) */
/* */
/* This module contains the implementation of a virtual ethernet device */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor. */
/* */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
/* are used by the driver to notify the server that a packet is */
/* ready for transmission or that a buffer has been added to receive a */
/* packet. Subsequently, sCRQs are used by the server to notify the */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer. */
/* */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
/* which skbs are DMA mapped and immediately unmapped when the transmit */
/* or receive has been completed, the VNIC driver is required to use */
/* "long term mapping". This entails that large, contiguous DMA mapped */
/* buffers are allocated on driver initialization and these buffers are */
/* then continuously reused to pass skbs to and from the VNIC server. */
/* */
/**************************************************************************/
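
/* A rough sketch of the long term mapping flow outlined above, in terms of
 * the helpers defined later in this file (illustrative, not a complete
 * call graph):
 *
 *   alloc_long_term_buff()  - dma_alloc_coherent() + send_request_map()
 *                             so the VNIC server can reference the buffer
 *   replenish_rx_pool()     - post rx descriptors whose ioba/map_id point
 *                             into a long term buffer
 *   ibmvnic_xmit()          - copy the skb into a long term buffer slot
 *                             and post a tx descriptor referencing it
 *   free_long_term_buff()   - send_request_unmap() + dma_free_coherent()
 */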
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"
static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}
/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);

	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}
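
/**
 * alloc_long_term_buff - allocate and register a long term DMA buffer
 * @adapter: private device data
 * @ltb: long term buffer descriptor to fill in
 * @size: requested buffer size in bytes
 *
 * Allocates a coherent DMA buffer and sends a map request so the VNIC
 * server can reference it, waiting up to 10 seconds for the firmware
 * response. The buffer is freed again if any step fails.
 */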
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr,
			      ltb->size, ltb->map_id);
	if (rc) {
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev,
			"Long term map request aborted or timed out, rc = %d\n",
			rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
		mutex_unlock(&adapter->fw_lock);
		return -1;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}
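
/**
 * free_long_term_buff - unmap and free a long term DMA buffer
 * @adapter: private device data
 * @ltb: long term buffer to release
 *
 * Sends an unmap request unless the reset in progress is one for which
 * the VIOS unmaps the buffer automatically, then frees the DMA memory.
 */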
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	/* VIOS automatically unmaps the long term buffer at remote
	 * end for the following resets:
	 * FAILOVER, MOBILITY, TIMEOUT.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}
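
/**
 * reset_long_term_buff - zero out a long term buffer and re-register it
 * @adapter: private device data
 * @ltb: long term buffer to reset
 *
 * Clears the buffer and sends a fresh map request. If firmware rejects
 * the request, the buffer is freed and reallocated instead.
 */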
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;

	reinit_completion(&adapter->fw_done);
	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_info(dev,
			 "Reset failed, long term map request timed out or aborted\n");
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	if (adapter->fw_done_rc) {
		dev_info(dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		mutex_unlock(&adapter->fw_lock);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}
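
/**
 * replenish_rx_pool - refill an rx pool with receive buffers
 * @adapter: private device data
 * @pool: rx pool to replenish
 *
 * Allocates skbs for the pool's free slots and posts rx_add descriptors
 * pointing into the pool's long term buffer, batched through the
 * indirect sub-CRQ buffer. On an hcall failure the batched entries are
 * unwound, and if the queue is closed or a failover is pending the rx
 * pools are deactivated until the upcoming reset.
 */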
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;
	for (i = 0; i < count; ++i) {
		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * discarded.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
		pool->next_free = (pool->next_free + 1) % pool->size;
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
				be64_to_cpu(sub_crq->rx_add.correlator);
		index = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = index;
		dev_kfree_skb_any(pool->rx_buff[index].skb);
		pool->rx_buff[index].skb = NULL;
	}
	adapter->replenish_add_buff_failure += ind_bufp->index;
	atomic_add(buffers_added, &pool->available);
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}
static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}

	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	u64 buff_size;
	int rx_scrqs;
	int i, j, rc;

	if (!adapter->rx_pool)
		return -1;

	buff_size = adapter->cur_rx_buf_sz;
	rx_scrqs = adapter->num_active_rx_pools;
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != buff_size) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}
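
/**
 * init_rx_pools - create one rx buffer pool per active rx sub-CRQ
 * @netdev: net device
 *
 * Allocates the rx pool array and, for each pool, its free map, buffer
 * tracking array and backing long term buffer. Any failure releases
 * everything allocated so far.
 */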
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 buff_size;
	int i, j;

	rxadd_subcrqs = adapter->num_active_rx_scrqs;
	buff_size = adapter->cur_rx_buf_sz;

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   buff_size);

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	if (!adapter->tx_pool)
		return -1;

	tx_scrqs = adapter->num_active_tx_pools;
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int num_entries, int buf_size)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	tx_pool->tx_buff = kcalloc(num_entries,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -1;

	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
				 num_entries * buf_size))
		return -1;

	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map)
		return -1;

	for (i = 0; i < num_entries; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = num_entries;
	tx_pool->buf_size = buf_size;

	return 0;
}
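
/**
 * init_tx_pools - create the tx and TSO buffer pools
 * @netdev: net device
 *
 * Allocates a regular tx pool and a TSO tx pool for each active tx
 * sub-CRQ. Regular pool buffers are sized from the requested MTU plus
 * the VLAN header, aligned to the cache line size.
 */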
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int tx_subcrqs;
	u64 buff_size;
	int i, rc;

	tx_subcrqs = adapter->num_active_tx_scrqs;
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->tso_pool = kcalloc(tx_subcrqs,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tso_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		buff_size = adapter->req_mtu + VLAN_HLEN;
		buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      adapter->req_tx_entries_per_subcrq,
				      buff_size);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc) {
			release_tx_pools(adapter);
			return rc;
		}
	}

	return 0;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}
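
/**
 * ibmvnic_login - negotiate a session with the VNIC server
 * @netdev: net device
 *
 * Sends a login request and waits for the response, retrying on
 * timeout or abort. On a partial success response the sub-CRQs are
 * released, capabilities are re-queried and the sub-CRQs and their
 * irqs are re-initialized before the login is retried, up to a fixed
 * retry limit.
 */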
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(20000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -1;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc)
			return rc;

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			continue;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -1;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -1;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -1;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed\n");
			return -1;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	netdev_dbg(netdev, "[S:%d] Login succeeded\n", adapter->state);
	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_napi(adapter);
	release_login_buffer(adapter);
	release_login_rsp_buffer(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(20000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Couldn't allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_query_map(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}
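
/**
 * __ibmvnic_open - bring the interface up
 * @netdev: net device
 *
 * Replenishes the rx pools, enables napi and the sub-CRQ interrupts,
 * and asks the server to set the logical link state to up. When coming
 * from the closed state, the hard irqs are re-enabled and napi is
 * kicked to pick up anything received while the device was closed.
 */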
static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		return 0;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			goto out;
		}
	}

	rc = __ibmvnic_open(netdev);

out:
	/* If open fails due to a pending failover, set device state and
	 * return. Device operation will be handled by reset routine.
	 */
	if (rc && adapter->failover_pending) {
		adapter->state = VNIC_OPEN;
		rc = 0;
	}
	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (test_bit(0, &adapter->resetting))
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);

	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	adapter->state = VNIC_CLOSED;
	return rc;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "[S:%d FOP:%d FRR:%d] Closing\n",
		   adapter->state, adapter->failover_pending,
		   adapter->force_reset_recovery);

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @tot_len - total length of data
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb - socket buffer
 * @num_entries - number of descriptors to be sent
 * @subcrq - first TX descriptor
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct sk_buff *skb,
				union sub_crq *indir_arr,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	u8 hdr_data[140] = {0};
	int tot_len;

	tot_len = build_hdr_data(hdr_field, skb, hdr_len,
				 hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);
	return 0;
}
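
/**
 * ibmvnic_tx_scrq_clean_buffer - unwind batched tx descriptors after a failure
 * @adapter: private device data
 * @tx_scrq: tx sub-CRQ whose indirect buffer should be cleaned
 *
 * Walks the indirect buffer backwards, returning each tx buffer to its
 * pool's free map, freeing the associated skb and reversing the stats
 * updates already made for it. Wakes the subqueue once enough entries
 * have been returned.
 */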
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff;
	struct ibmvnic_tx_pool *tx_pool;
	union sub_crq tx_scrq_entry;
	int queue_num;
	int entries;
	int index;
	int i;

	ind_bufp = &tx_scrq->ind_buf;
	entries = (u64)ind_bufp->index;
	queue_num = tx_scrq->pool_index;

	for (i = entries - 1; i >= 0; --i) {
		tx_scrq_entry = ind_bufp->indir_arr[i];
		if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
			continue;
		index = be32_to_cpu(tx_scrq_entry.v1.correlator);
		if (index & IBMVNIC_TSO_POOL_MASK) {
			tx_pool = &adapter->tso_pool[queue_num];
			index &= ~IBMVNIC_TSO_POOL_MASK;
		} else {
			tx_pool = &adapter->tx_pool[queue_num];
		}
		tx_pool->free_map[tx_pool->consumer_index] = index;
		tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
					  tx_pool->num_buffers - 1 :
					  tx_pool->consumer_index - 1;
		tx_buff = &tx_pool->tx_buff[index];
		adapter->netdev->stats.tx_packets--;
		adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
		adapter->tx_stats_buffers[queue_num].packets--;
		adapter->tx_stats_buffers[queue_num].bytes -=
						tx_buff->skb->len;
		dev_kfree_skb_any(tx_buff->skb);
		tx_buff->skb = NULL;
		adapter->netdev->stats.tx_dropped++;
	}
	ind_bufp->index = 0;
	if (atomic_sub_return(entries, &tx_scrq->used) <=
	    (adapter->req_tx_entries_per_subcrq / 2) &&
	    __netif_subqueue_stopped(adapter->netdev, queue_num)) {
		netif_wake_subqueue(adapter->netdev, queue_num);
		netdev_dbg(adapter->netdev, "Started queue %d\n",
			   queue_num);
	}
}
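
/**
 * ibmvnic_tx_scrq_flush - post the batched tx descriptors to the server
 * @adapter: private device data
 * @tx_scrq: tx sub-CRQ to flush
 *
 * Hands the accumulated indirect descriptor buffer to firmware in a
 * single indirect sub-CRQ hcall, cleaning the buffer up if the hcall
 * fails. Returns the hcall return code.
 */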
static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	u64 dma_addr;
	u64 entries;
	u64 handle;
	int rc;

	ind_bufp = &tx_scrq->ind_buf;
	dma_addr = (u64)ind_bufp->indir_dma;
	entries = (u64)ind_bufp->index;
	handle = tx_scrq->handle;

	if (!entries)
		return 0;
	rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
	if (rc)
		ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
	else
		ind_bufp->index = 0;
	return rc;
}
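
/**
 * ibmvnic_xmit - transmit an skb
 * @skb: packet to send
 * @netdev: net device
 *
 * Copies the skb (linear data and any frags) into the next free slot
 * of the queue's long term buffer, builds a tx descriptor (plus header
 * descriptors when firmware has requested them), batches it in the
 * indirect buffer, and flushes to the server when the batch is full or
 * no further packets are pending.
 */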
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	netdev_tx_t ret = NETDEV_TX_OK;
	unsigned int tx_map_failed = 0;
	union sub_crq indir_arr[16];
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	int index = 0;
	u8 proto = 0;

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, queue_num);
	ind_bufp = &tx_scrq->ind_buf;

	if (test_bit(0, &adapter->resetting)) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}
	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       skb_frag_off(frag), skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	/* post changes to long_term_buff *dst before VIOS accesses it */
	dma_wmb();

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}

	if ((*hdrs >> 7) & 1)
		build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);

	tx_crq.v1.n_crq_elem = num_entries;
	tx_buff->num_entries = num_entries;
	/* flush buffer if current entry can not fit */
	if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_flush_err;
	}

	indir_arr[0] = tx_crq;
	memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
	       num_entries * sizeof(struct ibmvnic_generic_scrq));
	ind_bufp->index += num_entries;
	if (__netdev_tx_sent_queue(txq, skb->len,
				   netdev_xmit_more() &&
				   ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_err;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
					>= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;
	goto out;

tx_flush_err:
	dev_kfree_skb_any(skb);
	tx_buff->skb = NULL;
	tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
				  tx_pool->num_buffers - 1 :
				  tx_pool->consumer_index - 1;
	tx_dropped++;
tx_err:
	if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
		dev_err_ratelimited(dev, "tx: send failed\n");

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable TX and report carrier off if queue is closed
		 * or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset or some other action.
		 */
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}
static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	union ibmvnic_crq crq;
	int rc;

	if (!is_valid_ether_addr(dev_addr)) {
		rc = -EADDRNOTAVAIL;
		goto err;
	}

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	if (rc || adapter->fw_done_rc) {
		rc = -EIO;
		mutex_unlock(&adapter->fw_lock);
		goto err;
	}
	mutex_unlock(&adapter->fw_lock);
	return 0;
err:
	ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
	return rc;
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int rc = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (adapter->state != VNIC_PROBED) {
		ether_addr_copy(adapter->mac_addr, addr->sa_data);
		rc = __ibmvnic_set_mac(netdev, addr->sa_data);
	}

	return rc;
}
/*
 * do_change_param_reset returns zero if we are able to keep processing reset
 * events, or non-zero if we hit a fatal error and must halt.
 */
static int do_change_param_reset(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_rwi *rwi,
				 u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN) {
		rc = __ibmvnic_close(netdev);
		if (rc)
			goto out;
	}

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	adapter->state = VNIC_PROBED;

	rc = init_crq_queue(adapter);

	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	rc = ibmvnic_reset_init(adapter, true);
	if (rc) {
		rc = IBMVNIC_INIT_FAILED;
		goto out;
	}

	/* If the adapter was in PROBE state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED)
		goto out;

	rc = ibmvnic_login(netdev);
	if (rc)
		goto out;

	rc = init_resources(adapter);
	if (rc)
		goto out;

	ibmvnic_disable_irqs(adapter);

	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		return 0;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

out:
	if (rc)
		adapter->state = reset_state;
	return rc;
}
/*
 * do_reset returns zero if we are able to keep processing reset events, or
 * non-zero if we hit a fatal error and must halt.
 */
static int do_reset(struct ibmvnic_adapter *adapter,
		    struct ibmvnic_rwi *rwi, u32 reset_state)
{
	u64 old_num_rx_queues, old_num_tx_queues;
	u64 old_num_rx_slots, old_num_tx_slots;
	struct net_device *netdev = adapter->netdev;
	int i, rc;

	netdev_dbg(adapter->netdev,
		   "[S:%d FOP:%d] Reset reason %d, reset_state %d\n",
		   adapter->state, adapter->failover_pending,
		   rwi->reset_reason, reset_state);

	rtnl_lock();
	/*
	 * Now that we have the rtnl lock, clear any pending failover.
	 * This will ensure ibmvnic_open() has either completed or will
	 * block until failover is complete.
	 */
	if (rwi->reset_reason == VNIC_RESET_FAILOVER)
		adapter->failover_pending = false;

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;
	old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
	old_num_tx_slots = adapter->req_tx_entries_per_subcrq;

	ibmvnic_cleanup(netdev);

	if (reset_state == VNIC_OPEN &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_FAILOVER) {
		adapter->state = VNIC_CLOSING;

		/* Release the RTNL lock before link state change and
		 * re-acquire after the link state change to allow
		 * linkwatch_event to grab the RTNL lock and run during
		 * a reset.
		 */
		rtnl_unlock();
		rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
		rtnl_lock();
		if (rc)
			goto out;

		if (adapter->state != VNIC_CLOSING) {
			rc = -1;
			goto out;
		}

		adapter->state = VNIC_CLOSED;
	}

	if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
		/* remove the closed state so when we call open it appears
		 * we are coming from the probed state.
		 */
		adapter->state = VNIC_PROBED;

		if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
			rc = ibmvnic_reenable_crq_queue(adapter);
			release_sub_crqs(adapter, 1);
		} else {
			rc = ibmvnic_reset_crq(adapter);
			if (rc == H_CLOSED || rc == H_SUCCESS) {
				rc = vio_enable_interrupts(adapter->vdev);
				if (rc)
					netdev_err(adapter->netdev,
						   "Reset failed to enable interrupts. rc=%d\n",
						   rc);
			}
		}

		if (rc) {
			netdev_err(adapter->netdev,
				   "Reset couldn't initialize crq. rc=%d\n", rc);
			goto out;
		}

		rc = ibmvnic_reset_init(adapter, true);
		if (rc) {
			rc = IBMVNIC_INIT_FAILED;
			goto out;
		}

		/* If the adapter was in PROBE state prior to the reset,
		 * exit here.
		 */
		if (reset_state == VNIC_PROBED) {
			rc = 0;
			goto out;
		}

		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues ||
		    adapter->req_rx_add_entries_per_subcrq !=
		    old_num_rx_slots ||
		    adapter->req_tx_entries_per_subcrq !=
		    old_num_tx_slots ||
		    !adapter->rx_pool ||
		    !adapter->tso_pool ||
		    !adapter->tx_pool) {
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			release_napi(adapter);
			release_vpd_data(adapter);

			rc = init_resources(adapter);
			if (rc)
				goto out;

		} else {
			rc = reset_tx_pools(adapter);
			if (rc) {
				netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
					   rc);
				goto out;
			}

			rc = reset_rx_pools(adapter);
			if (rc) {
				netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
					   rc);
				goto out;
			}
		}
		ibmvnic_disable_irqs(adapter);
	}
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED) {
		rc = 0;
		goto out;
	}

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	/* refresh device's multicast list */
	ibmvnic_set_multi(netdev);

	/* kick napi */
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_schedule(&adapter->napi[i]);

	if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
	    adapter->reset_reason == VNIC_RESET_MOBILITY)
		__netdev_notify_peers(netdev);

	rc = 0;

out:
	/* restore the adapter state if reset failed */
	if (rc)
		adapter->state = reset_state;
	rtnl_unlock();

	netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Reset done, rc %d\n",
		   adapter->state, adapter->failover_pending, rc);
	return rc;
}
static int do_hard_reset(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rwi *rwi, u32 reset_state)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
		   rwi->reset_reason);

	netif_carrier_off(netdev);
	adapter->reset_reason = rwi->reset_reason;

	ibmvnic_cleanup(netdev);
	release_resources(adapter);
	release_sub_crqs(adapter, 0);
	release_crq_queue(adapter);

	/* remove the closed state so when we call open it appears
	 * we are coming from the probed state.
	 */
	adapter->state = VNIC_PROBED;

	reinit_completion(&adapter->init_done);
	rc = init_crq_queue(adapter);
	if (rc) {
		netdev_err(adapter->netdev,
			   "Couldn't initialize crq. rc=%d\n", rc);
		goto out;
	}

	rc = ibmvnic_reset_init(adapter, false);
	if (rc)
		goto out;

	/* If the adapter was in PROBE state prior to the reset,
	 * exit here.
	 */
	if (reset_state == VNIC_PROBED)
		goto out;

	rc = ibmvnic_login(netdev);
	if (rc)
		goto out;

	rc = init_resources(adapter);
	if (rc)
		goto out;

	ibmvnic_disable_irqs(adapter);
	adapter->state = VNIC_CLOSED;

	if (reset_state == VNIC_CLOSED)
		goto out;

	rc = __ibmvnic_open(netdev);
	if (rc) {
		rc = IBMVNIC_OPEN_FAILED;
		goto out;
	}

	__netdev_notify_peers(netdev);
out:
	/* restore adapter state if reset failed */
	if (rc)
		adapter->state = reset_state;
	netdev_dbg(adapter->netdev, "[S:%d FOP:%d] Hard reset done, rc %d\n",
		   adapter->state, adapter->failover_pending, rc);
	return rc;
}
static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rwi *rwi;
	unsigned long flags;

	spin_lock_irqsave(&adapter->rwi_lock, flags);

	if (!list_empty(&adapter->rwi_list)) {
		rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
				       list);
		list_del(&rwi->list);
	} else {
		rwi = NULL;
	}

	spin_unlock_irqrestore(&adapter->rwi_lock, flags);

	return rwi;
}
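
/**
 * __ibmvnic_reset - process all queued reset work items
 * @work: the adapter's ibmvnic_reset work_struct
 *
 * Drains the adapter's rwi list, dispatching each item to
 * do_change_param_reset(), do_hard_reset() or do_reset() as
 * appropriate. If another pass already holds the resetting bit, the
 * work is re-queued with a delay instead.
 */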
2286 static void __ibmvnic_reset(struct work_struct *work)
2288 struct ibmvnic_rwi *rwi;
2289 struct ibmvnic_adapter *adapter;
2290 bool saved_state = false;
2291 unsigned long flags;
2295 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2297 if (test_and_set_bit_lock(0, &adapter->resetting)) {
2298 schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
2299 IBMVNIC_RESET_DELAY);
2303 rwi = get_next_rwi(adapter);
2305 spin_lock_irqsave(&adapter->state_lock, flags);
2307 if (adapter->state == VNIC_REMOVING ||
2308 adapter->state == VNIC_REMOVED) {
2309 spin_unlock_irqrestore(&adapter->state_lock, flags);
2316 reset_state = adapter->state;
2319 spin_unlock_irqrestore(&adapter->state_lock, flags);
2321 if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2322 /* CHANGE_PARAM requestor holds rtnl_lock */
2323 rc = do_change_param_reset(adapter, rwi, reset_state);
2324 } else if (adapter->force_reset_recovery) {
2326 * Since we are doing a hard reset now, clear the
2327 * failover_pending flag so we don't ignore any
2328 * future MOBILITY or other resets.
2330 adapter->failover_pending = false;
2332 /* Transport event occurred during previous reset */
2333 if (adapter->wait_for_reset) {
2334 /* Previous was CHANGE_PARAM; caller locked */
2335 adapter->force_reset_recovery = false;
2336 rc = do_hard_reset(adapter, rwi, reset_state);
2339 adapter->force_reset_recovery = false;
2340 rc = do_hard_reset(adapter, rwi, reset_state);
2344 /* give backing device time to settle down */
2345 netdev_dbg(adapter->netdev,
2346 "[S:%d] Hard reset failed, waiting 60 secs\n",
2348 set_current_state(TASK_UNINTERRUPTIBLE);
2349 schedule_timeout(60 * HZ);
2352 rc = do_reset(adapter, rwi, reset_state);
2355 adapter->last_reset_time = jiffies;
2358 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
2360 rwi = get_next_rwi(adapter);
2362 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2363 rwi->reset_reason == VNIC_RESET_MOBILITY))
2364 adapter->force_reset_recovery = true;
2367 if (adapter->wait_for_reset) {
2368 adapter->reset_done_rc = rc;
2369 complete(&adapter->reset_done);
2372 clear_bit_unlock(0, &adapter->resetting);
2374 netdev_dbg(adapter->netdev,
2375 "[S:%d FRR:%d WFR:%d] Done processing resets\n",
2376 adapter->state, adapter->force_reset_recovery,
2377 adapter->wait_for_reset);
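/* Delayed-work wrapper used to retry __ibmvnic_reset() once
 * IBMVNIC_RESET_DELAY has elapsed.
 */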
2380 static void __ibmvnic_delayed_reset(struct work_struct *work)
2382 struct ibmvnic_adapter *adapter;
2384 adapter = container_of(work, struct ibmvnic_adapter,
2385 ibmvnic_delayed_reset.work);
2386 __ibmvnic_reset(&adapter->ibmvnic_reset);
2389 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2390 enum ibmvnic_reset_reason reason)
2392 struct list_head *entry, *tmp_entry;
2393 struct ibmvnic_rwi *rwi, *tmp;
2394 struct net_device *netdev = adapter->netdev;
2395 unsigned long flags;
2398 spin_lock_irqsave(&adapter->rwi_lock, flags);
2400 /*
2401 * If failover is pending don't schedule any other reset.
2402 * Instead let the failover complete. If there is already a
2403 * failover reset scheduled, we will detect and drop the
2404 * duplicate reset when walking the ->rwi_list below.
2405 */
2406 if (adapter->state == VNIC_REMOVING ||
2407 adapter->state == VNIC_REMOVED ||
2408 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
2410 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2414 if (adapter->state == VNIC_PROBING) {
2415 netdev_warn(netdev, "Adapter reset during probe\n");
2416 ret = adapter->init_done_rc = EAGAIN;
2420 list_for_each(entry, &adapter->rwi_list) {
2421 tmp = list_entry(entry, struct ibmvnic_rwi, list);
2422 if (tmp->reset_reason == reason) {
2423 netdev_dbg(netdev, "Skipping matching reset, reason=%d\n",
2430 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2435 /* if we just received a transport event,
2436 * flush reset queue and process this reset
2437 */
2438 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2439 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2442 rwi->reset_reason = reason;
2443 list_add_tail(&rwi->list, &adapter->rwi_list);
2444 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
2445 schedule_work(&adapter->ibmvnic_reset);
2449 /* ibmvnic_close() below can block, so drop the lock first */
2450 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2453 ibmvnic_close(netdev);
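/* .ndo_tx_timeout handler: request a TIMEOUT reset unless a reset is
 * already running or the watchdog fired inside the last watchdog_timeo
 * window.
 */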
2458 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
2460 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2462 if (test_bit(0, &adapter->resetting)) {
2463 netdev_err(adapter->netdev,
2464 "Adapter is resetting, skip timeout reset\n");
2467 /* No queuing up reset until at least 5 seconds (default watchdog val)
2468 * after last reset
2469 */
2470 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
2471 netdev_dbg(dev, "Not yet time to tx timeout.\n");
2474 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
2477 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2478 struct ibmvnic_rx_buff *rx_buff)
2480 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2482 rx_buff->skb = NULL;
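/* Return this buffer's index to the free map at the next_alloc cursor
 * and decrement the pool's available count.
 */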
2484 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2485 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2487 atomic_dec(&pool->available);
2490 static int ibmvnic_poll(struct napi_struct *napi, int budget)
2492 struct ibmvnic_sub_crq_queue *rx_scrq;
2493 struct ibmvnic_adapter *adapter;
2494 struct net_device *netdev;
2495 int frames_processed;
2499 adapter = netdev_priv(netdev);
2500 scrq_num = (int)(napi - adapter->napi);
2501 frames_processed = 0;
2502 rx_scrq = adapter->rx_scrq[scrq_num];
2505 while (frames_processed < budget) {
2506 struct sk_buff *skb;
2507 struct ibmvnic_rx_buff *rx_buff;
2508 union sub_crq *next;
2513 if (unlikely(test_bit(0, &adapter->resetting) &&
2514 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2515 enable_scrq_irq(adapter, rx_scrq);
2516 napi_complete_done(napi, frames_processed);
2517 return frames_processed;
2520 if (!pending_scrq(adapter, rx_scrq))
2521 break;
2522 /* The queue entry at the current index is peeked at above
2523 * to determine that there is a valid descriptor awaiting
2524 * processing. We want to be sure that the current slot
2525 * holds a valid descriptor before reading its contents.
2526 */
2527 dma_rmb();
2528 next = ibmvnic_next_scrq(adapter, rx_scrq);
2529 rx_buff =
2530 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
2531 rx_comp.correlator);
2532 /* do error checking */
2533 if (next->rx_comp.rc) {
2534 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2535 be16_to_cpu(next->rx_comp.rc));
2536 /* free the entry */
2537 next->rx_comp.first = 0;
2538 dev_kfree_skb_any(rx_buff->skb);
2539 remove_buff_from_pool(adapter, rx_buff);
2541 } else if (!rx_buff->skb) {
2542 /* free the entry */
2543 next->rx_comp.first = 0;
2544 remove_buff_from_pool(adapter, rx_buff);
2548 length = be32_to_cpu(next->rx_comp.len);
2549 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2550 flags = next->rx_comp.flags;
2551 skb = rx_buff->skb;
2552 /* load long_term_buff before copying to skb */
2553 dma_rmb();
2554 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2555 length);
2557 /* VLAN Header has been stripped by the system firmware and
2558 * needs to be inserted by the driver
2559 */
2560 if (adapter->rx_vlan_header_insertion &&
2561 (flags & IBMVNIC_VLAN_STRIPPED))
2562 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2563 ntohs(next->rx_comp.vlan_tci));
2565 /* free the entry */
2566 next->rx_comp.first = 0;
2567 remove_buff_from_pool(adapter, rx_buff);
2569 skb_put(skb, length);
2570 skb->protocol = eth_type_trans(skb, netdev);
2571 skb_record_rx_queue(skb, scrq_num);
2573 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2574 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2575 skb->ip_summed = CHECKSUM_UNNECESSARY;
2579 napi_gro_receive(napi, skb); /* send it up */
2580 netdev->stats.rx_packets++;
2581 netdev->stats.rx_bytes += length;
2582 adapter->rx_stats_buffers[scrq_num].packets++;
2583 adapter->rx_stats_buffers[scrq_num].bytes += length;
2587 if (adapter->state != VNIC_CLOSING &&
2588 ((atomic_read(&adapter->rx_pool[scrq_num].available) <
2589 adapter->req_rx_add_entries_per_subcrq / 2) ||
2590 frames_processed < budget))
2591 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2592 if (frames_processed < budget) {
2593 if (napi_complete_done(napi, frames_processed)) {
2594 enable_scrq_irq(adapter, rx_scrq);
2595 if (pending_scrq(adapter, rx_scrq)) {
2597 if (napi_reschedule(napi)) {
2598 disable_scrq_irq(adapter, rx_scrq);
2604 return frames_processed;
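/* Save the current ring and queue settings, request a CHANGE_PARAM
 * reset, and retry with the saved (fallback) values if the server
 * rejects the desired ones.
 */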
2607 static int wait_for_reset(struct ibmvnic_adapter *adapter)
2611 adapter->fallback.mtu = adapter->req_mtu;
2612 adapter->fallback.rx_queues = adapter->req_rx_queues;
2613 adapter->fallback.tx_queues = adapter->req_tx_queues;
2614 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2615 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2617 reinit_completion(&adapter->reset_done);
2618 adapter->wait_for_reset = true;
2619 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2625 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2632 if (adapter->reset_done_rc) {
2634 adapter->desired.mtu = adapter->fallback.mtu;
2635 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2636 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2637 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2638 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2640 reinit_completion(&adapter->reset_done);
2641 adapter->wait_for_reset = true;
2642 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2647 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2655 adapter->wait_for_reset = false;
2660 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2662 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2664 adapter->desired.mtu = new_mtu + ETH_HLEN;
2666 return wait_for_reset(adapter);
2669 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2670 struct net_device *dev,
2671 netdev_features_t features)
2673 /* Some backing hardware adapters cannot
2674 * handle packets with an MSS less than 224
2675 * or with only one segment.
2676 */
2677 if (skb_is_gso(skb)) {
2678 if (skb_shinfo(skb)->gso_size < 224 ||
2679 skb_shinfo(skb)->gso_segs == 1)
2680 features &= ~NETIF_F_GSO_MASK;
2686 static const struct net_device_ops ibmvnic_netdev_ops = {
2687 .ndo_open = ibmvnic_open,
2688 .ndo_stop = ibmvnic_close,
2689 .ndo_start_xmit = ibmvnic_xmit,
2690 .ndo_set_rx_mode = ibmvnic_set_multi,
2691 .ndo_set_mac_address = ibmvnic_set_mac,
2692 .ndo_validate_addr = eth_validate_addr,
2693 .ndo_tx_timeout = ibmvnic_tx_timeout,
2694 .ndo_change_mtu = ibmvnic_change_mtu,
2695 .ndo_features_check = ibmvnic_features_check,
2698 /* ethtool functions */
2700 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2701 struct ethtool_link_ksettings *cmd)
2703 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2706 rc = send_query_phys_parms(adapter);
2707 if (rc) {
2708 adapter->speed = SPEED_UNKNOWN;
2709 adapter->duplex = DUPLEX_UNKNOWN;
2710 }
2711 cmd->base.speed = adapter->speed;
2712 cmd->base.duplex = adapter->duplex;
2713 cmd->base.port = PORT_FIBRE;
2714 cmd->base.phy_address = 0;
2715 cmd->base.autoneg = AUTONEG_ENABLE;
2720 static void ibmvnic_get_drvinfo(struct net_device *netdev,
2721 struct ethtool_drvinfo *info)
2723 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2725 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2726 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2727 strlcpy(info->fw_version, adapter->fw_version,
2728 sizeof(info->fw_version));
2731 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2733 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2735 return adapter->msg_enable;
2738 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2740 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2742 adapter->msg_enable = data;
2745 static u32 ibmvnic_get_link(struct net_device *netdev)
2747 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2749 /* Don't need to send a query because we request a logical link up at
2750 * init and then we wait for link state indications
2751 */
2752 return adapter->logical_link_state;
2755 static void ibmvnic_get_ringparam(struct net_device *netdev,
2756 struct ethtool_ringparam *ring)
2758 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2760 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2761 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2762 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2763 } else {
2764 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2765 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2766 }
2767 ring->rx_mini_max_pending = 0;
2768 ring->rx_jumbo_max_pending = 0;
2769 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2770 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2771 ring->rx_mini_pending = 0;
2772 ring->rx_jumbo_pending = 0;
2775 static int ibmvnic_set_ringparam(struct net_device *netdev,
2776 struct ethtool_ringparam *ring)
2778 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2782 adapter->desired.rx_entries = ring->rx_pending;
2783 adapter->desired.tx_entries = ring->tx_pending;
2785 ret = wait_for_reset(adapter);
2788 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2789 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2791 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2792 ring->rx_pending, ring->tx_pending,
2793 adapter->req_rx_add_entries_per_subcrq,
2794 adapter->req_tx_entries_per_subcrq);
2798 static void ibmvnic_get_channels(struct net_device *netdev,
2799 struct ethtool_channels *channels)
2801 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2803 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2804 channels->max_rx = adapter->max_rx_queues;
2805 channels->max_tx = adapter->max_tx_queues;
2806 } else {
2807 channels->max_rx = IBMVNIC_MAX_QUEUES;
2808 channels->max_tx = IBMVNIC_MAX_QUEUES;
2809 }
2811 channels->max_other = 0;
2812 channels->max_combined = 0;
2813 channels->rx_count = adapter->req_rx_queues;
2814 channels->tx_count = adapter->req_tx_queues;
2815 channels->other_count = 0;
2816 channels->combined_count = 0;
2819 static int ibmvnic_set_channels(struct net_device *netdev,
2820 struct ethtool_channels *channels)
2822 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2826 adapter->desired.rx_queues = channels->rx_count;
2827 adapter->desired.tx_queues = channels->tx_count;
2829 ret = wait_for_reset(adapter);
2832 (adapter->req_rx_queues != channels->rx_count ||
2833 adapter->req_tx_queues != channels->tx_count))
2835 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2836 channels->rx_count, channels->tx_count,
2837 adapter->req_rx_queues, adapter->req_tx_queues);
2842 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2844 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2847 switch (stringset) {
2849 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2850 i++, data += ETH_GSTRING_LEN)
2851 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2853 for (i = 0; i < adapter->req_tx_queues; i++) {
2854 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2855 data += ETH_GSTRING_LEN;
2857 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2858 data += ETH_GSTRING_LEN;
2860 snprintf(data, ETH_GSTRING_LEN,
2861 "tx%d_dropped_packets", i);
2862 data += ETH_GSTRING_LEN;
2865 for (i = 0; i < adapter->req_rx_queues; i++) {
2866 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2867 data += ETH_GSTRING_LEN;
2869 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2870 data += ETH_GSTRING_LEN;
2872 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2873 data += ETH_GSTRING_LEN;
2877 case ETH_SS_PRIV_FLAGS:
2878 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2879 strcpy(data + i * ETH_GSTRING_LEN,
2880 ibmvnic_priv_flags[i]);
2887 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2889 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2893 return ARRAY_SIZE(ibmvnic_stats) +
2894 adapter->req_tx_queues * NUM_TX_STATS +
2895 adapter->req_rx_queues * NUM_RX_STATS;
2896 case ETH_SS_PRIV_FLAGS:
2897 return ARRAY_SIZE(ibmvnic_priv_flags);
2903 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2904 struct ethtool_stats *stats, u64 *data)
2906 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2907 union ibmvnic_crq crq;
2911 memset(&crq, 0, sizeof(crq));
2912 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2913 crq.request_statistics.cmd = REQUEST_STATISTICS;
2914 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2915 crq.request_statistics.len =
2916 cpu_to_be32(sizeof(struct ibmvnic_statistics));
2918 /* Wait for data to be written */
2919 reinit_completion(&adapter->stats_done);
2920 rc = ibmvnic_send_crq(adapter, &crq);
2923 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
2927 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2928 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2929 ibmvnic_stats[i].offset));
2931 for (j = 0; j < adapter->req_tx_queues; j++) {
2932 data[i] = adapter->tx_stats_buffers[j].packets;
2934 data[i] = adapter->tx_stats_buffers[j].bytes;
2936 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2940 for (j = 0; j < adapter->req_rx_queues; j++) {
2941 data[i] = adapter->rx_stats_buffers[j].packets;
2943 data[i] = adapter->rx_stats_buffers[j].bytes;
2945 data[i] = adapter->rx_stats_buffers[j].interrupts;
2950 static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2952 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2954 return adapter->priv_flags;
2957 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2959 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2960 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2962 if (which_maxes)
2963 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2964 else
2965 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2967 return 0;
2969 static const struct ethtool_ops ibmvnic_ethtool_ops = {
2970 .get_drvinfo = ibmvnic_get_drvinfo,
2971 .get_msglevel = ibmvnic_get_msglevel,
2972 .set_msglevel = ibmvnic_set_msglevel,
2973 .get_link = ibmvnic_get_link,
2974 .get_ringparam = ibmvnic_get_ringparam,
2975 .set_ringparam = ibmvnic_set_ringparam,
2976 .get_channels = ibmvnic_get_channels,
2977 .set_channels = ibmvnic_set_channels,
2978 .get_strings = ibmvnic_get_strings,
2979 .get_sset_count = ibmvnic_get_sset_count,
2980 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
2981 .get_link_ksettings = ibmvnic_get_link_ksettings,
2982 .get_priv_flags = ibmvnic_get_priv_flags,
2983 .set_priv_flags = ibmvnic_set_priv_flags,
2986 /* Routines for managing CRQs/sCRQs */
2988 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2989 struct ibmvnic_sub_crq_queue *scrq)
2994 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
2999 free_irq(scrq->irq, scrq);
3000 irq_dispose_mapping(scrq->irq);
3005 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
3006 atomic_set(&scrq->used, 0);
3008 scrq->ind_buf.index = 0;
3010 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3014 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3015 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3019 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3023 if (!adapter->tx_scrq || !adapter->rx_scrq)
3026 for (i = 0; i < adapter->req_tx_queues; i++) {
3027 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
3028 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3033 for (i = 0; i < adapter->req_rx_queues; i++) {
3034 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
3035 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3043 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
3044 struct ibmvnic_sub_crq_queue *scrq,
3047 struct device *dev = &adapter->vdev->dev;
3050 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3053 /* Close the sub-crqs */
3055 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3056 adapter->vdev->unit_address,
3058 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3061 netdev_err(adapter->netdev,
3062 "Failed to release sub-CRQ %16lx, rc = %ld\n",
3067 dma_free_coherent(dev,
3069 scrq->ind_buf.indir_arr,
3070 scrq->ind_buf.indir_dma);
3072 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3074 free_pages((unsigned long)scrq->msgs, 2);
3078 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3081 struct device *dev = &adapter->vdev->dev;
3082 struct ibmvnic_sub_crq_queue *scrq;
3085 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
3090 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
3092 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3093 goto zero_page_failed;
3096 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3098 if (dma_mapping_error(dev, scrq->msg_token)) {
3099 dev_warn(dev, "Couldn't map crq queue messages page\n");
3103 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3104 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3106 if (rc == H_RESOURCE)
3107 rc = ibmvnic_reset_crq(adapter);
3109 if (rc == H_CLOSED) {
3110 dev_warn(dev, "Partner adapter not ready, waiting.\n");
3112 dev_warn(dev, "Error %d registering sub-crq\n", rc);
3116 scrq->adapter = adapter;
3117 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
3118 scrq->ind_buf.index = 0;
3120 scrq->ind_buf.indir_arr =
3121 dma_alloc_coherent(dev,
3123 &scrq->ind_buf.indir_dma,
3126 if (!scrq->ind_buf.indir_arr)
3129 spin_lock_init(&scrq->lock);
3131 netdev_dbg(adapter->netdev,
3132 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
3133 scrq->crq_num, scrq->hw_irq, scrq->irq);
3139 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3140 adapter->vdev->unit_address,
3142 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3144 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3147 free_pages((unsigned long)scrq->msgs, 2);
3154 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
3158 if (adapter->tx_scrq) {
3159 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
3160 if (!adapter->tx_scrq[i])
3163 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3165 if (adapter->tx_scrq[i]->irq) {
3166 free_irq(adapter->tx_scrq[i]->irq,
3167 adapter->tx_scrq[i]);
3168 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
3169 adapter->tx_scrq[i]->irq = 0;
3172 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3176 kfree(adapter->tx_scrq);
3177 adapter->tx_scrq = NULL;
3178 adapter->num_active_tx_scrqs = 0;
3181 if (adapter->rx_scrq) {
3182 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
3183 if (!adapter->rx_scrq[i])
3186 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3188 if (adapter->rx_scrq[i]->irq) {
3189 free_irq(adapter->rx_scrq[i]->irq,
3190 adapter->rx_scrq[i]);
3191 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
3192 adapter->rx_scrq[i]->irq = 0;
3195 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3199 kfree(adapter->rx_scrq);
3200 adapter->rx_scrq = NULL;
3201 adapter->num_active_rx_scrqs = 0;
3205 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3206 struct ibmvnic_sub_crq_queue *scrq)
3208 struct device *dev = &adapter->vdev->dev;
3211 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3212 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3214 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
3219 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3220 struct ibmvnic_sub_crq_queue *scrq)
3222 struct device *dev = &adapter->vdev->dev;
3225 if (scrq->hw_irq > 0x100000000ULL) {
3226 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3230 if (test_bit(0, &adapter->resetting) &&
3231 adapter->reset_reason == VNIC_RESET_MOBILITY) {
3232 u64 val = (0xff000000) | scrq->hw_irq;
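/* Build the XIRR value passed to H_EOI: CPPR 0xff (least favored
 * priority) in the top byte, the interrupt source number in the low
 * bits (XICS interrupt-controller convention).
 */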
3234 rc = plpar_hcall_norets(H_EOI, val);
3235 /* H_EOI would fail with rc = H_FUNCTION when running
3236 * in XIVE mode which is expected, but not an error.
3238 if (rc && (rc != H_FUNCTION))
3239 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3243 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3244 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3246 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
3251 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3252 struct ibmvnic_sub_crq_queue *scrq)
3254 struct device *dev = &adapter->vdev->dev;
3255 struct ibmvnic_tx_pool *tx_pool;
3256 struct ibmvnic_tx_buff *txbuff;
3257 struct netdev_queue *txq;
3258 union sub_crq *next;
3263 while (pending_scrq(adapter, scrq)) {
3264 unsigned int pool = scrq->pool_index;
3265 int num_entries = 0;
3266 int total_bytes = 0;
3267 int num_packets = 0;
3269 /* The queue entry at the current index is peeked at above
3270 * to determine that there is a valid descriptor awaiting
3271 * processing. We want to be sure that the current slot
3272 * holds a valid descriptor before reading its contents.
3273 */
3274 dma_rmb();
3276 next = ibmvnic_next_scrq(adapter, scrq);
3277 for (i = 0; i < next->tx_comp.num_comps; i++) {
3278 if (next->tx_comp.rcs[i])
3279 dev_err(dev, "tx error %x\n",
3280 next->tx_comp.rcs[i]);
3281 index = be32_to_cpu(next->tx_comp.correlators[i]);
3282 if (index & IBMVNIC_TSO_POOL_MASK) {
3283 tx_pool = &adapter->tso_pool[pool];
3284 index &= ~IBMVNIC_TSO_POOL_MASK;
3286 tx_pool = &adapter->tx_pool[pool];
3289 txbuff = &tx_pool->tx_buff[index];
3291 num_entries += txbuff->num_entries;
3293 total_bytes += txbuff->skb->len;
3294 dev_consume_skb_irq(txbuff->skb);
3297 netdev_warn(adapter->netdev,
3298 "TX completion received with NULL socket buffer\n");
3300 tx_pool->free_map[tx_pool->producer_index] = index;
3301 tx_pool->producer_index =
3302 (tx_pool->producer_index + 1) %
3303 tx_pool->num_buffers;
3305 /* remove tx_comp scrq */
3306 next->tx_comp.first = 0;
3308 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
3309 netdev_tx_completed_queue(txq, num_packets, total_bytes);
3311 if (atomic_sub_return(num_entries, &scrq->used) <=
3312 (adapter->req_tx_entries_per_subcrq / 2) &&
3313 __netif_subqueue_stopped(adapter->netdev,
3314 scrq->pool_index)) {
3315 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
3316 netdev_dbg(adapter->netdev, "Started queue %d\n",
3321 enable_scrq_irq(adapter, scrq);
3323 if (pending_scrq(adapter, scrq)) {
3324 disable_scrq_irq(adapter, scrq);
3331 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3333 struct ibmvnic_sub_crq_queue *scrq = instance;
3334 struct ibmvnic_adapter *adapter = scrq->adapter;
3336 disable_scrq_irq(adapter, scrq);
3337 ibmvnic_complete_tx(adapter, scrq);
3342 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3344 struct ibmvnic_sub_crq_queue *scrq = instance;
3345 struct ibmvnic_adapter *adapter = scrq->adapter;
3347 /* When booting a kdump kernel we can hit pending interrupts
3348 * prior to completing driver initialization.
3349 */
3350 if (unlikely(adapter->state != VNIC_OPEN))
3353 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3355 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3356 disable_scrq_irq(adapter, scrq);
3357 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3363 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3365 struct device *dev = &adapter->vdev->dev;
3366 struct ibmvnic_sub_crq_queue *scrq;
3370 for (i = 0; i < adapter->req_tx_queues; i++) {
3371 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3373 scrq = adapter->tx_scrq[i];
3374 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3378 dev_err(dev, "Error mapping irq\n");
3379 goto req_tx_irq_failed;
3382 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3383 adapter->vdev->unit_address, i);
3384 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
3385 0, scrq->name, scrq);
3388 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3390 irq_dispose_mapping(scrq->irq);
3391 goto req_tx_irq_failed;
3395 for (i = 0; i < adapter->req_rx_queues; i++) {
3396 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3398 scrq = adapter->rx_scrq[i];
3399 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3402 dev_err(dev, "Error mapping irq\n");
3403 goto req_rx_irq_failed;
3405 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3406 adapter->vdev->unit_address, i);
3407 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
3408 0, scrq->name, scrq);
3410 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3412 irq_dispose_mapping(scrq->irq);
3413 goto req_rx_irq_failed;
3419 for (j = 0; j < i; j++) {
3420 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3421 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
3423 i = adapter->req_tx_queues;
3425 for (j = 0; j < i; j++) {
3426 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
3427 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
3429 release_sub_crqs(adapter, 1);
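/* Allocate the requested tx and rx sub-CRQs in a single array. If
 * fewer queues could be registered than requested, shrink the request
 * counts toward the server's advertised minimums before assigning them.
 */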
3433 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
3435 struct device *dev = &adapter->vdev->dev;
3436 struct ibmvnic_sub_crq_queue **allqueues;
3437 int registered_queues = 0;
3442 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3444 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
3448 for (i = 0; i < total_queues; i++) {
3449 allqueues[i] = init_sub_crq_queue(adapter);
3450 if (!allqueues[i]) {
3451 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3454 registered_queues++;
3457 /* Make sure we were able to register the minimum number of queues */
3458 if (registered_queues <
3459 adapter->min_tx_queues + adapter->min_rx_queues) {
3460 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3464 /* Distribute the allocation shortfall across the requested queue counts */
3465 for (i = 0; i < total_queues - registered_queues + more; i++) {
3466 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3469 if (adapter->req_rx_queues > adapter->min_rx_queues)
3470 adapter->req_rx_queues--;
3475 if (adapter->req_tx_queues > adapter->min_tx_queues)
3476 adapter->req_tx_queues--;
3483 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3484 sizeof(*adapter->tx_scrq), GFP_KERNEL);
3485 if (!adapter->tx_scrq)
3488 for (i = 0; i < adapter->req_tx_queues; i++) {
3489 adapter->tx_scrq[i] = allqueues[i];
3490 adapter->tx_scrq[i]->pool_index = i;
3491 adapter->num_active_tx_scrqs++;
3494 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3495 sizeof(*adapter->rx_scrq), GFP_KERNEL);
3496 if (!adapter->rx_scrq)
3499 for (i = 0; i < adapter->req_rx_queues; i++) {
3500 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3501 adapter->rx_scrq[i]->scrq_num = i;
3502 adapter->num_active_rx_scrqs++;
3509 kfree(adapter->tx_scrq);
3510 adapter->tx_scrq = NULL;
3512 for (i = 0; i < registered_queues; i++)
3513 release_sub_crq_queue(adapter, allqueues[i], 1);
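/* Translate the desired settings into a series of REQUEST_CAPABILITY
 * CRQs, clamping ring sizes so that a long term buffer never exceeds
 * IBMVNIC_MAX_LTB_SIZE.
 */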
3518 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
3520 struct device *dev = &adapter->vdev->dev;
3521 union ibmvnic_crq crq;
3525 /* Sub-CRQ entries are 32 bytes long */
3526 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3528 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3529 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3530 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3534 if (adapter->desired.mtu)
3535 adapter->req_mtu = adapter->desired.mtu;
3537 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3539 if (!adapter->desired.tx_entries)
3540 adapter->desired.tx_entries =
3541 adapter->max_tx_entries_per_subcrq;
3542 if (!adapter->desired.rx_entries)
3543 adapter->desired.rx_entries =
3544 adapter->max_rx_add_entries_per_subcrq;
3546 max_entries = IBMVNIC_MAX_LTB_SIZE /
3547 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3549 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3550 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3551 adapter->desired.tx_entries = max_entries;
3554 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3555 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3556 adapter->desired.rx_entries = max_entries;
3559 if (adapter->desired.tx_entries)
3560 adapter->req_tx_entries_per_subcrq =
3561 adapter->desired.tx_entries;
3563 adapter->req_tx_entries_per_subcrq =
3564 adapter->max_tx_entries_per_subcrq;
3566 if (adapter->desired.rx_entries)
3567 adapter->req_rx_add_entries_per_subcrq =
3568 adapter->desired.rx_entries;
3570 adapter->req_rx_add_entries_per_subcrq =
3571 adapter->max_rx_add_entries_per_subcrq;
3573 if (adapter->desired.tx_queues)
3574 adapter->req_tx_queues =
3575 adapter->desired.tx_queues;
3577 adapter->req_tx_queues =
3578 adapter->opt_tx_comp_sub_queues;
3580 if (adapter->desired.rx_queues)
3581 adapter->req_rx_queues =
3582 adapter->desired.rx_queues;
3584 adapter->req_rx_queues =
3585 adapter->opt_rx_comp_queues;
3587 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3590 memset(&crq, 0, sizeof(crq));
3591 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3592 crq.request_capability.cmd = REQUEST_CAPABILITY;
3594 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3595 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3596 atomic_inc(&adapter->running_cap_crqs);
3597 ibmvnic_send_crq(adapter, &crq);
3599 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3600 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3601 atomic_inc(&adapter->running_cap_crqs);
3602 ibmvnic_send_crq(adapter, &crq);
3604 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3605 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3606 atomic_inc(&adapter->running_cap_crqs);
3607 ibmvnic_send_crq(adapter, &crq);
3609 crq.request_capability.capability =
3610 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3611 crq.request_capability.number =
3612 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3613 atomic_inc(&adapter->running_cap_crqs);
3614 ibmvnic_send_crq(adapter, &crq);
3616 crq.request_capability.capability =
3617 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3618 crq.request_capability.number =
3619 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3620 atomic_inc(&adapter->running_cap_crqs);
3621 ibmvnic_send_crq(adapter, &crq);
3623 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3624 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3625 atomic_inc(&adapter->running_cap_crqs);
3626 ibmvnic_send_crq(adapter, &crq);
3628 if (adapter->netdev->flags & IFF_PROMISC) {
3629 if (adapter->promisc_supported) {
3630 crq.request_capability.capability =
3631 cpu_to_be16(PROMISC_REQUESTED);
3632 crq.request_capability.number = cpu_to_be64(1);
3633 atomic_inc(&adapter->running_cap_crqs);
3634 ibmvnic_send_crq(adapter, &crq);
3637 crq.request_capability.capability =
3638 cpu_to_be16(PROMISC_REQUESTED);
3639 crq.request_capability.number = cpu_to_be64(0);
3640 atomic_inc(&adapter->running_cap_crqs);
3641 ibmvnic_send_crq(adapter, &crq);
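/* Nonzero when the descriptor at the queue's cursor has been posted by
 * the server, i.e. the valid bit is set in generic.first.
 */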
3645 static int pending_scrq(struct ibmvnic_adapter *adapter,
3646 struct ibmvnic_sub_crq_queue *scrq)
3648 union sub_crq *entry = &scrq->msgs[scrq->cur];
3650 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
3651 return 1;
3652 else
3653 return 0;
3656 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3657 struct ibmvnic_sub_crq_queue *scrq)
3659 union sub_crq *entry;
3660 unsigned long flags;
3662 spin_lock_irqsave(&scrq->lock, flags);
3663 entry = &scrq->msgs[scrq->cur];
3664 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3665 if (++scrq->cur == scrq->size)
3670 spin_unlock_irqrestore(&scrq->lock, flags);
3672 /* Ensure that the entire buffer descriptor has been
3673 * loaded before reading its contents
3674 */
3675 dma_rmb();
3677 return entry;
3680 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3682 struct ibmvnic_crq_queue *queue = &adapter->crq;
3683 union ibmvnic_crq *crq;
3685 crq = &queue->msgs[queue->cur];
3686 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3687 if (++queue->cur == queue->size)
3696 static void print_subcrq_error(struct device *dev, int rc, const char *func)
3700 dev_warn_ratelimited(dev,
3701 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3705 dev_warn_ratelimited(dev,
3706 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3710 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3715 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3716 u64 remote_handle, u64 ioba, u64 num_entries)
3718 unsigned int ua = adapter->vdev->unit_address;
3719 struct device *dev = &adapter->vdev->dev;
3722 /* Make sure the hypervisor sees the complete request */
3723 dma_wmb();
3724 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3725 cpu_to_be64(remote_handle),
3729 print_subcrq_error(dev, rc, __func__);
3734 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3735 union ibmvnic_crq *crq)
3737 unsigned int ua = adapter->vdev->unit_address;
3738 struct device *dev = &adapter->vdev->dev;
3739 u64 *u64_crq = (u64 *)crq;
3742 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3743 (unsigned long int)cpu_to_be64(u64_crq[0]),
3744 (unsigned long int)cpu_to_be64(u64_crq[1]));
3746 if (!adapter->crq.active &&
3747 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3748 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3752 /* Make sure the hypervisor sees the complete request */
3753 dma_wmb();
3755 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3756 cpu_to_be64(u64_crq[0]),
3757 cpu_to_be64(u64_crq[1]));
3760 if (rc == H_CLOSED) {
3761 dev_warn(dev, "CRQ Queue closed\n");
3762 /* do not reset, report the failure, wait for passive init from server */
3765 dev_warn(dev, "Send error (rc=%d)\n", rc);
3771 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3773 struct device *dev = &adapter->vdev->dev;
3774 union ibmvnic_crq crq;
3778 memset(&crq, 0, sizeof(crq));
3779 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3780 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3781 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3784 rc = ibmvnic_send_crq(adapter, &crq);
3790 } while (retries > 0);
3793 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
3800 static int send_version_xchg(struct ibmvnic_adapter *adapter)
3802 union ibmvnic_crq crq;
3804 memset(&crq, 0, sizeof(crq));
3805 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3806 crq.version_exchange.cmd = VERSION_EXCHANGE;
3807 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3809 return ibmvnic_send_crq(adapter, &crq);
3812 struct vnic_login_client_data {
3818 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3822 /* Calculate the amount of buffer space needed for the
3823 * vnic client data in the login buffer. There are four entries,
3824 * OS name, LPAR name, device name, and a null last entry.
3825 */
3826 len = 4 * sizeof(struct vnic_login_client_data);
3827 len += 6; /* "Linux" plus NULL */
3828 len += strlen(utsname()->nodename) + 1;
3829 len += strlen(adapter->netdev->name) + 1;
3834 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3835 struct vnic_login_client_data *vlcd)
3837 const char *os_name = "Linux";
3840 /* Type 1 - LPAR OS */
3842 len = strlen(os_name) + 1;
3843 vlcd->len = cpu_to_be16(len);
3844 strncpy(vlcd->name, os_name, len);
3845 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3847 /* Type 2 - LPAR name */
3849 len = strlen(utsname()->nodename) + 1;
3850 vlcd->len = cpu_to_be16(len);
3851 strncpy(vlcd->name, utsname()->nodename, len);
3852 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3854 /* Type 3 - device name */
3856 len = strlen(adapter->netdev->name) + 1;
3857 vlcd->len = cpu_to_be16(len);
3858 strncpy(vlcd->name, adapter->netdev->name, len);
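/* Build the login buffer (queue counts, sub-CRQ handles, response
 * buffer address, client data) and send the LOGIN CRQ to the server.
 */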
3861 static int send_login(struct ibmvnic_adapter *adapter)
3863 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3864 struct ibmvnic_login_buffer *login_buffer;
3865 struct device *dev = &adapter->vdev->dev;
3866 struct vnic_login_client_data *vlcd;
3867 dma_addr_t rsp_buffer_token;
3868 dma_addr_t buffer_token;
3869 size_t rsp_buffer_size;
3870 union ibmvnic_crq crq;
3871 int client_data_len;
3878 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3879 netdev_err(adapter->netdev,
3880 "RX or TX queues are not allocated, device login failed\n");
3884 release_login_buffer(adapter);
3885 release_login_rsp_buffer(adapter);
3887 client_data_len = vnic_client_data_len(adapter);
3890 sizeof(struct ibmvnic_login_buffer) +
3891 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3894 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3896 goto buf_alloc_failed;
3898 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3900 if (dma_mapping_error(dev, buffer_token)) {
3901 dev_err(dev, "Couldn't map login buffer\n");
3902 goto buf_map_failed;
3905 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3906 sizeof(u64) * adapter->req_tx_queues +
3907 sizeof(u64) * adapter->req_rx_queues +
3908 sizeof(u64) * adapter->req_rx_queues +
3909 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3911 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3912 if (!login_rsp_buffer)
3913 goto buf_rsp_alloc_failed;
3915 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3916 rsp_buffer_size, DMA_FROM_DEVICE);
3917 if (dma_mapping_error(dev, rsp_buffer_token)) {
3918 dev_err(dev, "Couldn't map login rsp buffer\n");
3919 goto buf_rsp_map_failed;
3922 adapter->login_buf = login_buffer;
3923 adapter->login_buf_token = buffer_token;
3924 adapter->login_buf_sz = buffer_size;
3925 adapter->login_rsp_buf = login_rsp_buffer;
3926 adapter->login_rsp_buf_token = rsp_buffer_token;
3927 adapter->login_rsp_buf_sz = rsp_buffer_size;
3929 login_buffer->len = cpu_to_be32(buffer_size);
3930 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3931 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3932 login_buffer->off_txcomp_subcrqs =
3933 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3934 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3935 login_buffer->off_rxcomp_subcrqs =
3936 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3937 sizeof(u64) * adapter->req_tx_queues);
3938 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3939 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3941 tx_list_p = (__be64 *)((char *)login_buffer +
3942 sizeof(struct ibmvnic_login_buffer));
3943 rx_list_p = (__be64 *)((char *)login_buffer +
3944 sizeof(struct ibmvnic_login_buffer) +
3945 sizeof(u64) * adapter->req_tx_queues);
3947 for (i = 0; i < adapter->req_tx_queues; i++) {
3948 if (adapter->tx_scrq[i]) {
3949 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3954 for (i = 0; i < adapter->req_rx_queues; i++) {
3955 if (adapter->rx_scrq[i]) {
3956 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3961 /* Insert vNIC login client data */
3962 vlcd = (struct vnic_login_client_data *)
3963 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3964 login_buffer->client_data_offset =
3965 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3966 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3968 vnic_add_client_data(adapter, vlcd);
3970 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3971 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3972 netdev_dbg(adapter->netdev, "%016lx\n",
3973 ((unsigned long int *)(adapter->login_buf))[i]);
3976 memset(&crq, 0, sizeof(crq));
3977 crq.login.first = IBMVNIC_CRQ_CMD;
3978 crq.login.cmd = LOGIN;
3979 crq.login.ioba = cpu_to_be32(buffer_token);
3980 crq.login.len = cpu_to_be32(buffer_size);
3982 adapter->login_pending = true;
3983 rc = ibmvnic_send_crq(adapter, &crq);
3985 adapter->login_pending = false;
3986 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
3987 goto buf_rsp_map_failed;
3993 kfree(login_rsp_buffer);
3994 adapter->login_rsp_buf = NULL;
3995 buf_rsp_alloc_failed:
3996 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3998 kfree(login_buffer);
3999 adapter->login_buf = NULL;
4004 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
4007 union ibmvnic_crq crq;
4009 memset(&crq, 0, sizeof(crq));
4010 crq.request_map.first = IBMVNIC_CRQ_CMD;
4011 crq.request_map.cmd = REQUEST_MAP;
4012 crq.request_map.map_id = map_id;
4013 crq.request_map.ioba = cpu_to_be32(addr);
4014 crq.request_map.len = cpu_to_be32(len);
4015 return ibmvnic_send_crq(adapter, &crq);
4018 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
4020 union ibmvnic_crq crq;
4022 memset(&crq, 0, sizeof(crq));
4023 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
4024 crq.request_unmap.cmd = REQUEST_UNMAP;
4025 crq.request_unmap.map_id = map_id;
4026 return ibmvnic_send_crq(adapter, &crq);
4029 static void send_query_map(struct ibmvnic_adapter *adapter)
4031 union ibmvnic_crq crq;
4033 memset(&crq, 0, sizeof(crq));
4034 crq.query_map.first = IBMVNIC_CRQ_CMD;
4035 crq.query_map.cmd = QUERY_MAP;
4036 ibmvnic_send_crq(adapter, &crq);
4039 /* Send a series of CRQs requesting various capabilities of the VNIC server */
4040 static void send_query_cap(struct ibmvnic_adapter *adapter)
4042 union ibmvnic_crq crq;
4044 atomic_set(&adapter->running_cap_crqs, 0);
4045 memset(&crq, 0, sizeof(crq));
4046 crq.query_capability.first = IBMVNIC_CRQ_CMD;
4047 crq.query_capability.cmd = QUERY_CAPABILITY;
4049 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
4050 atomic_inc(&adapter->running_cap_crqs);
4051 ibmvnic_send_crq(adapter, &crq);
4053 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
4054 atomic_inc(&adapter->running_cap_crqs);
4055 ibmvnic_send_crq(adapter, &crq);
4057 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
4058 atomic_inc(&adapter->running_cap_crqs);
4059 ibmvnic_send_crq(adapter, &crq);
4061 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
4062 atomic_inc(&adapter->running_cap_crqs);
4063 ibmvnic_send_crq(adapter, &crq);
4065 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
4066 atomic_inc(&adapter->running_cap_crqs);
4067 ibmvnic_send_crq(adapter, &crq);
4069 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
4070 atomic_inc(&adapter->running_cap_crqs);
4071 ibmvnic_send_crq(adapter, &crq);
4073 crq.query_capability.capability =
4074 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
4075 atomic_inc(&adapter->running_cap_crqs);
4076 ibmvnic_send_crq(adapter, &crq);
4078 crq.query_capability.capability =
4079 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
4080 atomic_inc(&adapter->running_cap_crqs);
4081 ibmvnic_send_crq(adapter, &crq);
4083 crq.query_capability.capability =
4084 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
4085 atomic_inc(&adapter->running_cap_crqs);
4086 ibmvnic_send_crq(adapter, &crq);
4088 crq.query_capability.capability =
4089 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
4090 atomic_inc(&adapter->running_cap_crqs);
4091 ibmvnic_send_crq(adapter, &crq);
4093 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
4094 atomic_inc(&adapter->running_cap_crqs);
4095 ibmvnic_send_crq(adapter, &crq);
4097 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
4098 atomic_inc(&adapter->running_cap_crqs);
4099 ibmvnic_send_crq(adapter, &crq);
4101 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
4102 atomic_inc(&adapter->running_cap_crqs);
4103 ibmvnic_send_crq(adapter, &crq);
4105 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
4106 atomic_inc(&adapter->running_cap_crqs);
4107 ibmvnic_send_crq(adapter, &crq);
4109 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
4110 atomic_inc(&adapter->running_cap_crqs);
4111 ibmvnic_send_crq(adapter, &crq);
4113 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
4114 atomic_inc(&adapter->running_cap_crqs);
4115 ibmvnic_send_crq(adapter, &crq);
4117 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
4118 atomic_inc(&adapter->running_cap_crqs);
4119 ibmvnic_send_crq(adapter, &crq);
4121 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
4122 atomic_inc(&adapter->running_cap_crqs);
4123 ibmvnic_send_crq(adapter, &crq);
4125 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
4126 atomic_inc(&adapter->running_cap_crqs);
4127 ibmvnic_send_crq(adapter, &crq);
4129 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
4130 atomic_inc(&adapter->running_cap_crqs);
4131 ibmvnic_send_crq(adapter, &crq);
4133 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
4134 atomic_inc(&adapter->running_cap_crqs);
4135 ibmvnic_send_crq(adapter, &crq);
4137 crq.query_capability.capability =
4138 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
4139 atomic_inc(&adapter->running_cap_crqs);
4140 ibmvnic_send_crq(adapter, &crq);
4142 crq.query_capability.capability =
4143 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
4144 atomic_inc(&adapter->running_cap_crqs);
4145 ibmvnic_send_crq(adapter, &crq);
4147 crq.query_capability.capability =
4148 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
4149 atomic_inc(&adapter->running_cap_crqs);
4150 ibmvnic_send_crq(adapter, &crq);
4152 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
4153 atomic_inc(&adapter->running_cap_crqs);
4154 ibmvnic_send_crq(adapter, &crq);
4157 static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
4159 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4160 struct device *dev = &adapter->vdev->dev;
4161 union ibmvnic_crq crq;
4163 adapter->ip_offload_tok =
4165 &adapter->ip_offload_buf,
4169 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4170 if (!firmware_has_feature(FW_FEATURE_CMO))
4171 dev_err(dev, "Couldn't map offload buffer\n");
4175 memset(&crq, 0, sizeof(crq));
4176 crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4177 crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4178 crq.query_ip_offload.len = cpu_to_be32(buf_sz);
4179 crq.query_ip_offload.ioba =
4180 cpu_to_be32(adapter->ip_offload_tok);
4182 ibmvnic_send_crq(adapter, &crq);
4185 static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
4187 struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
4188 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4189 struct device *dev = &adapter->vdev->dev;
4190 netdev_features_t old_hw_features = 0;
4191 union ibmvnic_crq crq;
4193 adapter->ip_offload_ctrl_tok =
4196 sizeof(adapter->ip_offload_ctrl),
4199 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
4200 dev_err(dev, "Couldn't map ip offload control buffer\n");
4204 ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4205 ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
4206 ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
4207 ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
4208 ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
4209 ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
4210 ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
4211 ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
4212 ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
4213 ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
4215 /* large_rx disabled for now, additional features needed */
4216 ctrl_buf->large_rx_ipv4 = 0;
4217 ctrl_buf->large_rx_ipv6 = 0;
4219 if (adapter->state != VNIC_PROBING) {
4220 old_hw_features = adapter->netdev->hw_features;
4221 adapter->netdev->hw_features = 0;
4224 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
4226 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
4227 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
4229 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
4230 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
4232 if ((adapter->netdev->features &
4233 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
4234 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
4236 if (buf->large_tx_ipv4)
4237 adapter->netdev->hw_features |= NETIF_F_TSO;
4238 if (buf->large_tx_ipv6)
4239 adapter->netdev->hw_features |= NETIF_F_TSO6;
4241 if (adapter->state == VNIC_PROBING) {
4242 adapter->netdev->features |= adapter->netdev->hw_features;
4243 } else if (old_hw_features != adapter->netdev->hw_features) {
4244 netdev_features_t tmp = 0;
4246 /* disable features no longer supported */
4247 adapter->netdev->features &= adapter->netdev->hw_features;
4248 /* turn on features now supported if previously enabled */
4249 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
4250 adapter->netdev->hw_features;
4251 adapter->netdev->features |=
4252 tmp & adapter->netdev->wanted_features;
4255 memset(&crq, 0, sizeof(crq));
4256 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
4257 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
4258 crq.control_ip_offload.len =
4259 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
4260 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
4261 ibmvnic_send_crq(adapter, &crq);
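/* Completion handler for GET_VPD_SIZE: record the reported VPD length
 * so the caller can allocate and map a buffer for the GET_VPD request.
 */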
4264 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
4265 struct ibmvnic_adapter *adapter)
4267 struct device *dev = &adapter->vdev->dev;
4269 if (crq->get_vpd_size_rsp.rc.code) {
4270 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
4271 crq->get_vpd_size_rsp.rc.code);
4272 complete(&adapter->fw_done);
4276 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
4277 complete(&adapter->fw_done);
4280 static void handle_vpd_rsp(union ibmvnic_crq *crq,
4281 struct ibmvnic_adapter *adapter)
4283 struct device *dev = &adapter->vdev->dev;
4284 unsigned char *substr = NULL;
4285 u8 fw_level_len = 0;
4287 memset(adapter->fw_version, 0, 32);
4289 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
4292 if (crq->get_vpd_rsp.rc.code) {
4293 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
4294 crq->get_vpd_rsp.rc.code);
4298 /* get the position of the firmware version info
4299 * located after the ASCII 'RM' substring in the buffer
4300 */
4301 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
4303 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
4307 /* get length of firmware level ASCII substring */
4308 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
4309 fw_level_len = *(substr + 2);
4311 dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
4315 /* copy firmware version string from vpd into adapter */
4316 if ((substr + 3 + fw_level_len) <
4317 (adapter->vpd->buff + adapter->vpd->len)) {
4318 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
4320 dev_info(dev, "FW substr extrapolated VPD buff\n");
4324 if (adapter->fw_version[0] == '\0')
4325 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
4326 complete(&adapter->fw_done);
4329 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
4331 struct device *dev = &adapter->vdev->dev;
4332 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
4335 dma_unmap_single(dev, adapter->ip_offload_tok,
4336 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
4338 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
4339 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
4340 netdev_dbg(adapter->netdev, "%016lx\n",
4341 ((unsigned long int *)(buf))[i]);
4343 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
4344 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
4345 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
4346 buf->tcp_ipv4_chksum);
4347 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
4348 buf->tcp_ipv6_chksum);
4349 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
4350 buf->udp_ipv4_chksum);
4351 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
4352 buf->udp_ipv6_chksum);
4353 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
4354 buf->large_tx_ipv4);
4355 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
4356 buf->large_tx_ipv6);
4357 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
4358 buf->large_rx_ipv4);
4359 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
4360 buf->large_rx_ipv6);
4361 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
4362 buf->max_ipv4_header_size);
4363 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
4364 buf->max_ipv6_header_size);
4365 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
4366 buf->max_tcp_header_size);
4367 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
4368 buf->max_udp_header_size);
4369 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
4370 buf->max_large_tx_size);
4371 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
4372 buf->max_large_rx_size);
4373 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
4374 buf->ipv6_extension_header);
4375 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
4376 buf->tcp_pseudosum_req);
4377 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
4378 buf->num_ipv6_ext_headers);
4379 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
4380 buf->off_ipv6_ext_headers);
4382 send_control_ip_offload(adapter);
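/* Map firmware error-indication cause codes to human-readable strings */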
4385 static const char *ibmvnic_fw_err_cause(u16 cause)
4388 case ADAPTER_PROBLEM:
4389 return "adapter problem";
4391 return "bus problem";
4393 return "firmware problem";
4395 return "device driver problem";
4397 return "EEH recovery";
4399 return "firmware updated";
4401 return "low Memory";
4407 static void handle_error_indication(union ibmvnic_crq *crq,
4408 struct ibmvnic_adapter *adapter)
4410 struct device *dev = &adapter->vdev->dev;
4413 cause = be16_to_cpu(crq->error_indication.error_cause);
4415 dev_warn_ratelimited(dev,
4416 "Firmware reports %serror, cause: %s. Starting recovery...\n",
4417 crq->error_indication.flags
4418 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4419 ibmvnic_fw_err_cause(cause));
4421 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4422 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4424 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
static int handle_change_mac_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		goto out;
	}
	/* crq->change_mac_addr.mac_addr is the requested one
	 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
	 */
	ether_addr_copy(netdev->dev_addr,
			&crq->change_mac_addr_rsp.mac_addr[0]);
	ether_addr_copy(adapter->mac_addr,
			&crq->change_mac_addr_rsp.mac_addr[0]);
out:
	complete(&adapter->fw_done);
	return rc;
}
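
/* Process one REQUEST_CAPABILITY response. On PARTIALSUCCESS the value
 * is clamped to the server's counter-proposal (or the fallback MTU) and
 * the request is resent; once every outstanding capability response has
 * arrived, IP offload support is queried.
 */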
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);

		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. Reverting.\n",
			       *req_value);
			*req_value = adapter->fallback.mtu;
		} else {
			*req_value =
				be64_to_cpu(crq->request_capability_rsp.number);
		}

		send_request_cap(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		send_query_ip_offload(adapter);
	}
}
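
/* Validate the login response against the request, record the rx buffer
 * size and the tx/rx sub-CRQ handle arrays from the response buffer, and
 * wake whoever is waiting on init_done. A non-zero return code means the
 * server could not grant the requested queues and the login must be
 * retried with fewer of them.
 */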
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	u64 *tx_handle_array;
	u64 *rx_handle_array;
	int num_tx_pools;
	int num_rx_pools;
	u64 *size_array;
	int i;

	/* CHECK: Test/set of login_pending does not need to be atomic
	 * because only ibmvnic_tasklet tests/clears this.
	 */
	if (!adapter->login_pending) {
		netdev_warn(netdev, "Ignoring unexpected login response\n");
		return 0;
	}
	adapter->login_pending = false;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
		complete(&adapter->init_done);
		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		return -EIO;
	}
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
	/* variable buffer sizes are not supported, so just read the
	 * first entry.
	 */
	adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);

	num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
	rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));

	for (i = 0; i < num_tx_pools; i++)
		adapter->tx_scrq[i]->handle = tx_handle_array[i];

	for (i = 0; i < num_rx_pools; i++)
		adapter->rx_scrq[i]->handle = rx_handle_array[i];

	adapter->num_active_tx_scrqs = num_tx_pools;
	adapter->num_active_rx_scrqs = num_rx_pools;
	release_login_rsp_buffer(adapter);
	release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}
static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}
static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}
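
/* Record one QUERY_CAPABILITY response in the adapter struct; when the
 * last outstanding query completes, move on to requesting the driver's
 * desired capability values.
 */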
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		send_request_cap(adapter, 0);
	}
}
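
/* Synchronously query the physical port parameters (speed and duplex),
 * serialized against other firmware commands by fw_lock and bounded by a
 * 10 second completion timeout.
 */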
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return adapter->fw_done_rc ? -EIO : 0;
}
static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;
	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	switch (rspeed) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
		break;
	case IBMVNIC_100MBPS:
		adapter->speed = SPEED_100;
		break;
	case IBMVNIC_1GBPS:
		adapter->speed = SPEED_1000;
		break;
	case IBMVNIC_10GBPS:
		adapter->speed = SPEED_10000;
		break;
	case IBMVNIC_25GBPS:
		adapter->speed = SPEED_25000;
		break;
	case IBMVNIC_40GBPS:
		adapter->speed = SPEED_40000;
		break;
	case IBMVNIC_50GBPS:
		adapter->speed = SPEED_50000;
		break;
	case IBMVNIC_100GBPS:
		adapter->speed = SPEED_100000;
		break;
	case IBMVNIC_200GBPS:
		adapter->speed = SPEED_200000;
		break;
	default:
		if (netif_carrier_ok(netdev))
			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}
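
/* Top-level CRQ dispatcher, called from the tasklet for every message
 * posted by the server: initialization and transport events (failover,
 * migration, adapter failure) are handled inline, while command
 * responses are routed to their respective handlers.
 */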
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			/* Discard any stale login responses from prev reset.
			 * CHECK: should we clear even on INIT_COMPLETE?
			 */
			adapter->login_pending = false;

			if (!completion_done(&adapter->init_done)) {
				complete(&adapter->init_done);
				adapter->init_done_rc = -EIO;
			}
			rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
			if (rc && rc != -EBUSY) {
				/* We were unable to schedule the failover
				 * reset either because the adapter was still
				 * probing (eg: during kexec) or we could not
				 * allocate memory. Clear the failover_pending
				 * flag since no one else will. We ignore
				 * EBUSY because it means either FAILOVER reset
				 * is already scheduled or the adapter is
				 * busy (i.e. removing).
				 */
				netdev_err(netdev,
					   "Error %ld scheduling failover reset\n",
					   rc);
				adapter->failover_pending = false;
			}
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		/* terminate any thread waiting for a response
		 * from the device
		 */
		if (!completion_done(&adapter->fw_done)) {
			adapter->fw_done_rc = -EIO;
			complete(&adapter->fw_done);
		}
		if (!completion_done(&adapter->stats_done))
			complete(&adapter->stats_done);
		if (test_bit(0, &adapter->resetting))
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		ibmvnic_version =
		    be16_to_cpu(crq->version_exchange_rsp.version);
		dev_info(dev, "Partner protocol version is %d\n",
			 ibmvnic_version);
		send_query_cap(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		if (adapter->phys_link_state && adapter->logical_link_state)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}
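
/* Drain the CRQ under queue->lock, dispatching each valid message. The
 * tasklet keeps polling while capability responses remain outstanding so
 * the negotiation sequence is not interleaved with other work.
 */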
static void ibmvnic_tasklet(struct tasklet_struct *t)
{
	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			/* This barrier makes sure ibmvnic_next_crq()'s
			 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
			 * before ibmvnic_handle_crq()'s
			 * switch(gen_crq->first) and switch(gen_crq->cmd).
			 */
			dma_rmb();
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}
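
/* Reset the CRQ by freeing it and re-registering it with the hypervisor,
 * e.g. after a transport event; busy and "long busy" hcall returns are
 * retried.
 */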
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	if (!crq->msgs)
		return -EINVAL;

	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}
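
/* Allocate, DMA-map and register the page-sized CRQ with the hypervisor,
 * then wire up the interrupt handler and tasklet that service it.
 */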
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_setup(&adapter->tasklet, (void *)ibmvnic_tasklet);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}
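
/* Drive the CRQ initialization handshake with the server and (re)build
 * the sub-CRQs. On a reset that changed the queue counts, the sub-CRQs
 * are released and reallocated rather than merely reset.
 */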
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(20000);
	u64 old_num_rx_queues, old_num_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	if (reset) {
		old_num_rx_queues = adapter->req_rx_queues;
		old_num_tx_queues = adapter->req_tx_queues;
		reinit_completion(&adapter->init_done);
	}

	adapter->init_done_rc = 0;
	rc = ibmvnic_send_crq_init(adapter);
	if (rc) {
		dev_err(dev, "Send crq init failed with error %d\n", rc);
		return rc;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}
static struct device_attribute dev_attr_failover;
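
/* Allocate the net_device, negotiate capabilities with the server and
 * register with the networking core. Initialization is retried while it
 * reports EAGAIN.
 */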
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->login_pending = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);

	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;
	adapter->last_reset_time = jiffies;
	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);

	/* If ibmvnic_reset() is scheduling a reset, wait for it to
	 * finish. Then, set the state to REMOVING to prevent it from
	 * scheduling any more work and to have reset functions ignore
	 * any resets that have already been scheduled. Drop the lock
	 * after setting state, so __ibmvnic_reset() which is called
	 * from the flush_work() below, can make progress.
	 */
	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;
	spin_unlock(&adapter->rwi_lock);

	spin_unlock_irqrestore(&adapter->state_lock, flags);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
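
/* sysfs "failover" attribute: writing 1 obtains a session token from the
 * hypervisor and signals a session error against it, asking the server
 * to fail the client over to its backing device.
 */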
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);
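
/* Report the IO entitlement (DMA space) this adapter wants: the CRQ page,
 * the statistics buffer, the sub-CRQ message queues and every long term
 * mapped rx buffer pool.
 */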
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}
static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);