// SPDX-License-Identifier: GPL-2.0-or-later
/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                  */
/*  Copyright (C) 2014 IBM Corp.                                         */
/*  Santiago Leon (santi_leon@yahoo.com)                                 */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                          */
/*  John Allen (jallen@linux.vnet.ibm.com)                               */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device  */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN   */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor.  */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but   */
/* are used by the driver to notify the server that a packet is          */
/* ready for transmission or that a buffer has been added to receive a   */
/* packet. Subsequently, sCRQs are used by the server to notify the      */
/* driver that a packet transmission has been completed or that a packet */
/* has been received and placed in a waiting buffer.                     */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in   */
/* which skbs are DMA mapped and immediately unmapped when the transmit  */
/* or receive has been completed, the VNIC driver is required to use     */
/* "long term mapping". This entails that large, contiguous DMA mapped   */
/* buffers are allocated on driver initialization and these buffers are  */
/* then continuously reused to pass skbs to and from the VNIC server.    */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
				offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + (off))))
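
/* Each entry below names a counter exposed via ethtool -S and stores the
 * byte offset of the matching field in struct ibmvnic_statistics.
 * IBMVNIC_STAT_OFF() folds in the offset of the stats member inside
 * struct ibmvnic_adapter, so IBMVNIC_GET_STAT(adapter, off) can read a
 * counter with a single pointer-arithmetic dereference.
 */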

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

static int send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;

	return ibmvnic_send_crq(adapter, &crq);
}
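
/* Note: IBMVNIC_CRQ_INIT_CMD/IBMVNIC_CRQ_INIT_COMPLETE make up the CRQ
 * initialization handshake with the VNIC server described in the header
 * comment at the top of this file; the VERSION_EXCHANGE and capability
 * negotiation commands follow once the queue is up.
 */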

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}
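
/* Per the hcall return convention used here, H_REG_SUB_CRQ places the new
 * sub-CRQ's number and its interrupt source in retbuf[0] and retbuf[1];
 * h_reg_sub_crq() copies those out through @number and @irq.
 */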

/**
 * ibmvnic_wait_for_completion - Check device state and wait for completion
 * @adapter: private device data
 * @comp_done: completion structure to wait for
 * @timeout: time to wait in milliseconds
 *
 * Wait for a completion signal or until the timeout limit is reached
 * while checking that the device is still active.
 */
static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
				       struct completion *comp_done,
				       unsigned long timeout)
{
	struct net_device *netdev;
	unsigned long div_timeout;
	u8 retry;

	netdev = adapter->netdev;
	retry = 5;
	div_timeout = msecs_to_jiffies(timeout / retry);
	while (true) {
		if (!adapter->crq.active) {
			netdev_err(netdev, "Device down!\n");
			return -ENODEV;
		}
		if (!retry--)
			break;
		if (wait_for_completion_timeout(comp_done, div_timeout))
			return 0;
	}
	netdev_err(netdev, "Operation timed out.\n");
	return -ETIMEDOUT;
}
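
/* Rather than one long wait, ibmvnic_wait_for_completion() above slices
 * @timeout into five shorter waits and re-checks adapter->crq.active
 * between slices, so a command outstanding against a dead CRQ fails
 * quickly with -ENODEV instead of pending for the full timeout.
 */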

/**
 * reuse_ltb() - Check if a long term buffer can be reused
 * @ltb:  The long term buffer to be checked
 * @size: The size of the long term buffer.
 *
 * An LTB can be reused unless its size has changed.
 *
 * Return: Return true if the LTB can be reused, false otherwise.
 */
static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
{
	return (ltb->buff && ltb->size == size);
}

/**
 * alloc_long_term_buff() - Allocate a long term buffer (LTB)
 *
 * @adapter: ibmvnic adapter associated to the LTB
 * @ltb:     container object for the LTB
 * @size:    size of the LTB
 *
 * Allocate an LTB of the specified size and notify VIOS.
 *
 * If the given @ltb already has the correct size, reuse it. Otherwise if
 * it's non-NULL, free it. Then allocate a new one of the correct size.
 * Notify the VIOS either way since we may now be working with a new VIOS.
 *
 * Allocating larger chunks of memory during resets, especially during LPM
 * or under low memory situations, can cause resets to fail/timeout and
 * for the LPAR to lose connectivity. So hold onto the LTB even if we fail
 * to communicate with the VIOS and reuse it on next open. Free LTB when
 * adapter is closed.
 *
 * Return: 0 if we were able to allocate the LTB and notify the VIOS and
 *	   a negative value otherwise.
 */
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;
	int rc;

	if (!reuse_ltb(ltb, size)) {
		dev_dbg(dev,
			"LTB size changed from 0x%llx to 0x%x, reallocating\n",
			ltb->size, size);
		free_long_term_buff(adapter, ltb);
	}

	if (ltb->buff) {
		dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	} else {
		ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
					       GFP_KERNEL);
		if (!ltb->buff) {
			dev_err(dev, "Couldn't alloc long term buffer\n");
			return -ENOMEM;
		}
		ltb->size = size;

		ltb->map_id = find_first_zero_bit(adapter->map_ids,
						  MAX_MAP_ID);
		bitmap_set(adapter->map_ids, ltb->map_id, 1);

		dev_dbg(dev,
			"Allocated new LTB [map %d, size 0x%llx]\n",
			ltb->map_id, ltb->size);
	}

	/* Ensure ltb is zeroed - especially when reusing it. */
	memset(ltb->buff, 0, ltb->size);

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	if (rc) {
		dev_err(dev, "send_request_map failed, rc = %d\n", rc);
		goto out;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
			rc);
		goto out;
	}

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map LTB, rc = %d\n",
			adapter->fw_done_rc);
		rc = -EIO;
		goto out;
	}
	rc = 0;
out:
	/* don't free LTB on communication error - see function header */
	mutex_unlock(&adapter->fw_lock);
	return rc;
}
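
/* The map step above is asynchronous: send_request_map() posts a REQUEST_MAP
 * CRQ and the response handler signals adapter->fw_done with the VIOS
 * result in adapter->fw_done_rc. On any failure the LTB is deliberately
 * kept around - see the function header comment.
 */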

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	/* VIOS automatically unmaps the long term buffer at remote
	 * end for the following resets:
	 * FAILOVER, MOBILITY, TIMEOUT.
	 */
	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
		send_request_unmap(adapter, ltb->map_id);

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);

	ltb->buff = NULL;
	/* mark this map_id free */
	bitmap_clear(adapter->map_ids, ltb->map_id, 1);
	ltb->map_id = 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		adapter->rx_pool[i].active = 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	u64 handle = adapter->rx_scrq[pool->index]->handle;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_sub_crq_queue *rx_scrq;
	union sub_crq *sub_crq;
	int buffers_added = 0;
	unsigned long lpar_rc;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	rx_scrq = adapter->rx_scrq[pool->index];
	ind_bufp = &rx_scrq->ind_buf;

	/* netdev_alloc_skb() could have failed after we saved a few skbs
	 * in the indir_buf and we would not have sent them to VIOS yet.
	 * To account for them, start the loop at ind_bufp->index rather
	 * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
	 * be 0.
	 */
	for (i = ind_bufp->index; i < count; ++i) {
		index = pool->free_map[pool->next_free];

		/* We may be reusing the skb from earlier resets. Allocate
		 * only if necessary. But since the LTB may have changed
		 * during reset (see init_rx_pools()), update LTB below
		 * even if reusing skb.
		 */
		skb = pool->rx_buff[index].skb;
		if (!skb) {
			skb = netdev_alloc_skb(adapter->netdev,
					       pool->buff_size);
			if (!skb) {
				dev_err(dev, "Couldn't replenish rx buff\n");
				adapter->replenish_no_mem++;
				break;
			}
		}

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->next_free = (pool->next_free + 1) % pool->size;

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;

		/* add the skb to an rx_buff in the pool */
		pool->rx_buff[index].data = dst;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		/* queue the rx_buff for the next send_subcrq_indirect */
		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
		memset(sub_crq, 0, sizeof(*sub_crq));
		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq->rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq->rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);

		/* if send_subcrq_indirect queue is full, flush to VIOS */
		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
		    i == count - 1) {
			lpar_rc =
				send_subcrq_indirect(adapter, handle,
						     (u64)ind_bufp->indir_dma,
						     (u64)ind_bufp->index);
			if (lpar_rc != H_SUCCESS)
				goto failure;
			buffers_added += ind_bufp->index;
			adapter->replenish_add_buff_success += ind_bufp->index;
			ind_bufp->index = 0;
		}
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
		dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
	for (i = ind_bufp->index - 1; i >= 0; --i) {
		struct ibmvnic_rx_buff *rx_buff;

		pool->next_free = pool->next_free == 0 ?
				  pool->size - 1 : pool->next_free - 1;
		sub_crq = &ind_bufp->indir_arr[i];
		rx_buff = (struct ibmvnic_rx_buff *)
				be64_to_cpu(sub_crq->rx_add.correlator);
		index = (int)(rx_buff - pool->rx_buff);
		pool->free_map[pool->next_free] = index;
		dev_kfree_skb_any(pool->rx_buff[index].skb);
		pool->rx_buff[index].skb = NULL;
	}
	adapter->replenish_add_buff_failure += ind_bufp->index;
	atomic_add(buffers_added, &pool->available);
	ind_bufp->index = 0;
	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}
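
/* Worked example of the rx_add.len encoding used in replenish_rx_pool()
 * above: with a 0x2000 byte buffer on a little endian LPAR, the stored
 * value is cpu_to_be32(0x2000 << 8), i.e. the bytes 00 20 00 00. The
 * 24-bit length 0x002000 lands in the first three bytes of the field,
 * and the final (fourth) byte is the one that would be truncated.
 */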

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}

	netdev_dbg(adapter->netdev, "Replenished %d pools\n", i);
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;
	int rc;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	rc = dma_mapping_error(dev, stok);
	if (rc) {
		dev_err(dev, "Couldn't map stats buffer, rc = %d\n", rc);
		return rc;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}
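
/* adapter->stats_token is the DMA address of the adapter-owned
 * struct ibmvnic_statistics; it is passed in later statistics requests
 * so the VNIC server can DMA fresh counter values directly into the
 * buffer that the ethtool stats path reads.
 */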

/**
 * release_rx_pools() - Release any rx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);

		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
	adapter->prev_rx_pool_size = 0;
}

/**
 * reuse_rx_pools() - Check if the existing rx pools can be reused.
 * @adapter: ibmvnic adapter
 *
 * Check if the existing rx pools in the adapter can be reused. The
 * pools can be reused if the pool parameters (number of pools,
 * number of buffers in the pool and size of each buffer) have not
 * changed.
 *
 * NOTE: This assumes that all pools have the same number of buffers
 *       which is the case currently. If that changes, we must fix this.
 *
 * Return: true if the rx pools can be reused, false otherwise.
 */
static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
{
	u64 old_num_pools, new_num_pools;
	u64 old_pool_size, new_pool_size;
	u64 old_buff_size, new_buff_size;

	if (!adapter->rx_pool)
		return false;

	old_num_pools = adapter->num_active_rx_pools;
	new_num_pools = adapter->req_rx_queues;

	old_pool_size = adapter->prev_rx_pool_size;
	new_pool_size = adapter->req_rx_add_entries_per_subcrq;

	old_buff_size = adapter->prev_rx_buf_sz;
	new_buff_size = adapter->cur_rx_buf_sz;

	if (old_buff_size != new_buff_size ||
	    old_num_pools != new_num_pools ||
	    old_pool_size != new_pool_size)
		return false;

	return true;
}

/**
 * init_rx_pools(): Initialize the set of receiver pools in the adapter.
 * @netdev: net device associated with the vnic interface
 *
 * Initialize the set of receiver pools in the ibmvnic adapter associated
 * with the net_device @netdev. If possible, reuse the existing rx pools.
 * Otherwise free any existing pools and allocate a new set of pools
 * before initializing them.
 *
 * Return: 0 on success and negative value on error.
 */
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	u64 num_pools;
	u64 pool_size;	/* # of buffers in one pool */
	u64 buff_size;
	int i, j, rc;

	pool_size = adapter->req_rx_add_entries_per_subcrq;
	num_pools = adapter->req_rx_queues;
	buff_size = adapter->cur_rx_buf_sz;

	if (reuse_rx_pools(adapter)) {
		dev_dbg(dev, "Reusing rx pools\n");
		goto update_ltb;
	}

	/* Allocate/populate the pools. */
	release_rx_pools(adapter);

	adapter->rx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -ENOMEM;
	}

	/* Set num_active_rx_pools early. If we fail below after partial
	 * allocation, release_rx_pools() will know how many to look for.
	 */
	adapter->num_active_rx_pools = num_pools;

	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, pool_size, buff_size);

		rx_pool->size = pool_size;
		rx_pool->index = i;
		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			dev_err(dev, "Couldn't alloc free_map %d\n", i);
			rc = -ENOMEM;
			goto out_release;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			rc = -ENOMEM;
			goto out_release;
		}
	}

	adapter->prev_rx_pool_size = pool_size;
	adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;

update_ltb:
	for (i = 0; i < num_pools; i++) {
		rx_pool = &adapter->rx_pool[i];
		dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
			i, rx_pool->size, rx_pool->buff_size);

		rc = alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					  rx_pool->size * rx_pool->buff_size);
		if (rc)
			goto out;

		for (j = 0; j < rx_pool->size; ++j) {
			struct ibmvnic_rx_buff *rx_buff;

			rx_pool->free_map[j] = j;

			/* NOTE: Don't clear rx_buff->skb here - will leak
			 * memory! replenish_rx_pool() will reuse skbs or
			 * allocate as necessary.
			 */
			rx_buff = &rx_pool->rx_buff[j];
			rx_buff->dma = 0;
			rx_buff->data = 0;
			rx_buff->size = 0;
			rx_buff->pool_index = 0;
		}

		/* Mark pool "empty" so replenish_rx_pools() will
		 * update the LTB info for each buffer
		 */
		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		/* replenish_rx_pool() may have called deactivate_rx_pools()
		 * on failover. Ensure pool is active now.
		 */
		rx_pool->active = 1;
	}
	return 0;
out_release:
	release_rx_pools(adapter);
out:
	/* We failed to allocate one or more LTBs or map them on the VIOS.
	 * Hold onto the pools and any LTBs that we did allocate/map.
	 */
	return rc;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
				struct ibmvnic_tx_pool *tx_pool)
{
	kfree(tx_pool->tx_buff);
	kfree(tx_pool->free_map);
	free_long_term_buff(adapter, &tx_pool->long_term_buff);
}

/**
 * release_tx_pools() - Release any tx pools attached to @adapter.
 * @adapter: ibmvnic adapter
 *
 * Safe to call this multiple times - even if no pools are attached.
 */
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	/* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
	 * both NULL or both non-NULL. So we only need to check one.
	 */
	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		release_one_tx_pool(adapter, &adapter->tx_pool[i]);
		release_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	kfree(adapter->tso_pool);
	adapter->tso_pool = NULL;
	adapter->num_active_tx_pools = 0;
	adapter->prev_tx_pool_size = 0;
}

static int init_one_tx_pool(struct net_device *netdev,
			    struct ibmvnic_tx_pool *tx_pool,
			    int pool_size, int buf_size)
{
	int i;

	tx_pool->tx_buff = kcalloc(pool_size,
				   sizeof(struct ibmvnic_tx_buff),
				   GFP_KERNEL);
	if (!tx_pool->tx_buff)
		return -ENOMEM;

	tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
	if (!tx_pool->free_map) {
		kfree(tx_pool->tx_buff);
		tx_pool->tx_buff = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < pool_size; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;
	tx_pool->num_buffers = pool_size;
	tx_pool->buf_size = buf_size;

	return 0;
}

/**
 * reuse_tx_pools() - Check if the existing tx pools can be reused.
 * @adapter: ibmvnic adapter
 *
 * Check if the existing tx pools in the adapter can be reused. The
 * pools can be reused if the pool parameters (number of pools,
 * number of buffers in the pool and mtu) have not changed.
 *
 * NOTE: This assumes that all pools have the same number of buffers
 *       which is the case currently. If that changes, we must fix this.
 *
 * Return: true if the tx pools can be reused, false otherwise.
 */
static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
{
	u64 old_num_pools, new_num_pools;
	u64 old_pool_size, new_pool_size;
	u64 old_mtu, new_mtu;

	if (!adapter->tx_pool)
		return false;

	old_num_pools = adapter->num_active_tx_pools;
	new_num_pools = adapter->num_active_tx_scrqs;
	old_pool_size = adapter->prev_tx_pool_size;
	new_pool_size = adapter->req_tx_entries_per_subcrq;
	old_mtu = adapter->prev_mtu;
	new_mtu = adapter->req_mtu;

	if (old_mtu != new_mtu ||
	    old_num_pools != new_num_pools ||
	    old_pool_size != new_pool_size)
		return false;

	return true;
}

/**
 * init_tx_pools(): Initialize the set of transmit pools in the adapter.
 * @netdev: net device associated with the vnic interface
 *
 * Initialize the set of transmit pools in the ibmvnic adapter associated
 * with the net_device @netdev. If possible, reuse the existing tx pools.
 * Otherwise free any existing pools and allocate a new set of pools
 * before initializing them.
 *
 * Return: 0 on success and negative value on error.
 */
static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	int num_pools;
	u64 pool_size;	/* # of buffers in pool */
	u64 buff_size;
	int i, j, rc;

	num_pools = adapter->req_tx_queues;

	/* We must notify the VIOS about the LTB on all resets - but we only
	 * need to alloc/populate pools if either the number of buffers or
	 * size of each buffer in the pool has changed.
	 */
	if (reuse_tx_pools(adapter)) {
		netdev_dbg(netdev, "Reusing tx pools\n");
		goto update_ltb;
	}

	/* Allocate/populate the pools. */
	release_tx_pools(adapter);

	pool_size = adapter->req_tx_entries_per_subcrq;
	num_pools = adapter->num_active_tx_scrqs;

	adapter->tx_pool = kcalloc(num_pools,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -ENOMEM;

	adapter->tso_pool = kcalloc(num_pools,
				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	/* To simplify release_tx_pools() ensure that ->tx_pool and
	 * ->tso_pool are either both NULL or both non-NULL.
	 */
	if (!adapter->tso_pool) {
		kfree(adapter->tx_pool);
		adapter->tx_pool = NULL;
		return -ENOMEM;
	}

	/* Set num_active_tx_pools early. If we fail below after partial
	 * allocation, release_tx_pools() will know how many to look for.
	 */
	adapter->num_active_tx_pools = num_pools;

	buff_size = adapter->req_mtu + VLAN_HLEN;
	buff_size = ALIGN(buff_size, L1_CACHE_BYTES);

	for (i = 0; i < num_pools; i++) {
		dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
			i, adapter->req_tx_entries_per_subcrq, buff_size);

		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
				      pool_size, buff_size);
		if (rc)
			goto out_release;

		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
				      IBMVNIC_TSO_BUFS,
				      IBMVNIC_TSO_BUF_SZ);
		if (rc)
			goto out_release;
	}

	adapter->prev_tx_pool_size = pool_size;
	adapter->prev_mtu = adapter->req_mtu;

update_ltb:
	/* NOTE: All tx_pools have the same number of buffers (which is
	 *       same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
	 *       buffers (see calls init_one_tx_pool() for these).
	 *       For consistency, we use tx_pool->num_buffers and
	 *       tso_pool->num_buffers below.
	 */
	rc = -1;
	for (i = 0; i < num_pools; i++) {
		struct ibmvnic_tx_pool *tso_pool;
		struct ibmvnic_tx_pool *tx_pool;
		u32 ltb_size;

		tx_pool = &adapter->tx_pool[i];
		ltb_size = tx_pool->num_buffers * tx_pool->buf_size;
		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 ltb_size))
			goto out;

		dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n",
			i, tx_pool->long_term_buff.buff,
			tx_pool->num_buffers, tx_pool->buf_size);

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;

		for (j = 0; j < tx_pool->num_buffers; j++)
			tx_pool->free_map[j] = j;

		tso_pool = &adapter->tso_pool[i];
		ltb_size = tso_pool->num_buffers * tso_pool->buf_size;
		if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff,
					 ltb_size))
			goto out;

		dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n",
			i, tso_pool->long_term_buff.buff,
			tso_pool->num_buffers, tso_pool->buf_size);

		tso_pool->consumer_index = 0;
		tso_pool->producer_index = 0;

		for (j = 0; j < tso_pool->num_buffers; j++)
			tso_pool->free_map[j] = j;
	}

	return 0;
out_release:
	release_tx_pools(adapter);
out:
	/* We failed to allocate one or more LTBs or map them on the VIOS.
	 * Hold onto the pools and any LTBs that we did allocate/map.
	 */
	return rc;
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
	adapter->napi_enabled = false;
}

static const char *adapter_state_to_string(enum vnic_state state)
{
	switch (state) {
	case VNIC_PROBING:
		return "PROBING";
	case VNIC_PROBED:
		return "PROBED";
	case VNIC_OPENING:
		return "OPENING";
	case VNIC_OPEN:
		return "OPEN";
	case VNIC_CLOSING:
		return "CLOSING";
	case VNIC_CLOSED:
		return "CLOSED";
	case VNIC_REMOVING:
		return "REMOVING";
	case VNIC_REMOVED:
		return "REMOVED";
	case VNIC_DOWN:
		return "DOWN";
	}
	return "UNKNOWN";
}

static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(20000);
	int retry_count = 0;
	int retries = 10;
	bool retry;
	int rc;

	do {
		retry = false;
		if (retry_count > retries) {
			netdev_warn(netdev, "Login attempts exceeded\n");
			return -EACCES;
		}

		adapter->init_done_rc = 0;
		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc)
			return rc;

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_warn(netdev, "Login timed out, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			continue;
		}

		if (adapter->init_done_rc == ABORTED) {
			netdev_warn(netdev, "Login aborted, retrying...\n");
			retry = true;
			adapter->init_done_rc = 0;
			retry_count++;
			/* FW or device may be busy, so
			 * wait a bit before retrying login
			 */
			msleep(500);
		} else if (adapter->init_done_rc == PARTIALSUCCESS) {
			retry_count++;
			release_sub_crqs(adapter, 1);

			retry = true;
			netdev_dbg(netdev,
				   "Received partial success, retrying...\n");
			adapter->init_done_rc = 0;
			reinit_completion(&adapter->init_done);
			send_query_cap(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				netdev_warn(netdev,
					    "Capabilities query timed out\n");
				return -ETIMEDOUT;
			}

			rc = init_sub_crqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ initialization failed\n");
				return -EIO;
			}

			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				netdev_warn(netdev,
					    "SCRQ irq initialization failed\n");
				return -EIO;
			}
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
				    adapter->init_done_rc);
			return -EIO;
		}
	} while (retry);

	__ibmvnic_set_mac(netdev, adapter->mac_addr);

	netdev_dbg(netdev, "[S:%s] Login succeeded\n", adapter_state_to_string(adapter->state));
	return 0;
}
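
/* A PARTIALSUCCESS login response indicates the server accepted only part
 * of the negotiated request, so the retry path above releases the
 * sub-CRQs, re-queries capabilities and rebuilds the sub-CRQs before
 * logging in again. ABORTED responses and timeouts simply retry, up to
 * the 10-attempt limit.
 */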

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_napi(adapter);
	release_login_buffer(adapter);
	release_login_rsp_buffer(adapter);
}

static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(20000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -ETIMEDOUT;
		}

		if (adapter->init_done_rc == PARTIALSUCCESS) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		} else if (adapter->init_done_rc) {
			netdev_warn(netdev, "Unable to set link state, rc=%d\n",
				    adapter->init_done_rc);
			return adapter->init_done_rc;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;
	int rc;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Could not retrieve VPD size, rc = %d\n", rc);
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}
	mutex_unlock(&adapter->fw_lock);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		dev_err(dev, "Unable to retrieve VPD, rc = %d\n", rc);
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_query_map(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);
		netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		ibmvnic_napi_disable(adapter);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}
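
/* When reopening from VNIC_CLOSED, the sub-CRQ hard irqs were left
 * disabled by ibmvnic_disable_irqs() on close, so __ibmvnic_open() must
 * enable_irq() them again; the napi_schedule() calls presumably pick up
 * any completions that arrived while the queues were quiesced.
 */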

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	ASSERT_RTNL();

	/* If device failover is pending or we are about to reset, just set
	 * device state and return. Device operation will be handled by reset
	 * routine.
	 *
	 * It should be safe to overwrite the adapter->state here. Since
	 * we hold the rtnl, either the reset has not actually started or
	 * the rtnl got dropped during the set_link_state() in do_reset().
	 * In the former case, no one else is changing the state (again we
	 * have the rtnl) and in the latter case, do_reset() will detect and
	 * honor our setting below.
	 */
	if (adapter->failover_pending || (test_bit(0, &adapter->resetting))) {
		netdev_dbg(netdev, "[S:%s FOP:%d] Resetting, deferring open\n",
			   adapter_state_to_string(adapter->state),
			   adapter->failover_pending);
		adapter->state = VNIC_OPEN;
		rc = 0;
		goto out;
	}

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc)
			goto out;

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			release_rx_pools(adapter);
			release_tx_pools(adapter);
			goto out;
		}
	}

	rc = __ibmvnic_open(netdev);

out:
	/* If open failed and there is a pending failover or in-progress reset,
	 * set device state and return. Device operation will be handled by
	 * reset routine. See also comments above regarding rtnl.
	 */
	if (rc &&
	    (adapter->failover_pending || (test_bit(0, &adapter->resetting)))) {
		adapter->state = VNIC_OPEN;
		rc = 0;
	}
	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = adapter->num_active_rx_pools;
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_tx_pool *tx_pool)
{
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int i;

	if (!tx_pool || !tx_pool->tx_buff)
		return;

	tx_entries = tx_pool->num_buffers;

	for (i = 0; i < tx_entries; i++) {
		tx_buff = &tx_pool->tx_buff[i];
		if (tx_buff && tx_buff->skb) {
			dev_kfree_skb_any(tx_buff->skb);
			tx_buff->skb = NULL;
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i;

	if (!adapter->tx_pool || !adapter->tso_pool)
		return;

	tx_scrqs = adapter->num_active_tx_pools;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
		clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
	}
}

static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->tx_scrq[i]);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_scrq_irq(adapter, adapter->rx_scrq[i]);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* ensure that transmissions are stopped if called by do_reset */
	if (test_bit(0, &adapter->resetting))
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);
	ibmvnic_disable_irqs(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	adapter->state = VNIC_CLOSED;
	return rc;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "[S:%s FOP:%d FRR:%d] Closing\n",
		   adapter_state_to_string(adapter->state),
		   adapter->failover_pending,
		   adapter->force_reset_recovery);

	/* If device failover is pending, just set device state and return.
	 * Device operation will be handled by reset routine.
	 */
	if (adapter->failover_pending) {
		adapter->state = VNIC_CLOSED;
		return 0;
	}

	rc = __ibmvnic_close(netdev);
	ibmvnic_cleanup(netdev);
	clean_rx_pools(adapter);
	clean_tx_pools(adapter);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of header lengths
 * @hdr_data: buffer to write the header to
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and total buffer length to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}
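
/* Bit layout of hdr_field as consumed above and in create_hdr_descs():
 * bit 6 requests the L2 header, bit 5 the L3 header and bit 4 the L4
 * header; bit 7 appears to act as the "any headers required" flag, as it
 * is what ibmvnic_xmit() checks before building header descriptors. The
 * same value, shifted left by one, is stored in hdr_desc.hdr.flag below.
 */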

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 *
 */
static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}
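
/* Worked example for create_hdr_descs() above: a 74 byte header blob
 * (14 byte Ethernet + 40 byte IPv6 + 20 byte TCP, say) packs 24 bytes
 * into the initial header descriptor and splits the remaining 50 bytes
 * across two extension descriptors (29 + 21), so the function returns 3.
 */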

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @skb: tx socket buffer
 * @indir_arr: indirect array
 * @num_entries: number of descriptors to be sent
 * @hdr_field: bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 *
 */
static void build_hdr_descs_arr(struct sk_buff *skb,
				union sub_crq *indir_arr,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	u8 hdr_data[140] = {0};
	int tot_len;

	tot_len = build_hdr_data(hdr_field, skb, hdr_len,
				 hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}

static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
					 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff;
	struct ibmvnic_tx_pool *tx_pool;
	union sub_crq tx_scrq_entry;
	int queue_num;
	int entries;
	int index;
	int i;

	ind_bufp = &tx_scrq->ind_buf;
	entries = (u64)ind_bufp->index;
	queue_num = tx_scrq->pool_index;

	for (i = entries - 1; i >= 0; --i) {
		tx_scrq_entry = ind_bufp->indir_arr[i];
		if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
			continue;
		index = be32_to_cpu(tx_scrq_entry.v1.correlator);
		if (index & IBMVNIC_TSO_POOL_MASK) {
			tx_pool = &adapter->tso_pool[queue_num];
			index &= ~IBMVNIC_TSO_POOL_MASK;
		} else {
			tx_pool = &adapter->tx_pool[queue_num];
		}
		tx_pool->free_map[tx_pool->consumer_index] = index;
		tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
					  tx_pool->num_buffers - 1 :
					  tx_pool->consumer_index - 1;
		tx_buff = &tx_pool->tx_buff[index];
		adapter->netdev->stats.tx_packets--;
		adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
		adapter->tx_stats_buffers[queue_num].packets--;
		adapter->tx_stats_buffers[queue_num].bytes -=
						tx_buff->skb->len;
		dev_kfree_skb_any(tx_buff->skb);
		tx_buff->skb = NULL;
		adapter->netdev->stats.tx_dropped++;
	}
	ind_bufp->index = 0;
	if (atomic_sub_return(entries, &tx_scrq->used) <=
	    (adapter->req_tx_entries_per_subcrq / 2) &&
	    __netif_subqueue_stopped(adapter->netdev, queue_num) &&
	    !test_bit(0, &adapter->resetting)) {
		netif_wake_subqueue(adapter->netdev, queue_num);
		netdev_dbg(adapter->netdev, "Started queue %d\n",
			   queue_num);
	}
}

static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
				 struct ibmvnic_sub_crq_queue *tx_scrq)
{
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	u64 dma_addr;
	u64 entries;
	u64 handle;
	int rc;

	ind_bufp = &tx_scrq->ind_buf;
	dma_addr = (u64)ind_bufp->indir_dma;
	entries = (u64)ind_bufp->index;
	handle = tx_scrq->handle;

	if (!entries)
		return 0;
	rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
	if (rc)
		ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
	else
		ind_bufp->index = 0;
	return rc;
}

static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_ind_xmit_queue *ind_bufp;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	netdev_tx_t ret = NETDEV_TX_OK;
	unsigned int tx_map_failed = 0;
	union sub_crq indir_arr[16];
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	int index = 0;
	u8 proto = 0;

	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, queue_num);
	ind_bufp = &tx_scrq->ind_buf;

	if (test_bit(0, &adapter->resetting)) {
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		goto out;
	}
	if (skb_is_gso(skb))
		tx_pool = &adapter->tso_pool[queue_num];
	else
		tx_pool = &adapter->tx_pool[queue_num];

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (index == IBMVNIC_INVALID_MAP) {
		dev_kfree_skb_any(skb);
		tx_send_failed++;
		tx_dropped++;
		ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;

	offset = index * tx_pool->buf_size;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, tx_pool->buf_size);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur, skb_frag_address(frag),
			       skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	/* post changes to long_term_buff *dst before VIOS accessing it */
	dma_wmb();

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) % tx_pool->num_buffers;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;

	if (skb_is_gso(skb))
		tx_crq.v1.correlator =
			cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
	else
		tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}

	if ((*hdrs >> 7) & 1)
		build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);

	tx_crq.v1.n_crq_elem = num_entries;
	tx_buff->num_entries = num_entries;
	/* flush buffer if current entry can not fit */
	if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_flush_err;
	}

	indir_arr[0] = tx_crq;
	memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
	       num_entries * sizeof(struct ibmvnic_generic_scrq));
	ind_bufp->index += num_entries;
	if (__netdev_tx_sent_queue(txq, skb->len,
				   netdev_xmit_more() &&
				   ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
		lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
		if (lpar_rc != H_SUCCESS)
			goto tx_err;
	}

	if (atomic_add_return(num_entries, &tx_scrq->used)
					>= adapter->req_tx_entries_per_subcrq) {
		netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
		netif_stop_subqueue(netdev, queue_num);
	}

	tx_packets++;
	tx_bytes += skb->len;
	txq_trans_cond_update(txq);
	ret = NETDEV_TX_OK;
	goto out;

tx_flush_err:
	dev_kfree_skb_any(skb);
	tx_buff->skb = NULL;
	tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
				  tx_pool->num_buffers - 1 :
				  tx_pool->consumer_index - 1;
	tx_dropped++;
tx_err:
	if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
		dev_err_ratelimited(dev, "tx: send failed\n");

	if (lpar_rc == H_CLOSED || adapter->failover_pending) {
		/* Disable TX and report carrier off if queue is closed
		 * or pending failover.
		 * Firmware guarantees that a signal will be sent to the
		 * driver, triggering a reset or some other action.
		 */
		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
	}
out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	adapter->tx_stats_buffers[queue_num].packets += tx_packets;
	adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
	adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;

	return ret;
}
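
/* ibmvnic_xmit() batches descriptors in the sub-CRQ's indirect buffer
 * and only calls ibmvnic_tx_scrq_flush() when the new entry would not
 * fit, or when __netdev_tx_sent_queue() reports that the stack has
 * stopped batching (netdev_xmit_more() false) or the indirect buffer is
 * full - amortizing hcall overhead across back-to-back transmits.
 */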

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}
2131 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
2133 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2134 union ibmvnic_crq crq;
2137 if (!is_valid_ether_addr(dev_addr)) {
2138 rc = -EADDRNOTAVAIL;
2142 memset(&crq, 0, sizeof(crq));
2143 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
2144 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
2145 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
2147 mutex_lock(&adapter->fw_lock);
2148 adapter->fw_done_rc = 0;
2149 reinit_completion(&adapter->fw_done);
2151 rc = ibmvnic_send_crq(adapter, &crq);
2154 mutex_unlock(&adapter->fw_lock);
2158 rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
2159 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
2160 if (rc || adapter->fw_done_rc) {
2162 mutex_unlock(&adapter->fw_lock);
2165 mutex_unlock(&adapter->fw_lock);
2168 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
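/**
 * ibmvnic_set_mac - .ndo_set_mac_address handler
 * @netdev: net device struct
 * @p: struct sockaddr carrying the requested address
 *
 * Records the requested MAC in adapter->mac_addr; the change is pushed
 * to the server via __ibmvnic_set_mac() immediately unless the adapter
 * is still in the PROBED state.
 */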
2172 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
2174 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2175 struct sockaddr *addr = p;
2179 if (!is_valid_ether_addr(addr->sa_data))
2180 return -EADDRNOTAVAIL;
2182 ether_addr_copy(adapter->mac_addr, addr->sa_data);
2183 if (adapter->state != VNIC_PROBED)
2184 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
2189 static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
2192 case VNIC_RESET_FAILOVER:
2194 case VNIC_RESET_MOBILITY:
2196 case VNIC_RESET_FATAL:
2198 case VNIC_RESET_NON_FATAL:
2200 case VNIC_RESET_TIMEOUT:
2202 case VNIC_RESET_CHANGE_PARAM:
2203 return "CHANGE_PARAM";
2204 case VNIC_RESET_PASSIVE_INIT:
2205 return "PASSIVE_INIT";
2211 * do_reset returns zero if we are able to keep processing reset events, or
2212 * non-zero if we hit a fatal error and must halt.
2214 static int do_reset(struct ibmvnic_adapter *adapter,
2215 struct ibmvnic_rwi *rwi, u32 reset_state)
2217 struct net_device *netdev = adapter->netdev;
2218 u64 old_num_rx_queues, old_num_tx_queues;
2219 u64 old_num_rx_slots, old_num_tx_slots;
2222 netdev_dbg(adapter->netdev,
2223 "[S:%s FOP:%d] Reset reason: %s, reset_state: %s\n",
2224 adapter_state_to_string(adapter->state),
2225 adapter->failover_pending,
2226 reset_reason_to_string(rwi->reset_reason),
2227 adapter_state_to_string(reset_state));
2229 adapter->reset_reason = rwi->reset_reason;
2230 /* requestor of VNIC_RESET_CHANGE_PARAM already has the rtnl lock */
2231 if (adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
2234 /* Now that we have the rtnl lock, clear any pending failover.
2235 * This will ensure ibmvnic_open() has either completed or will
2236 * block until failover is complete.
2238 if (rwi->reset_reason == VNIC_RESET_FAILOVER)
2239 adapter->failover_pending = false;
2241 /* read the state and check (again) after getting rtnl */
2242 reset_state = adapter->state;
2244 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2249 netif_carrier_off(netdev);
2251 old_num_rx_queues = adapter->req_rx_queues;
2252 old_num_tx_queues = adapter->req_tx_queues;
2253 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
2254 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
2256 ibmvnic_cleanup(netdev);
2258 if (reset_state == VNIC_OPEN &&
2259 adapter->reset_reason != VNIC_RESET_MOBILITY &&
2260 adapter->reset_reason != VNIC_RESET_FAILOVER) {
2261 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2262 rc = __ibmvnic_close(netdev);
2266 adapter->state = VNIC_CLOSING;
2268 /* Release the RTNL lock before link state change and
2269 * re-acquire after the link state change to allow
2270 * linkwatch_event to grab the RTNL lock and run during
2274 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
2279 if (adapter->state == VNIC_OPEN) {
2280 /* When we dropped rtnl, ibmvnic_open() got
2281 * it and noticed that we are resetting and
2282 * set the adapter state to OPEN. Update our
2283 * new "target" state, and resume the reset
2284 * from VNIC_CLOSING state.
2287 "Open changed state from %s, updating.\n",
2288 adapter_state_to_string(reset_state));
2289 reset_state = VNIC_OPEN;
2290 adapter->state = VNIC_CLOSING;
2293 if (adapter->state != VNIC_CLOSING) {
2294 /* If someone else changed the adapter state
2295 * when we dropped the rtnl, fail the reset
2300 adapter->state = VNIC_CLOSED;
2304 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2305 release_resources(adapter);
2306 release_sub_crqs(adapter, 1);
2307 release_crq_queue(adapter);
2310 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
2311 /* remove the closed state so when we call open it appears
2312 * we are coming from the probed state.
2314 adapter->state = VNIC_PROBED;
2316 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2317 rc = init_crq_queue(adapter);
2318 } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
2319 rc = ibmvnic_reenable_crq_queue(adapter);
2320 release_sub_crqs(adapter, 1);
2322 rc = ibmvnic_reset_crq(adapter);
2323 if (rc == H_CLOSED || rc == H_SUCCESS) {
2324 rc = vio_enable_interrupts(adapter->vdev);
2326 netdev_err(adapter->netdev,
2327 "Reset failed to enable interrupts. rc=%d\n",
2333 netdev_err(adapter->netdev,
2334 "Reset couldn't initialize crq. rc=%d\n", rc);
2338 rc = ibmvnic_reset_init(adapter, true);
2342 /* If the adapter was in PROBE or DOWN state prior to the reset,
2345 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN) {
2350 rc = ibmvnic_login(netdev);
2354 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2355 rc = init_resources(adapter);
2358 } else if (adapter->req_rx_queues != old_num_rx_queues ||
2359 adapter->req_tx_queues != old_num_tx_queues ||
2360 adapter->req_rx_add_entries_per_subcrq !=
2362 adapter->req_tx_entries_per_subcrq !=
2364 !adapter->rx_pool ||
2365 !adapter->tso_pool ||
2366 !adapter->tx_pool) {
2367 release_napi(adapter);
2368 release_vpd_data(adapter);
2370 rc = init_resources(adapter);
2375 rc = init_tx_pools(netdev);
2378 "init tx pools failed (%d)\n",
2383 rc = init_rx_pools(netdev);
2386 "init rx pools failed (%d)\n",
2391 ibmvnic_disable_irqs(adapter);
2393 adapter->state = VNIC_CLOSED;
2395 if (reset_state == VNIC_CLOSED) {
2400 rc = __ibmvnic_open(netdev);
2402 rc = IBMVNIC_OPEN_FAILED;
2406 /* refresh device's multicast list */
2407 ibmvnic_set_multi(netdev);
2409 if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
2410 adapter->reset_reason == VNIC_RESET_MOBILITY)
2411 __netdev_notify_peers(netdev);
2416 /* restore the adapter state if reset failed */
2418 adapter->state = reset_state;
2419 /* requestor of VNIC_RESET_CHANGE_PARAM should still hold the rtnl lock */
2420 if (adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
2423 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Reset done, rc %d\n",
2424 adapter_state_to_string(adapter->state),
2425 adapter->failover_pending, rc);
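/**
 * do_hard_reset - tear the adapter down completely and re-initialize it
 * @adapter: ibmvnic adapter struct
 * @rwi: reset work item describing the reset reason
 * @reset_state: adapter state when the reset was scheduled
 *
 * Unlike do_reset(), this unconditionally releases all resources, the
 * sub-CRQs and the main CRQ, then re-runs the probe-time initialization
 * before reopening the device.
 */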
2429 static int do_hard_reset(struct ibmvnic_adapter *adapter,
2430 struct ibmvnic_rwi *rwi, u32 reset_state)
2432 struct net_device *netdev = adapter->netdev;
2435 netdev_dbg(adapter->netdev, "Hard resetting driver (%s)\n",
2436 reset_reason_to_string(rwi->reset_reason));
2438 /* read the state and check (again) after getting rtnl */
2439 reset_state = adapter->state;
2441 if (reset_state == VNIC_REMOVING || reset_state == VNIC_REMOVED) {
2446 netif_carrier_off(netdev);
2447 adapter->reset_reason = rwi->reset_reason;
2449 ibmvnic_cleanup(netdev);
2450 release_resources(adapter);
2451 release_sub_crqs(adapter, 0);
2452 release_crq_queue(adapter);
2454 /* remove the closed state so when we call open it appears
2455 * we are coming from the probed state.
2457 adapter->state = VNIC_PROBED;
2459 reinit_completion(&adapter->init_done);
2460 rc = init_crq_queue(adapter);
2462 netdev_err(adapter->netdev,
2463 "Couldn't initialize crq. rc=%d\n", rc);
2467 rc = ibmvnic_reset_init(adapter, false);
2471 /* If the adapter was in PROBE or DOWN state prior to the reset,
2474 if (reset_state == VNIC_PROBED || reset_state == VNIC_DOWN)
2477 rc = ibmvnic_login(netdev);
2481 rc = init_resources(adapter);
2485 ibmvnic_disable_irqs(adapter);
2486 adapter->state = VNIC_CLOSED;
2488 if (reset_state == VNIC_CLOSED)
2491 rc = __ibmvnic_open(netdev);
2493 rc = IBMVNIC_OPEN_FAILED;
2497 __netdev_notify_peers(netdev);
2499 /* restore adapter state if reset failed */
2501 adapter->state = reset_state;
2502 netdev_dbg(adapter->netdev, "[S:%s FOP:%d] Hard reset done, rc %d\n",
2503 adapter_state_to_string(adapter->state),
2504 adapter->failover_pending, rc);
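/* Pop the next queued reset work item off &adapter->rwi_list (under
 * ->rwi_lock), or return NULL when the list is empty.
 */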
2508 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2510 struct ibmvnic_rwi *rwi;
2511 unsigned long flags;
2513 spin_lock_irqsave(&adapter->rwi_lock, flags);
2515 if (!list_empty(&adapter->rwi_list)) {
2516 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2518 list_del(&rwi->list);
2523 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2528 * do_passive_init - complete probing when partner device is detected.
2529 * @adapter: ibmvnic_adapter struct
2531 * If the ibmvnic device does not have a partner device to communicate with at boot
2532 * and that partner device comes online at a later time, this function is called
2533 * to complete the initialization process of the ibmvnic device.
2534 * Caller is expected to hold rtnl_lock().
2536 * Returns non-zero if sub-CRQs are not initialized properly, leaving the device
2537 * in the down state.
2538 * Returns 0 upon success and the device is in PROBED state.
2541 static int do_passive_init(struct ibmvnic_adapter *adapter)
2543 unsigned long timeout = msecs_to_jiffies(30000);
2544 struct net_device *netdev = adapter->netdev;
2545 struct device *dev = &adapter->vdev->dev;
2548 netdev_dbg(netdev, "Partner device found, probing.\n");
2550 adapter->state = VNIC_PROBING;
2551 reinit_completion(&adapter->init_done);
2552 adapter->init_done_rc = 0;
2553 adapter->crq.active = true;
2555 rc = send_crq_init_complete(adapter);
2559 rc = send_version_xchg(adapter);
2561 netdev_dbg(adapter->netdev, "send_version_xchg failed, rc=%d\n", rc);
2563 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
2564 dev_err(dev, "Initialization sequence timed out\n");
2569 rc = init_sub_crqs(adapter);
2571 dev_err(dev, "Initialization of sub crqs failed, rc=%d\n", rc);
2575 rc = init_sub_crq_irqs(adapter);
2577 dev_err(dev, "Failed to initialize sub crq irqs, rc=%d\n", rc);
2581 netdev->mtu = adapter->req_mtu - ETH_HLEN;
2582 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
2583 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
2585 adapter->state = VNIC_PROBED;
2586 netdev_dbg(netdev, "Probed successfully. Waiting for signal from partner device.\n");
2591 release_sub_crqs(adapter, 1);
2593 adapter->state = VNIC_DOWN;
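/**
 * __ibmvnic_reset - worker that drains the queue of pending resets
 * @work: the adapter's ibmvnic_reset work_struct
 *
 * Only one instance may run at a time: the ->resetting bit serializes
 * workers, and a colliding invocation is re-queued on system_long_wq as
 * delayed work. Each work item is dispatched to do_passive_init(),
 * do_hard_reset() or do_reset() depending on its reason and on whether a
 * transport event forced hard-reset recovery.
 */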
2597 static void __ibmvnic_reset(struct work_struct *work)
2599 struct ibmvnic_adapter *adapter;
2600 bool saved_state = false;
2601 struct ibmvnic_rwi *tmprwi;
2602 struct ibmvnic_rwi *rwi;
2603 unsigned long flags;
2608 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2610 if (test_and_set_bit_lock(0, &adapter->resetting)) {
2611 queue_delayed_work(system_long_wq,
2612 &adapter->ibmvnic_delayed_reset,
2613 IBMVNIC_RESET_DELAY);
2617 rwi = get_next_rwi(adapter);
2619 spin_lock_irqsave(&adapter->state_lock, flags);
2621 if (adapter->state == VNIC_REMOVING ||
2622 adapter->state == VNIC_REMOVED) {
2623 spin_unlock_irqrestore(&adapter->state_lock, flags);
2630 reset_state = adapter->state;
2633 spin_unlock_irqrestore(&adapter->state_lock, flags);
2635 if (rwi->reset_reason == VNIC_RESET_PASSIVE_INIT) {
2637 rc = do_passive_init(adapter);
2640 netif_carrier_on(adapter->netdev);
2641 } else if (adapter->force_reset_recovery) {
2642 /* Since we are doing a hard reset now, clear the
2643 * failover_pending flag so we don't ignore any
2644 * future MOBILITY or other resets.
2646 adapter->failover_pending = false;
2648 /* Transport event occurred during previous reset */
2649 if (adapter->wait_for_reset) {
2650 /* Previous was CHANGE_PARAM; caller locked */
2651 adapter->force_reset_recovery = false;
2652 rc = do_hard_reset(adapter, rwi, reset_state);
2655 adapter->force_reset_recovery = false;
2656 rc = do_hard_reset(adapter, rwi, reset_state);
2664 /* If auto-priority-failover is enabled we can get
2665 * back to back failovers during resets, resulting
2666 * in at least two failed resets (from high-priority
2667 * backing device to low-priority one and then back).
2668 * If resets continue to fail beyond that, give the
2669 * adapter some time to settle down before retrying.
2671 if (num_fails >= 3) {
2672 netdev_dbg(adapter->netdev,
2673 "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
2674 adapter_state_to_string(adapter->state),
2676 set_current_state(TASK_UNINTERRUPTIBLE);
2677 schedule_timeout(60 * HZ);
2680 rc = do_reset(adapter, rwi, reset_state);
2683 adapter->last_reset_time = jiffies;
2686 netdev_dbg(adapter->netdev, "Reset failed, rc=%d\n", rc);
2688 rwi = get_next_rwi(adapter);
2691 * If there is another reset queued, free the previous rwi
2692 * and process the new reset even if previous reset failed
2693 * (the previous reset could have failed because of a fail
2694 * over for instance, so process the fail over).
2696 * If there are no resets queued and the previous reset failed,
2697 * the adapter would be in an undefined state. So retry the
2698 * previous reset as a hard reset.
2705 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2706 rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
2707 adapter->force_reset_recovery = true;
2710 if (adapter->wait_for_reset) {
2711 adapter->reset_done_rc = rc;
2712 complete(&adapter->reset_done);
2715 clear_bit_unlock(0, &adapter->resetting);
2717 netdev_dbg(adapter->netdev,
2718 "[S:%s FRR:%d WFR:%d] Done processing resets\n",
2719 adapter_state_to_string(adapter->state),
2720 adapter->force_reset_recovery,
2721 adapter->wait_for_reset);
2724 static void __ibmvnic_delayed_reset(struct work_struct *work)
2726 struct ibmvnic_adapter *adapter;
2728 adapter = container_of(work, struct ibmvnic_adapter,
2729 ibmvnic_delayed_reset.work);
2730 __ibmvnic_reset(&adapter->ibmvnic_reset);
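/**
 * ibmvnic_reset - queue a reset work item and kick the reset worker
 * @adapter: ibmvnic adapter struct
 * @reason: why the reset is requested
 *
 * The request is dropped if the adapter is being removed, if a failover
 * is pending (unless this request is the failover itself), or if an
 * identical reset is already queued. After a transport event the queue
 * is flushed first so the recovery reset is processed next.
 */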
2733 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2734 enum ibmvnic_reset_reason reason)
2736 struct list_head *entry, *tmp_entry;
2737 struct ibmvnic_rwi *rwi, *tmp;
2738 struct net_device *netdev = adapter->netdev;
2739 unsigned long flags;
2742 spin_lock_irqsave(&adapter->rwi_lock, flags);
2744 /* If failover is pending don't schedule any other reset.
2745 * Instead let the failover complete. If there is already a
2746 * failover reset scheduled, we will detect and drop the
2747 * duplicate reset when walking the ->rwi_list below.
2749 if (adapter->state == VNIC_REMOVING ||
2750 adapter->state == VNIC_REMOVED ||
2751 (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) {
2753 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2757 if (adapter->state == VNIC_PROBING) {
2758 netdev_warn(netdev, "Adapter reset during probe\n");
2759 adapter->init_done_rc = -EAGAIN;
2764 list_for_each_entry(tmp, &adapter->rwi_list, list) {
2765 if (tmp->reset_reason == reason) {
2766 netdev_dbg(netdev, "Skipping matching reset, reason=%s\n",
2767 reset_reason_to_string(reason));
2773 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2778 /* if we just received a transport event,
2779 * flush reset queue and process this reset
2781 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2782 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2785 rwi->reset_reason = reason;
2786 list_add_tail(&rwi->list, &adapter->rwi_list);
2787 netdev_dbg(adapter->netdev, "Scheduling reset (reason %s)\n",
2788 reset_reason_to_string(reason));
2789 queue_work(system_long_wq, &adapter->ibmvnic_reset);
2793 /* ibmvnic_close() below can block, so drop the lock first */
2794 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2797 ibmvnic_close(netdev);
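/**
 * ibmvnic_tx_timeout - .ndo_tx_timeout handler
 * @dev: net device struct
 * @txqueue: index of the stalled TX queue
 *
 * Schedules a TIMEOUT reset unless a reset is already in progress or the
 * previous reset happened within the watchdog interval.
 */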
2802 static void ibmvnic_tx_timeout(struct net_device *dev, unsigned int txqueue)
2804 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2806 if (test_bit(0, &adapter->resetting)) {
2807 netdev_err(adapter->netdev,
2808 "Adapter is resetting, skip timeout reset\n");
2811 /* No queuing up reset until at least 5 seconds (default watchdog val)
2814 if (time_before(jiffies, (adapter->last_reset_time + dev->watchdog_timeo))) {
2815 netdev_dbg(dev, "Not yet time to tx timeout.\n");
2818 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
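/* Return an RX buffer's slot to its pool's free map and account for the
 * buffer no longer being available to the device.
 */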
2821 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2822 struct ibmvnic_rx_buff *rx_buff)
2824 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2826 rx_buff->skb = NULL;
2828 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2829 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2831 atomic_dec(&pool->available);
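/**
 * ibmvnic_poll - NAPI poll handler for one RX sub-CRQ
 * @napi: napi struct for this queue
 * @budget: maximum number of frames to process
 *
 * Drains up to @budget completed RX descriptors, passes the skbs up via
 * napi_gro_receive(), replenishes the RX pool when it runs at least half
 * empty, and re-enables the queue interrupt once the ring is drained,
 * re-polling if new completions raced with napi_complete_done().
 */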
2834 static int ibmvnic_poll(struct napi_struct *napi, int budget)
2836 struct ibmvnic_sub_crq_queue *rx_scrq;
2837 struct ibmvnic_adapter *adapter;
2838 struct net_device *netdev;
2839 int frames_processed;
2843 adapter = netdev_priv(netdev);
2844 scrq_num = (int)(napi - adapter->napi);
2845 frames_processed = 0;
2846 rx_scrq = adapter->rx_scrq[scrq_num];
2849 while (frames_processed < budget) {
2850 struct sk_buff *skb;
2851 struct ibmvnic_rx_buff *rx_buff;
2852 union sub_crq *next;
2857 if (unlikely(test_bit(0, &adapter->resetting) &&
2858 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2859 enable_scrq_irq(adapter, rx_scrq);
2860 napi_complete_done(napi, frames_processed);
2861 return frames_processed;
2864 if (!pending_scrq(adapter, rx_scrq))
2866 next = ibmvnic_next_scrq(adapter, rx_scrq);
2867 rx_buff = (struct ibmvnic_rx_buff *)
2868 be64_to_cpu(next->rx_comp.correlator);
2869 /* do error checking */
2870 if (next->rx_comp.rc) {
2871 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2872 be16_to_cpu(next->rx_comp.rc));
2873 /* free the entry */
2874 next->rx_comp.first = 0;
2875 dev_kfree_skb_any(rx_buff->skb);
2876 remove_buff_from_pool(adapter, rx_buff);
2878 } else if (!rx_buff->skb) {
2879 /* free the entry */
2880 next->rx_comp.first = 0;
2881 remove_buff_from_pool(adapter, rx_buff);
2885 length = be32_to_cpu(next->rx_comp.len);
2886 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2887 flags = next->rx_comp.flags;
2889 /* load long_term_buff before copying to skb */
2891 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2894 /* VLAN Header has been stripped by the system firmware and
2895 * needs to be inserted by the driver
2897 if (adapter->rx_vlan_header_insertion &&
2898 (flags & IBMVNIC_VLAN_STRIPPED))
2899 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2900 ntohs(next->rx_comp.vlan_tci));
2902 /* free the entry */
2903 next->rx_comp.first = 0;
2904 remove_buff_from_pool(adapter, rx_buff);
2906 skb_put(skb, length);
2907 skb->protocol = eth_type_trans(skb, netdev);
2908 skb_record_rx_queue(skb, scrq_num);
2910 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2911 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2912 skb->ip_summed = CHECKSUM_UNNECESSARY;
2916 napi_gro_receive(napi, skb); /* send it up */
2917 netdev->stats.rx_packets++;
2918 netdev->stats.rx_bytes += length;
2919 adapter->rx_stats_buffers[scrq_num].packets++;
2920 adapter->rx_stats_buffers[scrq_num].bytes += length;
2924 if (adapter->state != VNIC_CLOSING &&
2925 ((atomic_read(&adapter->rx_pool[scrq_num].available) <
2926 adapter->req_rx_add_entries_per_subcrq / 2) ||
2927 frames_processed < budget))
2928 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2929 if (frames_processed < budget) {
2930 if (napi_complete_done(napi, frames_processed)) {
2931 enable_scrq_irq(adapter, rx_scrq);
2932 if (pending_scrq(adapter, rx_scrq)) {
2933 if (napi_reschedule(napi)) {
2934 disable_scrq_irq(adapter, rx_scrq);
2940 return frames_processed;
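/* Apply the parameters in adapter->desired by scheduling a CHANGE_PARAM
 * reset and waiting for it to complete. If that reset fails, the saved
 * ->fallback values are restored with a second CHANGE_PARAM reset.
 */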
2943 static int wait_for_reset(struct ibmvnic_adapter *adapter)
2947 adapter->fallback.mtu = adapter->req_mtu;
2948 adapter->fallback.rx_queues = adapter->req_rx_queues;
2949 adapter->fallback.tx_queues = adapter->req_tx_queues;
2950 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2951 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2953 reinit_completion(&adapter->reset_done);
2954 adapter->wait_for_reset = true;
2955 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2961 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done, 60000);
2968 if (adapter->reset_done_rc) {
2970 adapter->desired.mtu = adapter->fallback.mtu;
2971 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2972 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2973 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2974 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2976 reinit_completion(&adapter->reset_done);
2977 adapter->wait_for_reset = true;
2978 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2983 rc = ibmvnic_wait_for_completion(adapter, &adapter->reset_done,
2991 adapter->wait_for_reset = false;
2996 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2998 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3000 adapter->desired.mtu = new_mtu + ETH_HLEN;
3002 return wait_for_reset(adapter);
3005 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
3006 struct net_device *dev,
3007 netdev_features_t features)
3009 /* Some backing hardware adapters cannot
3010 * handle packets with an MSS less than 224
3011 * or with only one segment.
3013 if (skb_is_gso(skb)) {
3014 if (skb_shinfo(skb)->gso_size < 224 ||
3015 skb_shinfo(skb)->gso_segs == 1)
3016 features &= ~NETIF_F_GSO_MASK;
3022 static const struct net_device_ops ibmvnic_netdev_ops = {
3023 .ndo_open = ibmvnic_open,
3024 .ndo_stop = ibmvnic_close,
3025 .ndo_start_xmit = ibmvnic_xmit,
3026 .ndo_set_rx_mode = ibmvnic_set_multi,
3027 .ndo_set_mac_address = ibmvnic_set_mac,
3028 .ndo_validate_addr = eth_validate_addr,
3029 .ndo_tx_timeout = ibmvnic_tx_timeout,
3030 .ndo_change_mtu = ibmvnic_change_mtu,
3031 .ndo_features_check = ibmvnic_features_check,
3034 /* ethtool functions */
3036 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
3037 struct ethtool_link_ksettings *cmd)
3039 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3042 rc = send_query_phys_parms(adapter);
3044 adapter->speed = SPEED_UNKNOWN;
3045 adapter->duplex = DUPLEX_UNKNOWN;
3047 cmd->base.speed = adapter->speed;
3048 cmd->base.duplex = adapter->duplex;
3049 cmd->base.port = PORT_FIBRE;
3050 cmd->base.phy_address = 0;
3051 cmd->base.autoneg = AUTONEG_ENABLE;
3056 static void ibmvnic_get_drvinfo(struct net_device *netdev,
3057 struct ethtool_drvinfo *info)
3059 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3061 strscpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
3062 strscpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
3063 strscpy(info->fw_version, adapter->fw_version,
3064 sizeof(info->fw_version));
3067 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
3069 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3071 return adapter->msg_enable;
3074 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
3076 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3078 adapter->msg_enable = data;
3081 static u32 ibmvnic_get_link(struct net_device *netdev)
3083 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3085 /* Don't need to send a query because we request a logical link up at
3086 * init and then we wait for link state indications
3088 return adapter->logical_link_state;
3091 static void ibmvnic_get_ringparam(struct net_device *netdev,
3092 struct ethtool_ringparam *ring,
3093 struct kernel_ethtool_ringparam *kernel_ring,
3094 struct netlink_ext_ack *extack)
3096 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3098 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
3099 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
3100 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
3102 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
3103 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
3105 ring->rx_mini_max_pending = 0;
3106 ring->rx_jumbo_max_pending = 0;
3107 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
3108 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
3109 ring->rx_mini_pending = 0;
3110 ring->rx_jumbo_pending = 0;
3113 static int ibmvnic_set_ringparam(struct net_device *netdev,
3114 struct ethtool_ringparam *ring,
3115 struct kernel_ethtool_ringparam *kernel_ring,
3116 struct netlink_ext_ack *extack)
3118 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3122 adapter->desired.rx_entries = ring->rx_pending;
3123 adapter->desired.tx_entries = ring->tx_pending;
3125 ret = wait_for_reset(adapter);
3128 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
3129 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
3131 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
3132 ring->rx_pending, ring->tx_pending,
3133 adapter->req_rx_add_entries_per_subcrq,
3134 adapter->req_tx_entries_per_subcrq);
3138 static void ibmvnic_get_channels(struct net_device *netdev,
3139 struct ethtool_channels *channels)
3141 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3143 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
3144 channels->max_rx = adapter->max_rx_queues;
3145 channels->max_tx = adapter->max_tx_queues;
3147 channels->max_rx = IBMVNIC_MAX_QUEUES;
3148 channels->max_tx = IBMVNIC_MAX_QUEUES;
3151 channels->max_other = 0;
3152 channels->max_combined = 0;
3153 channels->rx_count = adapter->req_rx_queues;
3154 channels->tx_count = adapter->req_tx_queues;
3155 channels->other_count = 0;
3156 channels->combined_count = 0;
3159 static int ibmvnic_set_channels(struct net_device *netdev,
3160 struct ethtool_channels *channels)
3162 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3166 adapter->desired.rx_queues = channels->rx_count;
3167 adapter->desired.tx_queues = channels->tx_count;
3169 ret = wait_for_reset(adapter);
3172 (adapter->req_rx_queues != channels->rx_count ||
3173 adapter->req_tx_queues != channels->tx_count))
3175 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
3176 channels->rx_count, channels->tx_count,
3177 adapter->req_rx_queues, adapter->req_tx_queues);
3181 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
3183 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3186 switch (stringset) {
3188 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
3189 i++, data += ETH_GSTRING_LEN)
3190 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
3192 for (i = 0; i < adapter->req_tx_queues; i++) {
3193 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
3194 data += ETH_GSTRING_LEN;
3196 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
3197 data += ETH_GSTRING_LEN;
3199 snprintf(data, ETH_GSTRING_LEN,
3200 "tx%d_dropped_packets", i);
3201 data += ETH_GSTRING_LEN;
3204 for (i = 0; i < adapter->req_rx_queues; i++) {
3205 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
3206 data += ETH_GSTRING_LEN;
3208 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
3209 data += ETH_GSTRING_LEN;
3211 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
3212 data += ETH_GSTRING_LEN;
3216 case ETH_SS_PRIV_FLAGS:
3217 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
3218 strscpy(data + i * ETH_GSTRING_LEN,
3219 ibmvnic_priv_flags[i], ETH_GSTRING_LEN);
3226 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
3228 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3232 return ARRAY_SIZE(ibmvnic_stats) +
3233 adapter->req_tx_queues * NUM_TX_STATS +
3234 adapter->req_rx_queues * NUM_RX_STATS;
3235 case ETH_SS_PRIV_FLAGS:
3236 return ARRAY_SIZE(ibmvnic_priv_flags);
3242 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
3243 struct ethtool_stats *stats, u64 *data)
3245 struct ibmvnic_adapter *adapter = netdev_priv(dev);
3246 union ibmvnic_crq crq;
3250 memset(&crq, 0, sizeof(crq));
3251 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
3252 crq.request_statistics.cmd = REQUEST_STATISTICS;
3253 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
3254 crq.request_statistics.len =
3255 cpu_to_be32(sizeof(struct ibmvnic_statistics));
3257 /* Wait for data to be written */
3258 reinit_completion(&adapter->stats_done);
3259 rc = ibmvnic_send_crq(adapter, &crq);
3262 rc = ibmvnic_wait_for_completion(adapter, &adapter->stats_done, 10000);
3266 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
3267 data[i] = be64_to_cpu(IBMVNIC_GET_STAT
3268 (adapter, ibmvnic_stats[i].offset));
3270 for (j = 0; j < adapter->req_tx_queues; j++) {
3271 data[i] = adapter->tx_stats_buffers[j].packets;
3273 data[i] = adapter->tx_stats_buffers[j].bytes;
3275 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
3279 for (j = 0; j < adapter->req_rx_queues; j++) {
3280 data[i] = adapter->rx_stats_buffers[j].packets;
3282 data[i] = adapter->rx_stats_buffers[j].bytes;
3284 data[i] = adapter->rx_stats_buffers[j].interrupts;
3289 static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
3291 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3293 return adapter->priv_flags;
3296 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
3298 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3299 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
3302 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
3304 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
3309 static const struct ethtool_ops ibmvnic_ethtool_ops = {
3310 .get_drvinfo = ibmvnic_get_drvinfo,
3311 .get_msglevel = ibmvnic_get_msglevel,
3312 .set_msglevel = ibmvnic_set_msglevel,
3313 .get_link = ibmvnic_get_link,
3314 .get_ringparam = ibmvnic_get_ringparam,
3315 .set_ringparam = ibmvnic_set_ringparam,
3316 .get_channels = ibmvnic_get_channels,
3317 .set_channels = ibmvnic_set_channels,
3318 .get_strings = ibmvnic_get_strings,
3319 .get_sset_count = ibmvnic_get_sset_count,
3320 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
3321 .get_link_ksettings = ibmvnic_get_link_ksettings,
3322 .get_priv_flags = ibmvnic_get_priv_flags,
3323 .set_priv_flags = ibmvnic_set_priv_flags,
3326 /* Routines for managing CRQs/sCRQs */
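/* Re-register one sub-CRQ with the hypervisor after a reset: release the
 * IRQ mapping, zero the message queue and indirect buffer cursor, and
 * register the queue again via H_REG_SUB_CRQ.
 */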
3328 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
3329 struct ibmvnic_sub_crq_queue *scrq)
3334 netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
3339 free_irq(scrq->irq, scrq);
3340 irq_dispose_mapping(scrq->irq);
3345 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
3346 atomic_set(&scrq->used, 0);
3348 scrq->ind_buf.index = 0;
3350 netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
3354 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3355 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3359 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
3363 if (!adapter->tx_scrq || !adapter->rx_scrq)
3366 for (i = 0; i < adapter->req_tx_queues; i++) {
3367 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
3368 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
3373 for (i = 0; i < adapter->req_rx_queues; i++) {
3374 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
3375 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
3383 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
3384 struct ibmvnic_sub_crq_queue *scrq,
3387 struct device *dev = &adapter->vdev->dev;
3390 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
3393 /* Close the sub-crqs */
3395 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3396 adapter->vdev->unit_address,
3398 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3401 netdev_err(adapter->netdev,
3402 "Failed to release sub-CRQ %16lx, rc = %ld\n",
3407 dma_free_coherent(dev,
3409 scrq->ind_buf.indir_arr,
3410 scrq->ind_buf.indir_dma);
3412 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3414 free_pages((unsigned long)scrq->msgs, 2);
3418 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
3421 struct device *dev = &adapter->vdev->dev;
3422 struct ibmvnic_sub_crq_queue *scrq;
3425 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
3430 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
3432 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
3433 goto zero_page_failed;
3436 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
3438 if (dma_mapping_error(dev, scrq->msg_token)) {
3439 dev_warn(dev, "Couldn't map crq queue messages page\n");
3443 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
3444 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
3446 if (rc == H_RESOURCE)
3447 rc = ibmvnic_reset_crq(adapter);
3449 if (rc == H_CLOSED) {
3450 dev_warn(dev, "Partner adapter not ready, waiting.\n");
3452 dev_warn(dev, "Error %d registering sub-crq\n", rc);
3456 scrq->adapter = adapter;
3457 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
3458 scrq->ind_buf.index = 0;
3460 scrq->ind_buf.indir_arr =
3461 dma_alloc_coherent(dev,
3463 &scrq->ind_buf.indir_dma,
3466 if (!scrq->ind_buf.indir_arr)
3469 spin_lock_init(&scrq->lock);
3471 netdev_dbg(adapter->netdev,
3472 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
3473 scrq->crq_num, scrq->hw_irq, scrq->irq);
3479 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
3480 adapter->vdev->unit_address,
3482 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3484 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
3487 free_pages((unsigned long)scrq->msgs, 2);
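/* Release every TX and RX sub-CRQ along with its IRQ. @do_h_free is
 * forwarded to release_sub_crq_queue() and, as the name suggests,
 * controls whether the queue is also freed in the hypervisor.
 */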
3494 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
3498 if (adapter->tx_scrq) {
3499 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
3500 if (!adapter->tx_scrq[i])
3503 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
3505 ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
3506 if (adapter->tx_scrq[i]->irq) {
3507 free_irq(adapter->tx_scrq[i]->irq,
3508 adapter->tx_scrq[i]);
3509 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
3510 adapter->tx_scrq[i]->irq = 0;
3513 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
3517 kfree(adapter->tx_scrq);
3518 adapter->tx_scrq = NULL;
3519 adapter->num_active_tx_scrqs = 0;
3522 if (adapter->rx_scrq) {
3523 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
3524 if (!adapter->rx_scrq[i])
3527 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
3529 if (adapter->rx_scrq[i]->irq) {
3530 free_irq(adapter->rx_scrq[i]->irq,
3531 adapter->rx_scrq[i]);
3532 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
3533 adapter->rx_scrq[i]->irq = 0;
3536 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
3540 kfree(adapter->rx_scrq);
3541 adapter->rx_scrq = NULL;
3542 adapter->num_active_rx_scrqs = 0;
3546 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
3547 struct ibmvnic_sub_crq_queue *scrq)
3549 struct device *dev = &adapter->vdev->dev;
3552 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3553 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3555 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
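/* Enable the interrupt source of one sub-CRQ via H_VIOCTL. During a
 * mobility reset a stale interrupt may first need an H_EOI replay; on
 * XIVE systems that hcall returns H_FUNCTION, which is expected.
 */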
3560 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
3561 struct ibmvnic_sub_crq_queue *scrq)
3563 struct device *dev = &adapter->vdev->dev;
3566 if (scrq->hw_irq > 0x100000000ULL) {
3567 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
3571 if (test_bit(0, &adapter->resetting) &&
3572 adapter->reset_reason == VNIC_RESET_MOBILITY) {
3573 u64 val = (0xff000000) | scrq->hw_irq;
3575 rc = plpar_hcall_norets(H_EOI, val);
3576 /* H_EOI would fail with rc = H_FUNCTION when running
3577 * in XIVE mode, which is expected and not an error.
3579 if (rc && (rc != H_FUNCTION))
3580 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
3584 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
3585 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
3587 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
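/* Process TX completions for one sub-CRQ: free or consume the completed
 * skbs, return their buffers to the TX (or TSO) pool, and wake the
 * subqueue once no more than half of its descriptors remain in use. The
 * IRQ is re-enabled afterwards, re-checking for completions that raced
 * with it.
 */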
3592 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
3593 struct ibmvnic_sub_crq_queue *scrq)
3595 struct device *dev = &adapter->vdev->dev;
3596 struct ibmvnic_tx_pool *tx_pool;
3597 struct ibmvnic_tx_buff *txbuff;
3598 struct netdev_queue *txq;
3599 union sub_crq *next;
3604 while (pending_scrq(adapter, scrq)) {
3605 unsigned int pool = scrq->pool_index;
3606 int num_entries = 0;
3607 int total_bytes = 0;
3608 int num_packets = 0;
3610 next = ibmvnic_next_scrq(adapter, scrq);
3611 for (i = 0; i < next->tx_comp.num_comps; i++) {
3612 index = be32_to_cpu(next->tx_comp.correlators[i]);
3613 if (index & IBMVNIC_TSO_POOL_MASK) {
3614 tx_pool = &adapter->tso_pool[pool];
3615 index &= ~IBMVNIC_TSO_POOL_MASK;
3617 tx_pool = &adapter->tx_pool[pool];
3620 txbuff = &tx_pool->tx_buff[index];
3622 num_entries += txbuff->num_entries;
3624 total_bytes += txbuff->skb->len;
3625 if (next->tx_comp.rcs[i]) {
3626 dev_err(dev, "tx error %x\n",
3627 next->tx_comp.rcs[i]);
3628 dev_kfree_skb_irq(txbuff->skb);
3630 dev_consume_skb_irq(txbuff->skb);
3634 netdev_warn(adapter->netdev,
3635 "TX completion received with NULL socket buffer\n");
3637 tx_pool->free_map[tx_pool->producer_index] = index;
3638 tx_pool->producer_index =
3639 (tx_pool->producer_index + 1) %
3640 tx_pool->num_buffers;
3642 /* remove tx_comp scrq */
3643 next->tx_comp.first = 0;
3645 txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
3646 netdev_tx_completed_queue(txq, num_packets, total_bytes);
3648 if (atomic_sub_return(num_entries, &scrq->used) <=
3649 (adapter->req_tx_entries_per_subcrq / 2) &&
3650 __netif_subqueue_stopped(adapter->netdev,
3651 scrq->pool_index)) {
3652 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
3653 netdev_dbg(adapter->netdev, "Started queue %d\n",
3658 enable_scrq_irq(adapter, scrq);
3660 if (pending_scrq(adapter, scrq)) {
3661 disable_scrq_irq(adapter, scrq);
3668 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
3670 struct ibmvnic_sub_crq_queue *scrq = instance;
3671 struct ibmvnic_adapter *adapter = scrq->adapter;
3673 disable_scrq_irq(adapter, scrq);
3674 ibmvnic_complete_tx(adapter, scrq);
3679 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
3681 struct ibmvnic_sub_crq_queue *scrq = instance;
3682 struct ibmvnic_adapter *adapter = scrq->adapter;
3684 /* When booting a kdump kernel we can hit pending interrupts
3685 * prior to completing driver initialization.
3687 if (unlikely(adapter->state != VNIC_OPEN))
3690 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
3692 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3693 disable_scrq_irq(adapter, scrq);
3694 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3700 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3702 struct device *dev = &adapter->vdev->dev;
3703 struct ibmvnic_sub_crq_queue *scrq;
3707 for (i = 0; i < adapter->req_tx_queues; i++) {
3708 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3710 scrq = adapter->tx_scrq[i];
3711 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3715 dev_err(dev, "Error mapping irq\n");
3716 goto req_tx_irq_failed;
3719 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3720 adapter->vdev->unit_address, i);
3721 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
3722 0, scrq->name, scrq);
3725 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3727 irq_dispose_mapping(scrq->irq);
3728 goto req_tx_irq_failed;
3732 for (i = 0; i < adapter->req_rx_queues; i++) {
3733 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3735 scrq = adapter->rx_scrq[i];
3736 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3739 dev_err(dev, "Error mapping irq\n");
3740 goto req_rx_irq_failed;
3742 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3743 adapter->vdev->unit_address, i);
3744 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
3745 0, scrq->name, scrq);
3747 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3749 irq_dispose_mapping(scrq->irq);
3750 goto req_rx_irq_failed;
3756 for (j = 0; j < i; j++) {
3757 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3758 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
3760 i = adapter->req_tx_queues;
3762 for (j = 0; j < i; j++) {
3763 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
3764 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
3766 release_sub_crqs(adapter, 1);
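/* Allocate and register all requested TX and RX sub-CRQs. On a partial
 * allocation the requested queue counts are trimmed (never below the
 * negotiated minimums), spreading the shortfall across TX and RX before
 * the registered queues are assigned.
 */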
3770 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
3772 struct device *dev = &adapter->vdev->dev;
3773 struct ibmvnic_sub_crq_queue **allqueues;
3774 int registered_queues = 0;
3779 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3781 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
3785 for (i = 0; i < total_queues; i++) {
3786 allqueues[i] = init_sub_crq_queue(adapter);
3787 if (!allqueues[i]) {
3788 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3791 registered_queues++;
3794 /* Make sure we were able to register the minimum number of queues */
3795 if (registered_queues <
3796 adapter->min_tx_queues + adapter->min_rx_queues) {
3797 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3801 /* Distribute the failed allocated queues */
3802 for (i = 0; i < total_queues - registered_queues + more; i++) {
3803 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3806 if (adapter->req_rx_queues > adapter->min_rx_queues)
3807 adapter->req_rx_queues--;
3812 if (adapter->req_tx_queues > adapter->min_tx_queues)
3813 adapter->req_tx_queues--;
3820 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3821 sizeof(*adapter->tx_scrq), GFP_KERNEL);
3822 if (!adapter->tx_scrq)
3825 for (i = 0; i < adapter->req_tx_queues; i++) {
3826 adapter->tx_scrq[i] = allqueues[i];
3827 adapter->tx_scrq[i]->pool_index = i;
3828 adapter->num_active_tx_scrqs++;
3831 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3832 sizeof(*adapter->rx_scrq), GFP_KERNEL);
3833 if (!adapter->rx_scrq)
3836 for (i = 0; i < adapter->req_rx_queues; i++) {
3837 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3838 adapter->rx_scrq[i]->scrq_num = i;
3839 adapter->num_active_rx_scrqs++;
3846 kfree(adapter->tx_scrq);
3847 adapter->tx_scrq = NULL;
3849 for (i = 0; i < registered_queues; i++)
3850 release_sub_crq_queue(adapter, allqueues[i], 1);
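/* Derive the req_* settings from the desired values and the negotiated
 * limits, capping ring sizes so that one ring's worth of buffers fits
 * within IBMVNIC_MAX_LTB_SIZE, then send one REQUEST_CAPABILITY CRQ per
 * setting to the VNIC server.
 */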
3855 static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
3857 struct device *dev = &adapter->vdev->dev;
3858 union ibmvnic_crq crq;
3862 /* Sub-CRQ entries are 32 bytes long */
3863 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3865 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3866 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3867 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3871 if (adapter->desired.mtu)
3872 adapter->req_mtu = adapter->desired.mtu;
3874 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3876 if (!adapter->desired.tx_entries)
3877 adapter->desired.tx_entries =
3878 adapter->max_tx_entries_per_subcrq;
3879 if (!adapter->desired.rx_entries)
3880 adapter->desired.rx_entries =
3881 adapter->max_rx_add_entries_per_subcrq;
3883 max_entries = IBMVNIC_MAX_LTB_SIZE /
3884 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3886 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3887 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3888 adapter->desired.tx_entries = max_entries;
3891 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3892 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3893 adapter->desired.rx_entries = max_entries;
3896 if (adapter->desired.tx_entries)
3897 adapter->req_tx_entries_per_subcrq =
3898 adapter->desired.tx_entries;
3900 adapter->req_tx_entries_per_subcrq =
3901 adapter->max_tx_entries_per_subcrq;
3903 if (adapter->desired.rx_entries)
3904 adapter->req_rx_add_entries_per_subcrq =
3905 adapter->desired.rx_entries;
3907 adapter->req_rx_add_entries_per_subcrq =
3908 adapter->max_rx_add_entries_per_subcrq;
3910 if (adapter->desired.tx_queues)
3911 adapter->req_tx_queues =
3912 adapter->desired.tx_queues;
3914 adapter->req_tx_queues =
3915 adapter->opt_tx_comp_sub_queues;
3917 if (adapter->desired.rx_queues)
3918 adapter->req_rx_queues =
3919 adapter->desired.rx_queues;
3921 adapter->req_rx_queues =
3922 adapter->opt_rx_comp_queues;
3924 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3927 memset(&crq, 0, sizeof(crq));
3928 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3929 crq.request_capability.cmd = REQUEST_CAPABILITY;
3931 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3932 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3933 atomic_inc(&adapter->running_cap_crqs);
3934 ibmvnic_send_crq(adapter, &crq);
3936 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3937 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3938 atomic_inc(&adapter->running_cap_crqs);
3939 ibmvnic_send_crq(adapter, &crq);
3941 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3942 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3943 atomic_inc(&adapter->running_cap_crqs);
3944 ibmvnic_send_crq(adapter, &crq);
3946 crq.request_capability.capability =
3947 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3948 crq.request_capability.number =
3949 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3950 atomic_inc(&adapter->running_cap_crqs);
3951 ibmvnic_send_crq(adapter, &crq);
3953 crq.request_capability.capability =
3954 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3955 crq.request_capability.number =
3956 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3957 atomic_inc(&adapter->running_cap_crqs);
3958 ibmvnic_send_crq(adapter, &crq);
3960 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3961 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3962 atomic_inc(&adapter->running_cap_crqs);
3963 ibmvnic_send_crq(adapter, &crq);
3965 if (adapter->netdev->flags & IFF_PROMISC) {
3966 if (adapter->promisc_supported) {
3967 crq.request_capability.capability =
3968 cpu_to_be16(PROMISC_REQUESTED);
3969 crq.request_capability.number = cpu_to_be64(1);
3970 atomic_inc(&adapter->running_cap_crqs);
3971 ibmvnic_send_crq(adapter, &crq);
3974 crq.request_capability.capability =
3975 cpu_to_be16(PROMISC_REQUESTED);
3976 crq.request_capability.number = cpu_to_be64(0);
3977 atomic_inc(&adapter->running_cap_crqs);
3978 ibmvnic_send_crq(adapter, &crq);
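/* Check whether the descriptor at the queue's cursor has been posted by
 * the server, i.e. whether IBMVNIC_CRQ_CMD_RSP is set in its first byte.
 */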
3982 static int pending_scrq(struct ibmvnic_adapter *adapter,
3983 struct ibmvnic_sub_crq_queue *scrq)
3985 union sub_crq *entry = &scrq->msgs[scrq->cur];
3988 rc = !!(entry->generic.first & IBMVNIC_CRQ_CMD_RSP);
3990 /* Ensure that the SCRQ valid flag is loaded prior to loading the
3991 * contents of the SCRQ descriptor
3998 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3999 struct ibmvnic_sub_crq_queue *scrq)
4001 union sub_crq *entry;
4002 unsigned long flags;
4004 spin_lock_irqsave(&scrq->lock, flags);
4005 entry = &scrq->msgs[scrq->cur];
4006 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4007 if (++scrq->cur == scrq->size)
4012 spin_unlock_irqrestore(&scrq->lock, flags);
4014 /* Ensure that the SCRQ valid flag is loaded prior to loading the
4015 * contents of the SCRQ descriptor
4022 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
4024 struct ibmvnic_crq_queue *queue = &adapter->crq;
4025 union ibmvnic_crq *crq;
4027 crq = &queue->msgs[queue->cur];
4028 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
4029 if (++queue->cur == queue->size)
4038 static void print_subcrq_error(struct device *dev, int rc, const char *func)
4042 dev_warn_ratelimited(dev,
4043 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
4047 dev_warn_ratelimited(dev,
4048 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
4052 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
4057 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
4058 u64 remote_handle, u64 ioba, u64 num_entries)
4060 unsigned int ua = adapter->vdev->unit_address;
4061 struct device *dev = &adapter->vdev->dev;
4064 /* Make sure the hypervisor sees the complete request */
4066 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
4067 cpu_to_be64(remote_handle),
4071 print_subcrq_error(dev, rc, __func__);
4076 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
4077 union ibmvnic_crq *crq)
4079 unsigned int ua = adapter->vdev->unit_address;
4080 struct device *dev = &adapter->vdev->dev;
4081 u64 *u64_crq = (u64 *)crq;
4084 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
4085 (unsigned long)cpu_to_be64(u64_crq[0]),
4086 (unsigned long)cpu_to_be64(u64_crq[1]));
4088 if (!adapter->crq.active &&
4089 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
4090 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
4094 /* Make sure the hypervisor sees the complete request */
4097 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
4098 cpu_to_be64(u64_crq[0]),
4099 cpu_to_be64(u64_crq[1]));
4102 if (rc == H_CLOSED) {
4103 dev_warn(dev, "CRQ Queue closed\n");
4104 /* do not reset, report the fail, wait for passive init from server */
4107 dev_warn(dev, "Send error (rc=%d)\n", rc);
4113 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
4115 struct device *dev = &adapter->vdev->dev;
4116 union ibmvnic_crq crq;
4120 memset(&crq, 0, sizeof(crq));
4121 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
4122 crq.generic.cmd = IBMVNIC_CRQ_INIT;
4123 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
4126 rc = ibmvnic_send_crq(adapter, &crq);
4132 } while (retries > 0);
4135 dev_err(dev, "Failed to send init request, rc = %d\n", rc);
4142 struct vnic_login_client_data {
4148 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
4152 /* Calculate the amount of buffer space needed for the
4153 * vnic client data in the login buffer. There are four entries,
4154 * OS name, LPAR name, device name, and a null last entry.
4156 len = 4 * sizeof(struct vnic_login_client_data);
4157 len += 6; /* "Linux" plus NULL */
4158 len += strlen(utsname()->nodename) + 1;
4159 len += strlen(adapter->netdev->name) + 1;
4164 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
4165 struct vnic_login_client_data *vlcd)
4167 const char *os_name = "Linux";
4170 /* Type 1 - LPAR OS */
4172 len = strlen(os_name) + 1;
4173 vlcd->len = cpu_to_be16(len);
4174 strscpy(vlcd->name, os_name, len);
4175 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4177 /* Type 2 - LPAR name */
4179 len = strlen(utsname()->nodename) + 1;
4180 vlcd->len = cpu_to_be16(len);
4181 strscpy(vlcd->name, utsname()->nodename, len);
4182 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
4184 /* Type 3 - device name */
4186 len = strlen(adapter->netdev->name) + 1;
4187 vlcd->len = cpu_to_be16(len);
4188 strscpy(vlcd->name, adapter->netdev->name, len);
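/* Build and send the LOGIN request: a DMA-mapped buffer listing the TX
 * and RX sub-CRQ handles plus the client data (OS, LPAR and device
 * names), together with a response buffer for the server to fill in.
 */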
4191 static int send_login(struct ibmvnic_adapter *adapter)
4193 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
4194 struct ibmvnic_login_buffer *login_buffer;
4195 struct device *dev = &adapter->vdev->dev;
4196 struct vnic_login_client_data *vlcd;
4197 dma_addr_t rsp_buffer_token;
4198 dma_addr_t buffer_token;
4199 size_t rsp_buffer_size;
4200 union ibmvnic_crq crq;
4201 int client_data_len;
4208 if (!adapter->tx_scrq || !adapter->rx_scrq) {
4209 netdev_err(adapter->netdev,
4210 "RX or TX queues are not allocated, device login failed\n");
4214 release_login_buffer(adapter);
4215 release_login_rsp_buffer(adapter);
4217 client_data_len = vnic_client_data_len(adapter);
4220 sizeof(struct ibmvnic_login_buffer) +
4221 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
4224 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
4226 goto buf_alloc_failed;
4228 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
4230 if (dma_mapping_error(dev, buffer_token)) {
4231 dev_err(dev, "Couldn't map login buffer\n");
4232 goto buf_map_failed;
4235 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
4236 sizeof(u64) * adapter->req_tx_queues +
4237 sizeof(u64) * adapter->req_rx_queues +
4238 sizeof(u64) * adapter->req_rx_queues +
4239 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
4241 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
4242 if (!login_rsp_buffer)
4243 goto buf_rsp_alloc_failed;
4245 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
4246 rsp_buffer_size, DMA_FROM_DEVICE);
4247 if (dma_mapping_error(dev, rsp_buffer_token)) {
4248 dev_err(dev, "Couldn't map login rsp buffer\n");
4249 goto buf_rsp_map_failed;
4252 adapter->login_buf = login_buffer;
4253 adapter->login_buf_token = buffer_token;
4254 adapter->login_buf_sz = buffer_size;
4255 adapter->login_rsp_buf = login_rsp_buffer;
4256 adapter->login_rsp_buf_token = rsp_buffer_token;
4257 adapter->login_rsp_buf_sz = rsp_buffer_size;
4259 login_buffer->len = cpu_to_be32(buffer_size);
4260 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
4261 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
4262 login_buffer->off_txcomp_subcrqs =
4263 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
4264 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
4265 login_buffer->off_rxcomp_subcrqs =
4266 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
4267 sizeof(u64) * adapter->req_tx_queues);
4268 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
4269 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
4271 tx_list_p = (__be64 *)((char *)login_buffer +
4272 sizeof(struct ibmvnic_login_buffer));
4273 rx_list_p = (__be64 *)((char *)login_buffer +
4274 sizeof(struct ibmvnic_login_buffer) +
4275 sizeof(u64) * adapter->req_tx_queues);
4277 for (i = 0; i < adapter->req_tx_queues; i++) {
4278 if (adapter->tx_scrq[i]) {
4280 cpu_to_be64(adapter->tx_scrq[i]->crq_num);
4284 for (i = 0; i < adapter->req_rx_queues; i++) {
4285 if (adapter->rx_scrq[i]) {
4287 cpu_to_be64(adapter->rx_scrq[i]->crq_num);
4291 /* Insert vNIC login client data */
4292 vlcd = (struct vnic_login_client_data *)
4293 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
4294 login_buffer->client_data_offset =
4295 cpu_to_be32((char *)vlcd - (char *)login_buffer);
4296 login_buffer->client_data_len = cpu_to_be32(client_data_len);
4298 vnic_add_client_data(adapter, vlcd);
4300 netdev_dbg(adapter->netdev, "Login Buffer:\n");
4301 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
4302 netdev_dbg(adapter->netdev, "%016lx\n",
4303 ((unsigned long *)(adapter->login_buf))[i]);
4306 memset(&crq, 0, sizeof(crq));
4307 crq.login.first = IBMVNIC_CRQ_CMD;
4308 crq.login.cmd = LOGIN;
4309 crq.login.ioba = cpu_to_be32(buffer_token);
4310 crq.login.len = cpu_to_be32(buffer_size);
4312 adapter->login_pending = true;
4313 rc = ibmvnic_send_crq(adapter, &crq);
4315 adapter->login_pending = false;
4316 netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
4317 goto buf_rsp_map_failed;
4323 kfree(login_rsp_buffer);
4324 adapter->login_rsp_buf = NULL;
4325 buf_rsp_alloc_failed:
4326 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
4328 kfree(login_buffer);
4329 adapter->login_buf = NULL;
4334 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
4337 union ibmvnic_crq crq;
4339 memset(&crq, 0, sizeof(crq));
4340 crq.request_map.first = IBMVNIC_CRQ_CMD;
4341 crq.request_map.cmd = REQUEST_MAP;
4342 crq.request_map.map_id = map_id;
4343 crq.request_map.ioba = cpu_to_be32(addr);
4344 crq.request_map.len = cpu_to_be32(len);
4345 return ibmvnic_send_crq(adapter, &crq);
4348 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
4350 union ibmvnic_crq crq;
4352 memset(&crq, 0, sizeof(crq));
4353 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
4354 crq.request_unmap.cmd = REQUEST_UNMAP;
4355 crq.request_unmap.map_id = map_id;
4356 return ibmvnic_send_crq(adapter, &crq);
4359 static void send_query_map(struct ibmvnic_adapter *adapter)
4361 union ibmvnic_crq crq;
4363 memset(&crq, 0, sizeof(crq));
4364 crq.query_map.first = IBMVNIC_CRQ_CMD;
4365 crq.query_map.cmd = QUERY_MAP;
4366 ibmvnic_send_crq(adapter, &crq);
/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_query_cap(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}

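/* Every query above increments running_cap_crqs; handle_query_cap_rsp()
 * decrements it per response and, once the count drops to zero, moves on to
 * send_request_cap(), so the request phase starts only after all queries
 * have been answered.
 */
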
static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
{
	int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	adapter->ip_offload_tok =
		dma_map_single(dev,
			       &adapter->ip_offload_buf,
			       buf_sz,
			       DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map offload buffer\n");
		return;
	}

	memset(&crq, 0, sizeof(crq));
	crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
	crq.query_ip_offload.len = cpu_to_be32(buf_sz);
	crq.query_ip_offload.ioba =
	    cpu_to_be32(adapter->ip_offload_tok);

	ibmvnic_send_crq(adapter, &crq);
}

static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	struct device *dev = &adapter->vdev->dev;
	netdev_features_t old_hw_features = 0;
	union ibmvnic_crq crq;

	adapter->ip_offload_ctrl_tok =
		dma_map_single(dev,
			       ctrl_buf,
			       sizeof(adapter->ip_offload_ctrl),
			       DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
	ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
	ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
	ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
	ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
	ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
	ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;

	/* large_rx disabled for now, additional features needed */
	ctrl_buf->large_rx_ipv4 = 0;
	ctrl_buf->large_rx_ipv6 = 0;

	if (adapter->state != VNIC_PROBING) {
		old_hw_features = adapter->netdev->hw_features;
		adapter->netdev->hw_features = 0;
	}

	adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->hw_features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->hw_features |= NETIF_F_RXCSUM;

	if (buf->large_tx_ipv4)
		adapter->netdev->hw_features |= NETIF_F_TSO;
	if (buf->large_tx_ipv6)
		adapter->netdev->hw_features |= NETIF_F_TSO6;

	if (adapter->state == VNIC_PROBING) {
		adapter->netdev->features |= adapter->netdev->hw_features;
	} else if (old_hw_features != adapter->netdev->hw_features) {
		netdev_features_t tmp = 0;

		/* disable features no longer supported */
		adapter->netdev->features &= adapter->netdev->hw_features;
		/* turn on features now supported if previously enabled */
		tmp = (old_hw_features ^ adapter->netdev->hw_features) &
			adapter->netdev->hw_features;
		adapter->netdev->features |=
				tmp & adapter->netdev->wanted_features;
	}

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}

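/* After a reset the server's offload support may have changed (for example
 * a failover to different backing hardware), so the hw_features rebuild
 * above drops features the server no longer offers and, via
 * wanted_features, re-enables ones the user had previously requested.
 */
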
static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
				struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (crq->get_vpd_size_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD size, rc=%x\n",
			crq->get_vpd_size_rsp.rc.code);
		complete(&adapter->fw_done);
		return;
	}

	adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
	complete(&adapter->fw_done);
}

static void handle_vpd_rsp(union ibmvnic_crq *crq,
			   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned char *substr = NULL;
	u8 fw_level_len = 0;

	memset(adapter->fw_version, 0, 32);

	dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
			 DMA_FROM_DEVICE);

	if (crq->get_vpd_rsp.rc.code) {
		dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
			crq->get_vpd_rsp.rc.code);
		goto complete;
	}

	/* get the position of the firmware version info
	 * located after the ASCII 'RM' substring in the buffer
	 */
	substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
	if (!substr) {
		dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
		goto complete;
	}

	/* get length of firmware level ASCII substring */
	if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
		fw_level_len = *(substr + 2);
	} else {
		dev_info(dev, "Length of FW substr extrapolated VPD buff\n");
		goto complete;
	}

	/* copy firmware version string from vpd into adapter */
	if ((substr + 3 + fw_level_len) <
	    (adapter->vpd->buff + adapter->vpd->len)) {
		strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
	} else {
		dev_info(dev, "FW substr extrapolated VPD buff\n");
	}

complete:
	if (adapter->fw_version[0] == '\0')
		strscpy((char *)adapter->fw_version, "N/A", sizeof(adapter->fw_version));
	complete(&adapter->fw_done);
}

static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	send_control_ip_offload(adapter);
}

static const char *ibmvnic_fw_err_cause(u16 cause)
{
	switch (cause) {
	case ADAPTER_PROBLEM:
		return "adapter problem";
	case BUS_PROBLEM:
		return "bus problem";
	case FW_PROBLEM:
		return "firmware problem";
	case DD_PROBLEM:
		return "device driver problem";
	case EEH_RECOVERY:
		return "EEH recovery";
	case FW_UPDATED:
		return "firmware updated";
	case LOW_MEMORY:
		return "low memory";
	default:
		return "unknown";
	}
}

static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u16 cause;

	cause = be16_to_cpu(crq->error_indication.error_cause);

	dev_warn_ratelimited(dev,
			     "Firmware reports %serror, cause: %s. Starting recovery...\n",
			     crq->error_indication.flags
				& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
			     ibmvnic_fw_err_cause(cause));

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
	else
		ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
}

static int handle_change_mac_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		goto out;
	}
	/* crq->change_mac_addr.mac_addr is the requested one
	 * crq->change_mac_addr_rsp.mac_addr is the returned valid one.
	 */
	eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]);
	ether_addr_copy(adapter->mac_addr,
			&crq->change_mac_addr_rsp.mac_addr[0]);
out:
	complete(&adapter->fw_done);
	return rc;
}

static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long)be64_to_cpu(crq->request_capability_rsp.number),
			 name);
		if (be16_to_cpu(crq->request_capability_rsp.capability) ==
		    REQ_MTU) {
			pr_err("mtu of %llu is not supported. Reverting.\n",
			       *req_value);
			*req_value = adapter->fallback.mtu;
		} else {
			*req_value =
				be64_to_cpu(crq->request_capability_rsp.number);
		}
		send_request_cap(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		send_query_ip_offload(adapter);
	}
}

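/* On PARTIALSUCCESS the server granted less than was requested; the driver
 * adopts the granted value (or the fallback MTU) and retries
 * send_request_cap() until the server accepts the full set of requests.
 */
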
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	u64 *tx_handle_array;
	u64 *rx_handle_array;
	int num_tx_pools;
	int num_rx_pools;
	u64 *size_array;
	int i;

	/* CHECK: Test/set of login_pending does not need to be atomic
	 * because only ibmvnic_tasklet tests/clears this.
	 */
	if (!adapter->login_pending) {
		netdev_warn(netdev, "Ignoring unexpected login response\n");
		return 0;
	}
	adapter->login_pending = false;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->init_done_rc = login_rsp_crq->generic.rc.code;
		complete(&adapter->init_done);
		return 0;
	}

	if (adapter->failover_pending) {
		adapter->init_done_rc = -EAGAIN;
		netdev_dbg(netdev, "Failover pending, ignoring login response\n");
		complete(&adapter->init_done);
		/* login response buffer will be released on reset */
		return 0;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity check: the login and login response must agree on the
	 * number of queues negotiated.
	 */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		return -EIO;
	}
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
	/* variable buffer sizes are not supported, so just read the
	 * first entry.
	 */
	adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);

	num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
				  be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
	rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
				  be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));

	for (i = 0; i < num_tx_pools; i++)
		adapter->tx_scrq[i]->handle = tx_handle_array[i];

	for (i = 0; i < num_rx_pools; i++)
		adapter->rx_scrq[i]->handle = rx_handle_array[i];

	adapter->num_active_tx_scrqs = num_tx_pools;
	adapter->num_active_rx_scrqs = num_rx_pools;
	release_login_rsp_buffer(adapter);
	release_login_buffer(adapter);
	complete(&adapter->init_done);

	return 0;
}

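/* The login response describes itself through offsets (off_rxadd_buff_size,
 * off_txsubm_subcrqs, off_rxadd_subcrqs); the arrays recovered above hold
 * the server-assigned queue handles used for all later sub-CRQ operations.
 */
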
static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
		   crq->query_map_rsp.page_size,
		   __be32_to_cpu(crq->query_map_rsp.tot_pages),
		   __be32_to_cpu(crq->query_map_rsp.free_pages));
}

static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_crqs);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_crqs));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;
	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		send_request_cap(adapter, 0);
	}
}

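/* At this point every queried capability has been recorded; the min/max
 * bounds gathered here constrain the values negotiated by
 * send_request_cap() and checked in handle_request_cap_rsp() above.
 */
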
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;
	int rc;

	memset(&crq, 0, sizeof(crq));
	crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
	crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;

	mutex_lock(&adapter->fw_lock);
	adapter->fw_done_rc = 0;
	reinit_completion(&adapter->fw_done);

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
	if (rc) {
		mutex_unlock(&adapter->fw_lock);
		return rc;
	}

	mutex_unlock(&adapter->fw_lock);
	return adapter->fw_done_rc ? -EIO : 0;
}

static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
				       struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;
	__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);

	rc = crq->query_phys_parms_rsp.rc.code;
	if (rc) {
		netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
		return rc;
	}
	switch (rspeed) {
	case IBMVNIC_10MBPS:
		adapter->speed = SPEED_10;
		break;
	case IBMVNIC_100MBPS:
		adapter->speed = SPEED_100;
		break;
	case IBMVNIC_1GBPS:
		adapter->speed = SPEED_1000;
		break;
	case IBMVNIC_10GBPS:
		adapter->speed = SPEED_10000;
		break;
	case IBMVNIC_25GBPS:
		adapter->speed = SPEED_25000;
		break;
	case IBMVNIC_40GBPS:
		adapter->speed = SPEED_40000;
		break;
	case IBMVNIC_50GBPS:
		adapter->speed = SPEED_50000;
		break;
	case IBMVNIC_100GBPS:
		adapter->speed = SPEED_100000;
		break;
	case IBMVNIC_200GBPS:
		adapter->speed = SPEED_200000;
		break;
	default:
		if (netif_carrier_ok(netdev))
			netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
		adapter->speed = SPEED_UNKNOWN;
	}
	if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
		adapter->duplex = DUPLEX_FULL;
	else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
		adapter->duplex = DUPLEX_HALF;
	else
		adapter->duplex = DUPLEX_UNKNOWN;

	return rc;
}

static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long)be64_to_cpu(u64_crq[0]),
		   (unsigned long)be64_to_cpu(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			/* Discard any stale login responses from prev reset.
			 * CHECK: should we clear even on INIT_COMPLETE?
			 */
			adapter->login_pending = false;

			if (adapter->state == VNIC_DOWN)
				rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
			else
				rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

			if (rc && rc != -EBUSY) {
				/* We were unable to schedule the failover
				 * reset either because the adapter was still
				 * probing (eg: during kexec) or we could not
				 * allocate memory. Clear the failover_pending
				 * flag since no one else will. We ignore
				 * EBUSY because it means either FAILOVER reset
				 * is already scheduled or the adapter is
				 * on its way out.
				 */
				netdev_err(netdev,
					   "Error %ld scheduling failover reset\n",
					   rc);
				adapter->failover_pending = false;
			}

			if (!completion_done(&adapter->init_done)) {
				complete(&adapter->init_done);
				if (!adapter->init_done_rc)
					adapter->init_done_rc = -EAGAIN;
			}

			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			adapter->crq.active = true;
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		adapter->crq.active = false;
		/* terminate any thread waiting for a response
		 * from the device
		 */
		if (!completion_done(&adapter->fw_done)) {
			adapter->fw_done_rc = -EIO;
			complete(&adapter->fw_done);
		}
		if (!completion_done(&adapter->stats_done))
			complete(&adapter->stats_done);
		if (test_bit(0, &adapter->resetting))
			adapter->force_reset_recovery = true;
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			adapter->failover_pending = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		ibmvnic_version =
		    be16_to_cpu(crq->version_exchange_rsp.version);
		dev_info(dev, "Partner protocol version is %d\n",
			 ibmvnic_version);
		send_query_cap(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		if (adapter->phys_link_state && adapter->logical_link_state)
			netif_carrier_on(netdev);
		else
			netif_carrier_off(netdev);
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	case QUERY_PHYS_PARMS_RSP:
		adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

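/* Dispatch is two-level: first on the transport type in gen_crq->first
 * (INIT handshake, transport event, or command response), then on the
 * command code, so transport failures are handled before any command
 * processing is attempted.
 */
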
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

static void ibmvnic_tasklet(struct tasklet_struct *t)
{
	struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			/* This barrier makes sure ibmvnic_next_crq()'s
			 * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
			 * before ibmvnic_handle_crq()'s
			 * switch(gen_crq->first) and switch(gen_crq->cmd).
			 */
			dma_rmb();
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capabilities responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capabilities CRQ's were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

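/* The drain loop above runs entirely under queue->lock with interrupts
 * disabled, so CRQ message handling is serialized against other users of
 * the CRQ lock.
 */
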
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	if (!crq->msgs)
		return -EINVAL;

	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;
	crq->active = false;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	crq->active = false;
}

static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */
	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_setup(&adapter->tasklet, ibmvnic_tasklet);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
		 adapter->vdev->unit_address);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	/* process any CRQs that were queued before we enabled interrupts */
	tasklet_schedule(&adapter->tasklet);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

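/* The error labels above unwind in reverse order of setup: a failed IRQ
 * request frees the registered CRQ, a failed registration unmaps the DMA
 * buffer, and a failed mapping frees the page.
 */
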
static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(20000);
	u64 old_num_rx_queues = adapter->req_rx_queues;
	u64 old_num_tx_queues = adapter->req_tx_queues;
	int rc;

	adapter->from_passive_init = false;

	if (reset)
		reinit_completion(&adapter->init_done);

	adapter->init_done_rc = 0;
	rc = ibmvnic_send_crq_init(adapter);
	if (rc) {
		dev_err(dev, "Send crq init failed with error %d\n", rc);
		return rc;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (reset &&
	    test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
	}

	return rc;
}

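/* ibmvnic_reset_init() is shared by first-time probe (reset == false) and
 * the reset paths (reset == true); only the reset paths reinitialize
 * init_done and may reuse the existing sub-CRQs when the negotiated queue
 * counts are unchanged.
 */
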
static struct device_attribute dev_attr_failover;

static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	bool init_success;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->login_pending = false;
	memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
	/* map_ids start at 1, so ensure map_id 0 is always "in-use" */
	bitmap_set(adapter->map_ids, 0, 1);

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	eth_hw_addr_set(netdev, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
			  __ibmvnic_delayed_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	spin_lock_init(&adapter->rwi_lock);
	spin_lock_init(&adapter->state_lock);
	mutex_init(&adapter->fw_lock);
	init_completion(&adapter->init_done);
	init_completion(&adapter->fw_done);
	init_completion(&adapter->reset_done);
	init_completion(&adapter->stats_done);
	clear_bit(0, &adapter->resetting);
	adapter->prev_rx_buf_sz = 0;
	adapter->prev_mtu = 0;

	init_success = false;
	do {
		rc = init_crq_queue(adapter);
		if (rc) {
			dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
				rc);
			goto ibmvnic_init_fail;
		}

		rc = ibmvnic_reset_init(adapter, false);
	} while (rc == -EAGAIN);

	/* We are ignoring the error from ibmvnic_reset_init() assuming that the
	 * partner is not ready. CRQ is not active. When the partner becomes
	 * ready, we will do the passive init reset.
	 */

	if (!rc)
		init_success = true;

	rc = init_stats_buffers(adapter);
	if (rc)
		goto ibmvnic_init_fail;

	rc = init_stats_token(adapter);
	if (rc)
		goto ibmvnic_stats_fail;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_dev_file_err;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	if (init_success) {
		adapter->state = VNIC_PROBED;
		netdev->mtu = adapter->req_mtu - ETH_HLEN;
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
	} else {
		adapter->state = VNIC_DOWN;
	}

	adapter->wait_for_reset = false;
	adapter->last_reset_time = jiffies;
	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_dev_file_err:
	release_stats_token(adapter);

ibmvnic_stats_fail:
	release_stats_buffers(adapter);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	mutex_destroy(&adapter->fw_lock);
	free_netdev(netdev);

	return rc;
}

static void ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);

	/* If ibmvnic_reset() is scheduling a reset, wait for it to
	 * finish. Then, set the state to REMOVING to prevent it from
	 * scheduling any more work and to have reset functions ignore
	 * any resets that have already been scheduled. Drop the lock
	 * after setting state, so __ibmvnic_reset() which is called
	 * from the flush_work() below, can make progress.
	 */
	spin_lock(&adapter->rwi_lock);
	adapter->state = VNIC_REMOVING;
	spin_unlock(&adapter->rwi_lock);

	spin_unlock_irqrestore(&adapter->state_lock, flags);

	flush_work(&adapter->ibmvnic_reset);
	flush_delayed_work(&adapter->ibmvnic_delayed_reset);

	rtnl_lock();
	unregister_netdevice(netdev);

	release_resources(adapter);
	release_rx_pools(adapter);
	release_tx_pools(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	rtnl_unlock();
	mutex_destroy(&adapter->fw_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
}

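/* The "failover" sysfs attribute below lets an administrator trigger a
 * client failover by hand, e.g. (path illustrative):
 *   echo 1 > /sys/bus/vio/devices/<unit-address>/failover
 * It first asks firmware via H_VIOCTL and falls back to scheduling a
 * FAILOVER reset only if the hcall fails.
 */
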
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		goto last_resort;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev,
			   "H_VIOCTL initiated failover failed, rc %ld\n",
			   rc);
		goto last_resort;
	}

	return count;

last_resort:
	netdev_dbg(netdev, "Trying to send CRQ_CMD, the last resort\n");
	ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);

	return count;
}

static DEVICE_ATTR_WO(failover);

static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < adapter->num_active_rx_pools; i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);