/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Network Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ntb.h>
#include <linux/ntb_transport.h>

#define NTB_NETDEV_VER "0.7"

MODULE_DESCRIPTION(KBUILD_MODNAME);
MODULE_VERSION(NTB_NETDEV_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

/* Time in usecs for tx resource reaper */
static unsigned int tx_time = 1;

/* Number of descriptors to free before resuming tx */
static unsigned int tx_start = 10;

/* Number of descriptors still available before stopping upper layer tx */
static unsigned int tx_stop = 5;

struct ntb_netdev {
        struct list_head list;
        struct pci_dev *pdev;
        struct net_device *ndev;
        struct ntb_transport_qp *qp;
        struct timer_list tx_timer;
};

#define NTB_TX_TIMEOUT_MS 1000
#define NTB_RXQ_SIZE 100

static LIST_HEAD(dev_list);

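/* Link event callback: mirror the NTB transport link state onto the
 * netdev carrier so the stack only transmits while the peer is up.
 */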
static void ntb_netdev_event_handler(void *data, int link_is_up)
{
        struct net_device *ndev = data;
        struct ntb_netdev *dev = netdev_priv(ndev);

        netdev_dbg(ndev, "Event %x, Link %x\n", link_is_up,
                   ntb_transport_link_query(dev->qp));

        if (link_is_up) {
                if (ntb_transport_link_query(dev->qp))
                        netif_carrier_on(ndev);
        } else {
                netif_carrier_off(ndev);
        }
}

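/* Rx completion callback: pass the received skb up the stack, update
 * the rx statistics and post a replacement buffer to the transport.
 */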
static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
                                  void *data, int len)
{
        struct net_device *ndev = qp_data;
        struct sk_buff *skb = data;
        int rc;

        if (!skb)
                return;

        netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);

        if (len < 0) {
                ndev->stats.rx_errors++;
                ndev->stats.rx_length_errors++;
                goto enqueue_again;
        }

        skb_put(skb, len);
        skb->protocol = eth_type_trans(skb, ndev);
        skb->ip_summed = CHECKSUM_NONE;

        if (netif_rx(skb) == NET_RX_DROP) {
                ndev->stats.rx_errors++;
                ndev->stats.rx_dropped++;
        } else {
                ndev->stats.rx_packets++;
                ndev->stats.rx_bytes += len;
        }

        skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
        if (!skb) {
                ndev->stats.rx_errors++;
                ndev->stats.rx_frame_errors++;
                return;
        }

enqueue_again:
        rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
        if (rc) {
                dev_kfree_skb(skb);
                ndev->stats.rx_errors++;
                ndev->stats.rx_fifo_errors++;
        }
}

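/* Tx flow control: stop the queue when free tx entries run low,
 * re-check under a memory barrier to close the race with the
 * completion path, and arm the reaper timer to restart the queue.
 */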
static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev,
                                      struct ntb_transport_qp *qp, int size)
{
        struct ntb_netdev *dev = netdev_priv(netdev);

        netif_stop_queue(netdev);
        /* Make sure to see the latest value of ntb_transport_tx_free_entry()
         * since the queue was last started.
         */
        smp_mb();
        if (likely(ntb_transport_tx_free_entry(qp) < size)) {
                mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
                return -EBUSY;
        }

        netif_start_queue(netdev);
        return 0;
}

static int ntb_netdev_maybe_stop_tx(struct net_device *ndev,
                                    struct ntb_transport_qp *qp, int size)
{
        if (netif_queue_stopped(ndev) ||
            (ntb_transport_tx_free_entry(qp) >= size))
                return 0;

        return __ntb_netdev_maybe_stop_tx(ndev, qp, size);
}

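/* Tx completion callback: account the completed skb, free it and wake
 * the queue once enough transport entries have been released.
 */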
static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
                                  void *data, int len)
{
        struct net_device *ndev = qp_data;
        struct ntb_netdev *dev = netdev_priv(ndev);
        struct sk_buff *skb = data;

        if (!skb || !ndev)
                return;

        if (len > 0) {
                ndev->stats.tx_packets++;
                ndev->stats.tx_bytes += skb->len;
        } else {
                ndev->stats.tx_errors++;
                ndev->stats.tx_aborted_errors++;
        }
        dev_kfree_skb(skb);

        if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) {
                /* Make sure anybody stopping the queue after this sees the new
                 * value of ntb_transport_tx_free_entry()
                 */
                smp_mb();
                if (netif_queue_stopped(ndev))
                        netif_wake_queue(ndev);
        }
}

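/* ndo_start_xmit: hand the skb to the NTB transport; the queue is
 * stopped pre-emptively once free tx entries drop below tx_stop.
 */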
static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
                                         struct net_device *ndev)
{
        struct ntb_netdev *dev = netdev_priv(ndev);
        int rc;

        ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);

        rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
        if (rc)
                goto err;

        /* check for next submit */
        ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);
        return NETDEV_TX_OK;

err:
        ndev->stats.tx_dropped++;
        ndev->stats.tx_errors++;
        return NETDEV_TX_BUSY;
}

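/* Tx resource reaper: after the queue has been stopped, periodically
 * re-check the free entry count and wake the queue when enough
 * entries are available again.
 */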
static void ntb_netdev_tx_timer(unsigned long data)
{
        struct net_device *ndev = (struct net_device *)data;
        struct ntb_netdev *dev = netdev_priv(ndev);

        if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
                /* tx_time is specified in usecs */
                mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
        } else {
                /* Make sure anybody stopping the queue after this sees the new
                 * value of ntb_transport_tx_free_entry()
                 */
                smp_mb();
                if (netif_queue_stopped(ndev))
                        netif_wake_queue(ndev);
        }
}

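/* ndo_open: pre-post empty rx buffers, arm the tx reaper timer and
 * bring the transport link up.
 */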
static int ntb_netdev_open(struct net_device *ndev)
{
        struct ntb_netdev *dev = netdev_priv(ndev);
        struct sk_buff *skb;
        int rc, i, len;

        /* Add some empty rx bufs */
        for (i = 0; i < NTB_RXQ_SIZE; i++) {
                skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
                if (!skb) {
                        rc = -ENOMEM;
                        goto err;
                }
                rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
                                              ndev->mtu + ETH_HLEN);
                if (rc) {
                        dev_kfree_skb(skb);
                        goto err;
                }
        }

        setup_timer(&dev->tx_timer, ntb_netdev_tx_timer, (unsigned long)ndev);

        netif_carrier_off(ndev);
        ntb_transport_link_up(dev->qp);
        netif_start_queue(ndev);
        return 0;

err:
        while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
                dev_kfree_skb(skb);
        return rc;
}

static int ntb_netdev_close(struct net_device *ndev)
{
        struct ntb_netdev *dev = netdev_priv(ndev);
        struct sk_buff *skb;
        int len;

        ntb_transport_link_down(dev->qp);
        while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
                dev_kfree_skb(skb);

        del_timer_sync(&dev->tx_timer);
        return 0;
}

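/* ndo_change_mtu: the MTU is bounded by the transport frame size; when
 * the device is running, the link is cycled and, if the MTU grows, the
 * posted rx buffers are replaced with larger ones.
 */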
static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
{
        struct ntb_netdev *dev = netdev_priv(ndev);
        struct sk_buff *skb;
        int len, rc;

        if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN)
                return -EINVAL;

        if (!netif_running(ndev)) {
                ndev->mtu = new_mtu;
                return 0;
        }

        /* Bring down the link and dispose of posted rx entries */
        ntb_transport_link_down(dev->qp);

        if (ndev->mtu < new_mtu) {
                int i;

                for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++)
                        dev_kfree_skb(skb);

                for (; i; i--) {
                        skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN);
                        if (!skb) {
                                rc = -ENOMEM;
                                goto err;
                        }
                        rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
                                                      new_mtu + ETH_HLEN);
                        if (rc) {
                                dev_kfree_skb(skb);
                                goto err;
                        }
                }
        }

        ndev->mtu = new_mtu;
        ntb_transport_link_up(dev->qp);
        return 0;

err:
        ntb_transport_link_down(dev->qp);
        while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
                dev_kfree_skb(skb);
        netdev_err(ndev, "Error changing MTU, device inoperable\n");
        return rc;
}

static const struct net_device_ops ntb_netdev_ops = {
        .ndo_open = ntb_netdev_open,
        .ndo_stop = ntb_netdev_close,
        .ndo_start_xmit = ntb_netdev_start_xmit,
        .ndo_change_mtu = ntb_netdev_change_mtu,
        .ndo_set_mac_address = eth_mac_addr,
};

static void ntb_get_drvinfo(struct net_device *ndev,
                            struct ethtool_drvinfo *info)
{
        struct ntb_netdev *dev = netdev_priv(ndev);

        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
}

static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        cmd->supported = SUPPORTED_Backplane;
        cmd->advertising = ADVERTISED_Backplane;
        ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
        cmd->duplex = DUPLEX_FULL;
        cmd->port = PORT_OTHER;
        cmd->phy_address = 0;
        cmd->transceiver = XCVR_DUMMY1;
        cmd->autoneg = AUTONEG_ENABLE;

        return 0;
}

static const struct ethtool_ops ntb_ethtool_ops = {
        .get_drvinfo = ntb_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_settings = ntb_get_settings,
};

static const struct ntb_queue_handlers ntb_netdev_handlers = {
        .tx_handler = ntb_netdev_tx_handler,
        .rx_handler = ntb_netdev_rx_handler,
        .event_handler = ntb_netdev_event_handler,
};

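/* Transport client probe: allocate an etherdev backed by an NTB
 * transport queue, size the MTU from the transport frame size and
 * register the netdev.
 */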
static int ntb_netdev_probe(struct device *client_dev)
{
        struct ntb_dev *ntb;
        struct net_device *ndev;
        struct pci_dev *pdev;
        struct ntb_netdev *dev;
        int rc;

        ntb = dev_ntb(client_dev->parent);
        pdev = ntb->pdev;
        if (!pdev)
                return -ENODEV;

        ndev = alloc_etherdev(sizeof(*dev));
        if (!ndev)
                return -ENOMEM;

        dev = netdev_priv(ndev);
        dev->ndev = ndev;
        dev->pdev = pdev;
        ndev->features = NETIF_F_HIGHDMA;
        ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        ndev->hw_features = ndev->features;
        ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);

        random_ether_addr(ndev->perm_addr);
        memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);

        ndev->netdev_ops = &ntb_netdev_ops;
        ndev->ethtool_ops = &ntb_ethtool_ops;
        ndev->max_mtu = ETH_MAX_MTU;

        dev->qp = ntb_transport_create_queue(ndev, client_dev,
                                             &ntb_netdev_handlers);
        if (!dev->qp) {
                rc = -EIO;
                goto err;
        }

        ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN;

        rc = register_netdev(ndev);
        if (rc)
                goto err1;

        list_add(&dev->list, &dev_list);
        dev_info(&pdev->dev, "%s created\n", ndev->name);
        return 0;

err1:
        ntb_transport_free_queue(dev->qp);
err:
        free_netdev(ndev);
        return rc;
}

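/* Transport client remove: find the netdev bound to this NTB device,
 * unregister it and release its transport queue.
 */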
static void ntb_netdev_remove(struct device *client_dev)
{
        struct ntb_dev *ntb;
        struct net_device *ndev;
        struct pci_dev *pdev;
        struct ntb_netdev *dev;
        bool found = false;

        ntb = dev_ntb(client_dev->parent);
        pdev = ntb->pdev;

        list_for_each_entry(dev, &dev_list, list) {
                if (dev->pdev == pdev) {
                        found = true;
                        break;
                }
        }
        if (!found)
                return;

        list_del(&dev->list);

        ndev = dev->ndev;
        unregister_netdev(ndev);
        ntb_transport_free_queue(dev->qp);
        free_netdev(ndev);
}

static struct ntb_transport_client ntb_netdev_client = {
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .probe = ntb_netdev_probe,
        .remove = ntb_netdev_remove,
};

static int __init ntb_netdev_init_module(void)
{
        int rc;

        rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
        if (rc)
                return rc;
        return ntb_transport_register_client(&ntb_netdev_client);
}
module_init(ntb_netdev_init_module);

static void __exit ntb_netdev_exit_module(void)
{
        ntb_transport_unregister_client(&ntb_netdev_client);
        ntb_transport_unregister_client_dev(KBUILD_MODNAME);
}
module_exit(ntb_netdev_exit_module);