2 * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 * Maintained at www.Open-FCoE.org
20 #include <linux/module.h>
21 #include <linux/version.h>
22 #include <linux/spinlock.h>
23 #include <linux/netdevice.h>
24 #include <linux/etherdevice.h>
25 #include <linux/ethtool.h>
26 #include <linux/if_ether.h>
27 #include <linux/if_vlan.h>
28 #include <linux/crc32.h>
29 #include <linux/cpu.h>
31 #include <linux/sysfs.h>
32 #include <linux/ctype.h>
33 #include <scsi/scsi_tcq.h>
34 #include <scsi/scsicam.h>
35 #include <scsi/scsi_transport.h>
36 #include <scsi/scsi_transport_fc.h>
37 #include <net/rtnetlink.h>
39 #include <scsi/fc/fc_encaps.h>
40 #include <scsi/fc/fc_fip.h>
42 #include <scsi/libfc.h>
43 #include <scsi/fc_frame.h>
44 #include <scsi/libfcoe.h>
48 MODULE_AUTHOR("Open-FCoE.org");
49 MODULE_DESCRIPTION("FCoE");
50 MODULE_LICENSE("GPL v2");
52 /* Performance tuning parameters for fcoe */
53 static unsigned int fcoe_ddp_min;
54 module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
55 MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
56 "Direct Data Placement (DDP).");
58 DEFINE_MUTEX(fcoe_config_mutex);
61 LIST_HEAD(fcoe_hostlist);
62 DEFINE_RWLOCK(fcoe_hostlist_lock);
63 DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
65 /* Function Prototypes */
66 static int fcoe_reset(struct Scsi_Host *shost);
67 static int fcoe_xmit(struct fc_lport *, struct fc_frame *);
68 static int fcoe_rcv(struct sk_buff *, struct net_device *,
69 struct packet_type *, struct net_device *);
70 static int fcoe_percpu_receive_thread(void *arg);
71 static void fcoe_clean_pending_queue(struct fc_lport *lp);
72 static void fcoe_percpu_clean(struct fc_lport *lp);
73 static int fcoe_link_ok(struct fc_lport *lp);
75 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
76 static int fcoe_hostlist_add(const struct fc_lport *);
78 static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
79 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
80 static void fcoe_dev_setup(void);
81 static void fcoe_dev_cleanup(void);
82 static struct fcoe_interface *
83 fcoe_hostlist_lookup_port(const struct net_device *dev);
85 /* notification function from net device */
86 static struct notifier_block fcoe_notifier = {
87 .notifier_call = fcoe_device_notification,
90 static struct scsi_transport_template *scsi_transport_fcoe_sw;
92 struct fc_function_template fcoe_transport_function = {
93 .show_host_node_name = 1,
94 .show_host_port_name = 1,
95 .show_host_supported_classes = 1,
96 .show_host_supported_fc4s = 1,
97 .show_host_active_fc4s = 1,
98 .show_host_maxframe_size = 1,
100 .show_host_port_id = 1,
101 .show_host_supported_speeds = 1,
102 .get_host_speed = fc_get_host_speed,
103 .show_host_speed = 1,
104 .show_host_port_type = 1,
105 .get_host_port_state = fc_get_host_port_state,
106 .show_host_port_state = 1,
107 .show_host_symbolic_name = 1,
109 .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
110 .show_rport_maxframe_size = 1,
111 .show_rport_supported_classes = 1,
113 .show_host_fabric_name = 1,
114 .show_starget_node_name = 1,
115 .show_starget_port_name = 1,
116 .show_starget_port_id = 1,
117 .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo,
118 .show_rport_dev_loss_tmo = 1,
119 .get_fc_host_stats = fc_get_host_stats,
120 .issue_fc_host_lip = fcoe_reset,
122 .terminate_rport_io = fc_rport_terminate_io,
125 static struct scsi_host_template fcoe_shost_template = {
126 .module = THIS_MODULE,
127 .name = "FCoE Driver",
128 .proc_name = FCOE_NAME,
129 .queuecommand = fc_queuecommand,
130 .eh_abort_handler = fc_eh_abort,
131 .eh_device_reset_handler = fc_eh_device_reset,
132 .eh_host_reset_handler = fc_eh_host_reset,
133 .slave_alloc = fc_slave_alloc,
134 .change_queue_depth = fc_change_queue_depth,
135 .change_queue_type = fc_change_queue_type,
138 .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
139 .use_clustering = ENABLE_CLUSTERING,
140 .sg_tablesize = SG_ALL,
141 .max_sectors = 0xffff,
/* forward declaration: FIP frame receive handler, defined below */
static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
			 struct packet_type *ptype,
			 struct net_device *orig_dev);
148 * fcoe_interface_setup()
149 * @fcoe: new fcoe_interface
150 * @netdev : ptr to the associated netdevice struct
152 * Returns : 0 for success
153 * Locking: must be called with the RTNL mutex held
155 static int fcoe_interface_setup(struct fcoe_interface *fcoe,
156 struct net_device *netdev)
158 struct fcoe_ctlr *fip = &fcoe->ctlr;
159 struct netdev_hw_addr *ha;
160 u8 flogi_maddr[ETH_ALEN];
162 fcoe->netdev = netdev;
164 /* Do not support for bonding device */
165 if ((netdev->priv_flags & IFF_MASTER_ALB) ||
166 (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
167 (netdev->priv_flags & IFF_MASTER_8023AD)) {
171 /* look for SAN MAC address, if multiple SAN MACs exist, only
172 * use the first one for SPMA */
174 for_each_dev_addr(netdev, ha) {
175 if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
176 (is_valid_ether_addr(fip->ctl_src_addr))) {
177 memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
184 /* setup Source Mac Address */
186 memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len);
189 * Add FCoE MAC address as second unicast MAC address
190 * or enter promiscuous mode if not capable of listening
191 * for multiple unicast MACs.
193 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
194 dev_unicast_add(netdev, flogi_maddr);
196 dev_unicast_add(netdev, fip->ctl_src_addr);
197 dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
200 * setup the receive function from ethernet driver
201 * on the ethertype for the given device
203 fcoe->fcoe_packet_type.func = fcoe_rcv;
204 fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
205 fcoe->fcoe_packet_type.dev = netdev;
206 dev_add_pack(&fcoe->fcoe_packet_type);
208 fcoe->fip_packet_type.func = fcoe_fip_recv;
209 fcoe->fip_packet_type.type = htons(ETH_P_FIP);
210 fcoe->fip_packet_type.dev = netdev;
211 dev_add_pack(&fcoe->fip_packet_type);
216 static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
217 static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new);
218 static void fcoe_destroy_work(struct work_struct *work);
221 * fcoe_interface_create()
222 * @netdev: network interface
224 * Returns: pointer to a struct fcoe_interface or NULL on error
226 static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev)
228 struct fcoe_interface *fcoe;
230 fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
232 FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
237 kref_init(&fcoe->kref);
242 fcoe_ctlr_init(&fcoe->ctlr);
243 fcoe->ctlr.send = fcoe_fip_send;
244 fcoe->ctlr.update_mac = fcoe_update_src_mac;
246 fcoe_interface_setup(fcoe, netdev);
252 * fcoe_interface_cleanup() - clean up netdev configurations
255 * Caller must be holding the RTNL mutex
257 void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
259 struct net_device *netdev = fcoe->netdev;
260 struct fcoe_ctlr *fip = &fcoe->ctlr;
261 u8 flogi_maddr[ETH_ALEN];
264 * Don't listen for Ethernet packets anymore.
265 * synchronize_net() ensures that the packet handlers are not running
266 * on another CPU. dev_remove_pack() would do that, this calls the
267 * unsyncronized version __dev_remove_pack() to avoid multiple delays.
269 __dev_remove_pack(&fcoe->fcoe_packet_type);
270 __dev_remove_pack(&fcoe->fip_packet_type);
273 /* Delete secondary MAC addresses */
274 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
275 dev_unicast_delete(netdev, flogi_maddr);
276 if (!is_zero_ether_addr(fip->data_src_addr))
277 dev_unicast_delete(netdev, fip->data_src_addr);
279 dev_unicast_delete(netdev, fip->ctl_src_addr);
280 dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
284 * fcoe_interface_release() - fcoe_port kref release function
285 * @kref: embedded reference count in an fcoe_interface struct
287 static void fcoe_interface_release(struct kref *kref)
289 struct fcoe_interface *fcoe;
290 struct net_device *netdev;
292 fcoe = container_of(kref, struct fcoe_interface, kref);
293 netdev = fcoe->netdev;
294 /* tear-down the FCoE controller */
295 fcoe_ctlr_destroy(&fcoe->ctlr);
301 * fcoe_interface_get()
304 static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
306 kref_get(&fcoe->kref);
310 * fcoe_interface_put()
313 static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
315 kref_put(&fcoe->kref, fcoe_interface_release);
319 * fcoe_fip_recv - handle a received FIP frame.
320 * @skb: the receive skb
321 * @dev: associated &net_device
322 * @ptype: the &packet_type structure which was used to register this handler.
323 * @orig_dev: original receive &net_device, in case @dev is a bond.
325 * Returns: 0 for success
327 static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
328 struct packet_type *ptype,
329 struct net_device *orig_dev)
331 struct fcoe_interface *fcoe;
333 fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
334 fcoe_ctlr_recv(&fcoe->ctlr, skb);
339 * fcoe_fip_send() - send an Ethernet-encapsulated FIP frame.
340 * @fip: FCoE controller.
343 static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
345 skb->dev = fcoe_from_ctlr(fip)->netdev;
350 * fcoe_update_src_mac() - Update Ethernet MAC filters.
351 * @fip: FCoE controller.
352 * @old: Unicast MAC address to delete if the MAC is non-zero.
353 * @new: Unicast MAC address to add.
355 * Remove any previously-set unicast MAC filter.
356 * Add secondary FCoE MAC address filter for our OUI.
358 static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
360 struct fcoe_interface *fcoe;
362 fcoe = fcoe_from_ctlr(fip);
364 if (!is_zero_ether_addr(old))
365 dev_unicast_delete(fcoe->netdev, old);
366 dev_unicast_add(fcoe->netdev, new);
371 * fcoe_lport_config() - sets up the fc_lport
372 * @lp: ptr to the fc_lport
374 * Returns: 0 for success
376 static int fcoe_lport_config(struct fc_lport *lp)
380 lp->max_retry_count = 3;
381 lp->max_rport_retry_count = 3;
382 lp->e_d_tov = 2 * 1000; /* FC-FS default */
383 lp->r_a_tov = 2 * 2 * 1000;
384 lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
385 FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL);
387 fc_lport_init_stats(lp);
389 /* lport fc_lport related configuration */
392 /* offload related configuration */
403 * fcoe_queue_timer() - fcoe queue timer
404 * @lp: the fc_lport pointer
406 * Calls fcoe_check_wait_queue on timeout
409 static void fcoe_queue_timer(ulong lp)
411 fcoe_check_wait_queue((struct fc_lport *)lp, NULL);
415 * fcoe_netdev_config() - Set up netdev for SW FCoE
416 * @lp : ptr to the fc_lport
417 * @netdev : ptr to the associated netdevice struct
419 * Must be called after fcoe_lport_config() as it will use lport mutex
421 * Returns : 0 for success
423 static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
427 struct fcoe_interface *fcoe;
428 struct fcoe_port *port;
430 /* Setup lport private data to point to fcoe softc */
431 port = lport_priv(lp);
435 * Determine max frame size based on underlying device and optional
436 * user-configured limit. If the MFS is too low, fcoe_link_ok()
437 * will return 0, so do this first.
439 mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
440 sizeof(struct fcoe_crc_eof));
441 if (fc_set_mfs(lp, mfs))
444 /* offload features support */
445 if (netdev->features & NETIF_F_SG)
448 if (netdev->features & NETIF_F_FCOE_CRC) {
450 FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
452 if (netdev->features & NETIF_F_FSO) {
454 lp->lso_max = netdev->gso_max_size;
455 FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
458 if (netdev->fcoe_ddp_xid) {
460 lp->lro_xid = netdev->fcoe_ddp_xid;
461 FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
464 skb_queue_head_init(&port->fcoe_pending_queue);
465 port->fcoe_pending_queue_active = 0;
466 setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lp);
468 wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0);
469 fc_set_wwnn(lp, wwnn);
470 /* XXX - 3rd arg needs to be vlan id */
471 wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0);
472 fc_set_wwpn(lp, wwpn);
478 * fcoe_shost_config() - Sets up fc_lport->host
479 * @lp : ptr to the fc_lport
480 * @shost : ptr to the associated scsi host
481 * @dev : device associated to scsi host
483 * Must be called after fcoe_lport_config() and fcoe_netdev_config()
485 * Returns : 0 for success
487 static int fcoe_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
492 /* lport scsi host config */
495 lp->host->max_lun = FCOE_MAX_LUN;
496 lp->host->max_id = FCOE_MAX_FCP_TARGET;
497 lp->host->max_channel = 0;
498 lp->host->transportt = scsi_transport_fcoe_sw;
500 /* add the new host to the SCSI-ml */
501 rc = scsi_add_host(lp->host, dev);
503 FCOE_NETDEV_DBG(fcoe_netdev(lp), "fcoe_shost_config: "
504 "error on scsi_add_host\n");
507 sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
508 FCOE_NAME, FCOE_VERSION,
509 fcoe_netdev(lp)->name);
515 * fcoe_oem_match() - match for read types IO
516 * @fp: the fc_frame for new IO.
518 * Returns : true for read types IO, otherwise returns false.
520 bool fcoe_oem_match(struct fc_frame *fp)
522 return fc_fcp_is_read(fr_fsp(fp)) &&
523 (fr_fsp(fp)->data_len > fcoe_ddp_min);
527 * fcoe_em_config() - allocates em for this lport
528 * @lp: the fcoe that em is to allocated for
530 * Called with write fcoe_hostlist_lock held.
532 * Returns : 0 on success
534 static inline int fcoe_em_config(struct fc_lport *lp)
536 struct fcoe_port *port = lport_priv(lp);
537 struct fcoe_interface *fcoe = port->fcoe;
538 struct fcoe_interface *oldfcoe = NULL;
539 struct net_device *old_real_dev, *cur_real_dev;
540 u16 min_xid = FCOE_MIN_XID;
541 u16 max_xid = FCOE_MAX_XID;
544 * Check if need to allocate an em instance for
545 * offload exchange ids to be shared across all VN_PORTs/lport.
547 if (!lp->lro_enabled || !lp->lro_xid || (lp->lro_xid >= max_xid)) {
553 * Reuse existing offload em instance in case
554 * it is already allocated on real eth device
556 if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
557 cur_real_dev = vlan_dev_real_dev(fcoe->netdev);
559 cur_real_dev = fcoe->netdev;
561 list_for_each_entry(oldfcoe, &fcoe_hostlist, list) {
562 if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
563 old_real_dev = vlan_dev_real_dev(oldfcoe->netdev);
565 old_real_dev = oldfcoe->netdev;
567 if (cur_real_dev == old_real_dev) {
568 fcoe->oem = oldfcoe->oem;
574 if (!fc_exch_mgr_add(lp, fcoe->oem, fcoe_oem_match)) {
575 printk(KERN_ERR "fcoe_em_config: failed to add "
576 "offload em:%p on interface:%s\n",
577 fcoe->oem, fcoe->netdev->name);
581 fcoe->oem = fc_exch_mgr_alloc(lp, FC_CLASS_3,
582 FCOE_MIN_XID, lp->lro_xid,
585 printk(KERN_ERR "fcoe_em_config: failed to allocate "
586 "em for offload exches on interface:%s\n",
593 * Exclude offload EM xid range from next EM xid range.
595 min_xid += lp->lro_xid + 1;
598 if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, min_xid, max_xid, NULL)) {
599 printk(KERN_ERR "fcoe_em_config: failed to "
600 "allocate em on interface %s\n", fcoe->netdev->name);
608 * fcoe_if_destroy() - FCoE software HBA tear-down function
609 * @lport: fc_lport to destroy
611 static void fcoe_if_destroy(struct fc_lport *lport)
613 struct fcoe_port *port = lport_priv(lport);
614 struct fcoe_interface *fcoe = port->fcoe;
615 struct net_device *netdev = fcoe->netdev;
617 FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
619 /* Logout of the fabric */
620 fc_fabric_logoff(lport);
622 /* Cleanup the fc_lport */
623 fc_lport_destroy(lport);
624 fc_fcp_destroy(lport);
626 /* Stop the transmit retry timer */
627 del_timer_sync(&port->timer);
629 /* Free existing transmit skbs */
630 fcoe_clean_pending_queue(lport);
632 /* receives may not be stopped until after this */
633 fcoe_interface_put(fcoe);
635 /* Free queued packets for the per-CPU receive threads */
636 fcoe_percpu_clean(lport);
638 /* Detach from the scsi-ml */
639 fc_remove_host(lport->host);
640 scsi_remove_host(lport->host);
642 /* There are no more rports or I/O, free the EM */
643 fc_exch_mgr_free(lport);
645 /* Free memory used by statistical counters */
646 fc_lport_free_stats(lport);
648 /* Release the Scsi_Host */
649 scsi_host_put(lport->host);
653 * fcoe_ddp_setup - calls LLD's ddp_setup through net_device
654 * @lp: the corresponding fc_lport
655 * @xid: the exchange id for this ddp transfer
656 * @sgl: the scatterlist describing this transfer
657 * @sgc: number of sg items
661 static int fcoe_ddp_setup(struct fc_lport *lp, u16 xid,
662 struct scatterlist *sgl, unsigned int sgc)
664 struct net_device *n = fcoe_netdev(lp);
666 if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_setup)
667 return n->netdev_ops->ndo_fcoe_ddp_setup(n, xid, sgl, sgc);
673 * fcoe_ddp_done - calls LLD's ddp_done through net_device
674 * @lp: the corresponding fc_lport
675 * @xid: the exchange id for this ddp transfer
677 * Returns : the length of data that have been completed by ddp
679 static int fcoe_ddp_done(struct fc_lport *lp, u16 xid)
681 struct net_device *n = fcoe_netdev(lp);
683 if (n->netdev_ops && n->netdev_ops->ndo_fcoe_ddp_done)
684 return n->netdev_ops->ndo_fcoe_ddp_done(n, xid);
688 static struct libfc_function_template fcoe_libfc_fcn_templ = {
689 .frame_send = fcoe_xmit,
690 .ddp_setup = fcoe_ddp_setup,
691 .ddp_done = fcoe_ddp_done,
695 * fcoe_if_create() - this function creates the fcoe port
696 * @fcoe: fcoe_interface structure to create an fc_lport instance on
697 * @parent: device pointer to be the parent in sysfs for the SCSI host
699 * Creates fc_lport struct and scsi_host for lport, configures lport.
701 * Returns : The allocated fc_lport or an error pointer
703 static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
704 struct device *parent)
707 struct fc_lport *lport = NULL;
708 struct fcoe_port *port;
709 struct Scsi_Host *shost;
710 struct net_device *netdev = fcoe->netdev;
712 FCOE_NETDEV_DBG(netdev, "Create Interface\n");
714 shost = libfc_host_alloc(&fcoe_shost_template,
715 sizeof(struct fcoe_port));
717 FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
721 lport = shost_priv(shost);
722 port = lport_priv(lport);
725 INIT_WORK(&port->destroy_work, fcoe_destroy_work);
727 /* configure fc_lport, e.g., em */
728 rc = fcoe_lport_config(lport);
730 FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
735 /* configure lport network properties */
736 rc = fcoe_netdev_config(lport, netdev);
738 FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
743 /* configure lport scsi host properties */
744 rc = fcoe_shost_config(lport, shost, parent);
746 FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
751 /* Initialize the library */
752 rc = fcoe_libfc_config(lport, &fcoe_libfc_fcn_templ);
754 FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
760 * fcoe_em_alloc() and fcoe_hostlist_add() both
761 * need to be atomic with respect to other changes to the hostlist
762 * since fcoe_em_alloc() looks for an existing EM
763 * instance on host list updated by fcoe_hostlist_add().
765 * This is currently handled through the fcoe_config_mutex begin held.
768 /* lport exch manager allocation */
769 rc = fcoe_em_config(lport);
771 FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the "
776 fcoe_interface_get(fcoe);
780 fc_exch_mgr_free(lport);
782 scsi_host_put(lport->host);
788 * fcoe_if_init() - attach to scsi transport
790 * Returns : 0 on success
792 static int __init fcoe_if_init(void)
794 /* attach to scsi transport */
795 scsi_transport_fcoe_sw =
796 fc_attach_transport(&fcoe_transport_function);
798 if (!scsi_transport_fcoe_sw) {
799 printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n");
807 * fcoe_if_exit() - detach from scsi transport
809 * Returns : 0 on success
811 int __exit fcoe_if_exit(void)
813 fc_release_transport(scsi_transport_fcoe_sw);
814 scsi_transport_fcoe_sw = NULL;
819 * fcoe_percpu_thread_create() - Create a receive thread for an online cpu
820 * @cpu: cpu index for the online cpu
822 static void fcoe_percpu_thread_create(unsigned int cpu)
824 struct fcoe_percpu_s *p;
825 struct task_struct *thread;
827 p = &per_cpu(fcoe_percpu, cpu);
829 thread = kthread_create(fcoe_percpu_receive_thread,
830 (void *)p, "fcoethread/%d", cpu);
832 if (likely(!IS_ERR(p->thread))) {
833 kthread_bind(thread, cpu);
834 wake_up_process(thread);
836 spin_lock_bh(&p->fcoe_rx_list.lock);
838 spin_unlock_bh(&p->fcoe_rx_list.lock);
843 * fcoe_percpu_thread_destroy() - removes the rx thread for the given cpu
844 * @cpu: cpu index the rx thread is to be removed
846 * Destroys a per-CPU Rx thread. Any pending skbs are moved to the
847 * current CPU's Rx thread. If the thread being destroyed is bound to
848 * the CPU processing this context the skbs will be freed.
850 static void fcoe_percpu_thread_destroy(unsigned int cpu)
852 struct fcoe_percpu_s *p;
853 struct task_struct *thread;
854 struct page *crc_eof;
857 struct fcoe_percpu_s *p0;
858 unsigned targ_cpu = smp_processor_id();
859 #endif /* CONFIG_SMP */
861 FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
863 /* Prevent any new skbs from being queued for this CPU. */
864 p = &per_cpu(fcoe_percpu, cpu);
865 spin_lock_bh(&p->fcoe_rx_list.lock);
868 crc_eof = p->crc_eof_page;
869 p->crc_eof_page = NULL;
870 p->crc_eof_offset = 0;
871 spin_unlock_bh(&p->fcoe_rx_list.lock);
875 * Don't bother moving the skb's if this context is running
876 * on the same CPU that is having its thread destroyed. This
877 * can easily happen when the module is removed.
879 if (cpu != targ_cpu) {
880 p0 = &per_cpu(fcoe_percpu, targ_cpu);
881 spin_lock_bh(&p0->fcoe_rx_list.lock);
883 FCOE_DBG("Moving frames from CPU %d to CPU %d\n",
886 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
887 __skb_queue_tail(&p0->fcoe_rx_list, skb);
888 spin_unlock_bh(&p0->fcoe_rx_list.lock);
891 * The targeted CPU is not initialized and cannot accept
892 * new skbs. Unlock the targeted CPU and drop the skbs
893 * on the CPU that is going offline.
895 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
897 spin_unlock_bh(&p0->fcoe_rx_list.lock);
901 * This scenario occurs when the module is being removed
902 * and all threads are being destroyed. skbs will continue
903 * to be shifted from the CPU thread that is being removed
904 * to the CPU thread associated with the CPU that is processing
905 * the module removal. Once there is only one CPU Rx thread it
906 * will reach this case and we will drop all skbs and later
909 spin_lock_bh(&p->fcoe_rx_list.lock);
910 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
912 spin_unlock_bh(&p->fcoe_rx_list.lock);
916 * This a non-SMP scenario where the singular Rx thread is
917 * being removed. Free all skbs and stop the thread.
919 spin_lock_bh(&p->fcoe_rx_list.lock);
920 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) != NULL)
922 spin_unlock_bh(&p->fcoe_rx_list.lock);
926 kthread_stop(thread);
933 * fcoe_cpu_callback() - fcoe cpu hotplug event callback
934 * @nfb: callback data block
935 * @action: event triggering the callback
936 * @hcpu: index for the cpu of this event
938 * This creates or destroys per cpu data for fcoe
940 * Returns NOTIFY_OK always.
942 static int fcoe_cpu_callback(struct notifier_block *nfb,
943 unsigned long action, void *hcpu)
945 unsigned cpu = (unsigned long)hcpu;
949 case CPU_ONLINE_FROZEN:
950 FCOE_DBG("CPU %x online: Create Rx thread\n", cpu);
951 fcoe_percpu_thread_create(cpu);
954 case CPU_DEAD_FROZEN:
955 FCOE_DBG("CPU %x offline: Remove Rx thread\n", cpu);
956 fcoe_percpu_thread_destroy(cpu);
964 static struct notifier_block fcoe_cpu_notifier = {
965 .notifier_call = fcoe_cpu_callback,
969 * fcoe_rcv() - this is the fcoe receive function called by NET_RX_SOFTIRQ
970 * @skb: the receive skb
971 * @dev: associated net device
973 * @olddev: last device
975 * this function will receive the packet and build fc frame and pass it up
977 * Returns: 0 for success
979 int fcoe_rcv(struct sk_buff *skb, struct net_device *dev,
980 struct packet_type *ptype, struct net_device *olddev)
983 struct fcoe_rcv_info *fr;
984 struct fcoe_interface *fcoe;
985 struct fc_frame_header *fh;
986 struct fcoe_percpu_s *fps;
989 fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
991 if (unlikely(lp == NULL)) {
992 FCOE_NETDEV_DBG(dev, "Cannot find hba structure");
998 FCOE_NETDEV_DBG(dev, "skb_info: len:%d data_len:%d head:%p "
999 "data:%p tail:%p end:%p sum:%d dev:%s",
1000 skb->len, skb->data_len, skb->head, skb->data,
1001 skb_tail_pointer(skb), skb_end_pointer(skb),
1002 skb->csum, skb->dev ? skb->dev->name : "<NULL>");
1004 /* check for FCOE packet type */
1005 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
1006 FCOE_NETDEV_DBG(dev, "Wrong FC type frame");
1011 * Check for minimum frame length, and make sure required FCoE
1012 * and FC headers are pulled into the linear data area.
1014 if (unlikely((skb->len < FCOE_MIN_FRAME) ||
1015 !pskb_may_pull(skb, FCOE_HEADER_LEN)))
1018 skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
1019 fh = (struct fc_frame_header *) skb_transport_header(skb);
1021 fr = fcoe_dev_from_skb(skb);
1026 * In case the incoming frame's exchange is originated from
1027 * the initiator, then received frame's exchange id is ANDed
1028 * with fc_cpu_mask bits to get the same cpu on which exchange
1029 * was originated, otherwise just use the current cpu.
1031 if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
1032 cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
1034 cpu = smp_processor_id();
1036 fps = &per_cpu(fcoe_percpu, cpu);
1037 spin_lock_bh(&fps->fcoe_rx_list.lock);
1038 if (unlikely(!fps->thread)) {
1040 * The targeted CPU is not ready, let's target
1041 * the first CPU now. For non-SMP systems this
1042 * will check the same CPU twice.
1044 FCOE_NETDEV_DBG(dev, "CPU is online, but no receive thread "
1045 "ready for incoming skb- using first online "
1048 spin_unlock_bh(&fps->fcoe_rx_list.lock);
1049 cpu = first_cpu(cpu_online_map);
1050 fps = &per_cpu(fcoe_percpu, cpu);
1051 spin_lock_bh(&fps->fcoe_rx_list.lock);
1053 spin_unlock_bh(&fps->fcoe_rx_list.lock);
1059 * We now have a valid CPU that we're targeting for
1060 * this skb. We also have this receive thread locked,
1061 * so we're free to queue skbs into it's queue.
1063 __skb_queue_tail(&fps->fcoe_rx_list, skb);
1064 if (fps->fcoe_rx_list.qlen == 1)
1065 wake_up_process(fps->thread);
1067 spin_unlock_bh(&fps->fcoe_rx_list.lock);
1071 fc_lport_get_stats(lp)->ErrorFrames++;
/**
 * fcoe_start_io() - pass to netdev to start xmit for fcoe
 * @skb: the skb to be xmitted
 *
 * Holds an extra reference so the caller can requeue the skb if the
 * driver rejects it; on success the extra reference is dropped.
 *
 * Returns: 0 for success
 */
static inline int fcoe_start_io(struct sk_buff *skb)
{
	int rc;

	skb_get(skb);
	rc = dev_queue_xmit(skb);
	if (rc != 0)
		return rc;
	kfree_skb(skb);
	return 0;
}
1097 * fcoe_get_paged_crc_eof() - in case we need to alloc a page for crc_eof
1098 * @skb: the skb to be xmitted
1101 * Returns: 0 for success
1103 static int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen)
1105 struct fcoe_percpu_s *fps;
1108 fps = &get_cpu_var(fcoe_percpu);
1109 page = fps->crc_eof_page;
1111 page = alloc_page(GFP_ATOMIC);
1113 put_cpu_var(fcoe_percpu);
1116 fps->crc_eof_page = page;
1117 fps->crc_eof_offset = 0;
1121 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page,
1122 fps->crc_eof_offset, tlen);
1124 skb->data_len += tlen;
1125 skb->truesize += tlen;
1126 fps->crc_eof_offset += sizeof(struct fcoe_crc_eof);
1128 if (fps->crc_eof_offset >= PAGE_SIZE) {
1129 fps->crc_eof_page = NULL;
1130 fps->crc_eof_offset = 0;
1133 put_cpu_var(fcoe_percpu);
1138 * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
1139 * @fp: the fc_frame containing data to be checksummed
1141 * This uses crc32() to calculate the crc for port frame
1142 * Return : 32 bit crc
1144 u32 fcoe_fc_crc(struct fc_frame *fp)
1146 struct sk_buff *skb = fp_skb(fp);
1147 struct skb_frag_struct *frag;
1148 unsigned char *data;
1149 unsigned long off, len, clen;
1153 crc = crc32(~0, skb->data, skb_headlen(skb));
1155 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1156 frag = &skb_shinfo(skb)->frags[i];
1157 off = frag->page_offset;
1160 clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
1161 data = kmap_atomic(frag->page + (off >> PAGE_SHIFT),
1162 KM_SKB_DATA_SOFTIRQ);
1163 crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
1164 kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
1173 * fcoe_xmit() - FCoE frame transmit function
1174 * @lp: the associated local fcoe
1175 * @fp: the fc_frame to be transmitted
1177 * Return : 0 for success
1179 int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
1184 struct fcoe_crc_eof *cp;
1185 struct sk_buff *skb;
1186 struct fcoe_dev_stats *stats;
1187 struct fc_frame_header *fh;
1188 unsigned int hlen; /* header length implies the version */
1189 unsigned int tlen; /* trailer length */
1190 unsigned int elen; /* eth header, may include vlan */
1191 struct fcoe_port *port = lport_priv(lp);
1192 struct fcoe_interface *fcoe = port->fcoe;
1194 struct fcoe_hdr *hp;
1196 WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
1198 fh = fc_frame_header_get(fp);
1200 wlen = skb->len / FCOE_WORD_TO_BYTE;
1207 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
1208 fcoe_ctlr_els_send(&fcoe->ctlr, skb))
1214 elen = sizeof(struct ethhdr);
1215 hlen = sizeof(struct fcoe_hdr);
1216 tlen = sizeof(struct fcoe_crc_eof);
1217 wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
1220 if (likely(lp->crc_offload)) {
1221 skb->ip_summed = CHECKSUM_PARTIAL;
1222 skb->csum_start = skb_headroom(skb);
1223 skb->csum_offset = skb->len;
1226 skb->ip_summed = CHECKSUM_NONE;
1227 crc = fcoe_fc_crc(fp);
1230 /* copy port crc and eof to the skb buff */
1231 if (skb_is_nonlinear(skb)) {
1233 if (fcoe_get_paged_crc_eof(skb, tlen)) {
1237 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
1238 cp = kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ)
1239 + frag->page_offset;
1241 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
1244 memset(cp, 0, sizeof(*cp));
1246 cp->fcoe_crc32 = cpu_to_le32(~crc);
1248 if (skb_is_nonlinear(skb)) {
1249 kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
1253 /* adjust skb network/transport offsets to match mac/fcoe/port */
1254 skb_push(skb, elen + hlen);
1255 skb_reset_mac_header(skb);
1256 skb_reset_network_header(skb);
1257 skb->mac_len = elen;
1258 skb->protocol = htons(ETH_P_FCOE);
1259 skb->dev = fcoe->netdev;
1261 /* fill up mac and fcoe headers */
1263 eh->h_proto = htons(ETH_P_FCOE);
1264 if (fcoe->ctlr.map_dest)
1265 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
1267 /* insert GW address */
1268 memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
1270 if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
1271 memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
1273 memcpy(eh->h_source, fcoe->ctlr.data_src_addr, ETH_ALEN);
1275 hp = (struct fcoe_hdr *)(eh + 1);
1276 memset(hp, 0, sizeof(*hp));
1278 FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
1281 /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
1282 if (lp->seq_offload && fr_max_payload(fp)) {
1283 skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
1284 skb_shinfo(skb)->gso_size = fr_max_payload(fp);
1286 skb_shinfo(skb)->gso_type = 0;
1287 skb_shinfo(skb)->gso_size = 0;
1289 /* update tx stats: regardless if LLD fails */
1290 stats = fc_lport_get_stats(lp);
1292 stats->TxWords += wlen;
1294 /* send down to lld */
1296 if (port->fcoe_pending_queue.qlen)
1297 fcoe_check_wait_queue(lp, skb);
1298 else if (fcoe_start_io(skb))
1299 fcoe_check_wait_queue(lp, skb);
1305 * fcoe_percpu_receive_thread() - recv thread per cpu
1306 * @arg: ptr to the fcoe per cpu struct
1308 * Return: 0 for success
1310 int fcoe_percpu_receive_thread(void *arg)
1312 struct fcoe_percpu_s *p = arg;
1314 struct fc_lport *lp;
1315 struct fcoe_rcv_info *fr;
1316 struct fcoe_dev_stats *stats;
1317 struct fc_frame_header *fh;
1318 struct sk_buff *skb;
1319 struct fcoe_crc_eof crc_eof;
1320 struct fc_frame *fp;
1322 struct fcoe_port *port;
1323 struct fcoe_hdr *hp;
/* Run at high priority: this thread is on the I/O completion path. */
1325 set_user_nice(current, -20);
1327 while (!kthread_should_stop()) {
/*
 * Sleep until fcoe_rcv() queues an skb on this CPU's rx list.
 * The list lock is dropped before scheduling and re-taken on
 * wakeup, so the dequeue check is always made under the lock.
 */
1329 spin_lock_bh(&p->fcoe_rx_list.lock);
1330 while ((skb = __skb_dequeue(&p->fcoe_rx_list)) == NULL) {
1331 set_current_state(TASK_INTERRUPTIBLE);
1332 spin_unlock_bh(&p->fcoe_rx_list.lock);
1334 set_current_state(TASK_RUNNING);
1335 if (kthread_should_stop())
1337 spin_lock_bh(&p->fcoe_rx_list.lock);
1339 spin_unlock_bh(&p->fcoe_rx_list.lock);
/* fr->fr_dev (the lport) was stashed in the skb cb by fcoe_rcv(). */
1340 fr = fcoe_dev_from_skb(skb);
1342 if (unlikely(lp == NULL)) {
1343 FCOE_NETDEV_DBG(skb->dev, "Invalid HBA Structure");
1348 FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
1349 "head:%p data:%p tail:%p end:%p sum:%d dev:%s",
1350 skb->len, skb->data_len,
1351 skb->head, skb->data, skb_tail_pointer(skb),
1352 skb_end_pointer(skb), skb->csum,
1353 skb->dev ? skb->dev->name : "<NULL>");
1356 * Save source MAC address before discarding header.
1358 port = lport_priv(lp);
1359 if (skb_is_nonlinear(skb))
1360 skb_linearize(skb); /* not ideal */
1361 mac = eth_hdr(skb)->h_source;
1364 * Frame length checks and setting up the header pointers
1365 * was done in fcoe_rcv already.
1367 hp = (struct fcoe_hdr *) skb_network_header(skb);
1368 fh = (struct fc_frame_header *) skb_transport_header(skb);
/* Drop frames with an FCoE encapsulation version we don't speak. */
1370 stats = fc_lport_get_stats(lp);
1371 if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
/* Rate-limit the warning to the first few occurrences. */
1372 if (stats->ErrorFrames < 5)
1373 printk(KERN_WARNING "fcoe: FCoE version "
1374 "mismatch: The frame has "
1375 "version %x, but the "
1376 "initiator supports version "
1377 "%x\n", FC_FCOE_DECAPS_VER(hp),
1379 stats->ErrorFrames++;
/* Strip the FCoE header; fr_len excludes the CRC/EOF trailer. */
1384 skb_pull(skb, sizeof(struct fcoe_hdr));
1385 fr_len = skb->len - sizeof(struct fcoe_crc_eof);
1388 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
1390 fp = (struct fc_frame *)skb;
1393 fr_sof(fp) = hp->fcoe_sof;
1395 /* Copy out the CRC and EOF trailer for access */
1396 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
1400 fr_eof(fp) = crc_eof.fcoe_eof;
1401 fr_crc(fp) = crc_eof.fcoe_crc32;
/* Trim the trailer off so the frame ends at the FC payload. */
1402 if (pskb_trim(skb, fr_len)) {
1408 * We only check CRC if no offload is available and if it is
1409 * it's solicited data, in which case, the FCP layer would
1410 * check it during the copy.
1412 if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
1413 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
1415 fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
1417 fh = fc_frame_header_get(fp);
/* Solicited FCP data skips the software CRC check (see above). */
1418 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
1419 fh->fh_type == FC_TYPE_FCP) {
1420 fc_exch_recv(lp, fp);
1423 if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
1424 if (le32_to_cpu(fr_crc(fp)) !=
1425 ~crc32(~0, skb->data, fr_len)) {
1426 if (stats->InvalidCRCCount < 5)
1427 printk(KERN_WARNING "fcoe: dropping "
1428 "frame with CRC error\n")
1429 stats->InvalidCRCCount++;
1430 stats->ErrorFrames++;
1434 fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
/*
 * While FLOGI is outstanding, hand the frame to the FIP controller
 * first so it can learn from the FLOGI response (saved @mac is the
 * frame's source address).
 */
1436 if (unlikely(port->fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN) &&
1437 fcoe_ctlr_recv_flogi(&port->fcoe->ctlr, fp, mac)) {
1441 fc_exch_recv(lp, fp);
1447 * fcoe_check_wait_queue() - attempt to clear the transmit backlog
1450 * This empties the wait_queue, dequeuing the head of the wait_queue
1451 * and calling fcoe_start_io() for each packet; if an skb fails to
1452 * transmit, it is put back at the head of the wait_queue and we try
1453 * again later.
1455 * The wait_queue is used when the skb transmit fails. skb will go
1456 * in the wait_queue which will be emptied by the timer function or
1457 * by the next skb transmit.
1459 static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
1461 struct fcoe_port *port = lport_priv(lp);
1464 spin_lock_bh(&port->fcoe_pending_queue.lock);
/* A non-NULL skb is first appended so ordering is preserved. */
1467 __skb_queue_tail(&port->fcoe_pending_queue, skb);
/* Only one context drains the queue at a time. */
1469 if (port->fcoe_pending_queue_active)
1471 port->fcoe_pending_queue_active = 1;
1473 while (port->fcoe_pending_queue.qlen) {
1474 /* keep qlen > 0 until fcoe_start_io succeeds */
1475 port->fcoe_pending_queue.qlen++;
1476 skb = __skb_dequeue(&port->fcoe_pending_queue);
/* Drop the lock around the actual transmit; it may be slow. */
1478 spin_unlock_bh(&port->fcoe_pending_queue.lock);
1479 rc = fcoe_start_io(skb);
1480 spin_lock_bh(&port->fcoe_pending_queue.lock);
/* Transmit failed: requeue at the head to preserve ordering. */
1483 __skb_queue_head(&port->fcoe_pending_queue, skb);
1484 /* undo temporary increment above */
1485 port->fcoe_pending_queue.qlen--;
1488 /* undo temporary increment above */
1489 port->fcoe_pending_queue.qlen--;
1492 if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
/* Still backlogged: arm the timer to retry shortly. */
1494 if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
1495 mod_timer(&port->timer, jiffies + 2);
1496 port->fcoe_pending_queue_active = 0;
1498 if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
1500 spin_unlock_bh(&port->fcoe_pending_queue.lock);
1505 * fcoe_dev_setup() - setup link change notification interface
/*
 * Registers fcoe_notifier with the netdevice notifier chain so the
 * driver hears about link/MTU/unregister events (see
 * fcoe_device_notification()).
 */
1507 static void fcoe_dev_setup(void)
1509 register_netdevice_notifier(&fcoe_notifier);
1513 * fcoe_dev_cleanup() - cleanup link change notification interface
/* Counterpart of fcoe_dev_setup(): stop receiving netdev events. */
1515 static void fcoe_dev_cleanup(void)
1517 unregister_netdevice_notifier(&fcoe_notifier);
1521 * fcoe_device_notification() - netdev event notification callback
1522 * @notifier: context of the notification
1523 * @event: type of event
1524 * @ptr: the struct net_device the event concerns (passed as void *)
1526 * This function is called by the ethernet driver in case of link change event
1528 * Returns: 0 for success
1530 static int fcoe_device_notification(struct notifier_block *notifier,
1531 ulong event, void *ptr)
1533 struct fc_lport *lp = NULL;
1534 struct net_device *netdev = ptr;
1535 struct fcoe_interface *fcoe;
1536 struct fcoe_port *port;
1537 struct fcoe_dev_stats *stats;
1538 u32 link_possible = 1;
/*
 * NOTE(review): takes fcoe_hostlist_lock without the _bh variant while
 * fcoe_hostlist_add()/fcoe_exit() use write_lock_bh — confirm this
 * notifier can never race with softirq-context users of the lock.
 */
1542 write_lock(&fcoe_hostlist_lock);
/* Find the fcoe interface (if any) bound to the affected netdev. */
1543 list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1544 if (fcoe->netdev == netdev) {
1556 case NETDEV_GOING_DOWN:
1562 case NETDEV_CHANGEMTU:
/* Recompute max frame size from the new MTU, minus FCoE overhead. */
1563 mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
1564 sizeof(struct fcoe_crc_eof));
1565 if (mfs >= FC_MIN_MAX_FRAME)
1566 fc_set_mfs(lp, mfs);
1568 case NETDEV_REGISTER:
1570 case NETDEV_UNREGISTER:
/* Unhook the interface now; the lport teardown is deferred to a work item. */
1571 list_del(&fcoe->list);
1572 port = lport_priv(fcoe->ctlr.lp);
1573 fcoe_interface_cleanup(fcoe);
1574 schedule_work(&port->destroy_work);
1578 FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
1579 "from netdev netlink\n", event);
/* Propagate the resulting link state to the FIP controller. */
1581 if (link_possible && !fcoe_link_ok(lp))
1582 fcoe_ctlr_link_up(&fcoe->ctlr);
1583 else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
1584 stats = fc_lport_get_stats(lp);
1585 stats->LinkFailureCount++;
1586 fcoe_clean_pending_queue(lp);
1589 write_unlock(&fcoe_hostlist_lock);
1594 * fcoe_if_to_netdev() - parse a name buffer to get netdev
1595 * @buffer: incoming buffer to be copied
1597 * Returns: NULL or ptr to net_device; on success the device's refcount
1597 *          is held (dev_get_by_name) and the caller must dev_put() it.
1599 static struct net_device *fcoe_if_to_netdev(const char *buffer)
1602 char ifname[IFNAMSIZ + 2];
1605 strlcpy(ifname, buffer, IFNAMSIZ);
/*
 * Strip trailing newlines left by sysfs writes; the loop body that
 * terminates the string is elided here — presumably '*cp = '\0';',
 * TODO confirm against the full source.
 */
1606 cp = ifname + strlen(ifname);
1607 while (--cp >= ifname && *cp == '\n')
1609 return dev_get_by_name(&init_net, ifname);
1615 * fcoe_destroy() - handles the destroy from sysfs
1616 * @buffer: expected to be an eth if name
1617 * @kp: associated kernel param
1619 * Returns: 0 for success
1621 static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
1623 struct fcoe_interface *fcoe;
1624 struct net_device *netdev;
/* Serialize against create/destroy/init/exit. */
1627 mutex_lock(&fcoe_config_mutex);
1628 #ifdef CONFIG_FCOE_MODULE
1630 * Make sure the module has been initialized, and is not about to be
1631 * removed. Module parameter sysfs files are writable before the
1632 * module_init function is called and after module_exit.
1634 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
/* Resolve the interface name to a netdev (reference held on success). */
1640 netdev = fcoe_if_to_netdev(buffer);
/*
 * NOTE(review): write_lock without _bh here, but fcoe_hostlist_add()
 * uses write_lock_bh on the same lock — verify contexts are consistent.
 */
1646 write_lock(&fcoe_hostlist_lock);
1647 fcoe = fcoe_hostlist_lookup_port(netdev);
1649 write_unlock(&fcoe_hostlist_lock);
/* Unlink under the lock, then tear down outside it. */
1653 list_del(&fcoe->list);
1654 write_unlock(&fcoe_hostlist_lock);
1656 fcoe_interface_cleanup(fcoe);
1658 fcoe_if_destroy(fcoe->ctlr.lp);
1662 mutex_unlock(&fcoe_config_mutex);
/*
 * fcoe_destroy_work() - deferred lport teardown
 * @work: the fcoe_port's destroy_work, scheduled from the netdev
 *        notifier / module exit paths
 *
 * Runs in process context so fcoe_if_destroy() can take
 * fcoe_config_mutex safely.
 */
1666 static void fcoe_destroy_work(struct work_struct *work)
1668 struct fcoe_port *port;
1670 port = container_of(work, struct fcoe_port, destroy_work);
1671 mutex_lock(&fcoe_config_mutex);
1672 fcoe_if_destroy(port->lport);
1673 mutex_unlock(&fcoe_config_mutex);
1677 * fcoe_create() - Handles the create call from sysfs
1678 * @buffer: expected to be an eth if name
1679 * @kp: associated kernel param
1681 * Returns: 0 for success
1683 static int fcoe_create(const char *buffer, struct kernel_param *kp)
1686 struct fcoe_interface *fcoe;
1687 struct fc_lport *lport;
1688 struct net_device *netdev;
/* Serialize against destroy/init/exit. */
1690 mutex_lock(&fcoe_config_mutex);
1691 #ifdef CONFIG_FCOE_MODULE
1693 * Make sure the module has been initialized, and is not about to be
1694 * removed. Module parameter sysfs files are writable before the
1695 * module_init function is called and after module_exit.
1697 if (THIS_MODULE->state != MODULE_STATE_LIVE) {
1704 netdev = fcoe_if_to_netdev(buffer);
1710 /* look for existing lport */
1711 if (fcoe_hostlist_lookup(netdev)) {
1716 fcoe = fcoe_interface_create(netdev);
1722 lport = fcoe_if_create(fcoe, &netdev->dev);
1723 if (IS_ERR(lport)) {
1724 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
1727 fcoe_interface_cleanup(fcoe);
1731 /* Make this the "master" N_Port */
1732 fcoe->ctlr.lp = lport;
1734 /* add to lports list */
1735 fcoe_hostlist_add(lport);
1737 /* start FIP Discovery and FLOGI */
1738 lport->boot_time = jiffies;
1739 fc_fabric_login(lport);
1740 if (!fcoe_link_ok(lport))
1741 fcoe_ctlr_link_up(&fcoe->ctlr);
1746 * Release from init in fcoe_interface_create(), on success lport
1747 * should be holding a reference taken in fcoe_if_create().
1749 fcoe_interface_put(fcoe);
1754 mutex_unlock(&fcoe_config_mutex);
/*
 * "create" / "destroy" module parameters: writing an ethernet interface
 * name to them instantiates or tears down an FCoE instance on that
 * device. Descriptions fixed: the originals read "fcoe fcoe"
 * (duplicated word).
 */
1758 module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
1759 __MODULE_PARM_TYPE(create, "string");
1760 MODULE_PARM_DESC(create, "Create fcoe instance using the net device passed in.");
1761 module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
1762 __MODULE_PARM_TYPE(destroy, "string");
1763 MODULE_PARM_DESC(destroy, "Destroy fcoe instance on the net device passed in.");
1766 * fcoe_link_ok() - Check if link is ok for the fc_lport
1767 * @lp: ptr to the fc_lport
1769 * Any permanently-disqualifying conditions have been previously checked.
1770 * This also updates the speed setting, which may change with link for 100/1000.
1772 * This function should probably be checking for PAUSE support at some point
1773 * in the future. Currently Per-priority-pause is not determinable using
1774 * ethtool, so we shouldn't be restrictive until that problem is resolved.
1776 * Returns: 0 if link is OK for use by FCoE.
1779 int fcoe_link_ok(struct fc_lport *lp)
1781 struct fcoe_port *port = lport_priv(lp);
1782 struct net_device *dev = port->fcoe->netdev;
1783 struct ethtool_cmd ecmd = { ETHTOOL_GSET };
/* Link counts as up only if the device is UP, has carrier, and
 * ethtool settings can be read. */
1785 if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) &&
1786 (!dev_ethtool_get_settings(dev, &ecmd))) {
/* Refresh supported/current speed from the ethtool data. */
1787 lp->link_supported_speeds &=
1788 ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
1789 if (ecmd.supported & (SUPPORTED_1000baseT_Half |
1790 SUPPORTED_1000baseT_Full))
1791 lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
1792 if (ecmd.supported & SUPPORTED_10000baseT_Full)
1793 lp->link_supported_speeds |=
1794 FC_PORTSPEED_10GBIT;
1795 if (ecmd.speed == SPEED_1000)
1796 lp->link_speed = FC_PORTSPEED_1GBIT;
1797 if (ecmd.speed == SPEED_10000)
1798 lp->link_speed = FC_PORTSPEED_10GBIT;
1806 * fcoe_percpu_clean() - Clear the pending skbs for an lport
 * @lp: the fc_lport whose queued receive skbs should be discarded
 *
 * Walks every CPU's fcoe_rx_list and unlinks the skbs destined for
 * @lp, so the receive threads never process frames for a port that
 * is going away.
1809 void fcoe_percpu_clean(struct fc_lport *lp)
1811 struct fcoe_percpu_s *pp;
1812 struct fcoe_rcv_info *fr;
1813 struct sk_buff_head *list;
1814 struct sk_buff *skb, *next;
1815 struct sk_buff *head;
/* Possible (not just online) CPUs: a thread may have queued skbs
 * before its CPU went offline. */
1818 for_each_possible_cpu(cpu) {
1819 pp = &per_cpu(fcoe_percpu, cpu);
1820 spin_lock_bh(&pp->fcoe_rx_list.lock);
1821 list = &pp->fcoe_rx_list;
/* Manual walk (not skb_queue_walk_safe) ends when the cursor wraps
 * back to the list head. */
1823 for (skb = head; skb != (struct sk_buff *)list;
1826 fr = fcoe_dev_from_skb(skb);
1827 if (fr->fr_dev == lp) {
1828 __skb_unlink(skb, list);
1832 spin_unlock_bh(&pp->fcoe_rx_list.lock);
1837 * fcoe_clean_pending_queue() - Dequeue a skb and free it
1838 * @lp: the corresponding fc_lport
 *
 * Drains the port's transmit backlog (fcoe_pending_queue).
1842 void fcoe_clean_pending_queue(struct fc_lport *lp)
1844 struct fcoe_port *port = lport_priv(lp);
1845 struct sk_buff *skb;
1847 spin_lock_bh(&port->fcoe_pending_queue.lock);
1848 while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
/* Lock is dropped per iteration; the skb is presumably freed in the
 * elided body while unlocked — TODO confirm against full source. */
1849 spin_unlock_bh(&port->fcoe_pending_queue.lock);
1851 spin_lock_bh(&port->fcoe_pending_queue.lock);
1853 spin_unlock_bh(&port->fcoe_pending_queue.lock);
1857 * fcoe_reset() - Resets the fcoe
1858 * @shost: shost the reset is from
 *
 * scsi_host eh/reset hook: delegates to the libfc lport reset.
1862 int fcoe_reset(struct Scsi_Host *shost)
1864 struct fc_lport *lport = shost_priv(shost);
1865 fc_lport_reset(lport);
1870 * fcoe_hostlist_lookup_port() - find the corresponding lport by a given device
1871 * @dev: this is currently ptr to net_device
1873 * Called with fcoe_hostlist_lock held.
1875 * Returns: NULL or the located fcoe_interface
1877 static struct fcoe_interface *
1878 fcoe_hostlist_lookup_port(const struct net_device *dev)
1880 struct fcoe_interface *fcoe;
/* Linear scan of the global host list; caller holds the lock. */
1882 list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1883 if (fcoe->netdev == dev)
1890 * fcoe_hostlist_lookup() - Find the corresponding lport by netdev
1891 * @netdev: ptr to net_device
1893 * Returns: the matching fc_lport, or NULL if none is found
1895 struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
1897 struct fcoe_interface *fcoe;
/* Read-side lookup: only the reader lock is needed. */
1899 read_lock(&fcoe_hostlist_lock);
1900 fcoe = fcoe_hostlist_lookup_port(netdev);
1901 read_unlock(&fcoe_hostlist_lock);
1903 return (fcoe) ? fcoe->ctlr.lp : NULL;
1907 * fcoe_hostlist_add() - Add a lport to lports list
1908 * @lport: ptr to the fc_lport to be added
1910 * Returns: 0 for success
1912 int fcoe_hostlist_add(const struct fc_lport *lport)
1914 struct fcoe_interface *fcoe;
1915 struct fcoe_port *port;
/* _bh variant: the list is also touched from softirq context. */
1917 write_lock_bh(&fcoe_hostlist_lock);
/* Only add if the netdev is not already on the list. */
1918 fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
1920 port = lport_priv(lport);
1922 list_add_tail(&fcoe->list, &fcoe_hostlist);
1924 write_unlock_bh(&fcoe_hostlist_lock);
1929 * fcoe_init() - fcoe module loading initialization
1931 * Returns 0 on success, negative on failure
1933 static int __init fcoe_init(void)
1937 struct fcoe_percpu_s *p;
1939 mutex_lock(&fcoe_config_mutex);
/* Initialize per-CPU rx queues for every possible CPU, then start a
 * receive thread on each currently-online CPU. */
1941 for_each_possible_cpu(cpu) {
1942 p = &per_cpu(fcoe_percpu, cpu);
1943 skb_queue_head_init(&p->fcoe_rx_list);
1946 for_each_online_cpu(cpu)
1947 fcoe_percpu_thread_create(cpu);
1949 /* Initialize per CPU interrupt thread */
1950 rc = register_hotcpu_notifier(&fcoe_cpu_notifier);
1954 /* Setup link change notification */
1957 rc = fcoe_if_init();
1961 mutex_unlock(&fcoe_config_mutex);
/* Error path (elided label above): undo per-CPU thread creation. */
1965 for_each_online_cpu(cpu) {
1966 fcoe_percpu_thread_destroy(cpu);
1968 mutex_unlock(&fcoe_config_mutex);
1971 module_init(fcoe_init);
1974 * fcoe_exit() - fcoe module unloading cleanup
 *
 * Tears down all fcoe instances, per-CPU receive threads and
 * notifiers in an order safe against in-flight events.
1978 static void __exit fcoe_exit(void)
1981 struct fcoe_interface *fcoe, *tmp;
1982 LIST_HEAD(local_list);
1983 struct fcoe_port *port;
1985 mutex_lock(&fcoe_config_mutex);
1989 /* releases the associated fcoe hosts */
/* Splice the global list to a private one under the lock so teardown
 * can proceed without holding fcoe_hostlist_lock. */
1990 write_lock_bh(&fcoe_hostlist_lock);
1991 list_splice_init(&fcoe_hostlist, &local_list);
1992 write_unlock_bh(&fcoe_hostlist_lock);
1994 list_for_each_entry_safe(fcoe, tmp, &local_list, list) {
1995 list_del(&fcoe->list);
1996 port = lport_priv(fcoe->ctlr.lp);
1998 fcoe_interface_cleanup(fcoe);
/* Actual lport destruction is deferred to fcoe_destroy_work(). */
2000 schedule_work(&port->destroy_work);
2003 unregister_hotcpu_notifier(&fcoe_cpu_notifier);
2005 for_each_online_cpu(cpu)
2006 fcoe_percpu_thread_destroy(cpu);
2008 mutex_unlock(&fcoe_config_mutex);
2010 /* flush any asynchronous interface destroys,
2011 * this should happen after the netdev notifier is unregistered */
2012 flush_scheduled_work();
2014 /* detach from scsi transport
2015 * must happen after all destroys are done, therefore after the flush */
2018 module_exit(fcoe_exit);