1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright Gavin Shan, IBM Corporation 2016.
6 #include <linux/module.h>
7 #include <linux/kernel.h>
8 #include <linux/init.h>
9 #include <linux/netdevice.h>
10 #include <linux/skbuff.h>
12 #include <linux/platform_device.h>
15 #include <net/net_namespace.h>
17 #include <net/addrconf.h>
19 #include <net/genetlink.h>
23 #include "ncsi-netlink.h"
25 LIST_HEAD(ncsi_dev_list);
26 DEFINE_SPINLOCK(ncsi_dev_lock);
28 bool ncsi_channel_has_link(struct ncsi_channel *channel)
30 return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
33 bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
34 struct ncsi_channel *channel)
36 struct ncsi_package *np;
37 struct ncsi_channel *nc;
39 NCSI_FOR_EACH_PACKAGE(ndp, np)
40 NCSI_FOR_EACH_CHANNEL(np, nc) {
43 if (nc->state == NCSI_CHANNEL_ACTIVE &&
44 ncsi_channel_has_link(nc))
51 static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
53 struct ncsi_dev *nd = &ndp->ndev;
54 struct ncsi_package *np;
55 struct ncsi_channel *nc;
58 nd->state = ncsi_dev_state_functional;
65 NCSI_FOR_EACH_PACKAGE(ndp, np) {
66 NCSI_FOR_EACH_CHANNEL(np, nc) {
67 spin_lock_irqsave(&nc->lock, flags);
69 if (!list_empty(&nc->link) ||
70 nc->state != NCSI_CHANNEL_ACTIVE) {
71 spin_unlock_irqrestore(&nc->lock, flags);
75 if (ncsi_channel_has_link(nc)) {
76 spin_unlock_irqrestore(&nc->lock, flags);
81 spin_unlock_irqrestore(&nc->lock, flags);
89 static void ncsi_channel_monitor(struct timer_list *t)
91 struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
92 struct ncsi_package *np = nc->package;
93 struct ncsi_dev_priv *ndp = np->ndp;
94 struct ncsi_channel_mode *ncm;
95 struct ncsi_cmd_arg nca;
96 bool enabled, chained;
97 unsigned int monitor_state;
101 spin_lock_irqsave(&nc->lock, flags);
103 chained = !list_empty(&nc->link);
104 enabled = nc->monitor.enabled;
105 monitor_state = nc->monitor.state;
106 spin_unlock_irqrestore(&nc->lock, flags);
108 if (!enabled || chained) {
109 ncsi_stop_channel_monitor(nc);
112 if (state != NCSI_CHANNEL_INACTIVE &&
113 state != NCSI_CHANNEL_ACTIVE) {
114 ncsi_stop_channel_monitor(nc);
118 switch (monitor_state) {
119 case NCSI_CHANNEL_MONITOR_START:
120 case NCSI_CHANNEL_MONITOR_RETRY:
122 nca.package = np->id;
123 nca.channel = nc->id;
124 nca.type = NCSI_PKT_CMD_GLS;
126 ret = ncsi_xmit_cmd(&nca);
128 netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
131 case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
134 netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
136 ncsi_report_link(ndp, true);
137 ndp->flags |= NCSI_DEV_RESHUFFLE;
139 ncsi_stop_channel_monitor(nc);
141 ncm = &nc->modes[NCSI_MODE_LINK];
142 spin_lock_irqsave(&nc->lock, flags);
143 nc->state = NCSI_CHANNEL_INVISIBLE;
144 ncm->data[2] &= ~0x1;
145 spin_unlock_irqrestore(&nc->lock, flags);
147 spin_lock_irqsave(&ndp->lock, flags);
148 nc->state = NCSI_CHANNEL_ACTIVE;
149 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
150 spin_unlock_irqrestore(&ndp->lock, flags);
151 ncsi_process_next_channel(ndp);
155 spin_lock_irqsave(&nc->lock, flags);
157 spin_unlock_irqrestore(&nc->lock, flags);
158 mod_timer(&nc->monitor.timer, jiffies + HZ);
161 void ncsi_start_channel_monitor(struct ncsi_channel *nc)
165 spin_lock_irqsave(&nc->lock, flags);
166 WARN_ON_ONCE(nc->monitor.enabled);
167 nc->monitor.enabled = true;
168 nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
169 spin_unlock_irqrestore(&nc->lock, flags);
171 mod_timer(&nc->monitor.timer, jiffies + HZ);
174 void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
178 spin_lock_irqsave(&nc->lock, flags);
179 if (!nc->monitor.enabled) {
180 spin_unlock_irqrestore(&nc->lock, flags);
183 nc->monitor.enabled = false;
184 spin_unlock_irqrestore(&nc->lock, flags);
186 del_timer_sync(&nc->monitor.timer);
189 struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
192 struct ncsi_channel *nc;
194 NCSI_FOR_EACH_CHANNEL(np, nc) {
202 struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
204 struct ncsi_channel *nc, *tmp;
208 nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
214 nc->state = NCSI_CHANNEL_INACTIVE;
215 nc->monitor.enabled = false;
216 timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
217 spin_lock_init(&nc->lock);
218 INIT_LIST_HEAD(&nc->link);
219 for (index = 0; index < NCSI_CAP_MAX; index++)
220 nc->caps[index].index = index;
221 for (index = 0; index < NCSI_MODE_MAX; index++)
222 nc->modes[index].index = index;
224 spin_lock_irqsave(&np->lock, flags);
225 tmp = ncsi_find_channel(np, id);
227 spin_unlock_irqrestore(&np->lock, flags);
232 list_add_tail_rcu(&nc->node, &np->channels);
234 spin_unlock_irqrestore(&np->lock, flags);
239 static void ncsi_remove_channel(struct ncsi_channel *nc)
241 struct ncsi_package *np = nc->package;
244 spin_lock_irqsave(&nc->lock, flags);
246 /* Release filters */
247 kfree(nc->mac_filter.addrs);
248 kfree(nc->vlan_filter.vids);
250 nc->state = NCSI_CHANNEL_INACTIVE;
251 spin_unlock_irqrestore(&nc->lock, flags);
252 ncsi_stop_channel_monitor(nc);
254 /* Remove and free channel */
255 spin_lock_irqsave(&np->lock, flags);
256 list_del_rcu(&nc->node);
258 spin_unlock_irqrestore(&np->lock, flags);
263 struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
266 struct ncsi_package *np;
268 NCSI_FOR_EACH_PACKAGE(ndp, np) {
276 struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
279 struct ncsi_package *np, *tmp;
282 np = kzalloc(sizeof(*np), GFP_ATOMIC);
288 spin_lock_init(&np->lock);
289 INIT_LIST_HEAD(&np->channels);
290 np->channel_whitelist = UINT_MAX;
292 spin_lock_irqsave(&ndp->lock, flags);
293 tmp = ncsi_find_package(ndp, id);
295 spin_unlock_irqrestore(&ndp->lock, flags);
300 list_add_tail_rcu(&np->node, &ndp->packages);
302 spin_unlock_irqrestore(&ndp->lock, flags);
307 void ncsi_remove_package(struct ncsi_package *np)
309 struct ncsi_dev_priv *ndp = np->ndp;
310 struct ncsi_channel *nc, *tmp;
313 /* Release all child channels */
314 list_for_each_entry_safe(nc, tmp, &np->channels, node)
315 ncsi_remove_channel(nc);
317 /* Remove and free package */
318 spin_lock_irqsave(&ndp->lock, flags);
319 list_del_rcu(&np->node);
321 spin_unlock_irqrestore(&ndp->lock, flags);
326 void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
328 struct ncsi_package **np,
329 struct ncsi_channel **nc)
331 struct ncsi_package *p;
332 struct ncsi_channel *c;
334 p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
335 c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;
343 /* For two consecutive NCSI commands, the packet IDs shouldn't
344 * be same. Otherwise, the bogus response might be replied. So
345 * the available IDs are allocated in round-robin fashion.
347 struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
348 unsigned int req_flags)
350 struct ncsi_request *nr = NULL;
351 int i, limit = ARRAY_SIZE(ndp->requests);
354 /* Check if there is one available request until the ceiling */
355 spin_lock_irqsave(&ndp->lock, flags);
356 for (i = ndp->request_id; i < limit; i++) {
357 if (ndp->requests[i].used)
360 nr = &ndp->requests[i];
362 nr->flags = req_flags;
363 ndp->request_id = i + 1;
367 /* Fail back to check from the starting cursor */
368 for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
369 if (ndp->requests[i].used)
372 nr = &ndp->requests[i];
374 nr->flags = req_flags;
375 ndp->request_id = i + 1;
380 spin_unlock_irqrestore(&ndp->lock, flags);
384 void ncsi_free_request(struct ncsi_request *nr)
386 struct ncsi_dev_priv *ndp = nr->ndp;
387 struct sk_buff *cmd, *rsp;
393 del_timer_sync(&nr->timer);
396 spin_lock_irqsave(&ndp->lock, flags);
402 driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
403 spin_unlock_irqrestore(&ndp->lock, flags);
405 if (driven && cmd && --ndp->pending_req_num == 0)
406 schedule_work(&ndp->work);
408 /* Release command and response */
413 struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
415 struct ncsi_dev_priv *ndp;
417 NCSI_FOR_EACH_DEV(ndp) {
418 if (ndp->ndev.dev == dev)
425 static void ncsi_request_timeout(struct timer_list *t)
427 struct ncsi_request *nr = from_timer(nr, t, timer);
428 struct ncsi_dev_priv *ndp = nr->ndp;
429 struct ncsi_cmd_pkt *cmd;
430 struct ncsi_package *np;
431 struct ncsi_channel *nc;
434 /* If the request already had associated response,
435 * let the response handler to release it.
437 spin_lock_irqsave(&ndp->lock, flags);
439 if (nr->rsp || !nr->cmd) {
440 spin_unlock_irqrestore(&ndp->lock, flags);
443 spin_unlock_irqrestore(&ndp->lock, flags);
445 if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
447 /* Find the package */
448 cmd = (struct ncsi_cmd_pkt *)
449 skb_network_header(nr->cmd);
450 ncsi_find_package_and_channel(ndp,
451 cmd->cmd.common.channel,
453 ncsi_send_netlink_timeout(nr, np, nc);
457 /* Release the request */
458 ncsi_free_request(nr);
461 static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
463 struct ncsi_dev *nd = &ndp->ndev;
464 struct ncsi_package *np;
465 struct ncsi_channel *nc, *tmp;
466 struct ncsi_cmd_arg nca;
470 np = ndp->active_package;
471 nc = ndp->active_channel;
473 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
475 case ncsi_dev_state_suspend:
476 nd->state = ncsi_dev_state_suspend_select;
478 case ncsi_dev_state_suspend_select:
479 ndp->pending_req_num = 1;
481 nca.type = NCSI_PKT_CMD_SP;
482 nca.package = np->id;
483 nca.channel = NCSI_RESERVED_CHANNEL;
484 if (ndp->flags & NCSI_DEV_HWA)
489 /* To retrieve the last link states of channels in current
490 * package when current active channel needs fail over to
491 * another one. It means we will possibly select another
492 * channel as next active one. The link states of channels
493 * are most important factor of the selection. So we need
494 * accurate link states. Unfortunately, the link states on
495 * inactive channels can't be updated with LSC AEN in time.
497 if (ndp->flags & NCSI_DEV_RESHUFFLE)
498 nd->state = ncsi_dev_state_suspend_gls;
500 nd->state = ncsi_dev_state_suspend_dcnt;
501 ret = ncsi_xmit_cmd(&nca);
506 case ncsi_dev_state_suspend_gls:
507 ndp->pending_req_num = np->channel_num;
509 nca.type = NCSI_PKT_CMD_GLS;
510 nca.package = np->id;
512 nd->state = ncsi_dev_state_suspend_dcnt;
513 NCSI_FOR_EACH_CHANNEL(np, nc) {
514 nca.channel = nc->id;
515 ret = ncsi_xmit_cmd(&nca);
521 case ncsi_dev_state_suspend_dcnt:
522 ndp->pending_req_num = 1;
524 nca.type = NCSI_PKT_CMD_DCNT;
525 nca.package = np->id;
526 nca.channel = nc->id;
528 nd->state = ncsi_dev_state_suspend_dc;
529 ret = ncsi_xmit_cmd(&nca);
534 case ncsi_dev_state_suspend_dc:
535 ndp->pending_req_num = 1;
537 nca.type = NCSI_PKT_CMD_DC;
538 nca.package = np->id;
539 nca.channel = nc->id;
542 nd->state = ncsi_dev_state_suspend_deselect;
543 ret = ncsi_xmit_cmd(&nca);
547 NCSI_FOR_EACH_CHANNEL(np, tmp) {
548 /* If there is another channel active on this package
549 * do not deselect the package.
551 if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
552 nd->state = ncsi_dev_state_suspend_done;
557 case ncsi_dev_state_suspend_deselect:
558 ndp->pending_req_num = 1;
560 nca.type = NCSI_PKT_CMD_DP;
561 nca.package = np->id;
562 nca.channel = NCSI_RESERVED_CHANNEL;
564 nd->state = ncsi_dev_state_suspend_done;
565 ret = ncsi_xmit_cmd(&nca);
570 case ncsi_dev_state_suspend_done:
571 spin_lock_irqsave(&nc->lock, flags);
572 nc->state = NCSI_CHANNEL_INACTIVE;
573 spin_unlock_irqrestore(&nc->lock, flags);
574 if (ndp->flags & NCSI_DEV_RESET)
577 ncsi_process_next_channel(ndp);
580 netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
586 nd->state = ncsi_dev_state_functional;
589 /* Check the VLAN filter bitmap for a set filter, and construct a
590 * "Set VLAN Filter - Disable" packet if found.
592 static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
593 struct ncsi_cmd_arg *nca)
595 struct ncsi_channel_vlan_filter *ncf;
601 ncf = &nc->vlan_filter;
602 bitmap = &ncf->bitmap;
604 spin_lock_irqsave(&nc->lock, flags);
605 index = find_next_bit(bitmap, ncf->n_vids, 0);
606 if (index >= ncf->n_vids) {
607 spin_unlock_irqrestore(&nc->lock, flags);
610 vid = ncf->vids[index];
612 clear_bit(index, bitmap);
613 ncf->vids[index] = 0;
614 spin_unlock_irqrestore(&nc->lock, flags);
616 nca->type = NCSI_PKT_CMD_SVF;
618 /* HW filter index starts at 1 */
619 nca->bytes[6] = index + 1;
620 nca->bytes[7] = 0x00;
624 /* Find an outstanding VLAN tag and constuct a "Set VLAN Filter - Enable"
627 static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
628 struct ncsi_cmd_arg *nca)
630 struct ncsi_channel_vlan_filter *ncf;
631 struct vlan_vid *vlan = NULL;
637 if (list_empty(&ndp->vlan_vids))
640 ncf = &nc->vlan_filter;
641 bitmap = &ncf->bitmap;
643 spin_lock_irqsave(&nc->lock, flags);
646 list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
648 for (i = 0; i < ncf->n_vids; i++)
649 if (ncf->vids[i] == vid) {
659 /* No VLAN ID is not set */
660 spin_unlock_irqrestore(&nc->lock, flags);
664 index = find_next_zero_bit(bitmap, ncf->n_vids, 0);
665 if (index < 0 || index >= ncf->n_vids) {
666 netdev_err(ndp->ndev.dev,
667 "Channel %u already has all VLAN filters set\n",
669 spin_unlock_irqrestore(&nc->lock, flags);
673 ncf->vids[index] = vid;
674 set_bit(index, bitmap);
675 spin_unlock_irqrestore(&nc->lock, flags);
677 nca->type = NCSI_PKT_CMD_SVF;
679 /* HW filter index starts at 1 */
680 nca->bytes[6] = index + 1;
681 nca->bytes[7] = 0x01;
686 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
688 /* NCSI OEM Command APIs */
689 static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
691 unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
694 nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;
696 memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
697 *(unsigned int *)data = ntohl(NCSI_OEM_MFR_BCM_ID);
698 data[5] = NCSI_OEM_BCM_CMD_GMA;
702 ret = ncsi_xmit_cmd(nca);
704 netdev_err(nca->ndp->ndev.dev,
705 "NCSI: Failed to transmit cmd 0x%x during configure\n",
710 static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
713 u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
714 u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
718 nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;
720 memset(&u, 0, sizeof(u));
721 u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
722 u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
723 u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;
725 nca->data = u.data_u8;
727 ret = ncsi_xmit_cmd(nca);
729 netdev_err(nca->ndp->ndev.dev,
730 "NCSI: Failed to transmit cmd 0x%x during configure\n",
735 static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
738 u8 data_u8[NCSI_OEM_MLX_CMD_SMAF_LEN];
739 u32 data_u32[NCSI_OEM_MLX_CMD_SMAF_LEN / sizeof(u32)];
743 memset(&u, 0, sizeof(u));
744 u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
745 u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
746 u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
747 memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
748 nca->ndp->ndev.dev->dev_addr, ETH_ALEN);
749 u.data_u8[MLX_SMAF_MED_SUPPORT_OFFSET] =
750 (MLX_MC_RBT_AVL | MLX_MC_RBT_SUPPORT);
752 nca->payload = NCSI_OEM_MLX_CMD_SMAF_LEN;
753 nca->data = u.data_u8;
755 ret = ncsi_xmit_cmd(nca);
757 netdev_err(nca->ndp->ndev.dev,
758 "NCSI: Failed to transmit cmd 0x%x during probe\n",
763 /* OEM Command handlers initialization */
764 static struct ncsi_oem_gma_handler {
766 int (*handler)(struct ncsi_cmd_arg *nca);
767 } ncsi_oem_gma_handlers[] = {
768 { NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
769 { NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx }
772 static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
774 struct ncsi_oem_gma_handler *nch = NULL;
777 /* This function should only be called once, return if flag set */
778 if (nca->ndp->gma_flag == 1)
781 /* Find gma handler for given manufacturer id */
782 for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
783 if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
784 if (ncsi_oem_gma_handlers[i].handler)
785 nch = &ncsi_oem_gma_handlers[i];
791 netdev_err(nca->ndp->ndev.dev,
792 "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
797 /* Get Mac address from NCSI device */
798 return nch->handler(nca);
801 #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
803 /* Determine if a given channel from the channel_queue should be used for Tx */
804 static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
805 struct ncsi_channel *nc)
807 struct ncsi_channel_mode *ncm;
808 struct ncsi_channel *channel;
809 struct ncsi_package *np;
811 /* Check if any other channel has Tx enabled; a channel may have already
812 * been configured and removed from the channel queue.
814 NCSI_FOR_EACH_PACKAGE(ndp, np) {
815 if (!ndp->multi_package && np != nc->package)
817 NCSI_FOR_EACH_CHANNEL(np, channel) {
818 ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
824 /* This channel is the preferred channel and has link */
825 list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
826 np = channel->package;
827 if (np->preferred_channel &&
828 ncsi_channel_has_link(np->preferred_channel)) {
829 return np->preferred_channel == nc;
833 /* This channel has link */
834 if (ncsi_channel_has_link(nc))
837 list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
838 if (ncsi_channel_has_link(channel))
841 /* No other channel has link; default to this one */
845 /* Change the active Tx channel in a multi-channel setup */
846 int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
847 struct ncsi_package *package,
848 struct ncsi_channel *disable,
849 struct ncsi_channel *enable)
851 struct ncsi_cmd_arg nca;
852 struct ncsi_channel *nc;
853 struct ncsi_package *np;
856 if (!package->multi_channel && !ndp->multi_package)
857 netdev_warn(ndp->ndev.dev,
858 "NCSI: Trying to update Tx channel in single-channel mode\n");
862 /* Find current channel with Tx enabled */
863 NCSI_FOR_EACH_PACKAGE(ndp, np) {
866 if (!ndp->multi_package && np != package)
869 NCSI_FOR_EACH_CHANNEL(np, nc)
870 if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
876 /* Find a suitable channel for Tx */
877 NCSI_FOR_EACH_PACKAGE(ndp, np) {
880 if (!ndp->multi_package && np != package)
882 if (!(ndp->package_whitelist & (0x1 << np->id)))
885 if (np->preferred_channel &&
886 ncsi_channel_has_link(np->preferred_channel)) {
887 enable = np->preferred_channel;
891 NCSI_FOR_EACH_CHANNEL(np, nc) {
892 if (!(np->channel_whitelist & 0x1 << nc->id))
894 if (nc->state != NCSI_CHANNEL_ACTIVE)
896 if (ncsi_channel_has_link(nc)) {
903 if (disable == enable)
910 nca.channel = disable->id;
911 nca.package = disable->package->id;
912 nca.type = NCSI_PKT_CMD_DCNT;
913 ret = ncsi_xmit_cmd(&nca);
915 netdev_err(ndp->ndev.dev,
916 "Error %d sending DCNT\n",
920 netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);
922 nca.channel = enable->id;
923 nca.package = enable->package->id;
924 nca.type = NCSI_PKT_CMD_ECNT;
925 ret = ncsi_xmit_cmd(&nca);
927 netdev_err(ndp->ndev.dev,
928 "Error %d sending ECNT\n",
934 static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
936 struct ncsi_package *np = ndp->active_package;
937 struct ncsi_channel *nc = ndp->active_channel;
938 struct ncsi_channel *hot_nc = NULL;
939 struct ncsi_dev *nd = &ndp->ndev;
940 struct net_device *dev = nd->dev;
941 struct ncsi_cmd_arg nca;
947 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
949 case ncsi_dev_state_config:
950 case ncsi_dev_state_config_sp:
951 ndp->pending_req_num = 1;
953 /* Select the specific package */
954 nca.type = NCSI_PKT_CMD_SP;
955 if (ndp->flags & NCSI_DEV_HWA)
959 nca.package = np->id;
960 nca.channel = NCSI_RESERVED_CHANNEL;
961 ret = ncsi_xmit_cmd(&nca);
963 netdev_err(ndp->ndev.dev,
964 "NCSI: Failed to transmit CMD_SP\n");
968 nd->state = ncsi_dev_state_config_cis;
970 case ncsi_dev_state_config_cis:
971 ndp->pending_req_num = 1;
973 /* Clear initial state */
974 nca.type = NCSI_PKT_CMD_CIS;
975 nca.package = np->id;
976 nca.channel = nc->id;
977 ret = ncsi_xmit_cmd(&nca);
979 netdev_err(ndp->ndev.dev,
980 "NCSI: Failed to transmit CMD_CIS\n");
984 nd->state = ncsi_dev_state_config_oem_gma;
986 case ncsi_dev_state_config_oem_gma:
987 nd->state = ncsi_dev_state_config_clear_vids;
990 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
991 nca.type = NCSI_PKT_CMD_OEM;
992 nca.package = np->id;
993 nca.channel = nc->id;
994 ndp->pending_req_num = 1;
995 ret = ncsi_gma_handler(&nca, nc->version.mf_id);
996 #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
999 schedule_work(&ndp->work);
1002 case ncsi_dev_state_config_clear_vids:
1003 case ncsi_dev_state_config_svf:
1004 case ncsi_dev_state_config_ev:
1005 case ncsi_dev_state_config_sma:
1006 case ncsi_dev_state_config_ebf:
1007 case ncsi_dev_state_config_dgmf:
1008 case ncsi_dev_state_config_ecnt:
1009 case ncsi_dev_state_config_ec:
1010 case ncsi_dev_state_config_ae:
1011 case ncsi_dev_state_config_gls:
1012 ndp->pending_req_num = 1;
1014 nca.package = np->id;
1015 nca.channel = nc->id;
1017 /* Clear any active filters on the channel before setting */
1018 if (nd->state == ncsi_dev_state_config_clear_vids) {
1019 ret = clear_one_vid(ndp, nc, &nca);
1021 nd->state = ncsi_dev_state_config_svf;
1022 schedule_work(&ndp->work);
1026 nd->state = ncsi_dev_state_config_clear_vids;
1027 /* Add known VLAN tags to the filter */
1028 } else if (nd->state == ncsi_dev_state_config_svf) {
1029 ret = set_one_vid(ndp, nc, &nca);
1031 nd->state = ncsi_dev_state_config_ev;
1032 schedule_work(&ndp->work);
1036 nd->state = ncsi_dev_state_config_svf;
1037 /* Enable/Disable the VLAN filter */
1038 } else if (nd->state == ncsi_dev_state_config_ev) {
1039 if (list_empty(&ndp->vlan_vids)) {
1040 nca.type = NCSI_PKT_CMD_DV;
1042 nca.type = NCSI_PKT_CMD_EV;
1043 nca.bytes[3] = NCSI_CAP_VLAN_NO;
1045 nd->state = ncsi_dev_state_config_sma;
1046 } else if (nd->state == ncsi_dev_state_config_sma) {
1047 /* Use first entry in unicast filter table. Note that
1048 * the MAC filter table starts from entry 1 instead of
1051 nca.type = NCSI_PKT_CMD_SMA;
1052 for (index = 0; index < 6; index++)
1053 nca.bytes[index] = dev->dev_addr[index];
1056 nd->state = ncsi_dev_state_config_ebf;
1057 } else if (nd->state == ncsi_dev_state_config_ebf) {
1058 nca.type = NCSI_PKT_CMD_EBF;
1059 nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
1060 /* if multicast global filtering is supported then
1061 * disable it so that all multicast packet will be
1062 * forwarded to management controller
1064 if (nc->caps[NCSI_CAP_GENERIC].cap &
1065 NCSI_CAP_GENERIC_MC)
1066 nd->state = ncsi_dev_state_config_dgmf;
1067 else if (ncsi_channel_is_tx(ndp, nc))
1068 nd->state = ncsi_dev_state_config_ecnt;
1070 nd->state = ncsi_dev_state_config_ec;
1071 } else if (nd->state == ncsi_dev_state_config_dgmf) {
1072 nca.type = NCSI_PKT_CMD_DGMF;
1073 if (ncsi_channel_is_tx(ndp, nc))
1074 nd->state = ncsi_dev_state_config_ecnt;
1076 nd->state = ncsi_dev_state_config_ec;
1077 } else if (nd->state == ncsi_dev_state_config_ecnt) {
1078 if (np->preferred_channel &&
1079 nc != np->preferred_channel)
1080 netdev_info(ndp->ndev.dev,
1081 "NCSI: Tx failed over to channel %u\n",
1083 nca.type = NCSI_PKT_CMD_ECNT;
1084 nd->state = ncsi_dev_state_config_ec;
1085 } else if (nd->state == ncsi_dev_state_config_ec) {
1086 /* Enable AEN if it's supported */
1087 nca.type = NCSI_PKT_CMD_EC;
1088 nd->state = ncsi_dev_state_config_ae;
1089 if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
1090 nd->state = ncsi_dev_state_config_gls;
1091 } else if (nd->state == ncsi_dev_state_config_ae) {
1092 nca.type = NCSI_PKT_CMD_AE;
1094 nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
1095 nd->state = ncsi_dev_state_config_gls;
1096 } else if (nd->state == ncsi_dev_state_config_gls) {
1097 nca.type = NCSI_PKT_CMD_GLS;
1098 nd->state = ncsi_dev_state_config_done;
1101 ret = ncsi_xmit_cmd(&nca);
1103 netdev_err(ndp->ndev.dev,
1104 "NCSI: Failed to transmit CMD %x\n",
1109 case ncsi_dev_state_config_done:
1110 netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
1112 spin_lock_irqsave(&nc->lock, flags);
1113 nc->state = NCSI_CHANNEL_ACTIVE;
1115 if (ndp->flags & NCSI_DEV_RESET) {
1116 /* A reset event happened during config, start it now */
1117 nc->reconfigure_needed = false;
1118 spin_unlock_irqrestore(&nc->lock, flags);
1123 if (nc->reconfigure_needed) {
1124 /* This channel's configuration has been updated
1125 * part-way during the config state - start the
1126 * channel configuration over
1128 nc->reconfigure_needed = false;
1129 nc->state = NCSI_CHANNEL_INACTIVE;
1130 spin_unlock_irqrestore(&nc->lock, flags);
1132 spin_lock_irqsave(&ndp->lock, flags);
1133 list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1134 spin_unlock_irqrestore(&ndp->lock, flags);
1136 netdev_dbg(dev, "Dirty NCSI channel state reset\n");
1137 ncsi_process_next_channel(ndp);
1141 if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
1145 netdev_dbg(ndp->ndev.dev,
1146 "NCSI: channel %u link down after config\n",
1149 spin_unlock_irqrestore(&nc->lock, flags);
1151 /* Update the hot channel */
1152 spin_lock_irqsave(&ndp->lock, flags);
1153 ndp->hot_channel = hot_nc;
1154 spin_unlock_irqrestore(&ndp->lock, flags);
1156 ncsi_start_channel_monitor(nc);
1157 ncsi_process_next_channel(ndp);
1160 netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
1167 ncsi_report_link(ndp, true);
1170 static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
1172 struct ncsi_channel *nc, *found, *hot_nc;
1173 struct ncsi_channel_mode *ncm;
1174 unsigned long flags, cflags;
1175 struct ncsi_package *np;
1178 spin_lock_irqsave(&ndp->lock, flags);
1179 hot_nc = ndp->hot_channel;
1180 spin_unlock_irqrestore(&ndp->lock, flags);
1182 /* By default the search is done once an inactive channel with up
1183 * link is found, unless a preferred channel is set.
1184 * If multi_package or multi_channel are configured all channels in the
1185 * whitelist are added to the channel queue.
1189 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1190 if (!(ndp->package_whitelist & (0x1 << np->id)))
1192 NCSI_FOR_EACH_CHANNEL(np, nc) {
1193 if (!(np->channel_whitelist & (0x1 << nc->id)))
1196 spin_lock_irqsave(&nc->lock, cflags);
1198 if (!list_empty(&nc->link) ||
1199 nc->state != NCSI_CHANNEL_INACTIVE) {
1200 spin_unlock_irqrestore(&nc->lock, cflags);
1210 ncm = &nc->modes[NCSI_MODE_LINK];
1211 if (ncm->data[2] & 0x1) {
1216 /* If multi_channel is enabled configure all valid
1217 * channels whether or not they currently have link
1218 * so they will have AENs enabled.
1220 if (with_link || np->multi_channel) {
1221 spin_lock_irqsave(&ndp->lock, flags);
1222 list_add_tail_rcu(&nc->link,
1223 &ndp->channel_queue);
1224 spin_unlock_irqrestore(&ndp->lock, flags);
1226 netdev_dbg(ndp->ndev.dev,
1227 "NCSI: Channel %u added to queue (link %s)\n",
1229 ncm->data[2] & 0x1 ? "up" : "down");
1232 spin_unlock_irqrestore(&nc->lock, cflags);
1234 if (with_link && !np->multi_channel)
1237 if (with_link && !ndp->multi_package)
1241 if (list_empty(&ndp->channel_queue) && found) {
1242 netdev_info(ndp->ndev.dev,
1243 "NCSI: No channel with link found, configuring channel %u\n",
1245 spin_lock_irqsave(&ndp->lock, flags);
1246 list_add_tail_rcu(&found->link, &ndp->channel_queue);
1247 spin_unlock_irqrestore(&ndp->lock, flags);
1248 } else if (!found) {
1249 netdev_warn(ndp->ndev.dev,
1250 "NCSI: No channel found to configure!\n");
1251 ncsi_report_link(ndp, true);
1255 return ncsi_process_next_channel(ndp);
1258 static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
1260 struct ncsi_package *np;
1261 struct ncsi_channel *nc;
1263 bool has_channel = false;
1265 /* The hardware arbitration is disabled if any one channel
1266 * doesn't support explicitly.
1268 NCSI_FOR_EACH_PACKAGE(ndp, np) {
1269 NCSI_FOR_EACH_CHANNEL(np, nc) {
1272 cap = nc->caps[NCSI_CAP_GENERIC].cap;
1273 if (!(cap & NCSI_CAP_GENERIC_HWA) ||
1274 (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
1275 NCSI_CAP_GENERIC_HWA_SUPPORT) {
1276 ndp->flags &= ~NCSI_DEV_HWA;
1283 ndp->flags |= NCSI_DEV_HWA;
1287 ndp->flags &= ~NCSI_DEV_HWA;
1291 static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
1293 struct ncsi_dev *nd = &ndp->ndev;
1294 struct ncsi_package *np;
1295 struct ncsi_channel *nc;
1296 struct ncsi_cmd_arg nca;
1297 unsigned char index;
1301 nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
1302 switch (nd->state) {
1303 case ncsi_dev_state_probe:
1304 nd->state = ncsi_dev_state_probe_deselect;
1306 case ncsi_dev_state_probe_deselect:
1307 ndp->pending_req_num = 8;
1309 /* Deselect all possible packages */
1310 nca.type = NCSI_PKT_CMD_DP;
1311 nca.channel = NCSI_RESERVED_CHANNEL;
1312 for (index = 0; index < 8; index++) {
1313 nca.package = index;
1314 ret = ncsi_xmit_cmd(&nca);
1319 nd->state = ncsi_dev_state_probe_package;
1321 case ncsi_dev_state_probe_package:
1322 ndp->pending_req_num = 1;
1324 nca.type = NCSI_PKT_CMD_SP;
1326 nca.package = ndp->package_probe_id;
1327 nca.channel = NCSI_RESERVED_CHANNEL;
1328 ret = ncsi_xmit_cmd(&nca);
1331 nd->state = ncsi_dev_state_probe_channel;
1333 case ncsi_dev_state_probe_channel:
1334 ndp->active_package = ncsi_find_package(ndp,
1335 ndp->package_probe_id);
1336 if (!ndp->active_package) {
1338 nd->state = ncsi_dev_state_probe_dp;
1339 schedule_work(&ndp->work);
1342 nd->state = ncsi_dev_state_probe_cis;
1343 if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) &&
1344 ndp->mlx_multi_host)
1345 nd->state = ncsi_dev_state_probe_mlx_gma;
1347 schedule_work(&ndp->work);
1349 #if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
1350 case ncsi_dev_state_probe_mlx_gma:
1351 ndp->pending_req_num = 1;
1353 nca.type = NCSI_PKT_CMD_OEM;
1354 nca.package = ndp->active_package->id;
1356 ret = ncsi_oem_gma_handler_mlx(&nca);
1360 nd->state = ncsi_dev_state_probe_mlx_smaf;
1362 case ncsi_dev_state_probe_mlx_smaf:
1363 ndp->pending_req_num = 1;
1365 nca.type = NCSI_PKT_CMD_OEM;
1366 nca.package = ndp->active_package->id;
1368 ret = ncsi_oem_smaf_mlx(&nca);
1372 nd->state = ncsi_dev_state_probe_cis;
1374 #endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */
1375 case ncsi_dev_state_probe_cis:
1376 ndp->pending_req_num = NCSI_RESERVED_CHANNEL;
1378 /* Clear initial state */
1379 nca.type = NCSI_PKT_CMD_CIS;
1380 nca.package = ndp->active_package->id;
1381 for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
1382 nca.channel = index;
1383 ret = ncsi_xmit_cmd(&nca);
1388 nd->state = ncsi_dev_state_probe_gvi;
1390 case ncsi_dev_state_probe_gvi:
1391 case ncsi_dev_state_probe_gc:
1392 case ncsi_dev_state_probe_gls:
1393 np = ndp->active_package;
1394 ndp->pending_req_num = np->channel_num;
1396 /* Retrieve version, capability or link status */
1397 if (nd->state == ncsi_dev_state_probe_gvi)
1398 nca.type = NCSI_PKT_CMD_GVI;
1399 else if (nd->state == ncsi_dev_state_probe_gc)
1400 nca.type = NCSI_PKT_CMD_GC;
1402 nca.type = NCSI_PKT_CMD_GLS;
1404 nca.package = np->id;
1405 NCSI_FOR_EACH_CHANNEL(np, nc) {
1406 nca.channel = nc->id;
1407 ret = ncsi_xmit_cmd(&nca);
1412 if (nd->state == ncsi_dev_state_probe_gvi)
1413 nd->state = ncsi_dev_state_probe_gc;
1414 else if (nd->state == ncsi_dev_state_probe_gc)
1415 nd->state = ncsi_dev_state_probe_gls;
1417 nd->state = ncsi_dev_state_probe_dp;
1419 case ncsi_dev_state_probe_dp:
1420 ndp->pending_req_num = 1;
1422 /* Deselect the current package */
1423 nca.type = NCSI_PKT_CMD_DP;
1424 nca.package = ndp->package_probe_id;
1425 nca.channel = NCSI_RESERVED_CHANNEL;
1426 ret = ncsi_xmit_cmd(&nca);
1430 /* Probe next package */
1431 ndp->package_probe_id++;
1432 if (ndp->package_probe_id >= 8) {
1433 /* Probe finished */
1434 ndp->flags |= NCSI_DEV_PROBED;
1437 nd->state = ncsi_dev_state_probe_package;
1438 ndp->active_package = NULL;
1441 netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
1445 if (ndp->flags & NCSI_DEV_PROBED) {
1446 /* Check if all packages have HWA support */
1447 ncsi_check_hwa(ndp);
1448 ncsi_choose_active_channel(ndp);
1453 netdev_err(ndp->ndev.dev,
1454 "NCSI: Failed to transmit cmd 0x%x during probe\n",
1456 ncsi_report_link(ndp, true);
/* Deferred-work handler for an NCSI device.
 *
 * Dispatches on the MAJOR portion of the device state word only; the
 * minor bits encode the step inside each per-state machine and are
 * consumed by the handler functions themselves.
 */
1459 static void ncsi_dev_work(struct work_struct *work)
1461 	struct ncsi_dev_priv *ndp = container_of(work,
1462 					struct ncsi_dev_priv, work);
1463 	struct ncsi_dev *nd = &ndp->ndev;
/* Mask off the minor state bits so only the major state selects the
 * state machine to run (probe, suspend or configure).
 */
1465 	switch (nd->state & ncsi_dev_state_major) {
1466 	case ncsi_dev_state_probe:
1467 		ncsi_probe_channel(ndp);
1469 	case ncsi_dev_state_suspend:
1470 		ncsi_suspend_channel(ndp);
1472 	case ncsi_dev_state_config:
1473 		ncsi_configure_channel(ndp);
/* Unexpected major state: warn and do nothing. */
1476 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
/* Take the next channel off the device's channel queue and start the
 * appropriate state machine on it: an INACTIVE channel is configured,
 * an ACTIVE channel is suspended.  When the queue is empty, either
 * restart channel selection (if a reshuffle was requested) or report
 * the current link state.
 */
1481 int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
1483 	struct ncsi_channel *nc;
1485 	unsigned long flags;
1487 	spin_lock_irqsave(&ndp->lock, flags);
1488 	nc = list_first_or_null_rcu(&ndp->channel_queue,
1489 				    struct ncsi_channel, link);
1491 		spin_unlock_irqrestore(&ndp->lock, flags);
/* Detach the channel from the queue while still holding ndp->lock,
 * then drop it before taking the per-channel lock (lock order:
 * ndp->lock and nc->lock are never held together here).
 */
1495 	list_del_init(&nc->link);
1496 	spin_unlock_irqrestore(&ndp->lock, flags);
/* Record the channel's prior state and hide it while the configure or
 * suspend sequence is in flight.
 */
1498 	spin_lock_irqsave(&nc->lock, flags);
1499 	old_state = nc->state;
1500 	nc->state = NCSI_CHANNEL_INVISIBLE;
1501 	spin_unlock_irqrestore(&nc->lock, flags);
1503 	ndp->active_channel = nc;
1504 	ndp->active_package = nc->package;
1506 	switch (old_state) {
1507 	case NCSI_CHANNEL_INACTIVE:
1508 		ndp->ndev.state = ncsi_dev_state_config;
1509 		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
1511 		ncsi_configure_channel(ndp);
1513 	case NCSI_CHANNEL_ACTIVE:
1514 		ndp->ndev.state = ncsi_dev_state_suspend;
1515 		netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
1517 		ncsi_suspend_channel(ndp);
/* Any other prior state is a programming error for a queued channel. */
1520 		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
1521 			   old_state, nc->package->id, nc->id);
1522 		ncsi_report_link(ndp, false);
/* Queue empty: no channel in flight any more.  A pending RESHUFFLE
 * request restarts channel selection; otherwise just report link.
 */
1529 	ndp->active_channel = NULL;
1530 	ndp->active_package = NULL;
1531 	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
1532 		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
1533 		return ncsi_choose_active_channel(ndp);
1536 	ncsi_report_link(ndp, false);
/* Force every idle channel back through configuration (used when the
 * set of VLAN filters changes).  Channels that are currently busy are
 * only marked reconfigure_needed so they pick up the change once their
 * in-flight operation completes.  Returns the number of channels that
 * were actually queued for reconfiguration (count maintained on lines
 * elided from this view -- TODO confirm against full source).
 */
1540 static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
1542 	struct ncsi_dev *nd = &ndp->ndev;
1543 	struct ncsi_channel *nc;
1544 	struct ncsi_package *np;
1545 	unsigned long flags;
1548 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1549 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1550 			spin_lock_irqsave(&nc->lock, flags);
1552 			/* Channels may be busy, mark dirty instead of
1554 			 * a) not ACTIVE (configured)
1555 			 * b) in the channel_queue (to be configured)
1556 			 * c) it's ndev is in the config state
1558 			if (nc->state != NCSI_CHANNEL_ACTIVE) {
1559 				if ((ndp->ndev.state & 0xff00) ==
1560 						ncsi_dev_state_config ||
1561 						!list_empty(&nc->link)) {
1563 						   "NCSI: channel %p marked dirty\n",
1565 					nc->reconfigure_needed = true;
1567 				spin_unlock_irqrestore(&nc->lock, flags);
1571 			spin_unlock_irqrestore(&nc->lock, flags);
/* Active and idle: stop its monitor, knock it back to INACTIVE and
 * queue it so ncsi_process_next_channel() reconfigures it.  The
 * monitor must be stopped without holding nc->lock.
 */
1573 			ncsi_stop_channel_monitor(nc);
1574 			spin_lock_irqsave(&nc->lock, flags);
1575 			nc->state = NCSI_CHANNEL_INACTIVE;
1576 			spin_unlock_irqrestore(&nc->lock, flags);
1578 			spin_lock_irqsave(&ndp->lock, flags);
1579 			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
1580 			spin_unlock_irqrestore(&ndp->lock, flags);
1582 			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
/* net_device VLAN-filter add hook for NCSI-managed interfaces.
 *
 * Records @vid on the device's internal VLAN list (bounded by
 * NCSI_MAX_VLAN_VIDS, duplicates ignored) and kicks the channels so
 * the new filter is programmed into the NC-SI controller.
 */
1590 int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1592 	struct ncsi_dev_priv *ndp;
1593 	unsigned int n_vids = 0;
1594 	struct vlan_vid *vlan;
1595 	struct ncsi_dev *nd;
1601 	nd = ncsi_find_dev(dev);
1603 		netdev_warn(dev, "NCSI: No net_device?\n");
1607 	ndp = TO_NCSI_DEV_PRIV(nd);
1609 	/* Add the VLAN id to our internal list */
/* Scan for a duplicate while counting entries so the cap below can be
 * enforced in the same pass.
 */
1610 	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
1612 		if (vlan->vid == vid) {
1613 			netdev_dbg(dev, "NCSI: vid %u already registered\n",
1618 	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
1620 			"tried to add vlan id %u but NCSI max already registered (%u)\n",
1621 			vid, NCSI_MAX_VLAN_VIDS);
1625 	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1629 	vlan->proto = proto;
1631 	list_add_rcu(&vlan->list, &ndp->vlan_vids);
1633 	netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
/* Only run the channel state machine if any channel was actually
 * queued for reconfiguration.
 */
1635 	found = ncsi_kick_channels(ndp) != 0;
1637 	return found ? ncsi_process_next_channel(ndp) : 0;
1639 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);
/* net_device VLAN-filter remove hook for NCSI-managed interfaces.
 *
 * Drops @vid from the device's internal VLAN list and, if it was
 * present, kicks the channels so the controller's filters are
 * reprogrammed without it.
 */
1641 int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1643 	struct vlan_vid *vlan, *tmp;
1644 	struct ncsi_dev_priv *ndp;
1645 	struct ncsi_dev *nd;
1651 	nd = ncsi_find_dev(dev);
1653 		netdev_warn(dev, "NCSI: no net_device?\n");
1657 	ndp = TO_NCSI_DEV_PRIV(nd);
1659 	/* Remove the VLAN id from our internal list */
/* _safe iteration: the matching entry is unlinked (and presumably
 * freed on a line elided from this view) while walking the list.
 */
1660 	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
1661 		if (vlan->vid == vid) {
1662 			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
1663 			list_del_rcu(&vlan->list);
1669 		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
/* Only reconfigure if the vid was actually registered. */
1673 	found = ncsi_kick_channels(ndp) != 0;
1675 	return found ? ncsi_process_next_channel(ndp) : 0;
1677 EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
/* Register @dev as an NCSI-managed interface.
 *
 * Allocates and initializes the private NCSI state, links it onto the
 * global ncsi_dev_list, installs the ETH_P_NCSI packet-type handler
 * for response packets, and sets up the generic-netlink interface.
 * @handler is the driver callback invoked on link/state updates.
 * Returns the embedded ncsi_dev (the existing one if @dev was already
 * registered; error paths are on lines elided from this view).
 */
1679 struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
1680 				   void (*handler)(struct ncsi_dev *ndev))
1682 	struct ncsi_dev_priv *ndp;
1683 	struct ncsi_dev *nd;
1684 	struct platform_device *pdev;
1685 	struct device_node *np;
1686 	unsigned long flags;
1689 	/* Check if the device has been registered or not */
1690 	nd = ncsi_find_dev(dev);
1694 	/* Create NCSI device */
/* GFP_ATOMIC: presumably because a caller may hold a lock here --
 * TODO confirm against callers.
 */
1695 	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
1700 	nd->state = ncsi_dev_state_registered;
1702 	nd->handler = handler;
1703 	ndp->pending_req_num = 0;
1704 	INIT_LIST_HEAD(&ndp->channel_queue);
1705 	INIT_LIST_HEAD(&ndp->vlan_vids);
1706 	INIT_WORK(&ndp->work, ncsi_dev_work);
/* All packages allowed until a whitelist is configured via netlink. */
1707 	ndp->package_whitelist = UINT_MAX;
1709 	/* Initialize private NCSI device */
1710 	spin_lock_init(&ndp->lock);
1711 	INIT_LIST_HEAD(&ndp->packages);
1712 	ndp->request_id = NCSI_REQ_START_IDX;
/* Pre-populate the fixed request table; each slot carries its own
 * timeout timer.
 */
1713 	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
1714 		ndp->requests[i].id = i;
1715 		ndp->requests[i].ndp = ndp;
1716 		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
1719 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1720 	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
1721 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1723 	/* Register NCSI packet Rx handler */
1724 	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
1725 	ndp->ptype.func = ncsi_rcv_rsp;
1726 	ndp->ptype.dev = dev;
1727 	dev_add_pack(&ndp->ptype);
1729 	/* Set up generic netlink interface */
1730 	ncsi_init_netlink(dev);
/* Honour the DT "mlx,multi-host" property on the parent platform
 * device.  NOTE(review): a NULL check on pdev appears to be on a line
 * elided from this view -- confirm against full source.
 */
1732 	pdev = to_platform_device(dev->dev.parent);
1734 	np = pdev->dev.of_node;
1735 	if (np && of_get_property(np, "mlx,multi-host", NULL))
1736 		ndp->mlx_multi_host = true;
1741 EXPORT_SYMBOL_GPL(ncsi_register_dev);
/* Bring an NCSI device up.
 *
 * Only legal from the registered or functional states.  If the
 * topology has never been probed, kick off the probe state machine
 * asynchronously; otherwise re-run channel selection via
 * ncsi_reset_dev().
 */
1743 int ncsi_start_dev(struct ncsi_dev *nd)
1745 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1747 	if (nd->state != ncsi_dev_state_registered &&
1748 	    nd->state != ncsi_dev_state_functional)
1751 	if (!(ndp->flags & NCSI_DEV_PROBED)) {
/* First start: probe packages from id 0 upwards on the workqueue. */
1752 		ndp->package_probe_id = 0;
1753 		nd->state = ncsi_dev_state_probe;
1754 		schedule_work(&ndp->work);
1758 	return ncsi_reset_dev(nd);
1760 EXPORT_SYMBOL_GPL(ncsi_start_dev);
/* Quiesce an NCSI device without tearing down channel state.
 *
 * Channel states are deliberately preserved so that a subsequent
 * ncsi_start_dev() knows which channels were active; only the
 * monitors are stopped.  Finishes by reporting link down.
 */
1762 void ncsi_stop_dev(struct ncsi_dev *nd)
1764 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1765 	struct ncsi_package *np;
1766 	struct ncsi_channel *nc;
1769 	unsigned long flags;
1771 	/* Stop the channel monitor on any active channels. Don't reset the
1772 	 * channel state so we know which were active when ncsi_start_dev()
1775 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1776 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1777 			ncsi_stop_channel_monitor(nc);
/* Snapshot queue membership and state under the channel lock purely
 * for the sanity check below.
 */
1779 			spin_lock_irqsave(&nc->lock, flags);
1780 			chained = !list_empty(&nc->link);
1781 			old_state = nc->state;
1782 			spin_unlock_irqrestore(&nc->lock, flags);
/* A channel still queued or mid-transition at stop time indicates a
 * state-machine bug; warn once rather than crash.
 */
1784 			WARN_ON_ONCE(chained ||
1785 				     old_state == NCSI_CHANNEL_INVISIBLE);
1789 	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
/* force_down=true: report link down regardless of channel state. */
1790 	ncsi_report_link(ndp, true);
1792 EXPORT_SYMBOL_GPL(ncsi_stop_dev);
/* Reset the NCSI device: suspend whatever channel is currently active
 * and then re-run channel selection.
 *
 * If a suspend/config operation is in flight, only set NCSI_DEV_RESET
 * and let that operation's completion path perform the reset.  If the
 * reset has to be deferred mid-operation the intermediate states
 * (suspend_done/config_done/functional) are allowed through; anything
 * else means a reset is already in progress.  (Several control-flow
 * lines -- returns, gotos, labels -- are elided from this view.)
 */
1794 int ncsi_reset_dev(struct ncsi_dev *nd)
1796 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1797 	struct ncsi_channel *nc, *active, *tmp;
1798 	struct ncsi_package *np;
1799 	unsigned long flags;
1801 	spin_lock_irqsave(&ndp->lock, flags);
1803 	if (!(ndp->flags & NCSI_DEV_RESET)) {
1804 		/* Haven't been called yet, check states */
1805 		switch (nd->state & ncsi_dev_state_major) {
1806 		case ncsi_dev_state_registered:
1807 		case ncsi_dev_state_probe:
1808 			/* Not even probed yet - do nothing */
1809 			spin_unlock_irqrestore(&ndp->lock, flags);
1811 		case ncsi_dev_state_suspend:
1812 		case ncsi_dev_state_config:
1813 			/* Wait for the channel to finish its suspend/config
1814 			 * operation; once it finishes it will check for
1815 			 * NCSI_DEV_RESET and reset the state.
1817 			ndp->flags |= NCSI_DEV_RESET;
1818 			spin_unlock_irqrestore(&ndp->lock, flags);
/* Reset already flagged: only these terminal/steady states may
 * proceed to actually perform it.
 */
1822 		switch (nd->state) {
1823 		case ncsi_dev_state_suspend_done:
1824 		case ncsi_dev_state_config_done:
1825 		case ncsi_dev_state_functional:
1829 			/* Current reset operation happening */
1830 			spin_unlock_irqrestore(&ndp->lock, flags);
/* Discard any queued channels left over from the interrupted
 * operation; they will be re-selected after the reset.
 */
1835 	if (!list_empty(&ndp->channel_queue)) {
1836 		/* Clear any channel queue we may have interrupted */
1837 		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
1838 			list_del_init(&nc->link);
1840 	spin_unlock_irqrestore(&ndp->lock, flags);
/* Find the currently ACTIVE channel (stored in 'active' on an elided
 * line -- TODO confirm), hide it and stop its monitor so it can be
 * suspended below.
 */
1843 	NCSI_FOR_EACH_PACKAGE(ndp, np) {
1844 		NCSI_FOR_EACH_CHANNEL(np, nc) {
1845 			spin_lock_irqsave(&nc->lock, flags);
1847 			if (nc->state == NCSI_CHANNEL_ACTIVE) {
1849 				nc->state = NCSI_CHANNEL_INVISIBLE;
1850 				spin_unlock_irqrestore(&nc->lock, flags);
1851 				ncsi_stop_channel_monitor(nc);
1855 			spin_unlock_irqrestore(&nc->lock, flags);
/* No active channel: nothing to suspend, go straight to selection. */
1863 		spin_lock_irqsave(&ndp->lock, flags);
1864 		ndp->flags &= ~NCSI_DEV_RESET;
1865 		spin_unlock_irqrestore(&ndp->lock, flags);
1866 		return ncsi_choose_active_channel(ndp);
/* Active channel found: mark the reset in progress and suspend it on
 * the workqueue; its completion will finish the reset.
 */
1869 	spin_lock_irqsave(&ndp->lock, flags);
1870 	ndp->flags |= NCSI_DEV_RESET;
1871 	ndp->active_channel = active;
1872 	ndp->active_package = active->package;
1873 	spin_unlock_irqrestore(&ndp->lock, flags);
1875 	nd->state = ncsi_dev_state_suspend;
1876 	schedule_work(&ndp->work);
/* Tear down a previously registered NCSI device.
 *
 * Reverses ncsi_register_dev(): removes the ETH_P_NCSI packet
 * handler, frees all discovered packages (and their channels),
 * unlinks the device from the global list and unregisters netlink.
 * (The final kfree of ndp is presumably on a line elided from this
 * view -- confirm against full source.)
 */
1880 void ncsi_unregister_dev(struct ncsi_dev *nd)
1882 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
1883 	struct ncsi_package *np, *tmp;
1884 	unsigned long flags;
/* Stop Rx first so no response handler races with package removal. */
1886 	dev_remove_pack(&ndp->ptype);
1888 	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
1889 		ncsi_remove_package(np);
1891 	spin_lock_irqsave(&ncsi_dev_lock, flags);
1892 	list_del_rcu(&ndp->node);
1893 	spin_unlock_irqrestore(&ncsi_dev_lock, flags);
1895 	ncsi_unregister_netlink(nd->dev);
1899 EXPORT_SYMBOL_GPL(ncsi_unregister_dev);