// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>

#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "../nfp_port.h"
#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL
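
/* Range of IDs handed out for internal (non-repr) ports; the IDR lookup
 * below returns 0 when a netdev has no internal port ID assigned.
 */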
#define NFP_MIN_INT_PORT_ID	1
#define NFP_MAX_INT_PORT_ID	256

static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return "FLOWER";
}

static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
{
	return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
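
/* Non-repr netdevs that can be offloaded (see
 * nfp_flower_internal_port_can_offload()) are tracked in an IDR so that
 * each one can be assigned a firmware internal port ID.
 */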
static int
nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv,
				   struct net_device *netdev)
{
	struct net_device *entry;
	int i, id = 0;

	rcu_read_lock();
	idr_for_each_entry(&priv->internal_ports.port_ids, entry, i)
		if (entry == netdev) {
			id = i;
			break;
		}
	rcu_read_unlock();

	return id;
}

static int
nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int id;

	id = nfp_flower_lookup_internal_port_id(priv, netdev);
	if (id > 0)
		return id;

	idr_preload(GFP_ATOMIC);
	spin_lock_bh(&priv->internal_ports.lock);
	id = idr_alloc(&priv->internal_ports.port_ids, netdev,
		       NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC);
	spin_unlock_bh(&priv->internal_ports.lock);
	idr_preload_end();

	return id;
}
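
/* Translate a netdev into the port ID used in firmware control messages:
 * representors carry their own port ID, while offloadable non-repr
 * netdevs are mapped through the internal port IDR above.
 */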
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
				       struct net_device *netdev)
{
	int ext_port;

	if (nfp_netdev_is_nfp_repr(netdev)) {
		return nfp_repr_get_port_id(netdev);
	} else if (nfp_flower_internal_port_can_offload(app, netdev)) {
		ext_port = nfp_flower_get_internal_port_id(app, netdev);
		if (ext_port < 0)
			return 0;

		return nfp_flower_internal_port_get_port_id(ext_port);
	}

	return 0;
}

static void
nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int id;

	id = nfp_flower_lookup_internal_port_id(priv, netdev);
	if (!id)
		return;

	spin_lock_bh(&priv->internal_ports.lock);
	idr_remove(&priv->internal_ports.port_ids, id);
	spin_unlock_bh(&priv->internal_ports.lock);
}

static int
nfp_flower_internal_port_event_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	if (event == NETDEV_UNREGISTER &&
	    nfp_flower_internal_port_can_offload(app, netdev))
		nfp_flower_free_internal_port_id(app, netdev);

	return NOTIFY_OK;
}

static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv)
{
	spin_lock_init(&priv->internal_ports.lock);
	idr_init(&priv->internal_ports.port_ids);
}

static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv)
{
	idr_destroy(&priv->internal_ports.port_ids);
}
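
/* Reference-counted per-netdev private data for non-representor devices,
 * kept on the priv->non_repr_priv list.
 */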
static struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_non_repr_priv *entry;

	ASSERT_RTNL();

	list_for_each_entry(entry, &priv->non_repr_priv, list)
		if (entry->netdev == netdev)
			return entry;

	return NULL;
}

void
__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv)
{
	non_repr_priv->ref_count++;
}

struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_non_repr_priv *entry;

	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
	if (entry)
		goto inc_ref;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->netdev = netdev;
	list_add(&entry->list, &priv->non_repr_priv);

inc_ref:
	__nfp_flower_non_repr_priv_get(entry);
	return entry;
}

void
__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv)
{
	if (--non_repr_priv->ref_count)
		return;

	list_del(&non_repr_priv->list);
	kfree(non_repr_priv);
}

void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_non_repr_priv *entry;

	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
	if (!entry)
		return;

	__nfp_flower_non_repr_priv_put(entry);
}
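
/* Decode a firmware port ID into a representor type and port index. */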
static enum nfp_repr_type
nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
{
	switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
	case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
				  port_id);
		return NFP_REPR_TYPE_PHYS_PORT;

	case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
		if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
		    NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
			return NFP_REPR_TYPE_PF;
		else
			return NFP_REPR_TYPE_VF;
	}

	return __NFP_REPR_TYPE_MAX;
}

static struct net_device *
nfp_flower_repr_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
	enum nfp_repr_type repr_type;
	struct nfp_reprs *reprs;
	u8 port = 0;

	repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
	if (repr_type > NFP_REPR_TYPE_MAX)
		return NULL;

	reprs = rcu_dereference(app->reprs[repr_type]);
	if (!reprs)
		return NULL;

	if (port >= reprs->num_reprs)
		return NULL;

	return rcu_dereference(reprs->reprs[port]);
}
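
/* Send a REIFY control message for every representor of @type and return
 * the number of messages sent, or a negative errno on failure.
 */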
static int
nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
		       bool exists)
{
	struct nfp_reprs *reprs;
	int i, err, count = 0;

	reprs = rcu_dereference_protected(app->reprs[type],
					  lockdep_is_held(&app->pf->lock));
	if (!reprs)
		return 0;

	for (i = 0; i < reprs->num_reprs; i++) {
		struct net_device *netdev;

		netdev = nfp_repr_get_locked(app, reprs, i);
		if (netdev) {
			struct nfp_repr *repr = netdev_priv(netdev);

			err = nfp_flower_cmsg_portreify(repr, exists);
			if (err)
				return err;
			count++;
		}
	}

	return count;
}

static int
nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!tot_repl)
		return 0;

	lockdep_assert_held(&app->pf->lock);
	if (!wait_event_timeout(priv->reify_wait_queue,
				atomic_read(replies) >= tot_repl,
				NFP_FL_REPLY_TIMEOUT)) {
		nfp_warn(app->cpp, "Not all reprs responded to reify\n");
		return -EIO;
	}

	return 0;
}

static int
nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
{
	int err;

	err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
	if (err)
		return err;

	netif_tx_wake_all_queues(repr->netdev);

	return 0;
}

static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
{
	netif_tx_disable(repr->netdev);

	return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}

static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	kfree(repr->app_priv);
}

static void
nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	int err;

	atomic_set(replies, 0);
	err = nfp_flower_cmsg_portreify(repr, false);
	if (err) {
		nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
		return;
	}

	nfp_flower_wait_repr_reify(app, replies, 1);
}

static void nfp_flower_sriov_disable(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return;

	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
}
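
/* Create PF or VF representors, register them with the app and wait for
 * the firmware to acknowledge their creation via REIFY replies.
 */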
static int
nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
			    enum nfp_flower_cmsg_port_vnic_type vnic_type,
			    enum nfp_repr_type repr_type, unsigned int cnt)
{
	u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	enum nfp_port_type port_type;
	struct nfp_repr *nfp_repr;
	struct nfp_reprs *reprs;
	int i, err, reify_cnt;
	const u8 queue = 0;

	port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
						    NFP_PORT_VF_PORT;

	reprs = nfp_reprs_alloc(cnt);
	if (!reprs)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct net_device *repr;
		struct nfp_port *port;
		u32 port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;
		repr_priv->nfp_repr = nfp_repr;

		/* For now we only support 1 PF */
		WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);

		port = nfp_port_alloc(app, port_type, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		if (repr_type == NFP_REPR_TYPE_PF) {
			port->pf_id = i;
			port->vnic = priv->nn->dp.ctrl_bar;
		} else {
			port->pf_id = 0;
			port->vf_id = i;
			port->vnic =
				app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
		}

		eth_hw_addr_random(repr);

		port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
						    i, queue);
		err = nfp_repr_init(app, repr,
				    port_id, port, priv->nn->dp.netdev);
		if (err) {
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		RCU_INIT_POINTER(reprs->reprs[i], repr);
		nfp_info(app->cpp, "%s%d Representor(%s) created\n",
			 repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
			 repr->name);
	}

	nfp_app_reprs_set(app, repr_type, reprs);

	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
	return err;
}

static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return 0;

	return nfp_flower_spawn_vnic_reprs(app,
					   NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
					   NFP_REPR_TYPE_VF, num_vfs);
}
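
/* Create representors for all physical ports in the eth table and
 * announce them to the firmware via MAC_REPR/REIFY control messages.
 */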
static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
	struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_repr *nfp_repr;
	struct sk_buff *ctrl_skb;
	struct nfp_reprs *reprs;
	int err, reify_cnt;
	unsigned int i;

	ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
	if (!ctrl_skb)
		return -ENOMEM;

	reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
	if (!reprs) {
		err = -ENOMEM;
		goto err_free_ctrl_skb;
	}

	for (i = 0; i < eth_tbl->count; i++) {
		unsigned int phys_port = eth_tbl->ports[i].index;
		struct net_device *repr;
		struct nfp_port *port;
		u32 cmsg_port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;
		repr_priv->nfp_repr = nfp_repr;

		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		err = nfp_port_init_phy_port(app->pf, app, port, i);
		if (err) {
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
		nfp_net_get_mac_addr(app->pf, repr, port);

		cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
		err = nfp_repr_init(app, repr,
				    cmsg_port_id, port, priv->nn->dp.netdev);
		if (err) {
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
					     eth_tbl->ports[i].nbi,
					     eth_tbl->ports[i].base,
					     phys_port);

		RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
		nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
			 phys_port, repr->name);
	}

	nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);

	/* The REIFY/MAC_REPR control messages should be sent after the MAC
	 * representors are registered using nfp_app_reprs_set(). This is
	 * because the firmware may respond with control messages for the
	 * MAC representors, f.e. to provide the driver with information
	 * about their state, and without registration the driver will drop
	 * any such messages.
	 */
	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	nfp_ctrl_tx(app->ctrl, ctrl_skb);

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
	kfree_skb(ctrl_skb);
	return err;
}

static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
				 unsigned int id)
{
	if (id > 0) {
		nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
		goto err_invalid_port;
	}

	eth_hw_addr_random(nn->dp.netdev);
	netif_keep_dst(nn->dp.netdev);
	nn->vnic_no_name = true;

	return 0;

err_invalid_port:
	nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
	return PTR_ERR_OR_ZERO(nn->port);
}

static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;

	if (app->pf->num_vfs)
		nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);

	priv->nn = NULL;
}
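
/* Spawn representors once the data vNIC is up: physical ports first,
 * then the PF, then any VFs, unwinding in reverse order on error.
 */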
static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	priv->nn = nn;

	err = nfp_flower_spawn_phy_reprs(app, app->priv);
	if (err)
		goto err_clear_nn;

	err = nfp_flower_spawn_vnic_reprs(app,
					  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
					  NFP_REPR_TYPE_PF, 1);
	if (err)
		goto err_destroy_reprs_phy;

	if (app->pf->num_vfs) {
		err = nfp_flower_spawn_vnic_reprs(app,
						  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
						  NFP_REPR_TYPE_VF,
						  app->pf->num_vfs);
		if (err)
			goto err_destroy_reprs_pf;
	}

	return 0;

err_destroy_reprs_pf:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
err_destroy_reprs_phy:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
err_clear_nn:
	priv->nn = NULL;
	return err;
}
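
/* Validate firmware symbols and resources, then allocate and initialise
 * the flower app private data: control message queues, flow metadata,
 * and optional LAG and flow-merge support where the firmware provides it.
 */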
static int nfp_flower_init(struct nfp_app *app)
{
	u64 version, features, ctx_count, num_mems;
	const struct nfp_pf *pf = app->pf;
	struct nfp_flower_priv *app_priv;
	int err;

	if (!pf->eth_tbl) {
		nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
		return -EINVAL;
	}

	if (!pf->mac_stats_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
		return -EINVAL;
	}

	if (!pf->vf_cfg_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
		return -EINVAL;
	}

	version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
	if (err) {
		nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
		return err;
	}

	num_mems = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_SPLIT",
				     &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context memory: %d\n",
			 err);
		err = 0;
		num_mems = 1;
	}

	if (!FIELD_FIT(NFP_FL_STAT_ID_MU_NUM, num_mems) || !num_mems) {
		nfp_warn(app->cpp,
			 "FlowerNIC: invalid host context memory: %llu\n",
			 num_mems);
		return -EINVAL;
	}

	ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
				      &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context count: %d\n",
			 err);
		err = 0;
		ctx_count = BIT(17);
	}

	/* We need to ensure hardware has enough flower capabilities. */
	if (version != NFP_FLOWER_ALLOWED_VER) {
		nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
		return -EINVAL;
	}

	app_priv = vzalloc(sizeof(struct nfp_flower_priv));
	if (!app_priv)
		return -ENOMEM;

	app_priv->total_mem_units = num_mems;
	app_priv->active_mem_unit = 0;
	app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
	app->priv = app_priv;
	app_priv->app = app;
	skb_queue_head_init(&app_priv->cmsg_skbs_high);
	skb_queue_head_init(&app_priv->cmsg_skbs_low);
	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
	init_waitqueue_head(&app_priv->reify_wait_queue);

	init_waitqueue_head(&app_priv->mtu_conf.wait_q);
	spin_lock_init(&app_priv->mtu_conf.lock);

	err = nfp_flower_metadata_init(app, ctx_count, num_mems);
	if (err)
		goto err_free_app_priv;

	/* Extract the extra features supported by the firmware. */
	features = nfp_rtsym_read_le(app->pf->rtbl,
				     "_abi_flower_extra_features", &err);
	if (err)
		app_priv->flower_ext_feats = 0;
	else
		app_priv->flower_ext_feats = features;

	/* Tell the firmware that the driver supports lag. */
	err = nfp_rtsym_write_le(app->pf->rtbl,
				 "_abi_flower_balance_sync_enable", 1);
	if (!err) {
		app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG;
		nfp_flower_lag_init(&app_priv->nfp_lag);
	} else if (err == -ENOENT) {
		nfp_warn(app->cpp, "LAG not supported by FW.\n");
	} else {
		goto err_cleanup_metadata;
	}

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
		/* Tell the firmware that the driver supports flow merging. */
		err = nfp_rtsym_write_le(app->pf->rtbl,
					 "_abi_flower_merge_hint_enable", 1);
		if (!err) {
			app_priv->flower_ext_feats |= NFP_FL_FEATS_FLOW_MERGE;
			nfp_flower_internal_port_init(app_priv);
		} else if (err == -ENOENT) {
			nfp_warn(app->cpp, "Flow merge not supported by FW.\n");
		} else {
			goto err_lag_clean;
		}
	} else {
		nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
	}

	INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
	INIT_LIST_HEAD(&app_priv->non_repr_priv);

	return 0;

err_lag_clean:
	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);
err_cleanup_metadata:
	nfp_flower_metadata_cleanup(app);
err_free_app_priv:
	vfree(app->priv);
	return err;
}

static void nfp_flower_clean(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;

	skb_queue_purge(&app_priv->cmsg_skbs_high);
	skb_queue_purge(&app_priv->cmsg_skbs_low);
	flush_work(&app_priv->cmsg_work);

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE)
		nfp_flower_internal_port_cleanup(app_priv);

	nfp_flower_metadata_cleanup(app);
	vfree(app->priv);
	app->priv = NULL;
}

static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
{
	bool ret;

	spin_lock_bh(&app_priv->mtu_conf.lock);
	ret = app_priv->mtu_conf.ack;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	return ret;
}
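
/* Physical port MTU changes must be acknowledged by the firmware: the
 * requested value is stored under mtu_conf.lock and the caller sleeps
 * until the ack arrives or NFP_FL_REPLY_TIMEOUT expires.
 */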
static int
nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
			   int new_mtu)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	/* Only need to config FW for physical port MTU change. */
	if (repr->port->type != NFP_PORT_PHYS_PORT)
		return 0;

	if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
		nfp_err(app->cpp, "Physical port MTU setting not supported\n");
		return -EINVAL;
	}

	spin_lock_bh(&app_priv->mtu_conf.lock);
	app_priv->mtu_conf.ack = false;
	app_priv->mtu_conf.requested_val = new_mtu;
	app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
				      true);
	if (err) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		return err;
	}

	/* Wait for fw to ack the change. */
	if (!wait_event_timeout(app_priv->mtu_conf.wait_q,
				nfp_flower_check_ack(app_priv),
				NFP_FL_REPLY_TIMEOUT)) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		nfp_warn(app->cpp, "MTU change not verified with fw\n");
		return -EIO;
	}

	return 0;
}

static int nfp_flower_start(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int err;

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		err = nfp_flower_lag_reset(&app_priv->nfp_lag);
		if (err)
			return err;
	}

	return nfp_tunnel_config_start(app);
}

static void nfp_flower_stop(struct nfp_app *app)
{
	nfp_tunnel_config_stop(app);
}

static int
nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
			unsigned long event, void *ptr)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int ret;

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
		if (ret & NOTIFY_STOP_MASK)
			return ret;
	}

	ret = nfp_flower_reg_indir_block_handler(app, netdev, event);
	if (ret & NOTIFY_STOP_MASK)
		return ret;

	ret = nfp_flower_internal_port_event_handler(app, netdev, event);
	if (ret & NOTIFY_STOP_MASK)
		return ret;

	return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}

const struct nfp_app_type app_flower = {
	.id		= NFP_APP_FLOWER_NIC,
	.name		= "flower",

	.ctrl_cap_mask	= ~0U,
	.ctrl_has_meta	= true,

	.extra_cap	= nfp_flower_extra_cap,

	.init		= nfp_flower_init,
	.clean		= nfp_flower_clean,

	.repr_change_mtu  = nfp_flower_repr_change_mtu,

	.vnic_alloc	= nfp_flower_vnic_alloc,
	.vnic_init	= nfp_flower_vnic_init,
	.vnic_clean	= nfp_flower_vnic_clean,

	.repr_preclean	= nfp_flower_repr_netdev_preclean,
	.repr_clean	= nfp_flower_repr_netdev_clean,

	.repr_open	= nfp_flower_repr_netdev_open,
	.repr_stop	= nfp_flower_repr_netdev_stop,

	.start		= nfp_flower_start,
	.stop		= nfp_flower_stop,

	.netdev_event	= nfp_flower_netdev_event,

	.ctrl_msg_rx	= nfp_flower_cmsg_rx,

	.sriov_enable	= nfp_flower_sriov_enable,
	.sriov_disable	= nfp_flower_sriov_disable,

	.eswitch_mode_get  = eswitch_mode_get,
	.dev_get	= nfp_flower_repr_get,

	.setup_tc	= nfp_flower_setup_tc,
};