1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2015-2018 Netronome Systems, Inc. */
4 #include <linux/bitfield.h>
5 #include <linux/netdevice.h>
6 #include <linux/skbuff.h>
7 #include <linux/workqueue.h>
8 #include <net/dst_metadata.h>
11 #include "../nfp_net.h"
12 #include "../nfp_net_repr.h"
/* Return the flower control message header, which sits at the very
 * start of the skb's data area.
 */
15 static struct nfp_flower_cmsg_hdr *
16 nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
18 	return (struct nfp_flower_cmsg_hdr *)skb->data;
/* Allocate a control message skb with room for @size payload bytes plus
 * the flower cmsg header, and stamp the header's version field.
 * NOTE(review): intermediate lines (skb declaration, alloc failure
 * check, type assignment, return) are not visible in this chunk.
 */
22 nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
23 		      enum nfp_flower_cmsg_type_port type, gfp_t flag)
25 	struct nfp_flower_cmsg_hdr *ch;
	/* Reserve space for the flower cmsg header ahead of the payload. */
28 	size += NFP_FLOWER_CMSG_HLEN;
30 	skb = nfp_app_ctrl_msg_alloc(app, size, flag);
34 	ch = nfp_flower_cmsg_get_hdr(skb);
36 	ch->version = NFP_FLOWER_CMSG_VER1;
/* Build a MAC_REPR control message sized for @num_ports trailing port
 * entries.  Zeroes the reserved bytes and records the port count; the
 * per-port entries are filled in later by nfp_flower_cmsg_mac_repr_add().
 */
44 nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports)
46 	struct nfp_flower_cmsg_mac_repr *msg;
	/* Message is a fixed header followed by num_ports port entries. */
50 	size = sizeof(*msg) + num_ports * sizeof(msg->ports[0]);
51 	skb = nfp_flower_cmsg_alloc(app, size, NFP_FLOWER_CMSG_TYPE_MAC_REPR,
56 	msg = nfp_flower_cmsg_get_data(skb);
57 	memset(msg->reserved, 0, sizeof(msg->reserved));
58 	msg->num_ports = num_ports;
/* Fill in port entry @idx of a MAC_REPR message previously created by
 * nfp_flower_cmsg_mac_repr_start().  Only the NBI bits of @nbi are
 * kept (masked with NFP_FLOWER_CMSG_MAC_REPR_NBI).
 */
64 nfp_flower_cmsg_mac_repr_add(struct sk_buff *skb, unsigned int idx,
65 			     unsigned int nbi, unsigned int nbi_port,
66 			     unsigned int phys_port)
68 	struct nfp_flower_cmsg_mac_repr *msg;
70 	msg = nfp_flower_cmsg_get_data(skb);
71 	msg->ports[idx].idx = idx;
72 	msg->ports[idx].info = nbi & NFP_FLOWER_CMSG_MAC_REPR_NBI;
73 	msg->ports[idx].nbi_port = nbi_port;
74 	msg->ports[idx].phys_port = phys_port;
/* Send a PORT_MOD control message for @repr: reports carrier state and
 * MTU to the firmware.  When @mtu_only is set, the MTU_CHANGE_ONLY flag
 * marks the message as a pure MTU update.
 */
77 int nfp_flower_cmsg_portmod(struct nfp_repr *repr, bool carrier_ok,
78 			    unsigned int mtu, bool mtu_only)
80 	struct nfp_flower_cmsg_portmod *msg;
83 	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
84 				    NFP_FLOWER_CMSG_TYPE_PORT_MOD, GFP_KERNEL);
88 	msg = nfp_flower_cmsg_get_data(skb);
	/* Port id comes from the repr's dst metadata; wire format is BE. */
89 	msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
91 	msg->info = carrier_ok;
94 		msg->info |= NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY;
96 	msg->mtu = cpu_to_be16(mtu);
	/* Hand the message off on the control channel. */
98 	nfp_ctrl_tx(repr->app->ctrl, skb);
/* Send a PORT_REIFY control message announcing whether the representor
 * @repr exists (@exists encoded big-endian in the info field).
 */
103 int nfp_flower_cmsg_portreify(struct nfp_repr *repr, bool exists)
105 	struct nfp_flower_cmsg_portreify *msg;
108 	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
109 				    NFP_FLOWER_CMSG_TYPE_PORT_REIFY,
114 	msg = nfp_flower_cmsg_get_data(skb);
115 	msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
117 	msg->info = cpu_to_be16(exists);
119 	nfp_ctrl_tx(repr->app->ctrl, skb);
/* Check whether a PORT_MOD message is the firmware's ack for an MTU
 * change we requested.  Only messages with the MTU_CHANGE_ONLY flag
 * qualify; the port number and MTU value must match the pending
 * request recorded in app_priv->mtu_conf.  On a match, record the ack
 * and wake the waiter.
 */
125 nfp_flower_process_mtu_ack(struct nfp_app *app, struct sk_buff *skb)
127 	struct nfp_flower_priv *app_priv = app->priv;
128 	struct nfp_flower_cmsg_portmod *msg;
130 	msg = nfp_flower_cmsg_get_data(skb);
	/* Only MTU-change-only messages can be MTU acks. */
132 	if (!(msg->info & NFP_FLOWER_CMSG_PORTMOD_MTU_CHANGE_ONLY))
	/* mtu_conf state is shared with the requesting context. */
135 	spin_lock_bh(&app_priv->mtu_conf.lock);
136 	if (!app_priv->mtu_conf.requested_val ||
137 	    app_priv->mtu_conf.portnum != be32_to_cpu(msg->portnum) ||
138 	    be16_to_cpu(msg->mtu) != app_priv->mtu_conf.requested_val) {
139 		/* Not an ack for requested MTU change. */
140 		spin_unlock_bh(&app_priv->mtu_conf.lock);
	/* Matching ack: mark done, clear pending request, wake waiter. */
144 	app_priv->mtu_conf.ack = true;
145 	app_priv->mtu_conf.requested_val = 0;
146 	wake_up(&app_priv->mtu_conf.wait_q);
147 	spin_unlock_bh(&app_priv->mtu_conf.lock);
/* Handle an incoming PORT_MOD message: look up the representor netdev
 * by port number and update its carrier state (and MTU when link is up).
 */
153 nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
155 	struct nfp_flower_cmsg_portmod *msg;
156 	struct net_device *netdev;
159 	msg = nfp_flower_cmsg_get_data(skb);
160 	link = msg->info & NFP_FLOWER_CMSG_PORTMOD_INFO_LINK;
	/* Map the wire port id back to its representor netdev. */
164 	netdev = nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
167 		nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
168 				     be32_to_cpu(msg->portnum));
174 		u16 mtu = be16_to_cpu(msg->mtu);
176 		netif_carrier_on(netdev);
178 		/* An MTU of 0 from the firmware should be ignored */
180 			dev_set_mtu(netdev, mtu);
182 		netif_carrier_off(netdev);
/* Handle an incoming PORT_REIFY reply: verify the port is known, then
 * count the reply and wake anyone waiting for all reify replies.
 */
188 nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
190 	struct nfp_flower_priv *priv = app->priv;
191 	struct nfp_flower_cmsg_portreify *msg;
194 	msg = nfp_flower_cmsg_get_data(skb);
	/* A reply is only valid if the port maps to a known repr. */
197 	exists = !!nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
200 		nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
201 				     be32_to_cpu(msg->portnum));
	/* Account the reply and wake the reify waiter. */
205 	atomic_inc(&priv->reify_replies);
206 	wake_up_interruptible(&priv->reify_wait_queue);
/* Dispatch one queued control message by its header type.  LAG config
 * messages may be stored for later reprocessing (skb_stored), in which
 * case the skb must not be freed here.
 */
210 nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
212 	struct nfp_flower_priv *app_priv = app->priv;
213 	struct nfp_flower_cmsg_hdr *cmsg_hdr;
214 	enum nfp_flower_cmsg_type_port type;
215 	bool skb_stored = false;
217 	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
219 	type = cmsg_hdr->type;
221 	case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
222 		nfp_flower_cmsg_portreify_rx(app, skb);
224 	case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
225 		nfp_flower_cmsg_portmod_rx(app, skb);
227 	case NFP_FLOWER_CMSG_TYPE_NO_NEIGH:
228 		nfp_tunnel_request_route(app, skb);
230 	case NFP_FLOWER_CMSG_TYPE_ACTIVE_TUNS:
231 		nfp_tunnel_keep_alive(app, skb);
233 	case NFP_FLOWER_CMSG_TYPE_LAG_CONFIG:
		/* LAG handling only when the firmware advertises the feature. */
234 		if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
235 			skb_stored = nfp_flower_lag_unprocessed_msg(app, skb);
240 		nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
	/* Handled: consume; unhandled/invalid: free. */
246 	dev_consume_skb_any(skb);
249 	dev_kfree_skb_any(skb);
/* Work handler for deferred control message processing.  Splices the
 * high-priority queue ahead of the low-priority queue into a local
 * list (taking each queue's lock), then processes every skb in order.
 */
252 void nfp_flower_cmsg_process_rx(struct work_struct *work)
254 	struct sk_buff_head cmsg_joined;
255 	struct nfp_flower_priv *priv;
258 	priv = container_of(work, struct nfp_flower_priv, cmsg_work);
259 	skb_queue_head_init(&cmsg_joined);
	/* High-priority messages (REIFY/PORT_MOD) are drained first. */
261 	spin_lock_bh(&priv->cmsg_skbs_high.lock);
262 	skb_queue_splice_tail_init(&priv->cmsg_skbs_high, &cmsg_joined);
263 	spin_unlock_bh(&priv->cmsg_skbs_high.lock);
265 	spin_lock_bh(&priv->cmsg_skbs_low.lock);
266 	skb_queue_splice_tail_init(&priv->cmsg_skbs_low, &cmsg_joined);
267 	spin_unlock_bh(&priv->cmsg_skbs_low.lock);
	/* cmsg_joined is local, so no locking needed while dequeuing. */
269 	while ((skb = __skb_dequeue(&cmsg_joined)))
270 		nfp_flower_cmsg_process_one_rx(priv->app, skb);
/* Queue a control message for deferred processing by the work handler.
 * PORT_REIFY and PORT_MOD go to the high-priority queue, everything
 * else to the low-priority one.  Drops the skb if the chosen queue is
 * already at NFP_FLOWER_WORKQ_MAX_SKBS.
 */
274 nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
276 	struct nfp_flower_priv *priv = app->priv;
277 	struct sk_buff_head *skb_head;
279 	if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY ||
280 	    type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
281 		skb_head = &priv->cmsg_skbs_high;
283 		skb_head = &priv->cmsg_skbs_low;
	/* Bound the backlog to avoid unbounded memory use under load. */
285 	if (skb_queue_len(skb_head) >= NFP_FLOWER_WORKQ_MAX_SKBS) {
286 		nfp_flower_cmsg_warn(app, "Dropping queued control messages\n");
287 		dev_kfree_skb_any(skb);
291 	skb_queue_tail(skb_head, skb);
292 	schedule_work(&priv->cmsg_work);
/* Entry point for control messages arriving from the firmware.
 * Rejects unknown header versions, fast-paths a few message types in
 * this context (flow stats, MTU acks, tunnel-neighbour acks), and
 * defers everything else to the workqueue via nfp_flower_queue_ctl_msg().
 */
295 void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
297 	struct nfp_flower_cmsg_hdr *cmsg_hdr;
299 	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);
	/* Only version 1 of the cmsg format is understood. */
301 	if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
302 		nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
304 		dev_kfree_skb_any(skb);
308 	if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_FLOW_STATS) {
309 		/* We need to deal with stats updates from HW asap */
310 		nfp_flower_rx_flow_stats(app, skb);
311 		dev_consume_skb_any(skb);
312 	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_MOD &&
313 		   nfp_flower_process_mtu_ack(app, skb)) {
314 		/* Handle MTU acks outside wq to prevent RTNL conflict. */
315 		dev_consume_skb_any(skb);
316 	} else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
317 		/* Acks from the NFP that the route is added - ignore. */
318 		dev_consume_skb_any(skb);
	/* Everything else is queued for the work handler. */
320 		nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);