// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2019 Solarflare Communications Inc.
 * Copyright 2020-2022 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "ef100_rep.h"
#include "ef100_netdev.h"
#include "ef100_nic.h"
#include "mae.h"
#include "rx_common.h"
#include "tc_bindings.h"

#define EFX_EF100_REP_DRIVER	"efx_ef100_rep"

#define EFX_REP_DEFAULT_PSEUDO_RING_SIZE	64

static int efx_ef100_rep_poll(struct napi_struct *napi, int weight);

static int efx_ef100_rep_init_struct(struct efx_nic *efx, struct efx_rep *efv,
				     unsigned int i)
{
	efv->parent = efx;
	efv->idx = i;
	INIT_LIST_HEAD(&efv->list);
	efv->dflt.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
	INIT_LIST_HEAD(&efv->dflt.acts.list);
	INIT_LIST_HEAD(&efv->rx_list);
	spin_lock_init(&efv->rx_lock);
	efv->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			  NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			  NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			  NETIF_MSG_TX_ERR | NETIF_MSG_HW;
	return 0;
}

static int efx_ef100_rep_open(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	netif_napi_add(net_dev, &efv->napi, efx_ef100_rep_poll,
		       NAPI_POLL_WEIGHT);
	napi_enable(&efv->napi);
	return 0;
}

static int efx_ef100_rep_close(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	napi_disable(&efv->napi);
	netif_napi_del(&efv->napi);
	return 0;
}

static netdev_tx_t efx_ef100_rep_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	netdev_tx_t rc;

	/* __ef100_hard_start_xmit() will always return success even in the
	 * case of TX drops, where it will increment efx's tx_dropped.  The
	 * efv stats really only count attempted TX, not success/failure.
	 */
	atomic64_inc(&efv->stats.tx_packets);
	atomic64_add(skb->len, &efv->stats.tx_bytes);
	netif_tx_lock(efx->net_dev);
	rc = __ef100_hard_start_xmit(skb, efx, dev, efv);
	netif_tx_unlock(efx->net_dev);
	return rc;
}
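
/* Note on locking: the representor sets NETIF_F_LLTX, so the core takes no
 * TX lock on the rep netdev itself; the netif_tx_lock() on the parent PF
 * above is what serialises access to the shared TX path.  A minimal sketch
 * of the resulting call/lock order (illustrative only, not additional
 * driver code):
 *
 *	rep ndo_start_xmit
 *	    netif_tx_lock(parent)
 *	    __ef100_hard_start_xmit(skb, efx, dev, efv)
 *	    netif_tx_unlock(parent)
 */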

static int efx_ef100_rep_get_port_parent_id(struct net_device *dev,
					    struct netdev_phys_item_id *ppid)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	struct ef100_nic_data *nic_data;

	nic_data = efx->nic_data;
	/* nic_data->port_id is a u8[] */
	ppid->id_len = sizeof(nic_data->port_id);
	memcpy(ppid->id, nic_data->port_id, sizeof(nic_data->port_id));
	return 0;
}

static int efx_ef100_rep_get_phys_port_name(struct net_device *dev,
					    char *buf, size_t len)
{
	struct efx_rep *efv = netdev_priv(dev);
	struct efx_nic *efx = efv->parent;
	struct ef100_nic_data *nic_data;
	int ret;

	nic_data = efx->nic_data;
	ret = snprintf(buf, len, "p%upf%uvf%u", efx->port_num,
		       nic_data->pf_index, efv->idx);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}
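
/* For example, VF 1 on PF 0 of port 0 reports a phys_port_name of
 * "p0pf0vf1"; tools such as udev combine this with the port parent ID
 * above to derive predictable names for representor interfaces.
 */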

static int efx_ef100_rep_setup_tc(struct net_device *net_dev,
				  enum tc_setup_type type, void *type_data)
{
	struct efx_rep *efv = netdev_priv(net_dev);
	struct efx_nic *efx = efv->parent;

	if (type == TC_SETUP_CLSFLOWER)
		return efx_tc_flower(efx, net_dev, type_data, efv);
	if (type == TC_SETUP_BLOCK)
		return efx_tc_setup_block(net_dev, efx, type_data, efv);

	return -EOPNOTSUPP;
}

static void efx_ef100_rep_get_stats64(struct net_device *dev,
				      struct rtnl_link_stats64 *stats)
{
	struct efx_rep *efv = netdev_priv(dev);

	stats->rx_packets = atomic64_read(&efv->stats.rx_packets);
	stats->tx_packets = atomic64_read(&efv->stats.tx_packets);
	stats->rx_bytes = atomic64_read(&efv->stats.rx_bytes);
	stats->tx_bytes = atomic64_read(&efv->stats.tx_bytes);
	stats->rx_dropped = atomic64_read(&efv->stats.rx_dropped);
	stats->tx_errors = atomic64_read(&efv->stats.tx_errors);
}

const struct net_device_ops efx_ef100_rep_netdev_ops = {
	.ndo_open		= efx_ef100_rep_open,
	.ndo_stop		= efx_ef100_rep_close,
	.ndo_start_xmit		= efx_ef100_rep_xmit,
	.ndo_get_port_parent_id	= efx_ef100_rep_get_port_parent_id,
	.ndo_get_phys_port_name	= efx_ef100_rep_get_phys_port_name,
	.ndo_get_stats64	= efx_ef100_rep_get_stats64,
	.ndo_setup_tc		= efx_ef100_rep_setup_tc,
};

static void efx_ef100_rep_get_drvinfo(struct net_device *dev,
				      struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, EFX_EF100_REP_DRIVER, sizeof(drvinfo->driver));
}

static u32 efx_ef100_rep_ethtool_get_msglevel(struct net_device *net_dev)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	return efv->msg_enable;
}

static void efx_ef100_rep_ethtool_set_msglevel(struct net_device *net_dev,
					       u32 msg_enable)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	efv->msg_enable = msg_enable;
}

static void efx_ef100_rep_ethtool_get_ringparam(struct net_device *net_dev,
						struct ethtool_ringparam *ring,
						struct kernel_ethtool_ringparam *kring,
						struct netlink_ext_ack *ext_ack)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	ring->rx_max_pending = U32_MAX;
	ring->rx_pending = efv->rx_pring_size;
}

static int efx_ef100_rep_ethtool_set_ringparam(struct net_device *net_dev,
					       struct ethtool_ringparam *ring,
					       struct kernel_ethtool_ringparam *kring,
					       struct netlink_ext_ack *ext_ack)
{
	struct efx_rep *efv = netdev_priv(net_dev);

	if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->tx_pending)
		return -EINVAL;

	efv->rx_pring_size = ring->rx_pending;

	return 0;
}
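
/* The pseudo-ring size can be read and resized with standard ethtool;
 * e.g., assuming a representor named "eth1" (hypothetical name):
 *
 *	ethtool -g eth1		# shows rx of 64 (the default)
 *	ethtool -G eth1 rx 128	# raises the queued-SKB bound to 128
 *
 * Only the RX parameter is meaningful: a representor has no mini, jumbo
 * or TX rings, so requests to set those are rejected with -EINVAL above.
 */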

static const struct ethtool_ops efx_ef100_rep_ethtool_ops = {
	.get_drvinfo		= efx_ef100_rep_get_drvinfo,
	.get_msglevel		= efx_ef100_rep_ethtool_get_msglevel,
	.set_msglevel		= efx_ef100_rep_ethtool_set_msglevel,
	.get_ringparam		= efx_ef100_rep_ethtool_get_ringparam,
	.set_ringparam		= efx_ef100_rep_ethtool_set_ringparam,
};

static struct efx_rep *efx_ef100_rep_create_netdev(struct efx_nic *efx,
						   unsigned int i)
{
	struct net_device *net_dev;
	struct efx_rep *efv;
	int rc;

	net_dev = alloc_etherdev_mq(sizeof(*efv), 1);
	if (!net_dev)
		return ERR_PTR(-ENOMEM);

	efv = netdev_priv(net_dev);
	rc = efx_ef100_rep_init_struct(efx, efv, i);
	if (rc)
		goto fail1;
	efv->net_dev = net_dev;
	rtnl_lock();
	spin_lock_bh(&efx->vf_reps_lock);
	list_add_tail(&efv->list, &efx->vf_reps);
	spin_unlock_bh(&efx->vf_reps_lock);
	if (netif_running(efx->net_dev) && efx->state == STATE_NET_UP) {
		netif_device_attach(net_dev);
		netif_carrier_on(net_dev);
	} else {
		netif_carrier_off(net_dev);
		netif_tx_stop_all_queues(net_dev);
	}
	rtnl_unlock();

	net_dev->netdev_ops = &efx_ef100_rep_netdev_ops;
	net_dev->ethtool_ops = &efx_ef100_rep_ethtool_ops;
	net_dev->min_mtu = EFX_MIN_MTU;
	net_dev->max_mtu = EFX_MAX_MTU;
	net_dev->features |= NETIF_F_LLTX;
	net_dev->hw_features |= NETIF_F_LLTX;
	return efv;
fail1:
	free_netdev(net_dev);
	return ERR_PTR(rc);
}

static int efx_ef100_configure_rep(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;
	u32 selector;
	int rc;

	efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
	/* Construct mport selector for corresponding VF */
	efx_mae_mport_vf(efx, efv->idx, &selector);
	/* Look up actual mport ID */
	rc = efx_mae_lookup_mport(efx, selector, &efv->mport);
	if (rc)
		return rc;
	pci_dbg(efx->pci_dev, "VF %u has mport ID %#x\n", efv->idx, efv->mport);
	/* mport label should fit in 16 bits */
	WARN_ON(efv->mport >> 16);

	return efx_tc_configure_default_rule_rep(efv);
}
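
/* On success, the MAE holds a default rule for this representor (installed
 * by efx_tc_configure_default_rule_rep()) steering the VF's traffic to the
 * PF, where the RX path delivers it to the rep by mport lookup; see
 * efx_ef100_rep_rx_packet() and efx_ef100_find_rep_by_mport() below.
 */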

static void efx_ef100_deconfigure_rep(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;

	efx_tc_deconfigure_default_rule(efx, &efv->dflt);
}

static void efx_ef100_rep_destroy_netdev(struct efx_rep *efv)
{
	struct efx_nic *efx = efv->parent;

	rtnl_lock();
	spin_lock_bh(&efx->vf_reps_lock);
	list_del(&efv->list);
	spin_unlock_bh(&efx->vf_reps_lock);
	rtnl_unlock();

	free_netdev(efv->net_dev);
}

int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
{
	struct efx_rep *efv;
	int rc;

	efv = efx_ef100_rep_create_netdev(efx, i);
	if (IS_ERR(efv)) {
		rc = PTR_ERR(efv);
		pci_err(efx->pci_dev,
			"Failed to create representor for VF %d, rc %d\n", i,
			rc);
		return rc;
	}
	rc = efx_ef100_configure_rep(efv);
	if (rc) {
		pci_err(efx->pci_dev,
			"Failed to configure representor for VF %d, rc %d\n",
			i, rc);
		goto fail1;
	}
	rc = register_netdev(efv->net_dev);
	if (rc) {
		pci_err(efx->pci_dev,
			"Failed to register representor for VF %d, rc %d\n",
			i, rc);
		goto fail2;
	}
	pci_dbg(efx->pci_dev, "Representor for VF %d is %s\n", i,
		efv->net_dev->name);
	return 0;
fail2:
	efx_ef100_deconfigure_rep(efv);
fail1:
	efx_ef100_rep_destroy_netdev(efv);
	return rc;
}

void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
{
	struct net_device *rep_dev;

	rep_dev = efv->net_dev;
	if (!rep_dev)
		return;
	netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
	unregister_netdev(rep_dev);
	efx_ef100_deconfigure_rep(efv);
	efx_ef100_rep_destroy_netdev(efv);
}

void efx_ef100_fini_vfreps(struct efx_nic *efx)
{
	struct ef100_nic_data *nic_data = efx->nic_data;
	struct efx_rep *efv, *next;

	if (!nic_data->grp_mae)
		return;

	list_for_each_entry_safe(efv, next, &efx->vf_reps, list)
		efx_ef100_vfrep_destroy(efx, efv);
}

static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
{
	struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
	unsigned int read_index;
	struct list_head head;
	struct sk_buff *skb;
	bool need_resched;
	int spent = 0;

	INIT_LIST_HEAD(&head);
	/* Grab up to 'weight' pending SKBs */
	spin_lock_bh(&efv->rx_lock);
	read_index = efv->write_index;
	while (spent < weight && !list_empty(&efv->rx_list)) {
		skb = list_first_entry(&efv->rx_list, struct sk_buff, list);
		list_del(&skb->list);
		list_add_tail(&skb->list, &head);
		spent++;
	}
	spin_unlock_bh(&efv->rx_lock);
	/* Receive them */
	netif_receive_skb_list(&head);
	if (spent < weight)
		if (napi_complete_done(napi, spent)) {
			spin_lock_bh(&efv->rx_lock);
			efv->read_index = read_index;
			/* If write_index advanced while we were doing the
			 * RX, then storing our read_index won't re-prime the
			 * fake-interrupt.  In that case, we need to schedule
			 * NAPI again to consume the additional packet(s).
			 */
			need_resched = efv->write_index != read_index;
			spin_unlock_bh(&efv->rx_lock);
			if (need_resched)
				napi_schedule(&efv->napi);
		}
	return spent;
}
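
/* The rx_list is a software pseudo-ring: efx_ef100_rep_rx_packet() below is
 * the producer (advancing write_index) and this poller is the consumer
 * (publishing read_index).  Equal indices mean the ring is "primed", i.e.
 * the next produced packet must ring the doorbell via napi_schedule();
 * that is what the primed/need_resched checks in the two functions
 * implement.
 */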

void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf)
{
	u8 *eh = efx_rx_buf_va(rx_buf);
	struct sk_buff *skb;
	bool primed;

	/* Don't allow too many queued SKBs to build up, as they consume
	 * GFP_ATOMIC memory.  If we overrun, just start dropping.
	 */
	if (efv->write_index - READ_ONCE(efv->read_index) > efv->rx_pring_size) {
		atomic64_inc(&efv->stats.rx_dropped);
		if (net_ratelimit())
			netif_dbg(efv->parent, rx_err, efv->net_dev,
				  "nodesc-dropped packet of length %u\n",
				  rx_buf->len);
		return;
	}

	skb = netdev_alloc_skb(efv->net_dev, rx_buf->len);
	if (!skb) {
		atomic64_inc(&efv->stats.rx_dropped);
		if (net_ratelimit())
			netif_dbg(efv->parent, rx_err, efv->net_dev,
				  "noskb-dropped packet of length %u\n",
				  rx_buf->len);
		return;
	}
	memcpy(skb->data, eh, rx_buf->len);
	__skb_put(skb, rx_buf->len);

	skb_record_rx_queue(skb, 0); /* rep is single-queue */

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efv->net_dev);

	skb_checksum_none_assert(skb);

	atomic64_inc(&efv->stats.rx_packets);
	atomic64_add(rx_buf->len, &efv->stats.rx_bytes);

	/* Add it to the rx list */
	spin_lock_bh(&efv->rx_lock);
	primed = efv->read_index == efv->write_index;
	list_add_tail(&skb->list, &efv->rx_list);
	efv->write_index++;
	spin_unlock_bh(&efv->rx_lock);
	/* Trigger rx work */
	if (primed)
		napi_schedule(&efv->napi);
}
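
/* This is called from the parent PF's RX path once a packet's ingress
 * mport has been matched to a representor; it runs in atomic (NAPI)
 * context, hence the GFP_ATOMIC-backed netdev_alloc_skb() and the queue
 * bound enforced against rx_pring_size above.
 */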

struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport)
{
	struct efx_rep *efv, *out = NULL;

	/* spinlock guards against list mutation while we're walking it;
	 * but caller must also hold rcu_read_lock() to ensure the netdev
	 * isn't freed after we drop the spinlock.
	 */
	spin_lock_bh(&efx->vf_reps_lock);
	list_for_each_entry(efv, &efx->vf_reps, list)
		if (efv->mport == mport) {
			out = efv;
			break;
		}
	spin_unlock_bh(&efx->vf_reps_lock);
	return out;
}
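
/* Illustrative caller pattern for the lookup above (a sketch only; the
 * parent PF's RX path is the real user).  "mport" and "rx_buf" stand in
 * for values the caller has derived from the received packet's metadata:
 *
 *	rcu_read_lock();
 *	efv = efx_ef100_find_rep_by_mport(efx, mport);
 *	if (efv)
 *		efx_ef100_rep_rx_packet(efv, rx_buf);
 *	rcu_read_unlock();
 */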