/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/io.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/icmp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
#include <soc/fsl/bman.h>
#include <soc/fsl/qman.h>

#include "fman.h"
#include "fman_port.h"
#include "mac.h"
#include "dpaa_eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa_eth_trace.h"

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");

static u16 tx_timeout = 1000;
module_param(tx_timeout, ushort, 0444);
MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");

#define FM_FD_STAT_RX_ERRORS \
        (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
         FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
         FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
         FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
         FM_FD_ERR_PRS_HDR_ERR)

#define FM_FD_STAT_TX_ERRORS \
        (FM_FD_ERR_UNSUPPORTED_FORMAT | \
         FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)

#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
                          NETIF_MSG_LINK | NETIF_MSG_IFUP | \
                          NETIF_MSG_IFDOWN)

#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
/* Ingress congestion threshold on FMan ports
 * The size in bytes of the ingress tail-drop threshold on FMan ports.
 * Traffic piling up above this value will be rejected by QMan and discarded
 * by FMan.
 */

/* Size in bytes of the FQ taildrop threshold */
#define DPAA_FQ_TD 0x200000

#define DPAA_CS_THRESHOLD_1G 0x06000000
/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
 * The size in bytes of the egress Congestion State notification threshold on
 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
 * and the larger the frame size, the more acute the problem.
 * So we have to find a balance between these factors:
 * - avoiding the device staying congested for a prolonged time (risking
 *   that the netdev watchdog fires - see also the tx_timeout module param);
 * - affecting performance of protocols such as TCP, which otherwise
 *   behave well under the congestion notification mechanism;
 * - preventing the Tx cores from tightly-looping (as if the congestion
 *   threshold was too low to be effective);
 * - running out of memory if the CS threshold is set too high.
 */

#define DPAA_CS_THRESHOLD_10G 0x10000000
/* The size in bytes of the egress Congestion State notification threshold on
 * 10G ports, range 0x1000 .. 0x10000000
 */
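
/* For reference: DPAA_CS_THRESHOLD_1G (0x06000000) is 96 MiB and
 * DPAA_CS_THRESHOLD_10G (0x10000000) is 256 MiB, so a 1G port may hold up
 * to 96 MiB of egress traffic in flight before Congestion State
 * notifications fire, and a 10G port up to 256 MiB.
 */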

/* Largest value that the FQD's OAL field can hold */
#define FSL_QMAN_MAX_OAL 127

/* Default alignment for start of data in an Rx FD */
#define DPAA_FD_DATA_ALIGNMENT 16

/* Values for the L3R field of the FM Parse Results
 */
/* L3 Type field: First IP Present IPv4 */
#define FM_L3_PARSE_RESULT_IPV4 0x8000
/* L3 Type field: First IP Present IPv6 */
#define FM_L3_PARSE_RESULT_IPV6 0x4000
/* Values for the L4R field of the FM Parse Results */
/* L4 Type field: UDP */
#define FM_L4_PARSE_RESULT_UDP 0x40
/* L4 Type field: TCP */
#define FM_L4_PARSE_RESULT_TCP 0x20

/* FD status field indicating whether the FM Parser has attempted to validate
 * the L4 csum of the frame.
 * Note that having this bit set doesn't necessarily imply that the checksum
 * is valid. One would have to check the parse results to find that out.
 */
#define FM_FD_STAT_L4CV 0x00000004

#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */

#define FSL_DPAA_BPID_INV 0xff
#define FSL_DPAA_ETH_MAX_BUF_COUNT 128
#define FSL_DPAA_ETH_REFILL_THRESHOLD 80

#define DPAA_TX_PRIV_DATA_SIZE 16
#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
#define DPAA_TIME_STAMP_SIZE 8
#define DPAA_HASH_RESULTS_SIZE 8
#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
                                     dpaa_rx_extra_headroom)

#define DPAA_ETH_RX_QUEUES 128

#define DPAA_ENQUEUE_RETRIES 100000

enum port_type {RX, TX};

struct fm_port_fqs {
        struct dpaa_fq *tx_defq;
        struct dpaa_fq *tx_errq;
        struct dpaa_fq *rx_defq;
        struct dpaa_fq *rx_errq;
};

/* All the dpa bps in use at any moment */
static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];

/* The raw buffer size must be cacheline aligned */
#define DPAA_BP_RAW_SIZE 4096
/* When using more than one buffer pool, the raw sizes are as follows:
 * 1 bp: 4KB
 * 2 bp: 2KB, 4KB
 * 3 bp: 1KB, 2KB, 4KB
 * 4 bp: 1KB, 2KB, 4KB, 8KB
 */
static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
{
        size_t res = DPAA_BP_RAW_SIZE / 4;
        u8 i;

        for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
                res *= 2;
        return res;
}
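
/* Worked example for the table above, with DPAA_BP_RAW_SIZE = 4096: res
 * starts at 1024; for cnt = 3, index 0 never enters the loop and keeps
 * 1024 (1KB), index 1 doubles once to 2048 (2KB), and index 2 doubles
 * twice to 4096 (4KB). For cnt = 4, index 3 doubles three times, up to
 * 8192 (8KB).
 */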

/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
 * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
 * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
 * half-page-aligned buffers, so we reserve some more space for start-of-buffer
 * alignment.
 */
#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
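
/* For instance, with 4KB raw buffers and 64-byte cache lines this leaves
 * SKB_WITH_OVERHEAD(4032) bytes of usable frame data, i.e. 4032 minus the
 * aligned size of struct skb_shared_info; the exact figure depends on the
 * kernel configuration, so treat these numbers as illustrative.
 */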

static int dpaa_max_frm;

static int dpaa_rx_extra_headroom;

#define dpaa_get_max_mtu() \
        (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
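
/* E.g. assuming the FMan default maximum frame size of 1522 bytes, this
 * yields the standard 1500-byte MTU: 1522 - (VLAN_ETH_HLEN + ETH_FCS_LEN)
 * = 1522 - (18 + 4) = 1500.
 */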

static int dpaa_netdev_init(struct net_device *net_dev,
                            const struct net_device_ops *dpaa_ops,
                            u16 tx_timeout)
{
        struct dpaa_priv *priv = netdev_priv(net_dev);
        struct device *dev = net_dev->dev.parent;
        struct dpaa_percpu_priv *percpu_priv;
        const u8 *mac_addr;
        int i, err;

        /* Although we access another CPU's private data here
         * we do it at initialization so it is safe
         */
        for_each_possible_cpu(i) {
                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
                percpu_priv->net_dev = net_dev;
        }

        net_dev->netdev_ops = dpaa_ops;
        mac_addr = priv->mac_dev->addr;

        net_dev->mem_start = priv->mac_dev->res->start;
        net_dev->mem_end = priv->mac_dev->res->end;

        net_dev->min_mtu = ETH_MIN_MTU;
        net_dev->max_mtu = dpaa_get_max_mtu();

        net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                 NETIF_F_LLTX);

        net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
        /* The kernel enables GSO automatically, if we declare NETIF_F_SG.
         * For conformity, we'll still declare GSO explicitly.
         */
        net_dev->features |= NETIF_F_GSO;
        net_dev->features |= NETIF_F_RXCSUM;

        net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        /* we do not want shared skbs on TX */
        net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;

        net_dev->features |= net_dev->hw_features;
        net_dev->vlan_features = net_dev->features;

        memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
        memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);

        net_dev->ethtool_ops = &dpaa_ethtool_ops;

        net_dev->needed_headroom = priv->tx_headroom;
        net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);

        /* start without the RUNNING flag, phylib controls it later */
        netif_carrier_off(net_dev);

        err = register_netdev(net_dev);
        if (err < 0) {
                dev_err(dev, "register_netdev() = %d\n", err);
                return err;
        }

        return 0;
}

static int dpaa_stop(struct net_device *net_dev)
{
        struct mac_device *mac_dev;
        struct dpaa_priv *priv;
        int i, err, error;

        priv = netdev_priv(net_dev);
        mac_dev = priv->mac_dev;

        netif_tx_stop_all_queues(net_dev);
        /* Allow the Fman (Tx) port to process in-flight frames before we
         * try switching it off.
         */
        usleep_range(5000, 10000);

        err = mac_dev->stop(mac_dev);
        if (err < 0)
                netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
                          err);

        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
                error = fman_port_disable(mac_dev->port[i]);
                if (error)
                        err = error;
        }

        if (net_dev->phydev)
                phy_disconnect(net_dev->phydev);
        net_dev->phydev = NULL;

        return err;
}

static void dpaa_tx_timeout(struct net_device *net_dev)
{
        struct dpaa_percpu_priv *percpu_priv;
        const struct dpaa_priv *priv;

        priv = netdev_priv(net_dev);
        percpu_priv = this_cpu_ptr(priv->percpu_priv);

        netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
                   jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));

        percpu_priv->stats.tx_errors++;
}

/* Calculates the statistics for the given device by adding the statistics
 * collected by each CPU.
 */
static void dpaa_get_stats64(struct net_device *net_dev,
                             struct rtnl_link_stats64 *s)
{
        int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
        struct dpaa_priv *priv = netdev_priv(net_dev);
        struct dpaa_percpu_priv *percpu_priv;
        u64 *netstats = (u64 *)s;
        u64 *cpustats;
        int i, j;

        for_each_possible_cpu(i) {
                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

                cpustats = (u64 *)&percpu_priv->stats;

                /* add stats from all CPUs */
                for (j = 0; j < numstats; j++)
                        netstats[j] += cpustats[j];
        }
}

static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
                         void *type_data)
{
        struct dpaa_priv *priv = netdev_priv(net_dev);
        struct tc_mqprio_qopt *mqprio = type_data;
        u8 num_tc;
        int i;

        if (type != TC_SETUP_MQPRIO)
                return -EOPNOTSUPP;

        mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
        num_tc = mqprio->num_tc;

        if (num_tc == priv->num_tc)
                return 0;

        if (!num_tc) {
                netdev_reset_tc(net_dev);
                goto out;
        }

        if (num_tc > DPAA_TC_NUM) {
                netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
                           DPAA_TC_NUM);
                return -EINVAL;
        }

        netdev_set_num_tc(net_dev, num_tc);

        for (i = 0; i < num_tc; i++)
                netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
                                    i * DPAA_TC_TXQ_NUM);

out:
        priv->num_tc = num_tc ? : 1;
        netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
        return 0;
}
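
/* Illustrative only (not part of the driver): the mqprio offload above is
 * exercised from user space with something along the lines of
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 4 hw 1
 *
 * where "eth0" is a placeholder interface name and the map/queues arguments
 * are omitted for brevity. Each traffic class is then backed by
 * DPAA_TC_TXQ_NUM hardware Tx queues, laid out contiguously by the
 * netdev_set_tc_queue() calls above.
 */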

static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
{
        struct platform_device *of_dev;
        struct dpaa_eth_data *eth_data;
        struct device *dpaa_dev, *dev;
        struct device_node *mac_node;
        struct mac_device *mac_dev;

        dpaa_dev = &pdev->dev;
        eth_data = dpaa_dev->platform_data;
        if (!eth_data)
                return ERR_PTR(-ENODEV);

        mac_node = eth_data->mac_node;

        of_dev = of_find_device_by_node(mac_node);
        if (!of_dev) {
                dev_err(dpaa_dev, "of_find_device_by_node(%pOF) failed\n",
                        mac_node);
                of_node_put(mac_node);
                return ERR_PTR(-EINVAL);
        }
        of_node_put(mac_node);

        dev = &of_dev->dev;

        mac_dev = dev_get_drvdata(dev);
        if (!mac_dev) {
                dev_err(dpaa_dev, "dev_get_drvdata(%s) failed\n",
                        dev_name(dev));
                return ERR_PTR(-EINVAL);
        }

        return mac_dev;
}

static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
{
        const struct dpaa_priv *priv;
        struct mac_device *mac_dev;
        struct sockaddr old_addr;
        int err;

        priv = netdev_priv(net_dev);

        memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);

        err = eth_mac_addr(net_dev, addr);
        if (err < 0) {
                netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
                return err;
        }

        mac_dev = priv->mac_dev;

        err = mac_dev->change_addr(mac_dev->fman_mac,
                                   (enet_addr_t *)net_dev->dev_addr);
        if (err < 0) {
                netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
                          err);
                /* reverting to previous address */
                eth_mac_addr(net_dev, &old_addr);

                return err;
        }

        return 0;
}

static void dpaa_set_rx_mode(struct net_device *net_dev)
{
        const struct dpaa_priv *priv;
        int err;

        priv = netdev_priv(net_dev);

        if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
                priv->mac_dev->promisc = !priv->mac_dev->promisc;
                err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
                                                 priv->mac_dev->promisc);
                if (err < 0)
                        netif_err(priv, drv, net_dev,
                                  "mac_dev->set_promisc() = %d\n",
                                  err);
        }

        err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
        if (err < 0)
                netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
                          err);
}

static struct dpaa_bp *dpaa_bpid2pool(int bpid)
{
        if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
                return NULL;

        return dpaa_bp_array[bpid];
}

/* checks if this bpool is already allocated */
static bool dpaa_bpid2pool_use(int bpid)
{
        if (dpaa_bpid2pool(bpid)) {
                atomic_inc(&dpaa_bp_array[bpid]->refs);
                return true;
        }

        return false;
}

/* called only once per bpid by dpaa_bp_alloc_pool() */
static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
{
        dpaa_bp_array[bpid] = dpaa_bp;
        atomic_set(&dpaa_bp->refs, 1);
}

static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
{
        int err;

        if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
                pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
                       __func__);
                return -EINVAL;
        }

        /* If the pool is already specified, we only create one per bpid */
        if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
            dpaa_bpid2pool_use(dpaa_bp->bpid))
                return 0;

        if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
                dpaa_bp->pool = bman_new_pool();
                if (!dpaa_bp->pool) {
                        pr_err("%s: bman_new_pool() failed\n",
                               __func__);
                        return -ENODEV;
                }

                dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
        }

        if (dpaa_bp->seed_cb) {
                err = dpaa_bp->seed_cb(dpaa_bp);
                if (err)
                        goto pool_seed_failed;
        }

        dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);

        return 0;

pool_seed_failed:
        pr_err("%s: pool seeding failed\n", __func__);
        bman_free_pool(dpaa_bp->pool);

        return err;
}

/* remove and free all the buffers from the given buffer pool */
static void dpaa_bp_drain(struct dpaa_bp *bp)
{
        u8 num = 8;
        int ret;

        do {
                struct bm_buffer bmb[8];
                int i;

                ret = bman_acquire(bp->pool, bmb, num);
                if (ret < 0) {
                        if (num == 8) {
                                /* we have less than 8 buffers left;
                                 * drain them one by one
                                 */
                                num = 1;
                                ret = 1;
                                continue;
                        } else {
                                /* Pool is fully drained */
                                break;
                        }
                }

                if (bp->free_buf_cb)
                        for (i = 0; i < num; i++)
                                bp->free_buf_cb(bp, &bmb[i]);
        } while (ret > 0);
}

static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
{
        struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);

        /* the mapping between bpid and dpaa_bp is done very late in the
         * allocation procedure; if something failed before the mapping, the bp
         * was not configured, therefore we don't need the below instructions
         */
        if (!bp)
                return;

        if (!atomic_dec_and_test(&bp->refs))
                return;

        if (bp->free_buf_cb)
                dpaa_bp_drain(bp);

        dpaa_bp_array[bp->bpid] = NULL;
        bman_free_pool(bp->pool);
}

static void dpaa_bps_free(struct dpaa_priv *priv)
{
        int i;

        for (i = 0; i < DPAA_BPS_NUM; i++)
                dpaa_bp_free(priv->dpaa_bps[i]);
}

/* Use multiple WQs for FQ assignment:
 * - Tx Confirmation queues go to WQ1.
 * - Rx Error and Tx Error queues go to WQ5 (giving them a better chance
 *   to be scheduled, in case there are many more FQs in WQ6).
 * - Rx Default goes to WQ6.
 * - Tx queues go to different WQs depending on their priority. Equal
 *   chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
 *   WQ0 (highest priority).
 * This ensures that Tx-confirmed buffers are timely released. In particular,
 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
 * are greatly outnumbered by other FQs in the system, while
 * dequeue scheduling is round-robin.
 */
static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
{
        switch (fq->fq_type) {
        case FQ_TYPE_TX_CONFIRM:
        case FQ_TYPE_TX_CONF_MQ:
                fq->wq = 1;
                break;
        case FQ_TYPE_RX_ERROR:
        case FQ_TYPE_TX_ERROR:
                fq->wq = 5;
                break;
        case FQ_TYPE_RX_DEFAULT:
                fq->wq = 6;
                break;
        case FQ_TYPE_TX:
                switch (idx / DPAA_TC_TXQ_NUM) {
                case 0:
                        /* Low priority (best effort) */
                        fq->wq = 6;
                        break;
                case 1:
                        /* Medium priority */
                        fq->wq = 2;
                        break;
                case 2:
                        /* High priority */
                        fq->wq = 1;
                        break;
                case 3:
                        /* Very high priority */
                        fq->wq = 0;
                        break;
                default:
                        WARN(1, "Too many TX FQs: more than %d!\n",
                             DPAA_ETH_TXQ_NUM);
                }
                break;
        default:
                WARN(1, "Invalid FQ type %d for FQID %d!\n",
                     fq->fq_type, fq->fqid);
        }
}
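
/* Worked example for the Tx case above, taking the comment before
 * dpaa_assign_wq() at its word (chunks of DPAA_TC_TXQ_NUM queues, with
 * DPAA_TC_TXQ_NUM assumed equal to NR_CPUS): Tx FQ indices
 * 0 .. NR_CPUS - 1 land in WQ6 (best effort), the next chunk in WQ2,
 * then WQ1, and the last chunk in WQ0 (highest priority).
 */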

static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
                                     u32 start, u32 count,
                                     struct list_head *list,
                                     enum dpaa_fq_type fq_type)
{
        struct dpaa_fq *dpaa_fq;
        int i;

        dpaa_fq = devm_kzalloc(dev, sizeof(*dpaa_fq) * count,
                               GFP_KERNEL);
        if (!dpaa_fq)
                return NULL;

        for (i = 0; i < count; i++) {
                dpaa_fq[i].fq_type = fq_type;
                dpaa_fq[i].fqid = start ? start + i : 0;
                list_add_tail(&dpaa_fq[i].list, list);
        }

        for (i = 0; i < count; i++)
                dpaa_assign_wq(dpaa_fq + i, i);

        return dpaa_fq;
}

static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
                              struct fm_port_fqs *port_fqs)
{
        struct dpaa_fq *dpaa_fq;

        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
        if (!dpaa_fq)
                goto fq_alloc_failed;

        port_fqs->rx_errq = &dpaa_fq[0];

        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
        if (!dpaa_fq)
                goto fq_alloc_failed;

        port_fqs->rx_defq = &dpaa_fq[0];

        if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
                goto fq_alloc_failed;

        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
        if (!dpaa_fq)
                goto fq_alloc_failed;

        port_fqs->tx_errq = &dpaa_fq[0];

        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
        if (!dpaa_fq)
                goto fq_alloc_failed;

        port_fqs->tx_defq = &dpaa_fq[0];

        if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
                goto fq_alloc_failed;

        return 0;

fq_alloc_failed:
        dev_err(dev, "dpaa_fq_alloc() failed\n");
        return -ENOMEM;
}

static u32 rx_pool_channel;
static DEFINE_SPINLOCK(rx_pool_channel_init);

static int dpaa_get_channel(void)
{
        spin_lock(&rx_pool_channel_init);
        if (!rx_pool_channel) {
                u32 pool;
                int ret;

                ret = qman_alloc_pool(&pool);

                if (!ret)
                        rx_pool_channel = pool;
        }
        spin_unlock(&rx_pool_channel_init);
        if (!rx_pool_channel)
                return -ENOMEM;
        return rx_pool_channel;
}

static void dpaa_release_channel(void)
{
        qman_release_pool(rx_pool_channel);
}

static void dpaa_eth_add_channel(u16 channel)
{
        u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
        const cpumask_t *cpus = qman_affine_cpus();
        struct qman_portal *portal;
        int cpu;

        for_each_cpu(cpu, cpus) {
                portal = qman_get_affine_portal(cpu);
                qman_p_static_dequeue_add(portal, pool);
        }
}

/* Congestion group state change notification callback.
 * Stops the device's egress queues while they are congested and
 * wakes them upon exiting congested state.
 * Also updates some CGR-related stats.
 */
static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
                           int congested)
{
        struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
                struct dpaa_priv, cgr_data.cgr);

        if (congested) {
                priv->cgr_data.congestion_start_jiffies = jiffies;
                netif_tx_stop_all_queues(priv->net_dev);
                priv->cgr_data.cgr_congested_count++;
        } else {
                priv->cgr_data.congested_jiffies +=
                        (jiffies - priv->cgr_data.congestion_start_jiffies);
                netif_tx_wake_all_queues(priv->net_dev);
        }
}

static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
{
        struct qm_mcc_initcgr initcgr;
        u32 cs_th;
        int err;

        err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
        if (err < 0) {
                if (netif_msg_drv(priv))
                        pr_err("%s: Error %d allocating CGR ID\n",
                               __func__, err);
                goto out_error;
        }
        priv->cgr_data.cgr.cb = dpaa_eth_cgscn;

        /* Enable Congestion State Change Notifications and CS taildrop */
        memset(&initcgr, 0, sizeof(initcgr));
        initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
        initcgr.cgr.cscn_en = QM_CGR_EN;

        /* Set different thresholds based on the MAC speed.
         * This may turn suboptimal if the MAC is reconfigured at a speed
         * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
         * In such cases, we ought to reconfigure the threshold, too.
         */
        if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
                cs_th = DPAA_CS_THRESHOLD_10G;
        else
                cs_th = DPAA_CS_THRESHOLD_1G;
        qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);

        initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
        initcgr.cgr.cstd_en = QM_CGR_EN;

        err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
                              &initcgr);
        if (err < 0) {
                if (netif_msg_drv(priv))
                        pr_err("%s: Error %d creating CGR with ID %d\n",
                               __func__, err, priv->cgr_data.cgr.cgrid);
                qman_release_cgrid(priv->cgr_data.cgr.cgrid);
                goto out_error;
        }
        if (netif_msg_drv(priv))
                pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
                         priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
                         priv->cgr_data.cgr.chan);

out_error:
        return err;
}

static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
                                      struct dpaa_fq *fq,
                                      const struct qman_fq *template)
{
        fq->fq_base = *template;
        fq->net_dev = priv->net_dev;

        fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
        fq->channel = priv->channel;
}

static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
                                     struct dpaa_fq *fq,
                                     struct fman_port *port,
                                     const struct qman_fq *template)
{
        fq->fq_base = *template;
        fq->net_dev = priv->net_dev;

        if (port) {
                fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
                fq->channel = (u16)fman_port_get_qman_channel_id(port);
        } else {
                fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
        }
}

static void dpaa_fq_setup(struct dpaa_priv *priv,
                          const struct dpaa_fq_cbs *fq_cbs,
                          struct fman_port *tx_port)
{
        int egress_cnt = 0, conf_cnt = 0, num_portals = 0, cpu;
        const cpumask_t *affine_cpus = qman_affine_cpus();
        u16 portals[NR_CPUS];
        struct dpaa_fq *fq;

        for_each_cpu(cpu, affine_cpus)
                portals[num_portals++] = qman_affine_channel(cpu);
        if (num_portals == 0)
                dev_err(priv->net_dev->dev.parent,
                        "No Qman software (affine) channels found");

        /* Initialize each FQ in the list */
        list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
                switch (fq->fq_type) {
                case FQ_TYPE_RX_DEFAULT:
                        dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
                        break;
                case FQ_TYPE_RX_ERROR:
                        dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
                        break;
                case FQ_TYPE_TX:
                        dpaa_setup_egress(priv, fq, tx_port,
                                          &fq_cbs->egress_ern);
                        /* If we have more Tx queues than the number of cores,
                         * just ignore the extra ones.
                         */
                        if (egress_cnt < DPAA_ETH_TXQ_NUM)
                                priv->egress_fqs[egress_cnt++] = &fq->fq_base;
                        break;
                case FQ_TYPE_TX_CONF_MQ:
                        priv->conf_fqs[conf_cnt++] = &fq->fq_base;
                        /* fall through */
                case FQ_TYPE_TX_CONFIRM:
                        dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
                        break;
                case FQ_TYPE_TX_ERROR:
                        dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
                        break;
                default:
                        dev_warn(priv->net_dev->dev.parent,
                                 "Unknown FQ type detected!\n");
                        break;
                }
        }

        /* Make sure all CPUs receive a corresponding Tx queue. */
        while (egress_cnt < DPAA_ETH_TXQ_NUM) {
                list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
                        if (fq->fq_type != FQ_TYPE_TX)
                                continue;
                        priv->egress_fqs[egress_cnt++] = &fq->fq_base;
                        if (egress_cnt == DPAA_ETH_TXQ_NUM)
                                break;
                }
        }
}

static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
                                   struct qman_fq *tx_fq)
{
        int i;

        for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
                if (priv->egress_fqs[i] == tx_fq)
                        return i;

        return -EINVAL;
}

static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
{
        const struct dpaa_priv *priv;
        struct qman_fq *confq = NULL;
        struct qm_mcc_initfq initfq;
        struct device *dev;
        struct qman_fq *fq;
        int queue_id;
        int err;

        priv = netdev_priv(dpaa_fq->net_dev);
        dev = dpaa_fq->net_dev->dev.parent;

        if (dpaa_fq->fqid == 0)
                dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;

        dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);

        err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
        if (err) {
                dev_err(dev, "qman_create_fq() failed\n");
                return err;
        }
        fq = &dpaa_fq->fq_base;

        if (dpaa_fq->init) {
                memset(&initfq, 0, sizeof(initfq));

                initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
                /* Note: we may get to keep an empty FQ in cache */
                initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);

                /* Try to reduce the number of portal interrupts for
                 * Tx Confirmation FQs.
                 */
                if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);

                /* FQ placement */
                initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);

                qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);

                /* Put all egress queues in a congestion group of their own.
                 * Sensu stricto, the Tx confirmation queues are Rx FQs,
                 * rather than Tx - but they nonetheless account for the
                 * memory footprint on behalf of egress traffic. We therefore
                 * place them in the netdev's CGR, along with the Tx FQs.
                 */
                if (dpaa_fq->fq_type == FQ_TYPE_TX ||
                    dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
                    dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
                        initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
                        /* Set a fixed overhead accounting, in an attempt to
                         * reduce the impact of fixed-size skb shells and the
                         * driver's needed headroom on system memory. This is
                         * especially the case when the egress traffic is
                         * composed of small datagrams.
                         * Unfortunately, QMan's OAL value is capped to an
                         * insufficient value, but even that is better than
                         * no overhead accounting at all.
                         */
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
                        qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
                        qm_fqd_set_oal(&initfq.fqd,
                                       min(sizeof(struct sk_buff) +
                                           priv->tx_headroom,
                                           (size_t)FSL_QMAN_MAX_OAL));
                }
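
                /* Note on the OAL cap above: on typical configurations,
                 * sizeof(struct sk_buff) alone (a couple hundred bytes;
                 * the exact value is config-dependent, so treat this as
                 * illustrative) already exceeds FSL_QMAN_MAX_OAL (127),
                 * so the min() effectively pins the overhead accounting
                 * to its 127-byte maximum.
                 */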

                if (td_enable) {
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
                        qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_TDE);
                }

                if (dpaa_fq->fq_type == FQ_TYPE_TX) {
                        queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
                        if (queue_id >= 0)
                                confq = priv->conf_fqs[queue_id];
                        if (confq) {
                                initfq.we_mask |=
                                        cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
                                /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
                                 *           A2V=1 (contextA A2 field is valid)
                                 *           A0V=1 (contextA A0 field is valid)
                                 *           B0V=1 (contextB field is valid)
                                 * ContextA A2: EBD=1 (deallocate buffers inside FMan)
                                 * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
                                 */
                                qm_fqd_context_a_set64(&initfq.fqd,
                                                       0x1e00000080000000ULL);
                        }
                }

                /* Put all the ingress queues in our "ingress CGR". */
                if (priv->use_ingress_cgr &&
                    (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
                     dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
                        initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
                        /* Set a fixed overhead accounting, just like for the
                         * egress CGR.
                         */
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
                        qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
                        qm_fqd_set_oal(&initfq.fqd,
                                       min(sizeof(struct sk_buff) +
                                           priv->tx_headroom,
                                           (size_t)FSL_QMAN_MAX_OAL));
                }

                /* Initialization common to all ingress queues */
                if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
                                                QM_FQCTRL_CTXASTASHING);
                        initfq.fqd.context_a.stashing.exclusive =
                                QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
                                QM_STASHING_EXCL_ANNOTATION;
                        qm_fqd_set_stashing(&initfq.fqd, 1, 2,
                                            DIV_ROUND_UP(sizeof(struct qman_fq),
                                                         64));
                }

                err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
                if (err < 0) {
                        dev_err(dev, "qman_init_fq(%u) = %d\n",
                                qman_fq_fqid(fq), err);
                        qman_destroy_fq(fq);
                        return err;
                }
        }

        dpaa_fq->fqid = qman_fq_fqid(fq);

        return 0;
}

static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
{
        const struct dpaa_priv *priv;
        struct dpaa_fq *dpaa_fq;
        int err, error;

        err = 0;

        dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
        priv = netdev_priv(dpaa_fq->net_dev);

        if (dpaa_fq->init) {
                err = qman_retire_fq(fq, NULL);
                if (err < 0 && netif_msg_drv(priv))
                        dev_err(dev, "qman_retire_fq(%u) = %d\n",
                                qman_fq_fqid(fq), err);

                error = qman_oos_fq(fq);
                if (error < 0 && netif_msg_drv(priv)) {
                        dev_err(dev, "qman_oos_fq(%u) = %d\n",
                                qman_fq_fqid(fq), error);
                        if (err >= 0)
                                err = error;
                }
        }

        qman_destroy_fq(fq);
        list_del(&dpaa_fq->list);

        return err;
}

static int dpaa_fq_free(struct device *dev, struct list_head *list)
{
        struct dpaa_fq *dpaa_fq, *tmp;
        int err, error;

        err = 0;
        list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
                error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
                if (error < 0 && err >= 0)
                        err = error;
        }

        return err;
}

static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
                                 struct dpaa_fq *defq,
                                 struct dpaa_buffer_layout *buf_layout)
{
        struct fman_buffer_prefix_content buf_prefix_content;
        struct fman_port_params params;
        int err;

        memset(&params, 0, sizeof(params));
        memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));

        buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
        buf_prefix_content.pass_prs_result = true;
        buf_prefix_content.pass_hash_result = true;
        buf_prefix_content.pass_time_stamp = false;
        buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;

        params.specific_params.non_rx_params.err_fqid = errq->fqid;
        params.specific_params.non_rx_params.dflt_fqid = defq->fqid;

        err = fman_port_config(port, &params);
        if (err) {
                pr_err("%s: fman_port_config failed\n", __func__);
                return err;
        }

        err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
        if (err) {
                pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
                       __func__);
                return err;
        }

        err = fman_port_init(port);
        if (err)
                pr_err("%s: fm_port_init failed\n", __func__);

        return err;
}

static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
                                 size_t count, struct dpaa_fq *errq,
                                 struct dpaa_fq *defq,
                                 struct dpaa_buffer_layout *buf_layout)
{
        struct fman_buffer_prefix_content buf_prefix_content;
        struct fman_port_rx_params *rx_p;
        struct fman_port_params params;
        int i, err;

        memset(&params, 0, sizeof(params));
        memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));

        buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
        buf_prefix_content.pass_prs_result = true;
        buf_prefix_content.pass_hash_result = true;
        buf_prefix_content.pass_time_stamp = false;
        buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;

        rx_p = &params.specific_params.rx_params;
        rx_p->err_fqid = errq->fqid;
        rx_p->dflt_fqid = defq->fqid;

        count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
        rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
        for (i = 0; i < count; i++) {
                rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
                rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
        }

        err = fman_port_config(port, &params);
        if (err) {
                pr_err("%s: fman_port_config failed\n", __func__);
                return err;
        }

        err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
        if (err) {
                pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
                       __func__);
                return err;
        }

        err = fman_port_init(port);
        if (err)
                pr_err("%s: fm_port_init failed\n", __func__);

        return err;
}

static int dpaa_eth_init_ports(struct mac_device *mac_dev,
                               struct dpaa_bp **bps, size_t count,
                               struct fm_port_fqs *port_fqs,
                               struct dpaa_buffer_layout *buf_layout,
                               struct device *dev)
{
        struct fman_port *rxport = mac_dev->port[RX];
        struct fman_port *txport = mac_dev->port[TX];
        int err;

        err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
                                    port_fqs->tx_defq, &buf_layout[TX]);
        if (err)
                return err;

        err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
                                    port_fqs->rx_defq, &buf_layout[RX]);

        return err;
}

static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
                             struct bm_buffer *bmb, int cnt)
{
        int err;

        err = bman_release(dpaa_bp->pool, bmb, cnt);
        /* Should never occur, address anyway to avoid leaking the buffers */
        if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb)
                while (cnt-- > 0)
                        dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);

        return cnt;
}

static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
{
        struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
        struct dpaa_bp *dpaa_bp;
        int i = 0, j;

        memset(bmb, 0, sizeof(bmb));

        do {
                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
                if (!dpaa_bp)
                        return;

                j = 0;
                do {
                        WARN_ON(qm_sg_entry_is_ext(&sgt[i]));

                        bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));

                        j++; i++;
                } while (j < ARRAY_SIZE(bmb) &&
                         !qm_sg_entry_is_final(&sgt[i - 1]) &&
                         sgt[i - 1].bpid == sgt[i].bpid);

                dpaa_bman_release(dpaa_bp, bmb, j);
        } while (!qm_sg_entry_is_final(&sgt[i - 1]));
}

static void dpaa_fd_release(const struct net_device *net_dev,
                            const struct qm_fd *fd)
{
        struct qm_sg_entry *sgt;
        struct dpaa_bp *dpaa_bp;
        struct bm_buffer bmb;
        dma_addr_t addr;
        void *vaddr;

        bmb.data = 0;
        bm_buffer_set64(&bmb, qm_fd_addr(fd));

        dpaa_bp = dpaa_bpid2pool(fd->bpid);
        if (!dpaa_bp)
                return;

        if (qm_fd_get_format(fd) == qm_fd_sg) {
                vaddr = phys_to_virt(qm_fd_addr(fd));
                sgt = vaddr + qm_fd_get_offset(fd);

                dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
                                 DMA_FROM_DEVICE);

                dpaa_release_sgt_members(sgt);

                addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
                                      DMA_FROM_DEVICE);
                if (dma_mapping_error(dpaa_bp->dev, addr)) {
                        dev_err(dpaa_bp->dev, "DMA mapping failed");
                        return;
                }
                bm_buffer_set64(&bmb, addr);
        }

        dpaa_bman_release(dpaa_bp, &bmb, 1);
}

static void count_ern(struct dpaa_percpu_priv *percpu_priv,
                      const union qm_mr_entry *msg)
{
        switch (msg->ern.rc & QM_MR_RC_MASK) {
        case QM_MR_RC_CGR_TAILDROP:
                percpu_priv->ern_cnt.cg_tdrop++;
                break;
        case QM_MR_RC_WRED:
                percpu_priv->ern_cnt.wred++;
                break;
        case QM_MR_RC_ERROR:
                percpu_priv->ern_cnt.err_cond++;
                break;
        case QM_MR_RC_ORPWINDOW_EARLY:
                percpu_priv->ern_cnt.early_window++;
                break;
        case QM_MR_RC_ORPWINDOW_LATE:
                percpu_priv->ern_cnt.late_window++;
                break;
        case QM_MR_RC_FQ_TAILDROP:
                percpu_priv->ern_cnt.fq_tdrop++;
                break;
        case QM_MR_RC_ORPWINDOW_RETIRED:
                percpu_priv->ern_cnt.fq_retired++;
                break;
        case QM_MR_RC_ORP_ZERO:
                percpu_priv->ern_cnt.orp_zero++;
                break;
        }
}

/* Turn on HW checksum computation for this outgoing frame.
 * If the current protocol is not something we support in this regard
 * (or if the stack has already computed the SW checksum), we do nothing.
 *
 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
 * otherwise.
 *
 * Note that this function may modify the fd->cmd field and the skb data buffer
 * (the Parse Results area).
 */
static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
                               struct sk_buff *skb,
                               struct qm_fd *fd,
                               char *parse_results)
{
        struct fman_prs_result *parse_result;
        u16 ethertype = ntohs(skb->protocol);
        struct ipv6hdr *ipv6h = NULL;
        struct iphdr *iph;
        int retval = 0;
        u8 l4_proto;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        /* Note: L3 csum seems to be already computed in sw, but we can't choose
         * L4 alone from the FM configuration anyway.
         */

        /* Fill in some fields of the Parse Results array, so the FMan
         * can find them as if they came from the FMan Parser.
         */
        parse_result = (struct fman_prs_result *)parse_results;

        /* If we're dealing with VLAN, get the real Ethernet type */
        if (ethertype == ETH_P_8021Q) {
                /* We can't always assume the MAC header is set correctly
                 * by the stack, so reset to beginning of skb->data
                 */
                skb_reset_mac_header(skb);
                ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
        }

        /* Fill in the relevant L3 parse result fields
         * and read the L4 protocol type
         */
        switch (ethertype) {
        case ETH_P_IP:
                parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
                iph = ip_hdr(skb);
                WARN_ON(!iph);
                l4_proto = iph->protocol;
                break;
        case ETH_P_IPV6:
                parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
                ipv6h = ipv6_hdr(skb);
                WARN_ON(!ipv6h);
                l4_proto = ipv6h->nexthdr;
                break;
        default:
                /* We shouldn't even be here */
                if (net_ratelimit())
                        netif_alert(priv, tx_err, priv->net_dev,
                                    "Can't compute HW csum for L3 proto 0x%x\n",
                                    ntohs(skb->protocol));
                retval = -EIO;
                goto return_error;
        }

        /* Fill in the relevant L4 parse result fields */
        switch (l4_proto) {
        case IPPROTO_UDP:
                parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
                break;
        case IPPROTO_TCP:
                parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
                break;
        default:
                if (net_ratelimit())
                        netif_alert(priv, tx_err, priv->net_dev,
                                    "Can't compute HW csum for L4 proto 0x%x\n",
                                    l4_proto);
                retval = -EIO;
                goto return_error;
        }

        /* At index 0 is IPOffset_1 as defined in the Parse Results */
        parse_result->ip_off[0] = (u8)skb_network_offset(skb);
        parse_result->l4_off = (u8)skb_transport_offset(skb);

        /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
        fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);

        /* On P1023 and similar platforms fd->cmd interpretation could
         * be disabled by setting CONTEXT_A bit ICMD; currently this bit
         * is not set so we do not need to check; in the future, if/when
         * using context_a we need to check this bit
         */

return_error:
        return retval;
}

static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
{
        struct device *dev = dpaa_bp->dev;
        struct bm_buffer bmb[8];
        dma_addr_t addr;
        void *new_buf;
        u8 i;

        for (i = 0; i < 8; i++) {
                new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
                if (unlikely(!new_buf)) {
                        dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
                                dpaa_bp->raw_size);
                        goto release_previous_buffs;
                }
                new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);

                addr = dma_map_single(dev, new_buf,
                                      dpaa_bp->size, DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(dev, addr))) {
                        dev_err(dpaa_bp->dev, "DMA map failed");
                        goto release_previous_buffs;
                }

                bmb[i].data = 0;
                bm_buffer_set64(&bmb[i], addr);
        }

release_bufs:
        return dpaa_bman_release(dpaa_bp, bmb, i);

release_previous_buffs:
        WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");

        bm_buffer_set64(&bmb[i], 0);
        /* Avoid releasing a completely null buffer; bman_release() requires
         * at least one buffer.
         */
        if (likely(i))
                goto release_bufs;

        return 0;
}

static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
{
        int i;

        /* Give each CPU an allotment of "config_count" buffers */
        for_each_possible_cpu(i) {
                int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
                int j;

                /* Although we access another CPU's counters here
                 * we do it at boot time so it is safe
                 */
                for (j = 0; j < dpaa_bp->config_count; j += 8)
                        *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
        }
        return 0;
}
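
/* Since buffers are added 8 at a time, each CPU effectively gets
 * config_count rounded up to a multiple of 8 buffers, minus any that
 * dpaa_bp_add_8_bufs() failed to allocate (it returns the number of
 * buffers actually released to the pool).
 */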

/* Add buffers/(pages) for Rx processing whenever bpool count falls below
 * REFILL_THRESHOLD.
 */
static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
{
        int count = *countptr;
        int new_bufs;

        if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
                do {
                        new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
                        if (unlikely(!new_bufs)) {
                                /* Avoid looping forever if we've temporarily
                                 * run out of memory. We'll try again at the
                                 * next NAPI cycle.
                                 */
                                break;
                        }
                        count += new_bufs;
                } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);

                *countptr = count;
                if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
                        return -ENOMEM;
        }

        return 0;
}
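
/* Refill hysteresis, with the current defines: once a CPU's buffer count
 * drops below FSL_DPAA_ETH_REFILL_THRESHOLD (80), the pool is topped up,
 * 8 buffers per call, until it again reaches FSL_DPAA_ETH_MAX_BUF_COUNT
 * (128).
 */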
1545 | ||
1546 | static int dpaa_eth_refill_bpools(struct dpaa_priv *priv) | |
1547 | { | |
1548 | struct dpaa_bp *dpaa_bp; | |
1549 | int *countptr; | |
1550 | int res, i; | |
1551 | ||
1552 | for (i = 0; i < DPAA_BPS_NUM; i++) { | |
1553 | dpaa_bp = priv->dpaa_bps[i]; | |
1554 | if (!dpaa_bp) | |
1555 | return -EINVAL; | |
1556 | countptr = this_cpu_ptr(dpaa_bp->percpu_count); | |
1557 | res = dpaa_eth_refill_bpool(dpaa_bp, countptr); | |
1558 | if (res) | |
1559 | return res; | |
1560 | } | |
1561 | return 0; | |
1562 | } | |
1563 | ||
1564 | /* Cleanup function for outgoing frame descriptors that were built on Tx path, | |
1565 | * either contiguous frames or scatter/gather ones. | |
1566 | * Skb freeing is not handled here. | |
1567 | * | |
1568 | * This function may be called on error paths in the Tx function, so guard | |
1569 | * against cases where not all relevant fd fields have been filled in. | |
1570 | * | |
1571 | * Return the skb backpointer, since for S/G frames the buffer containing it | |
1572 | * gets freed here. | |
1573 | */ | |
1574 | static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, | |
1575 | const struct qm_fd *fd) | |
1576 | { | |
1577 | const enum dma_data_direction dma_dir = DMA_TO_DEVICE; | |
1578 | struct device *dev = priv->net_dev->dev.parent; | |
1579 | dma_addr_t addr = qm_fd_addr(fd); | |
1580 | const struct qm_sg_entry *sgt; | |
1581 | struct sk_buff **skbh, *skb; | |
1582 | int nr_frags, i; | |
1583 | ||
1584 | skbh = (struct sk_buff **)phys_to_virt(addr); | |
1585 | skb = *skbh; | |
1586 | ||
1587 | if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { | |
1588 | nr_frags = skb_shinfo(skb)->nr_frags; | |
1589 | dma_unmap_single(dev, addr, qm_fd_get_offset(fd) + | |
1590 | sizeof(struct qm_sg_entry) * (1 + nr_frags), | |
1591 | dma_dir); | |
1592 | ||
1593 | /* The sgt buffer has been allocated with netdev_alloc_frag(), | |
1594 | * it's from lowmem. | |
1595 | */ | |
1596 | sgt = phys_to_virt(addr + qm_fd_get_offset(fd)); | |
1597 | ||
1598 | /* sgt[0] is from lowmem, was dma_map_single()-ed */ | |
1599 | dma_unmap_single(dev, qm_sg_addr(&sgt[0]), | |
1600 | qm_sg_entry_get_len(&sgt[0]), dma_dir); | |
1601 | ||
1602 | /* remaining pages were mapped with skb_frag_dma_map() */ | |
1603 | for (i = 1; i < nr_frags; i++) { | |
1604 | WARN_ON(qm_sg_entry_is_ext(&sgt[i])); | |
1605 | ||
1606 | dma_unmap_page(dev, qm_sg_addr(&sgt[i]), | |
1607 | qm_sg_entry_get_len(&sgt[i]), dma_dir); | |
1608 | } | |
1609 | ||
1610 | /* Free the page frag that we allocated on Tx */ | |
1611 | skb_free_frag(phys_to_virt(addr)); | |
1612 | } else { | |
1613 | dma_unmap_single(dev, addr, | |
1614 | skb_tail_pointer(skb) - (u8 *)skbh, dma_dir); | |
1615 | } | |
1616 | ||
1617 | return skb; | |
1618 | } | |
1619 | ||
5accb282 MB |
1620 | static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd) |
1621 | { | |
1622 | /* The parser has run and performed L4 checksum validation. | |
1623 | * We know there were no parser errors (and implicitly no | |
1624 | * L4 csum error), otherwise we wouldn't be here. | |
1625 | */ | |
1626 | if ((priv->net_dev->features & NETIF_F_RXCSUM) && | |
1627 | (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV)) | |
1628 | return CHECKSUM_UNNECESSARY; | |
1629 | ||
1630 | /* We're here because either the parser didn't run or the L4 checksum | |
1631 | * was not verified. This may include the case of a UDP frame with | |
1632 | * checksum zero or an L4 proto other than TCP/UDP. | |
1633 | */ | |
1634 | return CHECKSUM_NONE; | |
1635 | } | |
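| | |
| /* Usage note (illustrative): both Rx paths below simply assign the result | |
| * to the skb, e.g.: | |
| * | |
| * skb->ip_summed = rx_csum_offload(priv, fd); | |
| * | |
| * CHECKSUM_UNNECESSARY tells the stack that the FMan parser has already | |
| * validated L4; with CHECKSUM_NONE the stack falls back to software | |
| * verification. | |
| */ | |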
1636 | ||
9ad1a374 MB |
1637 | /* Build a linear skb around the received buffer. |
1638 | * We are guaranteed there is enough room at the end of the data buffer to | |
1639 | * accommodate the shared info area of the skb. | |
1640 | */ | |
1641 | static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv, | |
1642 | const struct qm_fd *fd) | |
1643 | { | |
1644 | ssize_t fd_off = qm_fd_get_offset(fd); | |
1645 | dma_addr_t addr = qm_fd_addr(fd); | |
1646 | struct dpaa_bp *dpaa_bp; | |
1647 | struct sk_buff *skb; | |
1648 | void *vaddr; | |
1649 | ||
1650 | vaddr = phys_to_virt(addr); | |
1651 | WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); | |
1652 | ||
1653 | dpaa_bp = dpaa_bpid2pool(fd->bpid); | |
1654 | if (!dpaa_bp) | |
1655 | goto free_buffer; | |
1656 | ||
1657 | skb = build_skb(vaddr, dpaa_bp->size + | |
1658 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); | |
1659 | if (unlikely(!skb)) { | |
1660 | WARN_ONCE(1, "Build skb failure on Rx\n"); | |
1661 | goto free_buffer; | |
1662 | } | |
1663 | WARN_ON(fd_off != priv->rx_headroom); | |
1664 | skb_reserve(skb, fd_off); | |
1665 | skb_put(skb, qm_fd_get_length(fd)); | |
1666 | ||
5accb282 | 1667 | skb->ip_summed = rx_csum_offload(priv, fd); |
9ad1a374 MB |
1668 | |
1669 | return skb; | |
1670 | ||
1671 | free_buffer: | |
1672 | skb_free_frag(vaddr); | |
1673 | return NULL; | |
1674 | } | |
1675 | ||
1676 | /* Build an skb with the data of the first S/G entry in the linear portion and | |
1677 | * the rest of the frame as skb fragments. | |
1678 | * | |
1679 | * The page fragment holding the S/G Table is recycled here. | |
1680 | */ | |
1681 | static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, | |
1682 | const struct qm_fd *fd) | |
1683 | { | |
1684 | ssize_t fd_off = qm_fd_get_offset(fd); | |
1685 | dma_addr_t addr = qm_fd_addr(fd); | |
1686 | const struct qm_sg_entry *sgt; | |
1687 | struct page *page, *head_page; | |
1688 | struct dpaa_bp *dpaa_bp; | |
1689 | void *vaddr, *sg_vaddr; | |
1690 | int frag_off, frag_len; | |
1691 | struct sk_buff *skb; | |
1692 | dma_addr_t sg_addr; | |
1693 | int page_offset; | |
1694 | unsigned int sz; | |
1695 | int *count_ptr; | |
1696 | int i; | |
1697 | ||
1698 | vaddr = phys_to_virt(addr); | |
1699 | WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); | |
1700 | ||
1701 | /* Iterate through the SGT entries and add data buffers to the skb */ | |
1702 | sgt = vaddr + fd_off; | |
1703 | for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) { | |
1704 | /* Extension bit is not supported */ | |
1705 | WARN_ON(qm_sg_entry_is_ext(&sgt[i])); | |
1706 | ||
1707 | sg_addr = qm_sg_addr(&sgt[i]); | |
1708 | sg_vaddr = phys_to_virt(sg_addr); | |
1709 | WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr, | |
1710 | SMP_CACHE_BYTES)); | |
1711 | ||
1712 | /* We may use multiple Rx pools */ | |
1713 | dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); | |
1714 | if (!dpaa_bp) | |
1715 | goto free_buffers; | |
1716 | ||
1717 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); | |
1718 | dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size, | |
1719 | DMA_FROM_DEVICE); | |
1720 | if (i == 0) { | |
1721 | sz = dpaa_bp->size + | |
1722 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | |
1723 | skb = build_skb(sg_vaddr, sz); | |
1724 | if (WARN_ON(unlikely(!skb))) | |
1725 | goto free_buffers; | |
1726 | ||
5accb282 | 1727 | skb->ip_summed = rx_csum_offload(priv, fd); |
9ad1a374 MB |
1728 | |
1729 | /* Make sure forwarded skbs will have enough space | |
1730 | * on Tx, if extra headers are added. | |
1731 | */ | |
1732 | WARN_ON(fd_off != priv->rx_headroom); | |
1733 | skb_reserve(skb, fd_off); | |
1734 | skb_put(skb, qm_sg_entry_get_len(&sgt[i])); | |
1735 | } else { | |
1736 | /* Not the first S/G entry; all data from the buffer will | |
1737 | * be added in an skb fragment; the fragment index is offset | |
1738 | * by one since the first S/G entry was incorporated in the | |
1739 | * linear part of the skb. | |
1740 | * | |
1741 | * Caution: 'page' may be a tail page. | |
1742 | */ | |
1743 | page = virt_to_page(sg_vaddr); | |
1744 | head_page = virt_to_head_page(sg_vaddr); | |
1745 | ||
1746 | /* Compute offset in (possibly tail) page */ | |
1747 | page_offset = ((unsigned long)sg_vaddr & | |
1748 | (PAGE_SIZE - 1)) + | |
1749 | (page_address(page) - page_address(head_page)); | |
1750 | /* page_offset only refers to the beginning of sgt[i]; | |
1751 | * but the buffer itself may have an internal offset. | |
1752 | */ | |
1753 | frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset; | |
1754 | frag_len = qm_sg_entry_get_len(&sgt[i]); | |
1755 | /* skb_add_rx_frag() does no checking on the page; if | |
1756 | * we pass it a tail page, we'll end up with | |
1757 | * bad page accounting and eventually with segfaults. | |
1758 | */ | |
1759 | skb_add_rx_frag(skb, i - 1, head_page, frag_off, | |
1760 | frag_len, dpaa_bp->size); | |
1761 | } | |
1762 | /* Update the pool count for the current {cpu x bpool} */ | |
1763 | (*count_ptr)--; | |
1764 | ||
1765 | if (qm_sg_entry_is_final(&sgt[i])) | |
1766 | break; | |
1767 | } | |
1768 | WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n"); | |
1769 | ||
1770 | /* free the SG table buffer */ | |
1771 | skb_free_frag(vaddr); | |
1772 | ||
1773 | return skb; | |
1774 | ||
1775 | free_buffers: | |
1776 | /* compensate sw bpool counter changes */ | |
785f3577 | 1777 | for (i--; i >= 0; i--) { |
9ad1a374 MB |
1778 | dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); |
1779 | if (dpaa_bp) { | |
1780 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); | |
1781 | (*count_ptr)++; | |
1782 | } | |
1783 | } | |
1784 | /* free all the SG entries */ | |
1785 | for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) { | |
1786 | sg_addr = qm_sg_addr(&sgt[i]); | |
1787 | sg_vaddr = phys_to_virt(sg_addr); | |
1788 | skb_free_frag(sg_vaddr); | |
1789 | dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); | |
1790 | if (dpaa_bp) { | |
1791 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); | |
1792 | (*count_ptr)--; | |
1793 | } | |
1794 | ||
1795 | if (qm_sg_entry_is_final(&sgt[i])) | |
1796 | break; | |
1797 | } | |
1798 | /* free the SGT fragment */ | |
1799 | skb_free_frag(vaddr); | |
1800 | ||
1801 | return NULL; | |
1802 | } | |
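| | |
| /* Illustrative sketch, not part of the driver, of the tail-page offset | |
| * computation performed above: given a buffer address that may live in a | |
| * tail page of a compound page, compute its offset relative to the head | |
| * page, which is what skb_add_rx_frag() expects. | |
| */ | |
| static inline int dpaa_frag_offset(void *sg_vaddr) | |
| { | |
| struct page *page = virt_to_page(sg_vaddr); | |
| struct page *head_page = virt_to_head_page(sg_vaddr); | |
| | |
| /* offset inside the (possibly tail) page, plus the tail page's | |
| * offset from the head page | |
| */ | |
| return ((unsigned long)sg_vaddr & (PAGE_SIZE - 1)) + | |
| (page_address(page) - page_address(head_page)); | |
| } | |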
1803 | ||
1804 | static int skb_to_contig_fd(struct dpaa_priv *priv, | |
1805 | struct sk_buff *skb, struct qm_fd *fd, | |
1806 | int *offset) | |
1807 | { | |
1808 | struct net_device *net_dev = priv->net_dev; | |
1809 | struct device *dev = net_dev->dev.parent; | |
1810 | enum dma_data_direction dma_dir; | |
1811 | unsigned char *buffer_start; | |
1812 | struct sk_buff **skbh; | |
1813 | dma_addr_t addr; | |
1814 | int err; | |
1815 | ||
1816 | /* We are guaranteed to have at least tx_headroom bytes | |
1817 | * available, so just use that for offset. | |
1818 | */ | |
1819 | fd->bpid = FSL_DPAA_BPID_INV; | |
1820 | buffer_start = skb->data - priv->tx_headroom; | |
1821 | dma_dir = DMA_TO_DEVICE; | |
1822 | ||
1823 | skbh = (struct sk_buff **)buffer_start; | |
1824 | *skbh = skb; | |
1825 | ||
1826 | /* Enable L3/L4 hardware checksum computation. | |
1827 | * | |
1828 | * We must do this before dma_map_single(DMA_TO_DEVICE), because we may | |
1829 | * need to write into the skb. | |
1830 | */ | |
1831 | err = dpaa_enable_tx_csum(priv, skb, fd, | |
1832 | ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE); | |
1833 | if (unlikely(err < 0)) { | |
1834 | if (net_ratelimit()) | |
1835 | netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", | |
1836 | err); | |
1837 | return err; | |
1838 | } | |
1839 | ||
1840 | /* Fill in the rest of the FD fields */ | |
1841 | qm_fd_set_contig(fd, priv->tx_headroom, skb->len); | |
7d6f8dc0 | 1842 | fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); |
9ad1a374 MB |
1843 | |
1844 | /* Map the entire buffer size that may be seen by FMan, but no more */ | |
1845 | addr = dma_map_single(dev, skbh, | |
1846 | skb_tail_pointer(skb) - buffer_start, dma_dir); | |
1847 | if (unlikely(dma_mapping_error(dev, addr))) { | |
1848 | if (net_ratelimit()) | |
1849 | netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n"); | |
1850 | return -EINVAL; | |
1851 | } | |
1852 | qm_fd_addr_set64(fd, addr); | |
1853 | ||
1854 | return 0; | |
1855 | } | |
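| | |
| /* Layout of a contiguous Tx buffer as built above (illustrative diagram, | |
| * not authoritative): | |
| * | |
| * buffer_start (skbh) skb->data | |
| * v v | |
| * [ skb backptr, priv data ][ parse results / HW csum ][ frame data ... ] | |
| * <-- DPAA_TX_PRIV_DATA_SIZE --> | |
| * <--------------- priv->tx_headroom ------------------> | |
| * | |
| * The backpointer written at buffer_start is what dpaa_cleanup_tx_fd() | |
| * recovers via phys_to_virt(qm_fd_addr(fd)). | |
| */ | |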
1856 | ||
1857 | static int skb_to_sg_fd(struct dpaa_priv *priv, | |
1858 | struct sk_buff *skb, struct qm_fd *fd) | |
1859 | { | |
1860 | const enum dma_data_direction dma_dir = DMA_TO_DEVICE; | |
1861 | const int nr_frags = skb_shinfo(skb)->nr_frags; | |
1862 | struct net_device *net_dev = priv->net_dev; | |
1863 | struct device *dev = net_dev->dev.parent; | |
1864 | struct qm_sg_entry *sgt; | |
1865 | struct sk_buff **skbh; | |
1866 | int i, j, err, sz; | |
1867 | void *buffer_start; | |
1868 | skb_frag_t *frag; | |
1869 | dma_addr_t addr; | |
1870 | size_t frag_len; | |
1871 | void *sgt_buf; | |
1872 | ||
1873 | /* get a page frag to store the S/G table */ | |
1874 | sz = SKB_DATA_ALIGN(priv->tx_headroom + | |
1875 | sizeof(struct qm_sg_entry) * (1 + nr_frags)); | |
1876 | sgt_buf = netdev_alloc_frag(sz); | |
1877 | if (unlikely(!sgt_buf)) { | |
1878 | netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n", | |
1879 | sz); | |
1880 | return -ENOMEM; | |
1881 | } | |
1882 | ||
1883 | /* Enable L3/L4 hardware checksum computation. | |
1884 | * | |
1885 | * We must do this before dma_map_single(DMA_TO_DEVICE), because we may | |
1886 | * need to write into the skb. | |
1887 | */ | |
1888 | err = dpaa_enable_tx_csum(priv, skb, fd, | |
1889 | sgt_buf + DPAA_TX_PRIV_DATA_SIZE); | |
1890 | if (unlikely(err < 0)) { | |
1891 | if (net_ratelimit()) | |
1892 | netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", | |
1893 | err); | |
1894 | goto csum_failed; | |
1895 | } | |
1896 | ||
1897 | sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom); | |
1898 | qm_sg_entry_set_len(&sgt[0], skb_headlen(skb)); | |
1899 | sgt[0].bpid = FSL_DPAA_BPID_INV; | |
1900 | sgt[0].offset = 0; | |
1901 | addr = dma_map_single(dev, skb->data, | |
1902 | skb_headlen(skb), dma_dir); | |
1903 | if (unlikely(dma_mapping_error(dev, addr))) { | |
1904 | dev_err(dev, "DMA mapping failed\n"); | |
1905 | err = -EINVAL; | |
1906 | goto sg0_map_failed; | |
1907 | } | |
1908 | qm_sg_entry_set64(&sgt[0], addr); | |
1909 | ||
1910 | /* populate the rest of SGT entries */ | |
1911 | frag = &skb_shinfo(skb)->frags[0]; | |
1912 | frag_len = frag->size; | |
1913 | for (i = 1; i <= nr_frags; i++, frag++) { | |
| frag_len = frag->size; /* length of the fragment being mapped */ | |
1914 | WARN_ON(!skb_frag_page(frag)); | |
1915 | addr = skb_frag_dma_map(dev, frag, 0, | |
1916 | frag_len, dma_dir); | |
1917 | if (unlikely(dma_mapping_error(dev, addr))) { | |
1918 | dev_err(dev, "DMA mapping failed\n"); | |
1919 | err = -EINVAL; | |
1920 | goto sg_map_failed; | |
1921 | } | |
1922 | ||
1923 | qm_sg_entry_set_len(&sgt[i], frag_len); | |
1924 | sgt[i].bpid = FSL_DPAA_BPID_INV; | |
1925 | sgt[i].offset = 0; | |
1926 | ||
1927 | /* keep the offset in the address */ | |
1928 | qm_sg_entry_set64(&sgt[i], addr); | |
1930 | } | |
1931 | qm_sg_entry_set_f(&sgt[i - 1], frag_len); | |
1932 | ||
1933 | qm_fd_set_sg(fd, priv->tx_headroom, skb->len); | |
1934 | ||
1935 | /* DMA map the SGT page */ | |
1936 | buffer_start = (void *)sgt - priv->tx_headroom; | |
1937 | skbh = (struct sk_buff **)buffer_start; | |
1938 | *skbh = skb; | |
1939 | ||
1940 | addr = dma_map_single(dev, buffer_start, priv->tx_headroom + | |
1941 | sizeof(struct qm_sg_entry) * (1 + nr_frags), | |
1942 | dma_dir); | |
1943 | if (unlikely(dma_mapping_error(dev, addr))) { | |
1944 | dev_err(dev, "DMA mapping failed\n"); | |
1945 | err = -EINVAL; | |
1946 | goto sgt_map_failed; | |
1947 | } | |
1948 | ||
1949 | fd->bpid = FSL_DPAA_BPID_INV; | |
7d6f8dc0 | 1950 | fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); |
9ad1a374 MB |
1951 | qm_fd_addr_set64(fd, addr); |
1952 | ||
1953 | return 0; | |
1954 | ||
1955 | sgt_map_failed: | |
1956 | sg_map_failed: | |
1957 | for (j = 0; j < i; j++) | |
1958 | dma_unmap_page(dev, qm_sg_addr(&sgt[j]), | |
1959 | qm_sg_entry_get_len(&sgt[j]), dma_dir); | |
1960 | sg0_map_failed: | |
1961 | csum_failed: | |
1962 | skb_free_frag(sgt_buf); | |
1963 | ||
1964 | return err; | |
1965 | } | |
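| | |
| /* S/G counterpart (illustrative): the page frag allocated above holds the | |
| * Tx headroom plus the S/G table, while the frame data stays in the skb | |
| * head and frags: | |
| * | |
| * [ tx_headroom: *skbh, csum ][ sgt[0] ][ sgt[1] ] ... [ sgt[nr_frags] ] | |
| * skb head frag 0 frag n-1 | |
| * | |
| * sgt[0] maps the linear part of the skb, sgt[i] maps frag i - 1, and the | |
| * last entry carries the final bit set by qm_sg_entry_set_f() above. | |
| */ | |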
1966 | ||
1967 | static inline int dpaa_xmit(struct dpaa_priv *priv, | |
1968 | struct rtnl_link_stats64 *percpu_stats, | |
1969 | int queue, | |
1970 | struct qm_fd *fd) | |
1971 | { | |
1972 | struct qman_fq *egress_fq; | |
1973 | int err, i; | |
1974 | ||
1975 | egress_fq = priv->egress_fqs[queue]; | |
1976 | if (fd->bpid == FSL_DPAA_BPID_INV) | |
7d6f8dc0 | 1977 | fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue])); |
9ad1a374 | 1978 | |
eb11ddf3 MB |
1979 | /* Trace this Tx fd */ |
1980 | trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd); | |
1981 | ||
9ad1a374 MB |
1982 | for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) { |
1983 | err = qman_enqueue(egress_fq, fd); | |
1984 | if (err != -EBUSY) | |
1985 | break; | |
1986 | } | |
1987 | ||
1988 | if (unlikely(err < 0)) { | |
1989 | percpu_stats->tx_errors++; | |
1990 | percpu_stats->tx_fifo_errors++; | |
1991 | return err; | |
1992 | } | |
1993 | ||
1994 | percpu_stats->tx_packets++; | |
1995 | percpu_stats->tx_bytes += qm_fd_get_length(fd); | |
1996 | ||
1997 | return 0; | |
1998 | } | |
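| | |
| /* Contention note (illustrative): qman_enqueue() returns -EBUSY while the | |
| * portal's enqueue ring is full, so the loop above retries up to | |
| * DPAA_ENQUEUE_RETRIES times before counting a tx_fifo_error. A sketch of | |
| * the caller-side contract, mirroring dpaa_start_xmit() below: | |
| * | |
| * if (dpaa_xmit(priv, percpu_stats, queue, &fd) == 0) | |
| * return NETDEV_TX_OK; // fd is now owned by hardware | |
| * dpaa_cleanup_tx_fd(priv, &fd); // on failure, undo the mappings | |
| */ | |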
1999 | ||
2000 | static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | |
2001 | { | |
2002 | const int queue_mapping = skb_get_queue_mapping(skb); | |
2003 | bool nonlinear = skb_is_nonlinear(skb); | |
2004 | struct rtnl_link_stats64 *percpu_stats; | |
2005 | struct dpaa_percpu_priv *percpu_priv; | |
2006 | struct dpaa_priv *priv; | |
2007 | struct qm_fd fd; | |
2008 | int offset = 0; | |
2009 | int err = 0; | |
2010 | ||
2011 | priv = netdev_priv(net_dev); | |
2012 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | |
2013 | percpu_stats = &percpu_priv->stats; | |
2014 | ||
2015 | qm_fd_clear_fd(&fd); | |
2016 | ||
2017 | if (!nonlinear) { | |
2018 | /* We're going to store the skb backpointer at the beginning | |
2019 | * of the data buffer, so we need a privately owned skb. | |
2020 | * | |
2021 | * We've made sure the skb is not shared via dev->priv_flags; | |
2022 | * we still need to verify that the skb head is not cloned. | |
2023 | */ | |
2024 | if (skb_cow_head(skb, priv->tx_headroom)) | |
2025 | goto enomem; | |
2026 | ||
2027 | WARN_ON(skb_is_nonlinear(skb)); | |
2028 | } | |
2029 | ||
2030 | /* MAX_SKB_FRAGS is equal to or larger than DPAA_SGT_MAX_ENTRIES; | |
2031 | * make sure we don't feed FMan with more fragments than it supports. | |
2032 | */ | |
2033 | if (nonlinear && | |
2034 | likely(skb_shinfo(skb)->nr_frags < DPAA_SGT_MAX_ENTRIES)) { | |
2035 | /* Just create a S/G fd based on the skb */ | |
2036 | err = skb_to_sg_fd(priv, skb, &fd); | |
b0ce0d02 | 2037 | percpu_priv->tx_frag_skbuffs++; |
9ad1a374 MB |
2038 | } else { |
2039 | /* If the egress skb contains more fragments than we support | |
2040 | * we have no choice but to linearize it ourselves. | |
2041 | */ | |
2042 | if (unlikely(nonlinear) && __skb_linearize(skb)) | |
2043 | goto enomem; | |
2044 | ||
2045 | /* Finally, create a contig FD from this skb */ | |
2046 | err = skb_to_contig_fd(priv, skb, &fd, &offset); | |
2047 | } | |
2048 | if (unlikely(err < 0)) | |
2049 | goto skb_to_fd_failed; | |
2050 | ||
2051 | if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0)) | |
2052 | return NETDEV_TX_OK; | |
2053 | ||
2054 | dpaa_cleanup_tx_fd(priv, &fd); | |
2055 | skb_to_fd_failed: | |
2056 | enomem: | |
2057 | percpu_stats->tx_errors++; | |
2058 | dev_kfree_skb(skb); | |
2059 | return NETDEV_TX_OK; | |
2060 | } | |
2061 | ||
2062 | static void dpaa_rx_error(struct net_device *net_dev, | |
2063 | const struct dpaa_priv *priv, | |
2064 | struct dpaa_percpu_priv *percpu_priv, | |
2065 | const struct qm_fd *fd, | |
2066 | u32 fqid) | |
2067 | { | |
2068 | if (net_ratelimit()) | |
2069 | netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n", | |
7d6f8dc0 | 2070 | be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS); |
9ad1a374 MB |
2071 | |
2072 | percpu_priv->stats.rx_errors++; | |
2073 | ||
7d6f8dc0 | 2074 | if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA) |
b0ce0d02 | 2075 | percpu_priv->rx_errors.dme++; |
7d6f8dc0 | 2076 | if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL) |
b0ce0d02 | 2077 | percpu_priv->rx_errors.fpe++; |
7d6f8dc0 | 2078 | if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE) |
b0ce0d02 | 2079 | percpu_priv->rx_errors.fse++; |
7d6f8dc0 | 2080 | if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR) |
b0ce0d02 MB |
2081 | percpu_priv->rx_errors.phe++; |
2082 | ||
9ad1a374 MB |
2083 | dpaa_fd_release(net_dev, fd); |
2084 | } | |
2085 | ||
2086 | static void dpaa_tx_error(struct net_device *net_dev, | |
2087 | const struct dpaa_priv *priv, | |
2088 | struct dpaa_percpu_priv *percpu_priv, | |
2089 | const struct qm_fd *fd, | |
2090 | u32 fqid) | |
2091 | { | |
2092 | struct sk_buff *skb; | |
2093 | ||
2094 | if (net_ratelimit()) | |
2095 | netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", | |
7d6f8dc0 | 2096 | be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS); |
9ad1a374 MB |
2097 | |
2098 | percpu_priv->stats.tx_errors++; | |
2099 | ||
2100 | skb = dpaa_cleanup_tx_fd(priv, fd); | |
2101 | dev_kfree_skb(skb); | |
2102 | } | |
2103 | ||
2104 | static int dpaa_eth_poll(struct napi_struct *napi, int budget) | |
2105 | { | |
2106 | struct dpaa_napi_portal *np = | |
2107 | container_of(napi, struct dpaa_napi_portal, napi); | |
2108 | ||
2109 | int cleaned = qman_p_poll_dqrr(np->p, budget); | |
2110 | ||
2111 | if (cleaned < budget) { | |
6ad20165 | 2112 | napi_complete_done(napi, cleaned); |
9ad1a374 MB |
2113 | qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); |
2114 | ||
2115 | } else if (np->down) { | |
2116 | qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); | |
2117 | } | |
2118 | ||
2119 | return cleaned; | |
2120 | } | |
2121 | ||
2122 | static void dpaa_tx_conf(struct net_device *net_dev, | |
2123 | const struct dpaa_priv *priv, | |
2124 | struct dpaa_percpu_priv *percpu_priv, | |
2125 | const struct qm_fd *fd, | |
2126 | u32 fqid) | |
2127 | { | |
2128 | struct sk_buff *skb; | |
2129 | ||
7d6f8dc0 | 2130 | if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) { |
9ad1a374 MB |
2131 | if (net_ratelimit()) |
2132 | netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", | |
7d6f8dc0 CM |
2133 | be32_to_cpu(fd->status) & |
2134 | FM_FD_STAT_TX_ERRORS); | |
9ad1a374 MB |
2135 | |
2136 | percpu_priv->stats.tx_errors++; | |
2137 | } | |
2138 | ||
b0ce0d02 MB |
2139 | percpu_priv->tx_confirm++; |
2140 | ||
9ad1a374 MB |
2141 | skb = dpaa_cleanup_tx_fd(priv, fd); |
2142 | ||
2143 | consume_skb(skb); | |
2144 | } | |
2145 | ||
2146 | static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv, | |
2147 | struct qman_portal *portal) | |
2148 | { | |
2149 | if (unlikely(in_irq() || !in_serving_softirq())) { | |
2150 | /* Disable QMan IRQ and invoke NAPI */ | |
2151 | qman_p_irqsource_remove(portal, QM_PIRQ_DQRI); | |
2152 | ||
2153 | percpu_priv->np.p = portal; | |
2154 | napi_schedule(&percpu_priv->np.napi); | |
b0ce0d02 | 2155 | percpu_priv->in_interrupt++; |
9ad1a374 MB |
2156 | return 1; |
2157 | } | |
2158 | return 0; | |
2159 | } | |
2160 | ||
2161 | static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal, | |
2162 | struct qman_fq *fq, | |
2163 | const struct qm_dqrr_entry *dq) | |
2164 | { | |
2165 | struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base); | |
2166 | struct dpaa_percpu_priv *percpu_priv; | |
2167 | struct net_device *net_dev; | |
2168 | struct dpaa_bp *dpaa_bp; | |
2169 | struct dpaa_priv *priv; | |
2170 | ||
2171 | net_dev = dpaa_fq->net_dev; | |
2172 | priv = netdev_priv(net_dev); | |
2173 | dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); | |
2174 | if (!dpaa_bp) | |
2175 | return qman_cb_dqrr_consume; | |
2176 | ||
2177 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | |
2178 | ||
2179 | if (dpaa_eth_napi_schedule(percpu_priv, portal)) | |
2180 | return qman_cb_dqrr_stop; | |
2181 | ||
2182 | if (dpaa_eth_refill_bpools(priv)) | |
2183 | /* Unable to refill the buffer pool due to insufficient | |
2184 | * system memory. Just release the frame back into the pool, | |
2185 | * otherwise we'll soon end up with an empty buffer pool. | |
2186 | */ | |
2187 | dpaa_fd_release(net_dev, &dq->fd); | |
2188 | else | |
2189 | dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); | |
2190 | ||
2191 | return qman_cb_dqrr_consume; | |
2192 | } | |
2193 | ||
2194 | static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, | |
2195 | struct qman_fq *fq, | |
2196 | const struct qm_dqrr_entry *dq) | |
2197 | { | |
2198 | struct rtnl_link_stats64 *percpu_stats; | |
2199 | struct dpaa_percpu_priv *percpu_priv; | |
2200 | const struct qm_fd *fd = &dq->fd; | |
2201 | dma_addr_t addr = qm_fd_addr(fd); | |
2202 | enum qm_fd_format fd_format; | |
2203 | struct net_device *net_dev; | |
4529da5b | 2204 | u32 fd_status; |
9ad1a374 MB |
2205 | struct dpaa_bp *dpaa_bp; |
2206 | struct dpaa_priv *priv; | |
2207 | unsigned int skb_len; | |
2208 | struct sk_buff *skb; | |
2209 | int *count_ptr; | |
2210 | ||
7d6f8dc0 CM |
2211 | fd_status = be32_to_cpu(fd->status); |
2212 | fd_format = qm_fd_get_format(fd); | |
9ad1a374 MB |
2213 | net_dev = ((struct dpaa_fq *)fq)->net_dev; |
2214 | priv = netdev_priv(net_dev); | |
2215 | dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); | |
2216 | if (!dpaa_bp) | |
2217 | return qman_cb_dqrr_consume; | |
2218 | ||
eb11ddf3 MB |
2219 | /* Trace the Rx fd */ |
2220 | trace_dpaa_rx_fd(net_dev, fq, &dq->fd); | |
2221 | ||
9ad1a374 MB |
2222 | percpu_priv = this_cpu_ptr(priv->percpu_priv); |
2223 | percpu_stats = &percpu_priv->stats; | |
2224 | ||
2225 | if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal))) | |
2226 | return qman_cb_dqrr_stop; | |
2227 | ||
2228 | /* Make sure we didn't run out of buffers */ | |
2229 | if (unlikely(dpaa_eth_refill_bpools(priv))) { | |
2230 | /* Unable to refill the buffer pool due to insufficient | |
2231 | * system memory. Just release the frame back into the pool, | |
2232 | * otherwise we'll soon end up with an empty buffer pool. | |
2233 | */ | |
2234 | dpaa_fd_release(net_dev, &dq->fd); | |
2235 | return qman_cb_dqrr_consume; | |
2236 | } | |
2237 | ||
2238 | if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS)) { | |
2239 | if (net_ratelimit()) | |
2240 | netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", | |
2241 | fd_status & FM_FD_STAT_RX_ERRORS); | |
2242 | ||
2243 | percpu_stats->rx_errors++; | |
2244 | dpaa_fd_release(net_dev, fd); | |
2245 | return qman_cb_dqrr_consume; | |
2246 | } | |
2247 | ||
2248 | dpaa_bp = dpaa_bpid2pool(fd->bpid); | |
2249 | if (!dpaa_bp) | |
2250 | return qman_cb_dqrr_consume; | |
2251 | ||
2252 | dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE); | |
2253 | ||
2254 | /* prefetch the first 64 bytes of the frame or the SGT start */ | |
2255 | prefetch(phys_to_virt(addr) + qm_fd_get_offset(fd)); | |
2256 | ||
2257 | /* The only FD types that we may receive are contig and S/G */ | |
2259 | WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg)); | |
2260 | ||
2261 | /* Account for either the contig buffer or the SGT buffer (depending on | |
2262 | * which case we were in) having been removed from the pool. | |
2263 | */ | |
2264 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); | |
2265 | (*count_ptr)--; | |
2266 | ||
2267 | if (likely(fd_format == qm_fd_contig)) | |
2268 | skb = contig_fd_to_skb(priv, fd); | |
2269 | else | |
2270 | skb = sg_fd_to_skb(priv, fd); | |
2271 | if (!skb) | |
2272 | return qman_cb_dqrr_consume; | |
2273 | ||
2274 | skb->protocol = eth_type_trans(skb, net_dev); | |
2275 | ||
2276 | skb_len = skb->len; | |
2277 | ||
2278 | if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) | |
2279 | return qman_cb_dqrr_consume; | |
2280 | ||
2281 | percpu_stats->rx_packets++; | |
2282 | percpu_stats->rx_bytes += skb_len; | |
2283 | ||
2284 | return qman_cb_dqrr_consume; | |
2285 | } | |
2286 | ||
2287 | static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal, | |
2288 | struct qman_fq *fq, | |
2289 | const struct qm_dqrr_entry *dq) | |
2290 | { | |
2291 | struct dpaa_percpu_priv *percpu_priv; | |
2292 | struct net_device *net_dev; | |
2293 | struct dpaa_priv *priv; | |
2294 | ||
2295 | net_dev = ((struct dpaa_fq *)fq)->net_dev; | |
2296 | priv = netdev_priv(net_dev); | |
2297 | ||
2298 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | |
2299 | ||
2300 | if (dpaa_eth_napi_schedule(percpu_priv, portal)) | |
2301 | return qman_cb_dqrr_stop; | |
2302 | ||
2303 | dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); | |
2304 | ||
2305 | return qman_cb_dqrr_consume; | |
2306 | } | |
2307 | ||
2308 | static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal, | |
2309 | struct qman_fq *fq, | |
2310 | const struct qm_dqrr_entry *dq) | |
2311 | { | |
2312 | struct dpaa_percpu_priv *percpu_priv; | |
2313 | struct net_device *net_dev; | |
2314 | struct dpaa_priv *priv; | |
2315 | ||
2316 | net_dev = ((struct dpaa_fq *)fq)->net_dev; | |
2317 | priv = netdev_priv(net_dev); | |
2318 | ||
eb11ddf3 MB |
2319 | /* Trace the fd */ |
2320 | trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd); | |
2321 | ||
9ad1a374 MB |
2322 | percpu_priv = this_cpu_ptr(priv->percpu_priv); |
2323 | ||
2324 | if (dpaa_eth_napi_schedule(percpu_priv, portal)) | |
2325 | return qman_cb_dqrr_stop; | |
2326 | ||
2327 | dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); | |
2328 | ||
2329 | return qman_cb_dqrr_consume; | |
2330 | } | |
2331 | ||
2332 | static void egress_ern(struct qman_portal *portal, | |
2333 | struct qman_fq *fq, | |
2334 | const union qm_mr_entry *msg) | |
2335 | { | |
2336 | const struct qm_fd *fd = &msg->ern.fd; | |
2337 | struct dpaa_percpu_priv *percpu_priv; | |
2338 | const struct dpaa_priv *priv; | |
2339 | struct net_device *net_dev; | |
2340 | struct sk_buff *skb; | |
2341 | ||
2342 | net_dev = ((struct dpaa_fq *)fq)->net_dev; | |
2343 | priv = netdev_priv(net_dev); | |
2344 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | |
2345 | ||
2346 | percpu_priv->stats.tx_dropped++; | |
2347 | percpu_priv->stats.tx_fifo_errors++; | |
b0ce0d02 | 2348 | count_ern(percpu_priv, msg); |
9ad1a374 MB |
2349 | |
2350 | skb = dpaa_cleanup_tx_fd(priv, fd); | |
2351 | dev_kfree_skb_any(skb); | |
2352 | } | |
2353 | ||
2354 | static const struct dpaa_fq_cbs dpaa_fq_cbs = { | |
2355 | .rx_defq = { .cb = { .dqrr = rx_default_dqrr } }, | |
2356 | .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } }, | |
2357 | .rx_errq = { .cb = { .dqrr = rx_error_dqrr } }, | |
2358 | .tx_errq = { .cb = { .dqrr = conf_error_dqrr } }, | |
2359 | .egress_ern = { .cb = { .ern = egress_ern } } | |
2360 | }; | |
2361 | ||
2362 | static void dpaa_eth_napi_enable(struct dpaa_priv *priv) | |
2363 | { | |
2364 | struct dpaa_percpu_priv *percpu_priv; | |
2365 | int i; | |
2366 | ||
2367 | for_each_possible_cpu(i) { | |
2368 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); | |
2369 | ||
2370 | percpu_priv->np.down = 0; | |
2371 | napi_enable(&percpu_priv->np.napi); | |
2372 | } | |
2373 | } | |
2374 | ||
2375 | static void dpaa_eth_napi_disable(struct dpaa_priv *priv) | |
2376 | { | |
2377 | struct dpaa_percpu_priv *percpu_priv; | |
2378 | int i; | |
2379 | ||
2380 | for_each_possible_cpu(i) { | |
2381 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); | |
2382 | ||
2383 | percpu_priv->np.down = 1; | |
2384 | napi_disable(&percpu_priv->np.napi); | |
2385 | } | |
2386 | } | |
2387 | ||
2388 | static int dpaa_open(struct net_device *net_dev) | |
2389 | { | |
2390 | struct mac_device *mac_dev; | |
2391 | struct dpaa_priv *priv; | |
2392 | int err, i; | |
2393 | ||
2394 | priv = netdev_priv(net_dev); | |
2395 | mac_dev = priv->mac_dev; | |
2396 | dpaa_eth_napi_enable(priv); | |
2397 | ||
2398 | net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev); | |
2399 | if (!net_dev->phydev) { | |
2400 | netif_err(priv, ifup, net_dev, "init_phy() failed\n"); | |
3fe61f09 MB |
2401 | err = -ENODEV; |
2402 | goto phy_init_failed; | |
9ad1a374 MB |
2403 | } |
2404 | ||
2405 | for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { | |
2406 | err = fman_port_enable(mac_dev->port[i]); | |
2407 | if (err) | |
2408 | goto mac_start_failed; | |
2409 | } | |
2410 | ||
2411 | err = priv->mac_dev->start(mac_dev); | |
2412 | if (err < 0) { | |
2413 | netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err); | |
2414 | goto mac_start_failed; | |
2415 | } | |
2416 | ||
2417 | netif_tx_start_all_queues(net_dev); | |
2418 | ||
2419 | return 0; | |
2420 | ||
2421 | mac_start_failed: | |
2422 | for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) | |
2423 | fman_port_disable(mac_dev->port[i]); | |
2424 | ||
3fe61f09 | 2425 | phy_init_failed: |
9ad1a374 MB |
2426 | dpaa_eth_napi_disable(priv); |
2427 | ||
2428 | return err; | |
2429 | } | |
2430 | ||
2431 | static int dpaa_eth_stop(struct net_device *net_dev) | |
2432 | { | |
2433 | struct dpaa_priv *priv; | |
2434 | int err; | |
2435 | ||
2436 | err = dpaa_stop(net_dev); | |
2437 | ||
2438 | priv = netdev_priv(net_dev); | |
2439 | dpaa_eth_napi_disable(priv); | |
2440 | ||
2441 | return err; | |
2442 | } | |
2443 | ||
1763413a MW |
2444 | static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) |
2445 | { | |
2446 | if (!net_dev->phydev) | |
2447 | return -EINVAL; | |
2448 | return phy_mii_ioctl(net_dev->phydev, rq, cmd); | |
2449 | } | |
2450 | ||
9ad1a374 MB |
2451 | static const struct net_device_ops dpaa_ops = { |
2452 | .ndo_open = dpaa_open, | |
2453 | .ndo_start_xmit = dpaa_start_xmit, | |
2454 | .ndo_stop = dpaa_eth_stop, | |
2455 | .ndo_tx_timeout = dpaa_tx_timeout, | |
2456 | .ndo_get_stats64 = dpaa_get_stats64, | |
2457 | .ndo_set_mac_address = dpaa_set_mac_address, | |
2458 | .ndo_validate_addr = eth_validate_addr, | |
2459 | .ndo_set_rx_mode = dpaa_set_rx_mode, | |
1763413a | 2460 | .ndo_do_ioctl = dpaa_ioctl, |
2ea08f82 | 2461 | .ndo_setup_tc = dpaa_setup_tc, |
9ad1a374 MB |
2462 | }; |
2463 | ||
2464 | static int dpaa_napi_add(struct net_device *net_dev) | |
2465 | { | |
2466 | struct dpaa_priv *priv = netdev_priv(net_dev); | |
2467 | struct dpaa_percpu_priv *percpu_priv; | |
2468 | int cpu; | |
2469 | ||
2470 | for_each_possible_cpu(cpu) { | |
2471 | percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); | |
2472 | ||
2473 | netif_napi_add(net_dev, &percpu_priv->np.napi, | |
2474 | dpaa_eth_poll, NAPI_POLL_WEIGHT); | |
2475 | } | |
2476 | ||
2477 | return 0; | |
2478 | } | |
2479 | ||
2480 | static void dpaa_napi_del(struct net_device *net_dev) | |
2481 | { | |
2482 | struct dpaa_priv *priv = netdev_priv(net_dev); | |
2483 | struct dpaa_percpu_priv *percpu_priv; | |
2484 | int cpu; | |
2485 | ||
2486 | for_each_possible_cpu(cpu) { | |
2487 | percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); | |
2488 | ||
2489 | netif_napi_del(&percpu_priv->np.napi); | |
2490 | } | |
2491 | } | |
2492 | ||
2493 | static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp, | |
2494 | struct bm_buffer *bmb) | |
2495 | { | |
2496 | dma_addr_t addr = bm_buf_addr(bmb); | |
2497 | ||
2498 | dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE); | |
2499 | ||
2500 | skb_free_frag(phys_to_virt(addr)); | |
2501 | } | |
2502 | ||
2503 | /* Alloc the dpaa_bp struct and configure default values */ | |
2504 | static struct dpaa_bp *dpaa_bp_alloc(struct device *dev) | |
2505 | { | |
2506 | struct dpaa_bp *dpaa_bp; | |
2507 | ||
2508 | dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL); | |
2509 | if (!dpaa_bp) | |
2510 | return ERR_PTR(-ENOMEM); | |
2511 | ||
2512 | dpaa_bp->bpid = FSL_DPAA_BPID_INV; | |
2513 | dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count); | |
| if (!dpaa_bp->percpu_count) | |
| return ERR_PTR(-ENOMEM); | |
2514 | dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT; | |
2515 | ||
2516 | dpaa_bp->seed_cb = dpaa_bp_seed; | |
2517 | dpaa_bp->free_buf_cb = dpaa_bp_free_pf; | |
2518 | ||
2519 | return dpaa_bp; | |
2520 | } | |
2521 | ||
2522 | /* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR. | |
2523 | * We won't be sending congestion notifications to FMan; for now, we just use | |
2524 | * this CGR to generate enqueue rejections to FMan in order to drop the frames | |
2525 | * before they reach our ingress queues and eat up memory. | |
2526 | */ | |
2527 | static int dpaa_ingress_cgr_init(struct dpaa_priv *priv) | |
2528 | { | |
2529 | struct qm_mcc_initcgr initcgr; | |
2530 | u32 cs_th; | |
2531 | int err; | |
2532 | ||
2533 | err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid); | |
2534 | if (err < 0) { | |
2535 | if (netif_msg_drv(priv)) | |
2536 | pr_err("Error %d allocating CGR ID\n", err); | |
2537 | goto out_error; | |
2538 | } | |
2539 | ||
2540 | /* Enable CS TD, but disable Congestion State Change Notifications. */ | |
0fbb0f24 | 2541 | memset(&initcgr, 0, sizeof(initcgr)); |
7d6f8dc0 | 2542 | initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES); |
9ad1a374 MB |
2543 | initcgr.cgr.cscn_en = QM_CGR_EN; |
2544 | cs_th = DPAA_INGRESS_CS_THRESHOLD; | |
2545 | qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); | |
2546 | ||
7d6f8dc0 | 2547 | initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN); |
9ad1a374 MB |
2548 | initcgr.cgr.cstd_en = QM_CGR_EN; |
2549 | ||
2550 | /* This CGR will be associated with the SWP affined to the current CPU. | |
2551 | * However, we'll place all our ingress FQs in it. | |
2552 | */ | |
2553 | err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT, | |
2554 | &initcgr); | |
2555 | if (err < 0) { | |
2556 | if (netif_msg_drv(priv)) | |
2557 | pr_err("Error %d creating ingress CGR with ID %d\n", | |
2558 | err, priv->ingress_cgr.cgrid); | |
2559 | qman_release_cgrid(priv->ingress_cgr.cgrid); | |
2560 | goto out_error; | |
2561 | } | |
2562 | if (netif_msg_drv(priv)) | |
2563 | pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n", | |
2564 | priv->ingress_cgr.cgrid, priv->mac_dev->addr); | |
2565 | ||
2566 | priv->use_ingress_cgr = true; | |
2567 | ||
2568 | out_error: | |
2569 | return err; | |
2570 | } | |
2571 | ||
2572 | static const struct of_device_id dpaa_match[]; | |
2573 | ||
2574 | static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl) | |
2575 | { | |
2576 | u16 headroom; | |
2577 | ||
2578 | /* The frame headroom must accommodate: | |
2579 | * - the driver private data area | |
2580 | * - parse results, hash results, timestamp if selected | |
2581 | * If either hash results or timestamp is selected, both will | |
2582 | * be copied to/from the frame headroom, as the TS is located between PR and | |
2583 | * HR in the IC, and the IC copy size has a granularity of 16 bytes | |
2584 | * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM) | |
2585 | * | |
2586 | * Also make sure the headroom is a multiple of data_align bytes | |
2587 | */ | |
2588 | headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE + | |
2589 | DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE); | |
2590 | ||
2591 | return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom, | |
2592 | DPAA_FD_DATA_ALIGNMENT) : | |
2593 | headroom; | |
2594 | } | |
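| | |
| /* Worked example with hypothetical sizes (illustration only): a 16-byte | |
| * private data area, 32 bytes of parse results, an 8-byte timestamp and | |
| * 8 bytes of hash results give a raw headroom of 16 + 32 + 8 + 8 = 64 | |
| * bytes; with DPAA_FD_DATA_ALIGNMENT == 64, ALIGN() keeps that at 64, | |
| * while any raw value in 65..128 would round up to 128. | |
| */ | |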
2595 | ||
2596 | static int dpaa_eth_probe(struct platform_device *pdev) | |
2597 | { | |
2598 | struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL}; | |
2599 | struct dpaa_percpu_priv *percpu_priv; | |
2600 | struct net_device *net_dev = NULL; | |
2601 | struct dpaa_fq *dpaa_fq, *tmp; | |
2602 | struct dpaa_priv *priv = NULL; | |
2603 | struct fm_port_fqs port_fqs; | |
2604 | struct mac_device *mac_dev; | |
2605 | int err = 0, i, channel; | |
2606 | struct device *dev; | |
2607 | ||
2608 | dev = &pdev->dev; | |
2609 | ||
2610 | /* Allocate this early, so we can store relevant information in | |
2611 | * the private area | |
2612 | */ | |
2613 | net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM); | |
2614 | if (!net_dev) { | |
2615 | dev_err(dev, "alloc_etherdev_mq() failed\n"); | |
| err = -ENOMEM; | |
2616 | goto alloc_etherdev_mq_failed; | |
2617 | } | |
2618 | ||
2619 | /* Do this here, so we can be verbose early */ | |
2620 | SET_NETDEV_DEV(net_dev, dev); | |
2621 | dev_set_drvdata(dev, net_dev); | |
2622 | ||
2623 | priv = netdev_priv(net_dev); | |
2624 | priv->net_dev = net_dev; | |
2625 | ||
2626 | priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT); | |
2627 | ||
2628 | mac_dev = dpaa_mac_dev_get(pdev); | |
2629 | if (IS_ERR(mac_dev)) { | |
2630 | dev_err(dev, "dpaa_mac_dev_get() failed\n"); | |
2631 | err = PTR_ERR(mac_dev); | |
2632 | goto mac_probe_failed; | |
2633 | } | |
2634 | ||
2635 | /* If fsl_fm_max_frm is set to a higher value than the standard 1500, | |
2636 | * we choose conservatively and let the user explicitly set a higher | |
2637 | * MTU via ifconfig. Otherwise, the user may end up with different MTUs | |
2638 | * in the same LAN. | |
2639 | * If on the other hand fsl_fm_max_frm has been chosen below 1500, | |
2640 | * start with the maximum allowed. | |
2641 | */ | |
2642 | net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN); | |
2643 | ||
2644 | netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n", | |
2645 | net_dev->mtu); | |
2646 | ||
2647 | priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */ | |
2648 | priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ | |
2649 | ||
2650 | /* device used for DMA mapping */ | |
fb52728a | 2651 | set_dma_ops(dev, get_dma_ops(&pdev->dev)); |
9ad1a374 MB |
2652 | err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); |
2653 | if (err) { | |
2654 | dev_err(dev, "dma_coerce_mask_and_coherent() failed\n"); | |
2655 | goto dev_mask_failed; | |
2656 | } | |
2657 | ||
2658 | /* bp init */ | |
2659 | for (i = 0; i < DPAA_BPS_NUM; i++) { | |
2661 | ||
2662 | dpaa_bps[i] = dpaa_bp_alloc(dev); | |
2663 | if (IS_ERR(dpaa_bps[i])) | |
2664 | return PTR_ERR(dpaa_bps[i]); | |
2665 | /* the raw size of the buffers used for reception */ | |
2666 | dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM); | |
2667 | /* avoid runtime computations by keeping the usable size here */ | |
2668 | dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size); | |
2669 | dpaa_bps[i]->dev = dev; | |
2670 | ||
2671 | err = dpaa_bp_alloc_pool(dpaa_bps[i]); | |
2672 | if (err < 0) { | |
2673 | dpaa_bps_free(priv); | |
2674 | priv->dpaa_bps[i] = NULL; | |
2675 | goto bp_create_failed; | |
2676 | } | |
2677 | priv->dpaa_bps[i] = dpaa_bps[i]; | |
2678 | } | |
2679 | ||
2680 | INIT_LIST_HEAD(&priv->dpaa_fq_list); | |
2681 | ||
2682 | memset(&port_fqs, 0, sizeof(port_fqs)); | |
2683 | ||
2684 | err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs); | |
2685 | if (err < 0) { | |
2686 | dev_err(dev, "dpaa_alloc_all_fqs() failed\n"); | |
2687 | goto fq_probe_failed; | |
2688 | } | |
2689 | ||
2690 | priv->mac_dev = mac_dev; | |
2691 | ||
2692 | channel = dpaa_get_channel(); | |
2693 | if (channel < 0) { | |
2694 | dev_err(dev, "dpaa_get_channel() failed\n"); | |
2695 | err = channel; | |
2696 | goto get_channel_failed; | |
2697 | } | |
2698 | ||
2699 | priv->channel = (u16)channel; | |
2700 | ||
2701 | /* Walk the CPUs with affine portals | |
2702 | * and add this pool channel to each one's dequeue mask. | |
2703 | */ | |
2704 | dpaa_eth_add_channel(priv->channel); | |
2705 | ||
2706 | dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); | |
2707 | ||
2708 | /* Create a congestion group for this netdev, with | |
2709 | * dynamically-allocated CGR ID. | |
2710 | * Must be executed after probing the MAC, but before | |
2711 | * assigning the egress FQs to the CGRs. | |
2712 | */ | |
2713 | err = dpaa_eth_cgr_init(priv); | |
2714 | if (err < 0) { | |
2715 | dev_err(dev, "Error initializing CGR\n"); | |
2716 | goto tx_cgr_init_failed; | |
2717 | } | |
2718 | ||
2719 | err = dpaa_ingress_cgr_init(priv); | |
2720 | if (err < 0) { | |
2721 | dev_err(dev, "Error initializing ingress CGR\n"); | |
2722 | goto rx_cgr_init_failed; | |
2723 | } | |
2724 | ||
2725 | /* Add the FQs to the interface, and make them active */ | |
2726 | list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) { | |
2727 | err = dpaa_fq_init(dpaa_fq, false); | |
2728 | if (err < 0) | |
2729 | goto fq_alloc_failed; | |
2730 | } | |
2731 | ||
2732 | priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]); | |
2733 | priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]); | |
2734 | ||
2735 | /* All real interfaces need their ports initialized */ | |
7f8a6a1b MB |
2736 | err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs, |
2737 | &priv->buf_layout[0], dev); | |
2738 | if (err) | |
2739 | goto init_ports_failed; | |
9ad1a374 MB |
2740 | |
2741 | priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv); | |
2742 | if (!priv->percpu_priv) { | |
2743 | dev_err(dev, "devm_alloc_percpu() failed\n"); | |
2744 | err = -ENOMEM; | |
2745 | goto alloc_percpu_failed; | |
2746 | } | |
2747 | for_each_possible_cpu(i) { | |
2748 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); | |
2749 | memset(percpu_priv, 0, sizeof(*percpu_priv)); | |
2750 | } | |
2751 | ||
c44efa1d CG |
2752 | priv->num_tc = 1; |
2753 | netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM); | |
2754 | ||
9ad1a374 MB |
2755 | /* Initialize NAPI */ |
2756 | err = dpaa_napi_add(net_dev); | |
2757 | if (err < 0) | |
2758 | goto napi_add_failed; | |
2759 | ||
2760 | err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout); | |
2761 | if (err < 0) | |
2762 | goto netdev_init_failed; | |
2763 | ||
846a86e2 MB |
2764 | dpaa_eth_sysfs_init(&net_dev->dev); |
2765 | ||
9ad1a374 MB |
2766 | netif_info(priv, probe, net_dev, "Probed interface %s\n", |
2767 | net_dev->name); | |
2768 | ||
2769 | return 0; | |
2770 | ||
2771 | netdev_init_failed: | |
2772 | napi_add_failed: | |
2773 | dpaa_napi_del(net_dev); | |
2774 | alloc_percpu_failed: | |
7f8a6a1b | 2775 | init_ports_failed: |
9ad1a374 MB |
2776 | dpaa_fq_free(dev, &priv->dpaa_fq_list); |
2777 | fq_alloc_failed: | |
2778 | qman_delete_cgr_safe(&priv->ingress_cgr); | |
2779 | qman_release_cgrid(priv->ingress_cgr.cgrid); | |
2780 | rx_cgr_init_failed: | |
2781 | qman_delete_cgr_safe(&priv->cgr_data.cgr); | |
2782 | qman_release_cgrid(priv->cgr_data.cgr.cgrid); | |
2783 | tx_cgr_init_failed: | |
2784 | get_channel_failed: | |
2785 | dpaa_bps_free(priv); | |
2786 | bp_create_failed: | |
2787 | fq_probe_failed: | |
2788 | dev_mask_failed: | |
2789 | mac_probe_failed: | |
2790 | dev_set_drvdata(dev, NULL); | |
2791 | free_netdev(net_dev); | |
2792 | alloc_etherdev_mq_failed: | |
2793 | for (i = 0; i < DPAA_BPS_NUM && dpaa_bps[i]; i++) { | |
2794 | if (atomic_read(&dpaa_bps[i]->refs) == 0) | |
2795 | devm_kfree(dev, dpaa_bps[i]); | |
2796 | } | |
2797 | return err; | |
2798 | } | |
2799 | ||
2800 | static int dpaa_remove(struct platform_device *pdev) | |
2801 | { | |
2802 | struct net_device *net_dev; | |
2803 | struct dpaa_priv *priv; | |
2804 | struct device *dev; | |
2805 | int err; | |
2806 | ||
2807 | dev = &pdev->dev; | |
2808 | net_dev = dev_get_drvdata(dev); | |
2809 | ||
2810 | priv = netdev_priv(net_dev); | |
2811 | ||
846a86e2 MB |
2812 | dpaa_eth_sysfs_remove(dev); |
2813 | ||
9ad1a374 MB |
2814 | dev_set_drvdata(dev, NULL); |
2815 | unregister_netdev(net_dev); | |
2816 | ||
2817 | err = dpaa_fq_free(dev, &priv->dpaa_fq_list); | |
2818 | ||
2819 | qman_delete_cgr_safe(&priv->ingress_cgr); | |
2820 | qman_release_cgrid(priv->ingress_cgr.cgrid); | |
2821 | qman_delete_cgr_safe(&priv->cgr_data.cgr); | |
2822 | qman_release_cgrid(priv->cgr_data.cgr.cgrid); | |
2823 | ||
2824 | dpaa_napi_del(net_dev); | |
2825 | ||
2826 | dpaa_bps_free(priv); | |
2827 | ||
2828 | free_netdev(net_dev); | |
2829 | ||
2830 | return err; | |
2831 | } | |
2832 | ||
bef0fed4 | 2833 | static const struct platform_device_id dpaa_devtype[] = { |
9ad1a374 MB |
2834 | { |
2835 | .name = "dpaa-ethernet", | |
2836 | .driver_data = 0, | |
2837 | }, { | |
2838 | } | |
2839 | }; | |
2840 | MODULE_DEVICE_TABLE(platform, dpaa_devtype); | |
2841 | ||
2842 | static struct platform_driver dpaa_driver = { | |
2843 | .driver = { | |
2844 | .name = KBUILD_MODNAME, | |
2845 | }, | |
2846 | .id_table = dpaa_devtype, | |
2847 | .probe = dpaa_eth_probe, | |
2848 | .remove = dpaa_remove | |
2849 | }; | |
2850 | ||
2851 | static int __init dpaa_load(void) | |
2852 | { | |
2853 | int err; | |
2854 | ||
2855 | pr_debug("FSL DPAA Ethernet driver\n"); | |
2856 | ||
2857 | /* initialize dpaa_eth mirror values */ | |
2858 | dpaa_rx_extra_headroom = fman_get_rx_extra_headroom(); | |
2859 | dpaa_max_frm = fman_get_max_frm(); | |
2860 | ||
2861 | err = platform_driver_register(&dpaa_driver); | |
2862 | if (err < 0) | |
2863 | pr_err("Error, platform_driver_register() = %d\n", err); | |
2864 | ||
2865 | return err; | |
2866 | } | |
2867 | module_init(dpaa_load); | |
2868 | ||
2869 | static void __exit dpaa_unload(void) | |
2870 | { | |
2871 | platform_driver_unregister(&dpaa_driver); | |
2872 | ||
2873 | /* Only one channel is used and needs to be released after all | |
2874 | * interfaces are removed | |
2875 | */ | |
2876 | dpaa_release_channel(); | |
2877 | } | |
2878 | module_exit(dpaa_unload); | |
2879 | ||
2880 | MODULE_LICENSE("Dual BSD/GPL"); | |
2881 | MODULE_DESCRIPTION("FSL DPAA Ethernet driver"); |