/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/io.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/icmp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
#include <soc/fsl/bman.h>
#include <soc/fsl/qman.h>

#include "fman.h"
#include "fman_port.h"
#include "mac.h"
#include "dpaa_eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa_eth_trace.h"

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");

static u16 tx_timeout = 1000;
module_param(tx_timeout, ushort, 0444);
MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");

#define FM_FD_STAT_RX_ERRORS \
        (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
         FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
         FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
         FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
         FM_FD_ERR_PRS_HDR_ERR)

#define FM_FD_STAT_TX_ERRORS \
        (FM_FD_ERR_UNSUPPORTED_FORMAT | \
         FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)

#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
                          NETIF_MSG_LINK | NETIF_MSG_IFUP | \
                          NETIF_MSG_IFDOWN)

#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
/* Ingress congestion threshold on FMan ports
 * The size in bytes of the ingress tail-drop threshold on FMan ports.
 * Traffic piling up above this value will be rejected by QMan and discarded
 * by FMan.
 */

/* Size in bytes of the FQ taildrop threshold */
#define DPAA_FQ_TD 0x200000

#define DPAA_CS_THRESHOLD_1G 0x06000000
/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
 * The size in bytes of the egress Congestion State notification threshold on
 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
 * and the larger the frame size, the more acute the problem.
 * So we have to find a balance between these factors:
 * - avoiding the device staying congested for a prolonged time (risking
 *   that the netdev watchdog will fire - see also the tx_timeout module
 *   param);
 * - affecting performance of protocols such as TCP, which otherwise
 *   behave well under the congestion notification mechanism;
 * - preventing the Tx cores from tightly-looping (as if the congestion
 *   threshold was too low to be effective);
 * - running out of memory if the CS threshold is set too high.
 */

#define DPAA_CS_THRESHOLD_10G 0x10000000
/* The size in bytes of the egress Congestion State notification threshold on
 * 10G ports, range 0x1000 .. 0x10000000
 */

/* Largest value that the FQD's OAL field can hold */
#define FSL_QMAN_MAX_OAL 127

/* Default alignment for start of data in an Rx FD */
#define DPAA_FD_DATA_ALIGNMENT 16

/* Values for the L3R field of the FM Parse Results */
/* L3 Type field: First IP Present IPv4 */
#define FM_L3_PARSE_RESULT_IPV4 0x8000
/* L3 Type field: First IP Present IPv6 */
#define FM_L3_PARSE_RESULT_IPV6 0x4000
/* Values for the L4R field of the FM Parse Results */
/* L4 Type field: UDP */
#define FM_L4_PARSE_RESULT_UDP 0x40
/* L4 Type field: TCP */
#define FM_L4_PARSE_RESULT_TCP 0x20

#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */

#define FSL_DPAA_BPID_INV               0xff
#define FSL_DPAA_ETH_MAX_BUF_COUNT      128
#define FSL_DPAA_ETH_REFILL_THRESHOLD   80

#define DPAA_TX_PRIV_DATA_SIZE  16
#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
#define DPAA_TIME_STAMP_SIZE 8
#define DPAA_HASH_RESULTS_SIZE 8
#define DPAA_RX_PRIV_DATA_SIZE  (u16)(DPAA_TX_PRIV_DATA_SIZE + \
                                      dpaa_rx_extra_headroom)

#define DPAA_ETH_RX_QUEUES      128

#define DPAA_ENQUEUE_RETRIES    100000

enum port_type {RX, TX};

struct fm_port_fqs {
        struct dpaa_fq *tx_defq;
        struct dpaa_fq *tx_errq;
        struct dpaa_fq *rx_defq;
        struct dpaa_fq *rx_errq;
};

/* All the dpa bps in use at any moment */
static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];

/* The raw buffer size must be cacheline aligned */
#define DPAA_BP_RAW_SIZE 4096
/* When using more than one buffer pool, the raw sizes are as follows:
 * 1 bp: 4KB
 * 2 bp: 2KB, 4KB
 * 3 bp: 1KB, 2KB, 4KB
 * 4 bp: 1KB, 2KB, 4KB, 8KB
 */
static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
{
        size_t res = DPAA_BP_RAW_SIZE / 4;
        u8 i;

        for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
                res *= 2;
        return res;
}
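
/* Worked example (matching the table above): with cnt = 4 pools the loop
 * starts at i = 3, so index 0 keeps the 1KB base (DPAA_BP_RAW_SIZE / 4)
 * and each higher index doubles it: 1KB, 2KB, 4KB, 8KB. With a single
 * pool (cnt = 1) the loop runs for i = 1, 2, doubling twice to 4KB.
 */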

/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
 * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
 * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
 * half-page-aligned buffers, so we reserve some more space for start-of-buffer
 * alignment.
 */
#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
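/* SKB_WITH_OVERHEAD(x) is x minus the cacheline-aligned size of
 * struct skb_shared_info; e.g. on a typical 64-bit build with 64-byte
 * cachelines, a 4KB raw buffer leaves roughly 3.6KB of usable frame
 * space after the SMP_CACHE_BYTES alignment reserve.
 */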

static int dpaa_max_frm;

static int dpaa_rx_extra_headroom;

#define dpaa_get_max_mtu()      \
        (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
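/* E.g. if the FMan maximum frame size is 1522 bytes, this yields the
 * standard 1500-byte MTU: 1522 - (VLAN_ETH_HLEN (18) + ETH_FCS_LEN (4)).
 */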

static int dpaa_netdev_init(struct net_device *net_dev,
                            const struct net_device_ops *dpaa_ops,
                            u16 tx_timeout)
{
        struct dpaa_priv *priv = netdev_priv(net_dev);
        struct device *dev = net_dev->dev.parent;
        struct dpaa_percpu_priv *percpu_priv;
        const u8 *mac_addr;
        int i, err;

        /* Although we access another CPU's private data here
         * we do it at initialization so it is safe
         */
        for_each_possible_cpu(i) {
                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
                percpu_priv->net_dev = net_dev;
        }

        net_dev->netdev_ops = dpaa_ops;
        mac_addr = priv->mac_dev->addr;

        net_dev->mem_start = priv->mac_dev->res->start;
        net_dev->mem_end = priv->mac_dev->res->end;

        net_dev->min_mtu = ETH_MIN_MTU;
        net_dev->max_mtu = dpaa_get_max_mtu();

        net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                 NETIF_F_LLTX);

        net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
        /* The kernel enables GSO automatically, if we declare NETIF_F_SG.
         * For conformity, we'll still declare GSO explicitly.
         */
        net_dev->features |= NETIF_F_GSO;

        net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        /* we do not want shared skbs on TX */
        net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;

        net_dev->features |= net_dev->hw_features;
        net_dev->vlan_features = net_dev->features;

        memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
        memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);

        net_dev->ethtool_ops = &dpaa_ethtool_ops;

        net_dev->needed_headroom = priv->tx_headroom;
        net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);

        /* start without the RUNNING flag, phylib controls it later */
        netif_carrier_off(net_dev);

        err = register_netdev(net_dev);
        if (err < 0) {
                dev_err(dev, "register_netdev() = %d\n", err);
                return err;
        }

        return 0;
}

static int dpaa_stop(struct net_device *net_dev)
{
        struct mac_device *mac_dev;
        struct dpaa_priv *priv;
        int i, err, error;

        priv = netdev_priv(net_dev);
        mac_dev = priv->mac_dev;

        netif_tx_stop_all_queues(net_dev);
        /* Allow the Fman (Tx) port to process in-flight frames before we
         * try switching it off.
         */
        usleep_range(5000, 10000);

        err = mac_dev->stop(mac_dev);
        if (err < 0)
                netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
                          err);

        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
                error = fman_port_disable(mac_dev->port[i]);
                if (error)
                        err = error;
        }

        if (net_dev->phydev)
                phy_disconnect(net_dev->phydev);
        net_dev->phydev = NULL;

        return err;
}

static void dpaa_tx_timeout(struct net_device *net_dev)
{
        struct dpaa_percpu_priv *percpu_priv;
        const struct dpaa_priv *priv;

        priv = netdev_priv(net_dev);
        percpu_priv = this_cpu_ptr(priv->percpu_priv);

        netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
                   jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));

        percpu_priv->stats.tx_errors++;
}

/* Calculates the statistics for the given device by adding the statistics
 * collected by each CPU.
 */
static struct rtnl_link_stats64 *dpaa_get_stats64(struct net_device *net_dev,
                                                  struct rtnl_link_stats64 *s)
{
        int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
        struct dpaa_priv *priv = netdev_priv(net_dev);
        struct dpaa_percpu_priv *percpu_priv;
        u64 *netstats = (u64 *)s;
        u64 *cpustats;
        int i, j;

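        /* The core zeroes the stats structure before invoking this handler
         * (see dev_get_stats()), so it is safe to accumulate straight into
         * it across CPUs.
         */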
        for_each_possible_cpu(i) {
                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

                cpustats = (u64 *)&percpu_priv->stats;

                /* add stats from all CPUs */
                for (j = 0; j < numstats; j++)
                        netstats[j] += cpustats[j];
        }

        return s;
}

static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
{
        struct platform_device *of_dev;
        struct dpaa_eth_data *eth_data;
        struct device *dpaa_dev, *dev;
        struct device_node *mac_node;
        struct mac_device *mac_dev;

        dpaa_dev = &pdev->dev;
        eth_data = dpaa_dev->platform_data;
        if (!eth_data)
                return ERR_PTR(-ENODEV);

        mac_node = eth_data->mac_node;

        of_dev = of_find_device_by_node(mac_node);
        if (!of_dev) {
                dev_err(dpaa_dev, "of_find_device_by_node(%s) failed\n",
                        mac_node->full_name);
                of_node_put(mac_node);
                return ERR_PTR(-EINVAL);
        }
        of_node_put(mac_node);

        dev = &of_dev->dev;

        mac_dev = dev_get_drvdata(dev);
        if (!mac_dev) {
                dev_err(dpaa_dev, "dev_get_drvdata(%s) failed\n",
                        dev_name(dev));
                return ERR_PTR(-EINVAL);
        }

        return mac_dev;
}

static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
{
        const struct dpaa_priv *priv;
        struct mac_device *mac_dev;
        struct sockaddr old_addr;
        int err;

        priv = netdev_priv(net_dev);

        memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);

        err = eth_mac_addr(net_dev, addr);
        if (err < 0) {
                netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
                return err;
        }

        mac_dev = priv->mac_dev;

        err = mac_dev->change_addr(mac_dev->fman_mac,
                                   (enet_addr_t *)net_dev->dev_addr);
        if (err < 0) {
                netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
                          err);
                /* reverting to previous address */
                eth_mac_addr(net_dev, &old_addr);

                return err;
        }

        return 0;
}

static void dpaa_set_rx_mode(struct net_device *net_dev)
{
        const struct dpaa_priv *priv;
        int err;

        priv = netdev_priv(net_dev);

        if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
                priv->mac_dev->promisc = !priv->mac_dev->promisc;
                err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
                                                 priv->mac_dev->promisc);
                if (err < 0)
                        netif_err(priv, drv, net_dev,
                                  "mac_dev->set_promisc() = %d\n",
                                  err);
        }

        err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
        if (err < 0)
                netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
                          err);
}

static struct dpaa_bp *dpaa_bpid2pool(int bpid)
{
        if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
                return NULL;

        return dpaa_bp_array[bpid];
}

/* checks if this bpool is already allocated */
static bool dpaa_bpid2pool_use(int bpid)
{
        if (dpaa_bpid2pool(bpid)) {
                atomic_inc(&dpaa_bp_array[bpid]->refs);
                return true;
        }

        return false;
}

/* called only once per bpid by dpaa_bp_alloc_pool() */
static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
{
        dpaa_bp_array[bpid] = dpaa_bp;
        atomic_set(&dpaa_bp->refs, 1);
}

static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
{
        int err;

        if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
                pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
                       __func__);
                return -EINVAL;
        }

        /* If the pool is already specified, we only create one per bpid */
        if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
            dpaa_bpid2pool_use(dpaa_bp->bpid))
                return 0;

        if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
                dpaa_bp->pool = bman_new_pool();
                if (!dpaa_bp->pool) {
                        pr_err("%s: bman_new_pool() failed\n",
                               __func__);
                        return -ENODEV;
                }

                dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
        }

        if (dpaa_bp->seed_cb) {
                err = dpaa_bp->seed_cb(dpaa_bp);
                if (err)
                        goto pool_seed_failed;
        }

        dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);

        return 0;

pool_seed_failed:
        pr_err("%s: pool seeding failed\n", __func__);
        bman_free_pool(dpaa_bp->pool);

        return err;
}

/* remove and free all the buffers from the given buffer pool */
static void dpaa_bp_drain(struct dpaa_bp *bp)
{
        u8 num = 8;
        int ret;

        do {
                struct bm_buffer bmb[8];
                int i;

                ret = bman_acquire(bp->pool, bmb, num);
                if (ret < 0) {
                        if (num == 8) {
                                /* we have less than 8 buffers left;
                                 * drain them one by one
                                 */
                                num = 1;
                                ret = 1;
                                continue;
                        } else {
                                /* Pool is fully drained */
                                break;
                        }
                }

                if (bp->free_buf_cb)
                        for (i = 0; i < num; i++)
                                bp->free_buf_cb(bp, &bmb[i]);
        } while (ret > 0);
}

static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
{
        struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);

        /* the mapping between bpid and dpaa_bp is done very late in the
         * allocation procedure; if something failed before the mapping, the bp
         * was not configured, therefore we don't need the below instructions
         */
        if (!bp)
                return;

        if (!atomic_dec_and_test(&bp->refs))
                return;

        if (bp->free_buf_cb)
                dpaa_bp_drain(bp);

        dpaa_bp_array[bp->bpid] = NULL;
        bman_free_pool(bp->pool);
}

static void dpaa_bps_free(struct dpaa_priv *priv)
{
        int i;

        for (i = 0; i < DPAA_BPS_NUM; i++)
                dpaa_bp_free(priv->dpaa_bps[i]);
}

/* Use multiple WQs for FQ assignment:
 * - Tx Confirmation queues go to WQ1.
 * - Rx Error and Tx Error queues go to WQ2 (giving them a better chance
 *   to be scheduled, in case there are many more FQs in WQ3).
 * - Rx Default and Tx queues go to WQ3 (no differentiation between
 *   Rx and Tx traffic).
 * This ensures that Tx-confirmed buffers are timely released. In particular,
 * it avoids congestion on the Tx Confirm FQs, which can pile up PFDRs if they
 * are greatly outnumbered by other FQs in the system, while
 * dequeue scheduling is round-robin.
 */
static inline void dpaa_assign_wq(struct dpaa_fq *fq)
{
        switch (fq->fq_type) {
        case FQ_TYPE_TX_CONFIRM:
        case FQ_TYPE_TX_CONF_MQ:
                fq->wq = 1;
                break;
        case FQ_TYPE_RX_ERROR:
        case FQ_TYPE_TX_ERROR:
                fq->wq = 2;
                break;
        case FQ_TYPE_RX_DEFAULT:
        case FQ_TYPE_TX:
                fq->wq = 3;
                break;
        default:
                WARN(1, "Invalid FQ type %d for FQID %d!\n",
                     fq->fq_type, fq->fqid);
        }
}

static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
                                     u32 start, u32 count,
                                     struct list_head *list,
                                     enum dpaa_fq_type fq_type)
{
        struct dpaa_fq *dpaa_fq;
        int i;

        dpaa_fq = devm_kzalloc(dev, sizeof(*dpaa_fq) * count,
                               GFP_KERNEL);
        if (!dpaa_fq)
                return NULL;

        for (i = 0; i < count; i++) {
                dpaa_fq[i].fq_type = fq_type;
                dpaa_fq[i].fqid = start ? start + i : 0;
                list_add_tail(&dpaa_fq[i].list, list);
        }

        for (i = 0; i < count; i++)
                dpaa_assign_wq(dpaa_fq + i);

        return dpaa_fq;
}

static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
                              struct fm_port_fqs *port_fqs)
{
        struct dpaa_fq *dpaa_fq;

        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
        if (!dpaa_fq)
                goto fq_alloc_failed;

        port_fqs->rx_errq = &dpaa_fq[0];

        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
        if (!dpaa_fq)
                goto fq_alloc_failed;

        port_fqs->rx_defq = &dpaa_fq[0];

        if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
                goto fq_alloc_failed;

        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
        if (!dpaa_fq)
                goto fq_alloc_failed;

        port_fqs->tx_errq = &dpaa_fq[0];

        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
        if (!dpaa_fq)
                goto fq_alloc_failed;

        port_fqs->tx_defq = &dpaa_fq[0];

        if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
                goto fq_alloc_failed;

        return 0;

fq_alloc_failed:
        dev_err(dev, "dpaa_fq_alloc() failed\n");
        return -ENOMEM;
}

static u32 rx_pool_channel;
static DEFINE_SPINLOCK(rx_pool_channel_init);

static int dpaa_get_channel(void)
{
        spin_lock(&rx_pool_channel_init);
        if (!rx_pool_channel) {
                u32 pool;
                int ret;

                ret = qman_alloc_pool(&pool);

                if (!ret)
                        rx_pool_channel = pool;
        }
        spin_unlock(&rx_pool_channel_init);
        if (!rx_pool_channel)
                return -ENOMEM;
        return rx_pool_channel;
}

static void dpaa_release_channel(void)
{
        qman_release_pool(rx_pool_channel);
}

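/* Add the pool channel to the static dequeue command of every affine
 * portal, so frames enqueued to it can be dequeued by any core running
 * a QMan portal.
 */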
static void dpaa_eth_add_channel(u16 channel)
{
        u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
        const cpumask_t *cpus = qman_affine_cpus();
        struct qman_portal *portal;
        int cpu;

        for_each_cpu(cpu, cpus) {
                portal = qman_get_affine_portal(cpu);
                qman_p_static_dequeue_add(portal, pool);
        }
}

/* Congestion group state change notification callback.
 * Stops the device's egress queues while they are congested and
 * wakes them upon exiting congested state.
 * Also updates some CGR-related stats.
 */
static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
                           int congested)
{
        struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
                struct dpaa_priv, cgr_data.cgr);

        if (congested) {
                priv->cgr_data.congestion_start_jiffies = jiffies;
                netif_tx_stop_all_queues(priv->net_dev);
                priv->cgr_data.cgr_congested_count++;
        } else {
                priv->cgr_data.congested_jiffies +=
                        (jiffies - priv->cgr_data.congestion_start_jiffies);
                netif_tx_wake_all_queues(priv->net_dev);
        }
}

static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
{
        struct qm_mcc_initcgr initcgr;
        u32 cs_th;
        int err;

        err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
        if (err < 0) {
                if (netif_msg_drv(priv))
                        pr_err("%s: Error %d allocating CGR ID\n",
                               __func__, err);
                goto out_error;
        }
        priv->cgr_data.cgr.cb = dpaa_eth_cgscn;

        /* Enable Congestion State Change Notifications and CS taildrop */
        initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
        initcgr.cgr.cscn_en = QM_CGR_EN;

        /* Set different thresholds based on the MAC speed.
         * This may turn suboptimal if the MAC is reconfigured at a speed
         * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
         * In such cases, we ought to reconfigure the threshold, too.
         */
        if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
                cs_th = DPAA_CS_THRESHOLD_10G;
        else
                cs_th = DPAA_CS_THRESHOLD_1G;
        qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);

        initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
        initcgr.cgr.cstd_en = QM_CGR_EN;

        err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
                              &initcgr);
        if (err < 0) {
                if (netif_msg_drv(priv))
                        pr_err("%s: Error %d creating CGR with ID %d\n",
                               __func__, err, priv->cgr_data.cgr.cgrid);
                qman_release_cgrid(priv->cgr_data.cgr.cgrid);
                goto out_error;
        }
        if (netif_msg_drv(priv))
                pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
                         priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
                         priv->cgr_data.cgr.chan);

out_error:
        return err;
}

static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
                                      struct dpaa_fq *fq,
                                      const struct qman_fq *template)
{
        fq->fq_base = *template;
        fq->net_dev = priv->net_dev;

        fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
        fq->channel = priv->channel;
}

static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
                                     struct dpaa_fq *fq,
                                     struct fman_port *port,
                                     const struct qman_fq *template)
{
        fq->fq_base = *template;
        fq->net_dev = priv->net_dev;

        if (port) {
                fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
                fq->channel = (u16)fman_port_get_qman_channel_id(port);
        } else {
                fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
        }
}

static void dpaa_fq_setup(struct dpaa_priv *priv,
                          const struct dpaa_fq_cbs *fq_cbs,
                          struct fman_port *tx_port)
{
        int egress_cnt = 0, conf_cnt = 0, num_portals = 0, cpu;
        const cpumask_t *affine_cpus = qman_affine_cpus();
        u16 portals[NR_CPUS];
        struct dpaa_fq *fq;

        for_each_cpu(cpu, affine_cpus)
                portals[num_portals++] = qman_affine_channel(cpu);
        if (num_portals == 0)
                dev_err(priv->net_dev->dev.parent,
                        "No Qman software (affine) channels found");

        /* Initialize each FQ in the list */
        list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
                switch (fq->fq_type) {
                case FQ_TYPE_RX_DEFAULT:
                        dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
                        break;
                case FQ_TYPE_RX_ERROR:
                        dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
                        break;
                case FQ_TYPE_TX:
                        dpaa_setup_egress(priv, fq, tx_port,
                                          &fq_cbs->egress_ern);
                        /* If we have more Tx queues than the number of cores,
                         * just ignore the extra ones.
                         */
                        if (egress_cnt < DPAA_ETH_TXQ_NUM)
                                priv->egress_fqs[egress_cnt++] = &fq->fq_base;
                        break;
                case FQ_TYPE_TX_CONF_MQ:
                        priv->conf_fqs[conf_cnt++] = &fq->fq_base;
                        /* fall through */
                case FQ_TYPE_TX_CONFIRM:
                        dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
                        break;
                case FQ_TYPE_TX_ERROR:
                        dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
                        break;
                default:
                        dev_warn(priv->net_dev->dev.parent,
                                 "Unknown FQ type detected!\n");
                        break;
                }
        }

        /* Make sure all CPUs receive a corresponding Tx queue. */
        while (egress_cnt < DPAA_ETH_TXQ_NUM) {
                list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
                        if (fq->fq_type != FQ_TYPE_TX)
                                continue;
                        priv->egress_fqs[egress_cnt++] = &fq->fq_base;
                        if (egress_cnt == DPAA_ETH_TXQ_NUM)
                                break;
                }
        }
}

static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
                                   struct qman_fq *tx_fq)
{
        int i;

        for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
                if (priv->egress_fqs[i] == tx_fq)
                        return i;

        return -EINVAL;
}

static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
{
        const struct dpaa_priv *priv;
        struct qman_fq *confq = NULL;
        struct qm_mcc_initfq initfq;
        struct device *dev;
        struct qman_fq *fq;
        int queue_id;
        int err;

        priv = netdev_priv(dpaa_fq->net_dev);
        dev = dpaa_fq->net_dev->dev.parent;

        if (dpaa_fq->fqid == 0)
                dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;

        dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);

        err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
        if (err) {
                dev_err(dev, "qman_create_fq() failed\n");
                return err;
        }
        fq = &dpaa_fq->fq_base;

        if (dpaa_fq->init) {
                memset(&initfq, 0, sizeof(initfq));

                initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
                /* Note: we may get to keep an empty FQ in cache */
                initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);

                /* Try to reduce the number of portal interrupts for
                 * Tx Confirmation FQs.
                 */
                if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);

                /* FQ placement */
                initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);

                qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);

                /* Put all egress queues in a congestion group of their own.
                 * Sensu stricto, the Tx confirmation queues are Rx FQs,
                 * rather than Tx - but they nonetheless account for the
                 * memory footprint on behalf of egress traffic. We therefore
                 * place them in the netdev's CGR, along with the Tx FQs.
                 */
                if (dpaa_fq->fq_type == FQ_TYPE_TX ||
                    dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
                    dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
                        initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
                        /* Set a fixed overhead accounting, in an attempt to
                         * reduce the impact of fixed-size skb shells and the
                         * driver's needed headroom on system memory. This is
                         * especially the case when the egress traffic is
                         * composed of small datagrams.
                         * Unfortunately, QMan's OAL value is capped to an
                         * insufficient value, but even that is better than
                         * no overhead accounting at all.
                         */
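                        /* (On typical 64-bit builds, sizeof(struct sk_buff)
                         * alone already exceeds FSL_QMAN_MAX_OAL, so the
                         * min() below virtually always selects the cap.)
                         */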
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
                        qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
                        qm_fqd_set_oal(&initfq.fqd,
                                       min(sizeof(struct sk_buff) +
                                           priv->tx_headroom,
                                           (size_t)FSL_QMAN_MAX_OAL));
                }

                if (td_enable) {
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
                        qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
                        initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
                }

                if (dpaa_fq->fq_type == FQ_TYPE_TX) {
                        queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
                        if (queue_id >= 0)
                                confq = priv->conf_fqs[queue_id];
                        if (confq) {
                                initfq.we_mask |=
                                        cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
                        /* ContextA: OVOM=1(use contextA2 bits instead of ICAD)
                         *           A2V=1 (contextA A2 field is valid)
                         *           A0V=1 (contextA A0 field is valid)
                         *           B0V=1 (contextB field is valid)
                         * ContextA A2: EBD=1 (deallocate buffers inside FMan)
                         * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
                         */
                                qm_fqd_context_a_set64(&initfq.fqd,
                                                       0x1e00000080000000ULL);
                        }
                }

                /* Put all the ingress queues in our "ingress CGR". */
                if (priv->use_ingress_cgr &&
                    (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
                     dpaa_fq->fq_type == FQ_TYPE_RX_ERROR)) {
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
                        initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
                        /* Set a fixed overhead accounting, just like for the
                         * egress CGR.
                         */
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
                        qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
                        qm_fqd_set_oal(&initfq.fqd,
                                       min(sizeof(struct sk_buff) +
                                           priv->tx_headroom,
                                           (size_t)FSL_QMAN_MAX_OAL));
                }

                /* Initialization common to all ingress queues */
                if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE);
                        initfq.fqd.context_a.stashing.exclusive =
                                QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
                                QM_STASHING_EXCL_ANNOTATION;
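                        /* Stash one cacheline of annotation and two of frame
                         * data into the dequeueing core's cache, plus enough
                         * context cachelines to cover struct qman_fq
                         * (64-byte cacheline units).
                         */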
                        qm_fqd_set_stashing(&initfq.fqd, 1, 2,
                                            DIV_ROUND_UP(sizeof(struct qman_fq),
                                                         64));
                }

                err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
                if (err < 0) {
                        dev_err(dev, "qman_init_fq(%u) = %d\n",
                                qman_fq_fqid(fq), err);
                        qman_destroy_fq(fq);
                        return err;
                }
        }

        dpaa_fq->fqid = qman_fq_fqid(fq);

        return 0;
}

static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
{
        const struct dpaa_priv *priv;
        struct dpaa_fq *dpaa_fq;
        int err, error;

        err = 0;

        dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
        priv = netdev_priv(dpaa_fq->net_dev);

        if (dpaa_fq->init) {
                err = qman_retire_fq(fq, NULL);
                if (err < 0 && netif_msg_drv(priv))
                        dev_err(dev, "qman_retire_fq(%u) = %d\n",
                                qman_fq_fqid(fq), err);

                error = qman_oos_fq(fq);
                if (error < 0 && netif_msg_drv(priv)) {
                        dev_err(dev, "qman_oos_fq(%u) = %d\n",
                                qman_fq_fqid(fq), error);
                        if (err >= 0)
                                err = error;
                }
        }

        qman_destroy_fq(fq);
        list_del(&dpaa_fq->list);

        return err;
}

static int dpaa_fq_free(struct device *dev, struct list_head *list)
{
        struct dpaa_fq *dpaa_fq, *tmp;
        int err, error;

        err = 0;
        list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
                error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
                if (error < 0 && err >= 0)
                        err = error;
        }

        return err;
}

static void dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
                                  struct dpaa_fq *defq,
                                  struct dpaa_buffer_layout *buf_layout)
{
        struct fman_buffer_prefix_content buf_prefix_content;
        struct fman_port_params params;
        int err;

        memset(&params, 0, sizeof(params));
        memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));

        buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
        buf_prefix_content.pass_prs_result = true;
        buf_prefix_content.pass_hash_result = true;
        buf_prefix_content.pass_time_stamp = false;
        buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;

        params.specific_params.non_rx_params.err_fqid = errq->fqid;
        params.specific_params.non_rx_params.dflt_fqid = defq->fqid;

        err = fman_port_config(port, &params);
        if (err)
                pr_err("%s: fman_port_config failed\n", __func__);

        err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
        if (err)
                pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
                       __func__);

        err = fman_port_init(port);
        if (err)
                pr_err("%s: fm_port_init failed\n", __func__);
}

static void dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
                                  size_t count, struct dpaa_fq *errq,
                                  struct dpaa_fq *defq,
                                  struct dpaa_buffer_layout *buf_layout)
{
        struct fman_buffer_prefix_content buf_prefix_content;
        struct fman_port_rx_params *rx_p;
        struct fman_port_params params;
        int i, err;

        memset(&params, 0, sizeof(params));
        memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));

        buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
        buf_prefix_content.pass_prs_result = true;
        buf_prefix_content.pass_hash_result = true;
        buf_prefix_content.pass_time_stamp = false;
        buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;

        rx_p = &params.specific_params.rx_params;
        rx_p->err_fqid = errq->fqid;
        rx_p->dflt_fqid = defq->fqid;

        count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
        rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
        for (i = 0; i < count; i++) {
                rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
                rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
        }

        err = fman_port_config(port, &params);
        if (err)
                pr_err("%s: fman_port_config failed\n", __func__);

        err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
        if (err)
                pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
                       __func__);

        err = fman_port_init(port);
        if (err)
                pr_err("%s: fm_port_init failed\n", __func__);
}

static void dpaa_eth_init_ports(struct mac_device *mac_dev,
                                struct dpaa_bp **bps, size_t count,
                                struct fm_port_fqs *port_fqs,
                                struct dpaa_buffer_layout *buf_layout,
                                struct device *dev)
{
        struct fman_port *rxport = mac_dev->port[RX];
        struct fman_port *txport = mac_dev->port[TX];

        dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
                              port_fqs->tx_defq, &buf_layout[TX]);
        dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
                              port_fqs->rx_defq, &buf_layout[RX]);
}

static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
                             struct bm_buffer *bmb, int cnt)
{
        int err;

        err = bman_release(dpaa_bp->pool, bmb, cnt);
        /* Should never occur, address anyway to avoid leaking the buffers */
        if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb)
                while (cnt-- > 0)
                        dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);

        return cnt;
}

static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
{
        struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
        struct dpaa_bp *dpaa_bp;
        int i = 0, j;

        memset(bmb, 0, sizeof(bmb));

        do {
                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
                if (!dpaa_bp)
                        return;

                j = 0;
                do {
                        WARN_ON(qm_sg_entry_is_ext(&sgt[i]));

                        bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));

                        j++; i++;
                } while (j < ARRAY_SIZE(bmb) &&
                         !qm_sg_entry_is_final(&sgt[i - 1]) &&
                         sgt[i - 1].bpid == sgt[i].bpid);

                dpaa_bman_release(dpaa_bp, bmb, j);
        } while (!qm_sg_entry_is_final(&sgt[i - 1]));
}

static void dpaa_fd_release(const struct net_device *net_dev,
                            const struct qm_fd *fd)
{
        struct qm_sg_entry *sgt;
        struct dpaa_bp *dpaa_bp;
        struct bm_buffer bmb;
        dma_addr_t addr;
        void *vaddr;

        bmb.data = 0;
        bm_buffer_set64(&bmb, qm_fd_addr(fd));

        dpaa_bp = dpaa_bpid2pool(fd->bpid);
        if (!dpaa_bp)
                return;

        if (qm_fd_get_format(fd) == qm_fd_sg) {
                vaddr = phys_to_virt(qm_fd_addr(fd));
                sgt = vaddr + qm_fd_get_offset(fd);

                dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
                                 DMA_FROM_DEVICE);

                dpaa_release_sgt_members(sgt);

                addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
                                      DMA_FROM_DEVICE);
                if (dma_mapping_error(dpaa_bp->dev, addr)) {
                        dev_err(dpaa_bp->dev, "DMA mapping failed");
                        return;
                }
                bm_buffer_set64(&bmb, addr);
        }

        dpaa_bman_release(dpaa_bp, &bmb, 1);
}

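/* Tally the cause of each enqueue rejection (ERN) message into the
 * per-CPU counters, based on the reject code carried in the message.
 */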
static void count_ern(struct dpaa_percpu_priv *percpu_priv,
                      const union qm_mr_entry *msg)
{
        switch (msg->ern.rc & QM_MR_RC_MASK) {
        case QM_MR_RC_CGR_TAILDROP:
                percpu_priv->ern_cnt.cg_tdrop++;
                break;
        case QM_MR_RC_WRED:
                percpu_priv->ern_cnt.wred++;
                break;
        case QM_MR_RC_ERROR:
                percpu_priv->ern_cnt.err_cond++;
                break;
        case QM_MR_RC_ORPWINDOW_EARLY:
                percpu_priv->ern_cnt.early_window++;
                break;
        case QM_MR_RC_ORPWINDOW_LATE:
                percpu_priv->ern_cnt.late_window++;
                break;
        case QM_MR_RC_FQ_TAILDROP:
                percpu_priv->ern_cnt.fq_tdrop++;
                break;
        case QM_MR_RC_ORPWINDOW_RETIRED:
                percpu_priv->ern_cnt.fq_retired++;
                break;
        case QM_MR_RC_ORP_ZERO:
                percpu_priv->ern_cnt.orp_zero++;
                break;
        }
}

/* Turn on HW checksum computation for this outgoing frame.
 * If the current protocol is not something we support in this regard
 * (or if the stack has already computed the SW checksum), we do nothing.
 *
 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
 * otherwise.
 *
 * Note that this function may modify the fd->cmd field and the skb data buffer
 * (the Parse Results area).
 */
static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
                               struct sk_buff *skb,
                               struct qm_fd *fd,
                               char *parse_results)
{
        struct fman_prs_result *parse_result;
        u16 ethertype = ntohs(skb->protocol);
        struct ipv6hdr *ipv6h = NULL;
        struct iphdr *iph;
        int retval = 0;
        u8 l4_proto;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        /* Note: L3 csum seems to be already computed in sw, but we can't choose
         * L4 alone from the FM configuration anyway.
         */

        /* Fill in some fields of the Parse Results array, so the FMan
         * can find them as if they came from the FMan Parser.
         */
        parse_result = (struct fman_prs_result *)parse_results;

        /* If we're dealing with VLAN, get the real Ethernet type */
        if (ethertype == ETH_P_8021Q) {
                /* We can't always assume the MAC header is set correctly
                 * by the stack, so reset to beginning of skb->data
                 */
                skb_reset_mac_header(skb);
                ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
        }

        /* Fill in the relevant L3 parse result fields
         * and read the L4 protocol type
         */
        switch (ethertype) {
        case ETH_P_IP:
                parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
                iph = ip_hdr(skb);
                WARN_ON(!iph);
                l4_proto = iph->protocol;
                break;
        case ETH_P_IPV6:
                parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
                ipv6h = ipv6_hdr(skb);
                WARN_ON(!ipv6h);
                l4_proto = ipv6h->nexthdr;
                break;
        default:
                /* We shouldn't even be here */
                if (net_ratelimit())
                        netif_alert(priv, tx_err, priv->net_dev,
                                    "Can't compute HW csum for L3 proto 0x%x\n",
                                    ntohs(skb->protocol));
                retval = -EIO;
                goto return_error;
        }

        /* Fill in the relevant L4 parse result fields */
        switch (l4_proto) {
        case IPPROTO_UDP:
                parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
                break;
        case IPPROTO_TCP:
                parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
                break;
        default:
                if (net_ratelimit())
                        netif_alert(priv, tx_err, priv->net_dev,
                                    "Can't compute HW csum for L4 proto 0x%x\n",
                                    l4_proto);
                retval = -EIO;
                goto return_error;
        }

        /* At index 0 is IPOffset_1 as defined in the Parse Results */
        parse_result->ip_off[0] = (u8)skb_network_offset(skb);
        parse_result->l4_off = (u8)skb_transport_offset(skb);

        /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
        fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);

        /* On P1023 and similar platforms fd->cmd interpretation could
         * be disabled by setting CONTEXT_A bit ICMD; currently this bit
         * is not set so we do not need to check; in the future, if/when
         * using context_a we need to check this bit
         */

return_error:
        return retval;
}

static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
{
        struct device *dev = dpaa_bp->dev;
        struct bm_buffer bmb[8];
        dma_addr_t addr;
        void *new_buf;
        u8 i;

        for (i = 0; i < 8; i++) {
                new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
                if (unlikely(!new_buf)) {
                        dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
                                dpaa_bp->raw_size);
                        goto release_previous_buffs;
                }
                new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);

                addr = dma_map_single(dev, new_buf,
                                      dpaa_bp->size, DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(dev, addr))) {
                        dev_err(dpaa_bp->dev, "DMA map failed");
                        goto release_previous_buffs;
                }

                bmb[i].data = 0;
                bm_buffer_set64(&bmb[i], addr);
        }

release_bufs:
        return dpaa_bman_release(dpaa_bp, bmb, i);

release_previous_buffs:
        WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");

        bm_buffer_set64(&bmb[i], 0);
        /* Avoid releasing a completely null buffer; bman_release() requires
         * at least one buffer.
         */
        if (likely(i))
                goto release_bufs;

        return 0;
}

static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
{
        int i;

        /* Give each CPU an allotment of "config_count" buffers */
        for_each_possible_cpu(i) {
                int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
                int j;

                /* Although we access another CPU's counters here
                 * we do it at boot time so it is safe
                 */
                for (j = 0; j < dpaa_bp->config_count; j += 8)
                        *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
        }
        return 0;
}

/* Add buffers/(pages) for Rx processing whenever bpool count falls below
 * REFILL_THRESHOLD.
 */
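/* With the current constants, each call tops the per-CPU count back up
 * from under FSL_DPAA_ETH_REFILL_THRESHOLD (80) to
 * FSL_DPAA_ETH_MAX_BUF_COUNT (128), eight buffers at a time.
 */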
static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr)
{
        int count = *countptr;
        int new_bufs;

        if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) {
                do {
                        new_bufs = dpaa_bp_add_8_bufs(dpaa_bp);
                        if (unlikely(!new_bufs)) {
                                /* Avoid looping forever if we've temporarily
                                 * run out of memory. We'll try again at the
                                 * next NAPI cycle.
                                 */
                                break;
                        }
                        count += new_bufs;
                } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT);

                *countptr = count;
                if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT))
                        return -ENOMEM;
        }

        return 0;
}

static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
{
        struct dpaa_bp *dpaa_bp;
        int *countptr;
        int res, i;

        for (i = 0; i < DPAA_BPS_NUM; i++) {
                dpaa_bp = priv->dpaa_bps[i];
                if (!dpaa_bp)
                        return -EINVAL;
                countptr = this_cpu_ptr(dpaa_bp->percpu_count);
                res = dpaa_eth_refill_bpool(dpaa_bp, countptr);
                if (res)
                        return res;
        }
        return 0;
}

/* Cleanup function for outgoing frame descriptors that were built on Tx path,
 * either contiguous frames or scatter/gather ones.
 * Skb freeing is not handled here.
 *
 * This function may be called on error paths in the Tx function, so guard
 * against cases when not all fd relevant fields were filled in.
 *
 * Return the skb backpointer, since for S/G frames the buffer containing it
 * gets freed here.
 */
static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
                                          const struct qm_fd *fd)
{
        const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
        struct device *dev = priv->net_dev->dev.parent;
        dma_addr_t addr = qm_fd_addr(fd);
        const struct qm_sg_entry *sgt;
        struct sk_buff **skbh, *skb;
        int nr_frags, i;

        skbh = (struct sk_buff **)phys_to_virt(addr);
        skb = *skbh;

        if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
                nr_frags = skb_shinfo(skb)->nr_frags;
                dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
                                 sizeof(struct qm_sg_entry) * (1 + nr_frags),
                                 dma_dir);

                /* The sgt buffer has been allocated with netdev_alloc_frag(),
                 * it's from lowmem.
                 */
                sgt = phys_to_virt(addr + qm_fd_get_offset(fd));

                /* sgt[0] is from lowmem, was dma_map_single()-ed */
                dma_unmap_single(dev, qm_sg_addr(&sgt[0]),
                                 qm_sg_entry_get_len(&sgt[0]), dma_dir);

                /* remaining pages were mapped with skb_frag_dma_map() */
                for (i = 1; i < nr_frags; i++) {
                        WARN_ON(qm_sg_entry_is_ext(&sgt[i]));

                        dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
                                       qm_sg_entry_get_len(&sgt[i]), dma_dir);
                }

                /* Free the page frag that we allocated on Tx */
                skb_free_frag(phys_to_virt(addr));
        } else {
                dma_unmap_single(dev, addr,
                                 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
        }

        return skb;
}

/* Build a linear skb around the received buffer.
 * We are guaranteed there is enough room at the end of the data buffer to
 * accommodate the shared info area of the skb.
 */
static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
                                        const struct qm_fd *fd)
{
        ssize_t fd_off = qm_fd_get_offset(fd);
        dma_addr_t addr = qm_fd_addr(fd);
        struct dpaa_bp *dpaa_bp;
        struct sk_buff *skb;
        void *vaddr;

        vaddr = phys_to_virt(addr);
        WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));

        dpaa_bp = dpaa_bpid2pool(fd->bpid);
        if (!dpaa_bp)
                goto free_buffer;

        skb = build_skb(vaddr, dpaa_bp->size +
                        SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
        if (unlikely(!skb)) {
                WARN_ONCE(1, "Build skb failure on Rx\n");
                goto free_buffer;
        }
        WARN_ON(fd_off != priv->rx_headroom);
        skb_reserve(skb, fd_off);
        skb_put(skb, qm_fd_get_length(fd));

        skb->ip_summed = CHECKSUM_NONE;

        return skb;

free_buffer:
        skb_free_frag(vaddr);
        return NULL;
}

/* Build an skb with the data of the first S/G entry in the linear portion and
 * the rest of the frame as skb fragments.
 *
 * The page fragment holding the S/G Table is recycled here.
 */
static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
                                    const struct qm_fd *fd)
{
        ssize_t fd_off = qm_fd_get_offset(fd);
        dma_addr_t addr = qm_fd_addr(fd);
        const struct qm_sg_entry *sgt;
        struct page *page, *head_page;
        struct dpaa_bp *dpaa_bp;
        void *vaddr, *sg_vaddr;
        int frag_off, frag_len;
        struct sk_buff *skb;
        dma_addr_t sg_addr;
        int page_offset;
        unsigned int sz;
        int *count_ptr;
        int i;

        vaddr = phys_to_virt(addr);
        WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES));

        /* Iterate through the SGT entries and add data buffers to the skb */
        sgt = vaddr + fd_off;
        for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) {
                /* Extension bit is not supported */
                WARN_ON(qm_sg_entry_is_ext(&sgt[i]));

                sg_addr = qm_sg_addr(&sgt[i]);
                sg_vaddr = phys_to_virt(sg_addr);
                WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
                                    SMP_CACHE_BYTES));

                /* We may use multiple Rx pools */
                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
                if (!dpaa_bp)
                        goto free_buffers;

                count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
                dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size,
                                 DMA_FROM_DEVICE);
                if (i == 0) {
                        sz = dpaa_bp->size +
                                SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                        skb = build_skb(sg_vaddr, sz);
                        if (WARN_ON(unlikely(!skb)))
                                goto free_buffers;

                        skb->ip_summed = CHECKSUM_NONE;

                        /* Make sure forwarded skbs will have enough space
                         * on Tx, if extra headers are added.
                         */
                        WARN_ON(fd_off != priv->rx_headroom);
                        skb_reserve(skb, fd_off);
                        skb_put(skb, qm_sg_entry_get_len(&sgt[i]));
                } else {
                        /* Not the first S/G entry; all data from buffer will
                         * be added in an skb fragment; fragment index is offset
                         * by one since first S/G entry was incorporated in the
                         * linear part of the skb.
                         *
                         * Caution: 'page' may be a tail page.
                         */
                        page = virt_to_page(sg_vaddr);
                        head_page = virt_to_head_page(sg_vaddr);

                        /* Compute offset in (possibly tail) page */
                        page_offset = ((unsigned long)sg_vaddr &
                                       (PAGE_SIZE - 1)) +
                                (page_address(page) - page_address(head_page));
                        /* page_offset only refers to the beginning of sgt[i];
                         * but the buffer itself may have an internal offset.
                         */
                        frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset;
                        frag_len = qm_sg_entry_get_len(&sgt[i]);
1648 | /* skb_add_rx_frag() does no checking on the page; if | |
1649 | * we pass it a tail page, we'll end up with | |
1650 | * bad page accounting and eventually with segfaults. | |
1651 | */ | |
1652 | skb_add_rx_frag(skb, i - 1, head_page, frag_off, | |
1653 | frag_len, dpaa_bp->size); | |
1654 | } | |
1655 | /* Update the pool count for the current {cpu x bpool} */ | |
1656 | (*count_ptr)--; | |
1657 | ||
1658 | if (qm_sg_entry_is_final(&sgt[i])) | |
1659 | break; | |
1660 | } | |
1661 | WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n"); | |
1662 | ||
1663 | /* free the SG table buffer */ | |
1664 | skb_free_frag(vaddr); | |
1665 | ||
1666 | return skb; | |
1667 | ||
1668 | free_buffers: | |
1669 | /* compensate sw bpool counter changes */ | |
1670 | for (i--; i >= 0; i--) { | |
1671 | dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); | |
1672 | if (dpaa_bp) { | |
1673 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); | |
1674 | (*count_ptr)++; | |
1675 | } | |
1676 | } | |
1677 | /* free all the SG entries */ | |
1678 | for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) { | |
1679 | sg_addr = qm_sg_addr(&sgt[i]); | |
1680 | sg_vaddr = phys_to_virt(sg_addr); | |
1681 | skb_free_frag(sg_vaddr); | |
1682 | dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); | |
1683 | if (dpaa_bp) { | |
1684 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); | |
1685 | (*count_ptr)--; | |
1686 | } | |
1687 | ||
1688 | if (qm_sg_entry_is_final(&sgt[i])) | |
1689 | break; | |
1690 | } | |
1691 | /* free the SGT fragment */ | |
1692 | skb_free_frag(vaddr); | |
1693 | ||
1694 | return NULL; | |
1695 | } | |
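/* Worked example for the tail-page arithmetic in sg_fd_to_skb(), with
 * illustrative values: assume PAGE_SIZE = 4096 and sg_vaddr sitting 5000
 * bytes past a compound page's head address. virt_to_page() then returns
 * the first tail page, (sg_vaddr & (PAGE_SIZE - 1)) = 904, and
 * page_address(page) - page_address(head_page) = 4096, so page_offset =
 * 5000, the offset relative to the head page, which is the base that
 * skb_add_rx_frag() expects together with head_page.
 */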
1696 | ||
1697 | static int skb_to_contig_fd(struct dpaa_priv *priv, | |
1698 | struct sk_buff *skb, struct qm_fd *fd, | |
1699 | int *offset) | |
1700 | { | |
1701 | struct net_device *net_dev = priv->net_dev; | |
1702 | struct device *dev = net_dev->dev.parent; | |
1703 | enum dma_data_direction dma_dir; | |
1704 | unsigned char *buffer_start; | |
1705 | struct sk_buff **skbh; | |
1706 | dma_addr_t addr; | |
1707 | int err; | |
1708 | ||
1709 | /* We are guaranteed to have at least tx_headroom bytes | |
1710 | * available, so just use that for offset. | |
1711 | */ | |
1712 | fd->bpid = FSL_DPAA_BPID_INV; | |
1713 | buffer_start = skb->data - priv->tx_headroom; | |
1714 | dma_dir = DMA_TO_DEVICE; | |
1715 | ||
1716 | skbh = (struct sk_buff **)buffer_start; | |
1717 | *skbh = skb; | |
1718 | ||
1719 | /* Enable L3/L4 hardware checksum computation. | |
1720 | * | |
1721 | * We must do this before dma_map_single(DMA_TO_DEVICE), because we may | |
1722 | * need to write into the skb. | |
1723 | */ | |
1724 | err = dpaa_enable_tx_csum(priv, skb, fd, | |
1725 | ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE); | |
1726 | if (unlikely(err < 0)) { | |
1727 | if (net_ratelimit()) | |
1728 | netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", | |
1729 | err); | |
1730 | return err; | |
1731 | } | |
1732 | ||
1733 | /* Fill in the rest of the FD fields */ | |
1734 | qm_fd_set_contig(fd, priv->tx_headroom, skb->len); | |
1735 | fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); | |
1736 | ||
1737 | /* Map the entire buffer size that may be seen by FMan, but no more */ | |
1738 | addr = dma_map_single(dev, skbh, | |
1739 | skb_tail_pointer(skb) - buffer_start, dma_dir); | |
1740 | if (unlikely(dma_mapping_error(dev, addr))) { | |
1741 | if (net_ratelimit()) | |
1742 | netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n"); | |
1743 | return -EINVAL; | |
1744 | } | |
1745 | qm_fd_addr_set64(fd, addr); | |
1746 | ||
1747 | return 0; | |
1748 | } | |
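/* Layout of the contiguous Tx buffer built above (a sketch; field sizes
 * depend on the configured tx_headroom):
 *
 *   buffer_start                           skb->data        skb_tail_pointer(skb)
 *   | skb backpointer | parse results ... |    frame data   |
 *   |<-------------- tx_headroom --------->|
 *
 * The skb pointer stored at buffer_start is what dpaa_cleanup_tx_fd()
 * reads back on confirmation in order to free the right skb.
 */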
1749 | ||
1750 | static int skb_to_sg_fd(struct dpaa_priv *priv, | |
1751 | struct sk_buff *skb, struct qm_fd *fd) | |
1752 | { | |
1753 | const enum dma_data_direction dma_dir = DMA_TO_DEVICE; | |
1754 | const int nr_frags = skb_shinfo(skb)->nr_frags; | |
1755 | struct net_device *net_dev = priv->net_dev; | |
1756 | struct device *dev = net_dev->dev.parent; | |
1757 | struct qm_sg_entry *sgt; | |
1758 | struct sk_buff **skbh; | |
1759 | int i, j, err, sz; | |
1760 | void *buffer_start; | |
1761 | skb_frag_t *frag; | |
1762 | dma_addr_t addr; | |
1763 | size_t frag_len; | |
1764 | void *sgt_buf; | |
1765 | ||
1766 | /* get a page frag to store the SGTable */ | |
1767 | sz = SKB_DATA_ALIGN(priv->tx_headroom + | |
1768 | sizeof(struct qm_sg_entry) * (1 + nr_frags)); | |
1769 | sgt_buf = netdev_alloc_frag(sz); | |
1770 | if (unlikely(!sgt_buf)) { | |
1771 | netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n", | |
1772 | sz); | |
1773 | return -ENOMEM; | |
1774 | } | |
1775 | ||
1776 | /* Enable L3/L4 hardware checksum computation. | |
1777 | * | |
1778 | * We must do this before dma_map_single(DMA_TO_DEVICE), because we may | |
1779 | * need to write into the skb. | |
1780 | */ | |
1781 | err = dpaa_enable_tx_csum(priv, skb, fd, | |
1782 | sgt_buf + DPAA_TX_PRIV_DATA_SIZE); | |
1783 | if (unlikely(err < 0)) { | |
1784 | if (net_ratelimit()) | |
1785 | netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", | |
1786 | err); | |
1787 | goto csum_failed; | |
1788 | } | |
1789 | ||
1790 | sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom); | |
1791 | qm_sg_entry_set_len(&sgt[0], skb_headlen(skb)); | |
1792 | sgt[0].bpid = FSL_DPAA_BPID_INV; | |
1793 | sgt[0].offset = 0; | |
1794 | addr = dma_map_single(dev, skb->data, | |
1795 | skb_headlen(skb), dma_dir); | |
1796 | if (unlikely(dma_mapping_error(dev, addr))) { | |
1797 | dev_err(dev, "DMA mapping failed"); | |
1798 | err = -EINVAL; | |
1799 | goto sg0_map_failed; | |
1800 | } | |
1801 | qm_sg_entry_set64(&sgt[0], addr); | |
1802 | ||
1803 | /* populate the rest of SGT entries */ | |
1804 | frag = &skb_shinfo(skb)->frags[0]; | |
1805 | for (i = 1; i <= nr_frags; i++, frag++) { | |
1806 | WARN_ON(!skb_frag_page(frag)); | |
1807 | frag_len = frag->size; /* this fragment's own length */ | |
1808 | addr = skb_frag_dma_map(dev, frag, 0, | |
1809 | frag_len, dma_dir); | |
1810 | if (unlikely(dma_mapping_error(dev, addr))) { | |
1811 | dev_err(dev, "DMA mapping failed"); | |
1812 | err = -EINVAL; | |
1813 | goto sg_map_failed; | |
1814 | } | |
1815 | ||
1816 | qm_sg_entry_set_len(&sgt[i], frag_len); | |
1817 | sgt[i].bpid = FSL_DPAA_BPID_INV; | |
1818 | sgt[i].offset = 0; | |
1819 | ||
1820 | /* keep the offset in the address */ | |
1821 | qm_sg_entry_set64(&sgt[i], addr); | |
1822 | } | |
1823 | ||
1824 | qm_sg_entry_set_f(&sgt[i - 1], frag_len); | |
1825 | ||
1826 | qm_fd_set_sg(fd, priv->tx_headroom, skb->len); | |
1827 | ||
1828 | /* DMA map the SGT page */ | |
1829 | buffer_start = (void *)sgt - priv->tx_headroom; | |
1830 | skbh = (struct sk_buff **)buffer_start; | |
1831 | *skbh = skb; | |
1832 | ||
1833 | addr = dma_map_single(dev, buffer_start, priv->tx_headroom + | |
1834 | sizeof(struct qm_sg_entry) * (1 + nr_frags), | |
1835 | dma_dir); | |
1836 | if (unlikely(dma_mapping_error(dev, addr))) { | |
1837 | dev_err(dev, "DMA mapping failed"); | |
1838 | err = -EINVAL; | |
1839 | goto sgt_map_failed; | |
1840 | } | |
1841 | ||
1842 | fd->bpid = FSL_DPAA_BPID_INV; | |
1843 | fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); | |
1844 | qm_fd_addr_set64(fd, addr); | |
1845 | ||
1846 | return 0; | |
1847 | ||
1848 | sgt_map_failed: | |
1849 | sg_map_failed: | |
1850 | for (j = 0; j < i; j++) | |
1851 | dma_unmap_page(dev, qm_sg_addr(&sgt[j]), | |
1852 | qm_sg_entry_get_len(&sgt[j]), dma_dir); | |
1853 | sg0_map_failed: | |
1854 | csum_failed: | |
1855 | skb_free_frag(sgt_buf); | |
1856 | ||
1857 | return err; | |
1858 | } | |
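/* Layout of the sgt_buf page fragment built above (a sketch):
 *
 *   sgt_buf: | skb backpointer / priv ... | sgt[0] | sgt[1] ... sgt[nr_frags] |
 *            |<------- tx_headroom ------>|
 *
 * sgt[0] points at the linear part of the skb (mapped with
 * dma_map_single()), the remaining entries at the page fragments (mapped
 * with skb_frag_dma_map()); the last entry carries the Final bit.
 */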
1859 | ||
1860 | static inline int dpaa_xmit(struct dpaa_priv *priv, | |
1861 | struct rtnl_link_stats64 *percpu_stats, | |
1862 | int queue, | |
1863 | struct qm_fd *fd) | |
1864 | { | |
1865 | struct qman_fq *egress_fq; | |
1866 | int err, i; | |
1867 | ||
1868 | egress_fq = priv->egress_fqs[queue]; | |
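/* Frames built by skb_to_contig_fd()/skb_to_sg_fd() carry no buffer pool
 * (bpid is invalid) and have FM_FD_CMD_FCO (confirmation FQID override)
 * set, so the confirmation queue's FQID is placed in the FD command word
 * below and FMan returns the frame on that queue instead of recycling it
 * to a pool.
 */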
1869 | if (fd->bpid == FSL_DPAA_BPID_INV) | |
1870 | fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue])); | |
1871 | ||
1872 | /* Trace this Tx fd */ | |
1873 | trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd); | |
1874 | ||
1875 | for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) { |
1876 | err = qman_enqueue(egress_fq, fd); | |
1877 | if (err != -EBUSY) | |
1878 | break; | |
1879 | } | |
1880 | ||
1881 | if (unlikely(err < 0)) { | |
1882 | percpu_stats->tx_errors++; | |
1883 | percpu_stats->tx_fifo_errors++; | |
1884 | return err; | |
1885 | } | |
1886 | ||
1887 | percpu_stats->tx_packets++; | |
1888 | percpu_stats->tx_bytes += qm_fd_get_length(fd); | |
1889 | ||
1890 | return 0; | |
1891 | } | |
1892 | ||
1893 | static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | |
1894 | { | |
1895 | const int queue_mapping = skb_get_queue_mapping(skb); | |
1896 | bool nonlinear = skb_is_nonlinear(skb); | |
1897 | struct rtnl_link_stats64 *percpu_stats; | |
1898 | struct dpaa_percpu_priv *percpu_priv; | |
1899 | struct dpaa_priv *priv; | |
1900 | struct qm_fd fd; | |
1901 | int offset = 0; | |
1902 | int err = 0; | |
1903 | ||
1904 | priv = netdev_priv(net_dev); | |
1905 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | |
1906 | percpu_stats = &percpu_priv->stats; | |
1907 | ||
1908 | qm_fd_clear_fd(&fd); | |
1909 | ||
1910 | if (!nonlinear) { | |
1911 | /* We're going to store the skb backpointer at the beginning | |
1912 | * of the data buffer, so we need a privately owned skb | |
1913 | * | |
1914 | * We've made sure the skb is not shared in dev->priv_flags; | |
1915 | * we still need to verify that the skb head is not cloned. | |
1916 | */ | |
1917 | if (skb_cow_head(skb, priv->tx_headroom)) | |
1918 | goto enomem; | |
1919 | ||
1920 | WARN_ON(skb_is_nonlinear(skb)); | |
1921 | } | |
1922 | ||
1923 | /* MAX_SKB_FRAGS is equal to or larger than our DPAA_SGT_MAX_ENTRIES; | |
1924 | * make sure we don't feed FMan with more fragments than it supports. | |
1925 | */ | |
1926 | if (nonlinear && | |
1927 | likely(skb_shinfo(skb)->nr_frags < DPAA_SGT_MAX_ENTRIES)) { | |
1928 | /* Just create a S/G fd based on the skb */ | |
1929 | err = skb_to_sg_fd(priv, skb, &fd); | |
1930 | percpu_priv->tx_frag_skbuffs++; | |
1931 | } else { |
1932 | /* If the egress skb contains more fragments than we support | |
1933 | * we have no choice but to linearize it ourselves. | |
1934 | */ | |
1935 | if (unlikely(nonlinear) && __skb_linearize(skb)) | |
1936 | goto enomem; | |
1937 | ||
1938 | /* Finally, create a contig FD from this skb */ | |
1939 | err = skb_to_contig_fd(priv, skb, &fd, &offset); | |
1940 | } | |
1941 | if (unlikely(err < 0)) | |
1942 | goto skb_to_fd_failed; | |
1943 | ||
1944 | if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0)) | |
1945 | return NETDEV_TX_OK; | |
1946 | ||
1947 | dpaa_cleanup_tx_fd(priv, &fd); | |
1948 | skb_to_fd_failed: | |
1949 | enomem: | |
1950 | percpu_stats->tx_errors++; | |
1951 | dev_kfree_skb(skb); | |
1952 | return NETDEV_TX_OK; | |
1953 | } | |
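/* Tx path recap: skb_cow_head() above guarantees linear skbs a writable,
 * non-cloned head with tx_headroom available for the backpointer;
 * nonlinear skbs with fewer than DPAA_SGT_MAX_ENTRIES fragments go out as
 * S/G FDs, and anything with more fragments is linearized before being
 * sent as a contiguous FD.
 */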
1954 | ||
1955 | static void dpaa_rx_error(struct net_device *net_dev, | |
1956 | const struct dpaa_priv *priv, | |
1957 | struct dpaa_percpu_priv *percpu_priv, | |
1958 | const struct qm_fd *fd, | |
1959 | u32 fqid) | |
1960 | { | |
1961 | if (net_ratelimit()) | |
1962 | netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n", | |
1963 | be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS); | |
1964 | ||
1965 | percpu_priv->stats.rx_errors++; | |
1966 | ||
1967 | if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA) | |
1968 | percpu_priv->rx_errors.dme++; | |
1969 | if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL) | |
1970 | percpu_priv->rx_errors.fpe++; | |
1971 | if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE) | |
1972 | percpu_priv->rx_errors.fse++; | |
1973 | if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR) | |
1974 | percpu_priv->rx_errors.phe++; | |
1975 | ||
1976 | dpaa_fd_release(net_dev, fd); |
1977 | } | |
1978 | ||
1979 | static void dpaa_tx_error(struct net_device *net_dev, | |
1980 | const struct dpaa_priv *priv, | |
1981 | struct dpaa_percpu_priv *percpu_priv, | |
1982 | const struct qm_fd *fd, | |
1983 | u32 fqid) | |
1984 | { | |
1985 | struct sk_buff *skb; | |
1986 | ||
1987 | if (net_ratelimit()) | |
1988 | netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", | |
1989 | be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS); | |
1990 | ||
1991 | percpu_priv->stats.tx_errors++; | |
1992 | ||
1993 | skb = dpaa_cleanup_tx_fd(priv, fd); | |
1994 | dev_kfree_skb(skb); | |
1995 | } | |
1996 | ||
1997 | static int dpaa_eth_poll(struct napi_struct *napi, int budget) | |
1998 | { | |
1999 | struct dpaa_napi_portal *np = | |
2000 | container_of(napi, struct dpaa_napi_portal, napi); | |
2001 | ||
2002 | int cleaned = qman_p_poll_dqrr(np->p, budget); | |
2003 | ||
2004 | if (cleaned < budget) { | |
2005 | napi_complete(napi); | |
2006 | qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); | |
2007 | ||
2008 | } else if (np->down) { | |
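/* np->down means NAPI is being torn down for this portal; re-arm dequeue
 * interrupts here so the portal is not left permanently silent once this
 * poll cycle ends without a reschedule.
 */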
2009 | qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); | |
2010 | } | |
2011 | ||
2012 | return cleaned; | |
2013 | } | |
2014 | ||
2015 | static void dpaa_tx_conf(struct net_device *net_dev, | |
2016 | const struct dpaa_priv *priv, | |
2017 | struct dpaa_percpu_priv *percpu_priv, | |
2018 | const struct qm_fd *fd, | |
2019 | u32 fqid) | |
2020 | { | |
2021 | struct sk_buff *skb; | |
2022 | ||
2023 | if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) { | |
2024 | if (net_ratelimit()) | |
2025 | netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", | |
2026 | be32_to_cpu(fd->status) & | |
2027 | FM_FD_STAT_TX_ERRORS); | |
2028 | ||
2029 | percpu_priv->stats.tx_errors++; | |
2030 | } | |
2031 | ||
2032 | percpu_priv->tx_confirm++; | |
2033 | ||
2034 | skb = dpaa_cleanup_tx_fd(priv, fd); |
2035 | ||
2036 | consume_skb(skb); | |
2037 | } | |
2038 | ||
2039 | static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv, | |
2040 | struct qman_portal *portal) | |
2041 | { | |
2042 | if (unlikely(in_irq() || !in_serving_softirq())) { | |
2043 | /* Disable QMan IRQ and invoke NAPI */ | |
2044 | qman_p_irqsource_remove(portal, QM_PIRQ_DQRI); | |
2045 | ||
2046 | percpu_priv->np.p = portal; | |
2047 | napi_schedule(&percpu_priv->np.napi); | |
2048 | percpu_priv->in_interrupt++; | |
2049 | return 1; |
2050 | } | |
2051 | return 0; | |
2052 | } | |
2053 | ||
2054 | static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal, | |
2055 | struct qman_fq *fq, | |
2056 | const struct qm_dqrr_entry *dq) | |
2057 | { | |
2058 | struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base); | |
2059 | struct dpaa_percpu_priv *percpu_priv; | |
2060 | struct net_device *net_dev; | |
2061 | struct dpaa_bp *dpaa_bp; | |
2062 | struct dpaa_priv *priv; | |
2063 | ||
2064 | net_dev = dpaa_fq->net_dev; | |
2065 | priv = netdev_priv(net_dev); | |
2066 | dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); | |
2067 | if (!dpaa_bp) | |
2068 | return qman_cb_dqrr_consume; | |
2069 | ||
2070 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | |
2071 | ||
2072 | if (dpaa_eth_napi_schedule(percpu_priv, portal)) | |
2073 | return qman_cb_dqrr_stop; | |
2074 | ||
2075 | if (dpaa_eth_refill_bpools(priv)) | |
2076 | /* Unable to refill the buffer pool due to insufficient | |
2077 | * system memory. Just release the frame back into the pool, | |
2078 | * otherwise we'll soon end up with an empty buffer pool. | |
2079 | */ | |
2080 | dpaa_fd_release(net_dev, &dq->fd); | |
2081 | else | |
2082 | dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); | |
2083 | ||
2084 | return qman_cb_dqrr_consume; | |
2085 | } | |
2086 | ||
2087 | static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, | |
2088 | struct qman_fq *fq, | |
2089 | const struct qm_dqrr_entry *dq) | |
2090 | { | |
2091 | struct rtnl_link_stats64 *percpu_stats; | |
2092 | struct dpaa_percpu_priv *percpu_priv; | |
2093 | const struct qm_fd *fd = &dq->fd; | |
2094 | dma_addr_t addr = qm_fd_addr(fd); | |
2095 | enum qm_fd_format fd_format; | |
2096 | struct net_device *net_dev; | |
2097 | u32 fd_status; | |
2098 | struct dpaa_bp *dpaa_bp; | |
2099 | struct dpaa_priv *priv; | |
2100 | unsigned int skb_len; | |
2101 | struct sk_buff *skb; | |
2102 | int *count_ptr; | |
2103 | ||
2104 | fd_status = be32_to_cpu(fd->status); | |
2105 | fd_format = qm_fd_get_format(fd); | |
2106 | net_dev = ((struct dpaa_fq *)fq)->net_dev; |
2107 | priv = netdev_priv(net_dev); | |
2108 | dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); | |
2109 | if (!dpaa_bp) | |
2110 | return qman_cb_dqrr_consume; | |
2111 | ||
2112 | /* Trace the Rx fd */ | |
2113 | trace_dpaa_rx_fd(net_dev, fq, &dq->fd); | |
2114 | ||
2115 | percpu_priv = this_cpu_ptr(priv->percpu_priv); |
2116 | percpu_stats = &percpu_priv->stats; | |
2117 | ||
2118 | if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal))) | |
2119 | return qman_cb_dqrr_stop; | |
2120 | ||
2121 | /* Make sure we didn't run out of buffers */ | |
2122 | if (unlikely(dpaa_eth_refill_bpools(priv))) { | |
2123 | /* Unable to refill the buffer pool due to insufficient | |
2124 | * system memory. Just release the frame back into the pool, | |
2125 | * otherwise we'll soon end up with an empty buffer pool. | |
2126 | */ | |
2127 | dpaa_fd_release(net_dev, &dq->fd); | |
2128 | return qman_cb_dqrr_consume; | |
2129 | } | |
2130 | ||
2131 | if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS)) { | |
2132 | if (net_ratelimit()) | |
2133 | netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", | |
2134 | fd_status & FM_FD_STAT_RX_ERRORS); | |
2135 | ||
2136 | percpu_stats->rx_errors++; | |
2137 | dpaa_fd_release(net_dev, fd); | |
2138 | return qman_cb_dqrr_consume; | |
2139 | } | |
2140 | ||
2141 | dpaa_bp = dpaa_bpid2pool(fd->bpid); | |
2142 | if (!dpaa_bp) | |
2143 | return qman_cb_dqrr_consume; | |
2144 | ||
2145 | dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE); | |
2146 | ||
2147 | /* prefetch the first 64 bytes of the frame or the SGT start */ | |
2148 | prefetch(phys_to_virt(addr) + qm_fd_get_offset(fd)); | |
2149 | ||
2151 | /* The only FD types that we may receive are contig and S/G */ | |
2152 | WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg)); | |
2153 | ||
2154 | /* Account for either the contig buffer or the SGT buffer (depending on | |
2155 | * which case we were in) having been removed from the pool. | |
2156 | */ | |
2157 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); | |
2158 | (*count_ptr)--; | |
2159 | ||
2160 | if (likely(fd_format == qm_fd_contig)) | |
2161 | skb = contig_fd_to_skb(priv, fd); | |
2162 | else | |
2163 | skb = sg_fd_to_skb(priv, fd); | |
2164 | if (!skb) | |
2165 | return qman_cb_dqrr_consume; | |
2166 | ||
2167 | skb->protocol = eth_type_trans(skb, net_dev); | |
2168 | ||
2169 | skb_len = skb->len; | |
2170 | ||
2171 | if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) | |
2172 | return qman_cb_dqrr_consume; | |
2173 | ||
2174 | percpu_stats->rx_packets++; | |
2175 | percpu_stats->rx_bytes += skb_len; | |
2176 | ||
2177 | return qman_cb_dqrr_consume; | |
2178 | } | |
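/* Rx fast path recap for rx_default_dqrr(): defer to NAPI when called in
 * interrupt context, top up the buffer pools, drop FDs with error status,
 * then unmap the buffer and hand it to contig_fd_to_skb() or
 * sg_fd_to_skb() before pushing the result up with netif_receive_skb().
 */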
2179 | ||
2180 | static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal, | |
2181 | struct qman_fq *fq, | |
2182 | const struct qm_dqrr_entry *dq) | |
2183 | { | |
2184 | struct dpaa_percpu_priv *percpu_priv; | |
2185 | struct net_device *net_dev; | |
2186 | struct dpaa_priv *priv; | |
2187 | ||
2188 | net_dev = ((struct dpaa_fq *)fq)->net_dev; | |
2189 | priv = netdev_priv(net_dev); | |
2190 | ||
2191 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | |
2192 | ||
2193 | if (dpaa_eth_napi_schedule(percpu_priv, portal)) | |
2194 | return qman_cb_dqrr_stop; | |
2195 | ||
2196 | dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); | |
2197 | ||
2198 | return qman_cb_dqrr_consume; | |
2199 | } | |
2200 | ||
2201 | static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal, | |
2202 | struct qman_fq *fq, | |
2203 | const struct qm_dqrr_entry *dq) | |
2204 | { | |
2205 | struct dpaa_percpu_priv *percpu_priv; | |
2206 | struct net_device *net_dev; | |
2207 | struct dpaa_priv *priv; | |
2208 | ||
2209 | net_dev = ((struct dpaa_fq *)fq)->net_dev; | |
2210 | priv = netdev_priv(net_dev); | |
2211 | ||
2212 | /* Trace the fd */ | |
2213 | trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd); | |
2214 | ||
2215 | percpu_priv = this_cpu_ptr(priv->percpu_priv); |
2216 | ||
2217 | if (dpaa_eth_napi_schedule(percpu_priv, portal)) | |
2218 | return qman_cb_dqrr_stop; | |
2219 | ||
2220 | dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); | |
2221 | ||
2222 | return qman_cb_dqrr_consume; | |
2223 | } | |
2224 | ||
2225 | static void egress_ern(struct qman_portal *portal, | |
2226 | struct qman_fq *fq, | |
2227 | const union qm_mr_entry *msg) | |
2228 | { | |
2229 | const struct qm_fd *fd = &msg->ern.fd; | |
2230 | struct dpaa_percpu_priv *percpu_priv; | |
2231 | const struct dpaa_priv *priv; | |
2232 | struct net_device *net_dev; | |
2233 | struct sk_buff *skb; | |
2234 | ||
2235 | net_dev = ((struct dpaa_fq *)fq)->net_dev; | |
2236 | priv = netdev_priv(net_dev); | |
2237 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | |
2238 | ||
2239 | percpu_priv->stats.tx_dropped++; | |
2240 | percpu_priv->stats.tx_fifo_errors++; | |
2241 | count_ern(percpu_priv, msg); | |
2242 | |
2243 | skb = dpaa_cleanup_tx_fd(priv, fd); | |
2244 | dev_kfree_skb_any(skb); | |
2245 | } | |
2246 | ||
2247 | static const struct dpaa_fq_cbs dpaa_fq_cbs = { | |
2248 | .rx_defq = { .cb = { .dqrr = rx_default_dqrr } }, | |
2249 | .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } }, | |
2250 | .rx_errq = { .cb = { .dqrr = rx_error_dqrr } }, | |
2251 | .tx_errq = { .cb = { .dqrr = conf_error_dqrr } }, | |
2252 | .egress_ern = { .cb = { .ern = egress_ern } } | |
2253 | }; | |
2254 | ||
2255 | static void dpaa_eth_napi_enable(struct dpaa_priv *priv) | |
2256 | { | |
2257 | struct dpaa_percpu_priv *percpu_priv; | |
2258 | int i; | |
2259 | ||
2260 | for_each_possible_cpu(i) { | |
2261 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); | |
2262 | ||
2263 | percpu_priv->np.down = 0; | |
2264 | napi_enable(&percpu_priv->np.napi); | |
2265 | } | |
2266 | } | |
2267 | ||
2268 | static void dpaa_eth_napi_disable(struct dpaa_priv *priv) | |
2269 | { | |
2270 | struct dpaa_percpu_priv *percpu_priv; | |
2271 | int i; | |
2272 | ||
2273 | for_each_possible_cpu(i) { | |
2274 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); | |
2275 | ||
2276 | percpu_priv->np.down = 1; | |
2277 | napi_disable(&percpu_priv->np.napi); | |
2278 | } | |
2279 | } | |
2280 | ||
2281 | static int dpaa_open(struct net_device *net_dev) | |
2282 | { | |
2283 | struct mac_device *mac_dev; | |
2284 | struct dpaa_priv *priv; | |
2285 | int err, i; | |
2286 | ||
2287 | priv = netdev_priv(net_dev); | |
2288 | mac_dev = priv->mac_dev; | |
2289 | dpaa_eth_napi_enable(priv); | |
2290 | ||
2291 | net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev); | |
2292 | if (!net_dev->phydev) { | |
2293 | netif_err(priv, ifup, net_dev, "init_phy() failed\n"); | |
/* undo the napi_enable() done above on this error path */
dpaa_eth_napi_disable(priv);
2294 | return -ENODEV; | |
2295 | } | |
2296 | ||
2297 | for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { | |
2298 | err = fman_port_enable(mac_dev->port[i]); | |
2299 | if (err) | |
2300 | goto mac_start_failed; | |
2301 | } | |
2302 | ||
2303 | err = priv->mac_dev->start(mac_dev); | |
2304 | if (err < 0) { | |
2305 | netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err); | |
2306 | goto mac_start_failed; | |
2307 | } | |
2308 | ||
2309 | netif_tx_start_all_queues(net_dev); | |
2310 | ||
2311 | return 0; | |
2312 | ||
2313 | mac_start_failed: | |
2314 | for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) | |
2315 | fman_port_disable(mac_dev->port[i]); | |
2316 | ||
2317 | dpaa_eth_napi_disable(priv); | |
2318 | ||
2319 | return err; | |
2320 | } | |
2321 | ||
2322 | static int dpaa_eth_stop(struct net_device *net_dev) | |
2323 | { | |
2324 | struct dpaa_priv *priv; | |
2325 | int err; | |
2326 | ||
2327 | err = dpaa_stop(net_dev); | |
2328 | ||
2329 | priv = netdev_priv(net_dev); | |
2330 | dpaa_eth_napi_disable(priv); | |
2331 | ||
2332 | return err; | |
2333 | } | |
2334 | ||
2335 | static const struct net_device_ops dpaa_ops = { | |
2336 | .ndo_open = dpaa_open, | |
2337 | .ndo_start_xmit = dpaa_start_xmit, | |
2338 | .ndo_stop = dpaa_eth_stop, | |
2339 | .ndo_tx_timeout = dpaa_tx_timeout, | |
2340 | .ndo_get_stats64 = dpaa_get_stats64, | |
2341 | .ndo_set_mac_address = dpaa_set_mac_address, | |
2342 | .ndo_validate_addr = eth_validate_addr, | |
2343 | .ndo_set_rx_mode = dpaa_set_rx_mode, | |
2344 | }; | |
2345 | ||
2346 | static int dpaa_napi_add(struct net_device *net_dev) | |
2347 | { | |
2348 | struct dpaa_priv *priv = netdev_priv(net_dev); | |
2349 | struct dpaa_percpu_priv *percpu_priv; | |
2350 | int cpu; | |
2351 | ||
2352 | for_each_possible_cpu(cpu) { | |
2353 | percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); | |
2354 | ||
2355 | netif_napi_add(net_dev, &percpu_priv->np.napi, | |
2356 | dpaa_eth_poll, NAPI_POLL_WEIGHT); | |
2357 | } | |
2358 | ||
2359 | return 0; | |
2360 | } | |
2361 | ||
2362 | static void dpaa_napi_del(struct net_device *net_dev) | |
2363 | { | |
2364 | struct dpaa_priv *priv = netdev_priv(net_dev); | |
2365 | struct dpaa_percpu_priv *percpu_priv; | |
2366 | int cpu; | |
2367 | ||
2368 | for_each_possible_cpu(cpu) { | |
2369 | percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); | |
2370 | ||
2371 | netif_napi_del(&percpu_priv->np.napi); | |
2372 | } | |
2373 | } | |
2374 | ||
2375 | static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp, | |
2376 | struct bm_buffer *bmb) | |
2377 | { | |
2378 | dma_addr_t addr = bm_buf_addr(bmb); | |
2379 | ||
2380 | dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE); | |
2381 | ||
2382 | skb_free_frag(phys_to_virt(addr)); | |
2383 | } | |
2384 | ||
2385 | /* Alloc the dpaa_bp struct and configure default values */ | |
2386 | static struct dpaa_bp *dpaa_bp_alloc(struct device *dev) | |
2387 | { | |
2388 | struct dpaa_bp *dpaa_bp; | |
2389 | ||
2390 | dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL); | |
2391 | if (!dpaa_bp) | |
2392 | return ERR_PTR(-ENOMEM); | |
2393 | ||
2394 | dpaa_bp->bpid = FSL_DPAA_BPID_INV; | |
2395 | dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count); | |
if (!dpaa_bp->percpu_count)
return ERR_PTR(-ENOMEM);
2396 | dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT; | |
2397 | ||
2398 | dpaa_bp->seed_cb = dpaa_bp_seed; | |
2399 | dpaa_bp->free_buf_cb = dpaa_bp_free_pf; | |
2400 | ||
2401 | return dpaa_bp; | |
2402 | } | |
2403 | ||
2404 | /* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR. | |
2405 | * We won't be sending congestion notifications to FMan; for now, we just use | |
2406 | * this CGR to generate enqueue rejections to FMan in order to drop the frames | |
2407 | * before they reach our ingress queues and eat up memory. | |
2408 | */ | |
2409 | static int dpaa_ingress_cgr_init(struct dpaa_priv *priv) | |
2410 | { | |
2411 | struct qm_mcc_initcgr initcgr; | |
2412 | u32 cs_th; | |
2413 | int err; | |
2414 | ||
2415 | err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid); | |
2416 | if (err < 0) { | |
2417 | if (netif_msg_drv(priv)) | |
2418 | pr_err("Error %d allocating CGR ID\n", err); | |
2419 | goto out_error; | |
2420 | } | |
2421 | ||
2422 | /* Enable CS TD, but disable Congestion State Change Notifications. */ | |
memset(&initcgr, 0, sizeof(initcgr)); /* don't pass uninitialized fields to QMan */
2423 | initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES); | |
2424 | initcgr.cgr.cscn_en = QM_CGR_EN; | |
2425 | cs_th = DPAA_INGRESS_CS_THRESHOLD; | |
2426 | qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); | |
2427 | ||
2428 | initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN); | |
2429 | initcgr.cgr.cstd_en = QM_CGR_EN; | |
2430 | ||
2431 | /* This CGR will be associated with the SWP affined to the current CPU. | |
2432 | * However, we'll place all our ingress FQs in it. | |
2433 | */ | |
2434 | err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT, | |
2435 | &initcgr); | |
2436 | if (err < 0) { | |
2437 | if (netif_msg_drv(priv)) | |
2438 | pr_err("Error %d creating ingress CGR with ID %d\n", | |
2439 | err, priv->ingress_cgr.cgrid); | |
2440 | qman_release_cgrid(priv->ingress_cgr.cgrid); | |
2441 | goto out_error; | |
2442 | } | |
2443 | if (netif_msg_drv(priv)) | |
2444 | pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n", | |
2445 | priv->ingress_cgr.cgrid, priv->mac_dev->addr); | |
2446 | ||
2447 | priv->use_ingress_cgr = true; | |
2448 | ||
2449 | out_error: | |
2450 | return err; | |
2451 | } | |
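/* qm_cgr_cs_thres_set64() above stores the byte threshold in QMan's
 * mantissa * 2^exponent format. A minimal userspace sketch of such an
 * encoding, assuming an 8-bit mantissa (TA), a 5-bit exponent (Tn) and
 * the round-up behavior requested by the third argument; this is an
 * illustration of the format, not the qman API itself:
 */
#include <stdint.h>

static void cs_thres_encode(uint64_t val, int roundup,
			    uint8_t *ta, uint8_t *tn)
{
	uint64_t lost = 0;
	uint8_t e = 0;

	/* shift right until the mantissa fits in 8 bits, remembering
	 * whether any set bits were discarded along the way
	 */
	while (val > 0xff) {
		lost |= val & 1;
		val >>= 1;
		e++;
	}
	/* round up so the encoded threshold is never below the request */
	if (roundup && lost) {
		val++;
		if (val > 0xff) {	/* mantissa overflowed; renormalize */
			val >>= 1;
			e++;
		}
	}

	*ta = (uint8_t)val;	/* threshold ~= ta << tn */
	*tn = e;
}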
2452 | ||
2453 | static const struct of_device_id dpaa_match[]; | |
2454 | ||
2455 | static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl) | |
2456 | { | |
2457 | u16 headroom; | |
2458 | ||
2459 | /* The frame headroom must accommodate: | |
2460 | * - the driver private data area | |
2461 | * - parse results, hash results, timestamp if selected | |
2462 | * If either hash results or the timestamp are selected, both will | |
2463 | * be copied to/from the frame headroom, as the TS is located between the | |
2464 | * PR and HR in the IC, and the IC copy size has a granularity of 16 bytes | |
2465 | * (see description of FMBM_RICP and FMBM_TICP registers in DPAARM) | |
2466 | * | |
2467 | * Also make sure the headroom is a multiple of data_align bytes | |
2468 | */ | |
2469 | headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE + | |
2470 | DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE); | |
2471 | ||
2472 | return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom, | |
2473 | DPAA_FD_DATA_ALIGNMENT) : | |
2474 | headroom; | |
2475 | } | |
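/* Worked example for the headroom math above, with illustrative sizes
 * (the real values come from the DPAA_* constants): priv_data_size = 16,
 * parse results = 32, timestamp = 8 and hash results = 8 give headroom =
 * 64; with DPAA_FD_DATA_ALIGNMENT = 16, ALIGN(64, 16) leaves it at 64,
 * while e.g. a 72-byte sum would be padded up to 80.
 */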
2476 | ||
2477 | static int dpaa_eth_probe(struct platform_device *pdev) | |
2478 | { | |
2479 | struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL}; | |
2480 | struct dpaa_percpu_priv *percpu_priv; | |
2481 | struct net_device *net_dev = NULL; | |
2482 | struct dpaa_fq *dpaa_fq, *tmp; | |
2483 | struct dpaa_priv *priv = NULL; | |
2484 | struct fm_port_fqs port_fqs; | |
2485 | struct mac_device *mac_dev; | |
2486 | int err = 0, i, channel; | |
2487 | struct device *dev; | |
2488 | ||
2489 | dev = &pdev->dev; | |
2490 | ||
2491 | /* Allocate this early, so we can store relevant information in | |
2492 | * the private area | |
2493 | */ | |
2494 | net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM); | |
2495 | if (!net_dev) { | |
2496 | dev_err(dev, "alloc_etherdev_mq() failed\n"); | |
2497 | goto alloc_etherdev_mq_failed; | |
2498 | } | |
2499 | ||
2500 | /* Do this here, so we can be verbose early */ | |
2501 | SET_NETDEV_DEV(net_dev, dev); | |
2502 | dev_set_drvdata(dev, net_dev); | |
2503 | ||
2504 | priv = netdev_priv(net_dev); | |
2505 | priv->net_dev = net_dev; | |
2506 | ||
2507 | priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT); | |
2508 | ||
2509 | mac_dev = dpaa_mac_dev_get(pdev); | |
2510 | if (IS_ERR(mac_dev)) { | |
2511 | dev_err(dev, "dpaa_mac_dev_get() failed\n"); | |
2512 | err = PTR_ERR(mac_dev); | |
2513 | goto mac_probe_failed; | |
2514 | } | |
2515 | ||
2516 | /* If fsl_fm_max_frm is set to a value higher than the common 1500, | |
2517 | * we choose conservatively and let the user explicitly set a higher | |
2518 | * MTU via ifconfig. Otherwise, the user may end up with different MTUs | |
2519 | * in the same LAN. | |
2520 | * If on the other hand fsl_fm_max_frm has been chosen below 1500, | |
2521 | * start with the maximum allowed. | |
2522 | */ | |
2523 | net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN); | |
2524 | ||
2525 | netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n", | |
2526 | net_dev->mtu); | |
2527 | ||
2528 | priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */ | |
2529 | priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ | |
2530 | ||
2531 | /* device used for DMA mapping */ | |
2532 | arch_setup_dma_ops(dev, 0, 0, NULL, false); | |
2533 | err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); | |
2534 | if (err) { | |
2535 | dev_err(dev, "dma_coerce_mask_and_coherent() failed\n"); | |
2536 | goto dev_mask_failed; | |
2537 | } | |
2538 | ||
2539 | /* bp init */ | |
2540 | for (i = 0; i < DPAA_BPS_NUM; i++) { | |
2541 | ||
2543 | dpaa_bps[i] = dpaa_bp_alloc(dev); | |
2544 | if (IS_ERR(dpaa_bps[i])) { | |
/* use the outer err (a shadowing declaration here would make the
 * probe error path return 0) and unwind instead of leaking net_dev
 */
err = PTR_ERR(dpaa_bps[i]);
goto bp_create_failed;
2545 | } | |
2546 | /* the raw size of the buffers used for reception */ | |
2547 | dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM); | |
2548 | /* avoid runtime computations by keeping the usable size here */ | |
2549 | dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size); | |
2550 | dpaa_bps[i]->dev = dev; | |
2551 | ||
2552 | err = dpaa_bp_alloc_pool(dpaa_bps[i]); | |
2553 | if (err < 0) { | |
2554 | dpaa_bps_free(priv); | |
2555 | priv->dpaa_bps[i] = NULL; | |
2556 | goto bp_create_failed; | |
2557 | } | |
2558 | priv->dpaa_bps[i] = dpaa_bps[i]; | |
2559 | } | |
2560 | ||
2561 | INIT_LIST_HEAD(&priv->dpaa_fq_list); | |
2562 | ||
2563 | memset(&port_fqs, 0, sizeof(port_fqs)); | |
2564 | ||
2565 | err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs); | |
2566 | if (err < 0) { | |
2567 | dev_err(dev, "dpaa_alloc_all_fqs() failed\n"); | |
2568 | goto fq_probe_failed; | |
2569 | } | |
2570 | ||
2571 | priv->mac_dev = mac_dev; | |
2572 | ||
2573 | channel = dpaa_get_channel(); | |
2574 | if (channel < 0) { | |
2575 | dev_err(dev, "dpaa_get_channel() failed\n"); | |
2576 | err = channel; | |
2577 | goto get_channel_failed; | |
2578 | } | |
2579 | ||
2580 | priv->channel = (u16)channel; | |
2581 | ||
2582 | /* Start a thread that will walk the CPUs with affine portals | |
2584 | * and add this pool channel to each one's dequeue mask. | |
2584 | */ | |
2585 | dpaa_eth_add_channel(priv->channel); | |
2586 | ||
2587 | dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); | |
2588 | ||
2589 | /* Create a congestion group for this netdev, with | |
2590 | * dynamically-allocated CGR ID. | |
2591 | * Must be executed after probing the MAC, but before | |
2592 | * assigning the egress FQs to the CGRs. | |
2593 | */ | |
2594 | err = dpaa_eth_cgr_init(priv); | |
2595 | if (err < 0) { | |
2596 | dev_err(dev, "Error initializing CGR\n"); | |
2597 | goto tx_cgr_init_failed; | |
2598 | } | |
2599 | ||
2600 | err = dpaa_ingress_cgr_init(priv); | |
2601 | if (err < 0) { | |
2602 | dev_err(dev, "Error initializing ingress CGR\n"); | |
2603 | goto rx_cgr_init_failed; | |
2604 | } | |
2605 | ||
2606 | /* Add the FQs to the interface, and make them active */ | |
2607 | list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) { | |
2608 | err = dpaa_fq_init(dpaa_fq, false); | |
2609 | if (err < 0) | |
2610 | goto fq_alloc_failed; | |
2611 | } | |
2612 | ||
2613 | priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]); | |
2614 | priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]); | |
2615 | ||
2616 | /* All real interfaces need their ports initialized */ | |
2617 | dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs, | |
2618 | &priv->buf_layout[0], dev); | |
2619 | ||
2620 | priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv); | |
2621 | if (!priv->percpu_priv) { | |
2622 | dev_err(dev, "devm_alloc_percpu() failed\n"); | |
2623 | err = -ENOMEM; | |
2624 | goto alloc_percpu_failed; | |
2625 | } | |
2626 | for_each_possible_cpu(i) { | |
2627 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); | |
2628 | memset(percpu_priv, 0, sizeof(*percpu_priv)); | |
2629 | } | |
2630 | ||
2631 | /* Initialize NAPI */ | |
2632 | err = dpaa_napi_add(net_dev); | |
2633 | if (err < 0) | |
2634 | goto napi_add_failed; | |
2635 | ||
2636 | err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout); | |
2637 | if (err < 0) | |
2638 | goto netdev_init_failed; | |
2639 | ||
2640 | dpaa_eth_sysfs_init(&net_dev->dev); | |
2641 | ||
2642 | netif_info(priv, probe, net_dev, "Probed interface %s\n", |
2643 | net_dev->name); | |
2644 | ||
2645 | return 0; | |
2646 | ||
2647 | netdev_init_failed: | |
2648 | napi_add_failed: | |
2649 | dpaa_napi_del(net_dev); | |
2650 | alloc_percpu_failed: | |
2651 | dpaa_fq_free(dev, &priv->dpaa_fq_list); | |
2652 | fq_alloc_failed: | |
2653 | qman_delete_cgr_safe(&priv->ingress_cgr); | |
2654 | qman_release_cgrid(priv->ingress_cgr.cgrid); | |
2655 | rx_cgr_init_failed: | |
2656 | qman_delete_cgr_safe(&priv->cgr_data.cgr); | |
2657 | qman_release_cgrid(priv->cgr_data.cgr.cgrid); | |
2658 | tx_cgr_init_failed: | |
2659 | get_channel_failed: | |
2660 | dpaa_bps_free(priv); | |
2661 | bp_create_failed: | |
2662 | fq_probe_failed: | |
2663 | dev_mask_failed: | |
2664 | mac_probe_failed: | |
2665 | dev_set_drvdata(dev, NULL); | |
2666 | free_netdev(net_dev); | |
2667 | alloc_etherdev_mq_failed: | |
2668 | for (i = 0; i < DPAA_BPS_NUM && dpaa_bps[i]; i++) { | |
2669 | if (atomic_read(&dpaa_bps[i]->refs) == 0) | |
2670 | devm_kfree(dev, dpaa_bps[i]); | |
2671 | } | |
2672 | return err; | |
2673 | } | |
2674 | ||
2675 | static int dpaa_remove(struct platform_device *pdev) | |
2676 | { | |
2677 | struct net_device *net_dev; | |
2678 | struct dpaa_priv *priv; | |
2679 | struct device *dev; | |
2680 | int err; | |
2681 | ||
2682 | dev = &pdev->dev; | |
2683 | net_dev = dev_get_drvdata(dev); | |
2684 | ||
2685 | priv = netdev_priv(net_dev); | |
2686 | ||
2687 | dpaa_eth_sysfs_remove(dev); | |
2688 | ||
2689 | dev_set_drvdata(dev, NULL); |
2690 | unregister_netdev(net_dev); | |
2691 | ||
2692 | err = dpaa_fq_free(dev, &priv->dpaa_fq_list); | |
2693 | ||
2694 | qman_delete_cgr_safe(&priv->ingress_cgr); | |
2695 | qman_release_cgrid(priv->ingress_cgr.cgrid); | |
2696 | qman_delete_cgr_safe(&priv->cgr_data.cgr); | |
2697 | qman_release_cgrid(priv->cgr_data.cgr.cgrid); | |
2698 | ||
2699 | dpaa_napi_del(net_dev); | |
2700 | ||
2701 | dpaa_bps_free(priv); | |
2702 | ||
2703 | free_netdev(net_dev); | |
2704 | ||
2705 | return err; | |
2706 | } | |
2707 | ||
2708 | static struct platform_device_id dpaa_devtype[] = { | |
2709 | { | |
2710 | .name = "dpaa-ethernet", | |
2711 | .driver_data = 0, | |
2712 | }, { | |
2713 | } | |
2714 | }; | |
2715 | MODULE_DEVICE_TABLE(platform, dpaa_devtype); | |
2716 | ||
2717 | static struct platform_driver dpaa_driver = { | |
2718 | .driver = { | |
2719 | .name = KBUILD_MODNAME, | |
2720 | }, | |
2721 | .id_table = dpaa_devtype, | |
2722 | .probe = dpaa_eth_probe, | |
2723 | .remove = dpaa_remove | |
2724 | }; | |
2725 | ||
2726 | static int __init dpaa_load(void) | |
2727 | { | |
2728 | int err; | |
2729 | ||
2730 | pr_debug("FSL DPAA Ethernet driver\n"); | |
2731 | ||
2732 | /* initialize dpaa_eth mirror values */ | |
2733 | dpaa_rx_extra_headroom = fman_get_rx_extra_headroom(); | |
2734 | dpaa_max_frm = fman_get_max_frm(); | |
2735 | ||
2736 | err = platform_driver_register(&dpaa_driver); | |
2737 | if (err < 0) | |
2738 | pr_err("Error, platform_driver_register() = %d\n", err); | |
2739 | ||
2740 | return err; | |
2741 | } | |
2742 | module_init(dpaa_load); | |
2743 | ||
2744 | static void __exit dpaa_unload(void) | |
2745 | { | |
2746 | platform_driver_unregister(&dpaa_driver); | |
2747 | ||
2748 | /* Only one channel is used and needs to be released after all | |
2749 | * interfaces are removed | |
2750 | */ | |
2751 | dpaa_release_channel(); | |
2752 | } | |
2753 | module_exit(dpaa_unload); | |
2754 | ||
2755 | MODULE_LICENSE("Dual BSD/GPL"); | |
2756 | MODULE_DESCRIPTION("FSL DPAA Ethernet driver"); |