/* Copyright 2008 - 2016 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/io.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/icmp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/percpu.h>
#include <linux/dma-mapping.h>
#include <linux/sort.h>
#include <soc/fsl/bman.h>
#include <soc/fsl/qman.h>

#include "fman.h"
#include "fman_port.h"
#include "mac.h"
#include "dpaa_eth.h"
/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa_eth_trace.h"

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Module/Driver verbosity level (0=none,...,16=all)");

static u16 tx_timeout = 1000;
module_param(tx_timeout, ushort, 0444);
MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");

#define FM_FD_STAT_RX_ERRORS \
        (FM_FD_ERR_DMA | FM_FD_ERR_PHYSICAL | \
         FM_FD_ERR_SIZE | FM_FD_ERR_CLS_DISCARD | \
         FM_FD_ERR_EXTRACTION | FM_FD_ERR_NO_SCHEME | \
         FM_FD_ERR_PRS_TIMEOUT | FM_FD_ERR_PRS_ILL_INSTRUCT | \
         FM_FD_ERR_PRS_HDR_ERR)

#define FM_FD_STAT_TX_ERRORS \
        (FM_FD_ERR_UNSUPPORTED_FORMAT | \
         FM_FD_ERR_LENGTH | FM_FD_ERR_DMA)

#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
                          NETIF_MSG_LINK | NETIF_MSG_IFUP | \
                          NETIF_MSG_IFDOWN)

#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
/* Ingress congestion threshold on FMan ports
 * The size in bytes of the ingress tail-drop threshold on FMan ports.
 * Traffic piling up above this value will be rejected by QMan and discarded
 * by FMan.
 */

/* Size in bytes of the FQ taildrop threshold */
#define DPAA_FQ_TD 0x200000

#define DPAA_CS_THRESHOLD_1G 0x06000000
/* Egress congestion threshold on 1G ports, range 0x1000 .. 0x10000000
 * The size in bytes of the egress Congestion State notification threshold on
 * 1G ports. The 1G dTSECs can quite easily be flooded by cores doing Tx in a
 * tight loop (e.g. by sending UDP datagrams at "while(1) speed"),
 * and the larger the frame size, the more acute the problem.
 * So we have to find a balance between these factors:
 * - avoiding the device staying congested for a prolonged time (risking
 *   that the netdev watchdog will fire - see also the tx_timeout module
 *   param);
 * - affecting performance of protocols such as TCP, which otherwise
 *   behave well under the congestion notification mechanism;
 * - preventing the Tx cores from tightly-looping (as if the congestion
 *   threshold was too low to be effective);
 * - running out of memory if the CS threshold is set too high.
 */

#define DPAA_CS_THRESHOLD_10G 0x10000000
/* The size in bytes of the egress Congestion State notification threshold on
 * 10G ports, range 0x1000 .. 0x10000000
 */

/* Largest value that the FQD's OAL field can hold */
#define FSL_QMAN_MAX_OAL 127

/* Default alignment for start of data in an Rx FD */
#define DPAA_FD_DATA_ALIGNMENT 16

/* Values for the L3R field of the FM Parse Results */
/* L3 Type field: First IP Present IPv4 */
#define FM_L3_PARSE_RESULT_IPV4 0x8000
/* L3 Type field: First IP Present IPv6 */
#define FM_L3_PARSE_RESULT_IPV6 0x4000
/* Values for the L4R field of the FM Parse Results */
/* L4 Type field: UDP */
#define FM_L4_PARSE_RESULT_UDP 0x40
/* L4 Type field: TCP */
#define FM_L4_PARSE_RESULT_TCP 0x20

/* FD status field indicating whether the FM Parser has attempted to validate
 * the L4 csum of the frame.
 * Note that having this bit set doesn't necessarily imply that the checksum
 * is valid. One would have to check the parse results to find that out.
 */
#define FM_FD_STAT_L4CV 0x00000004

#define DPAA_SGT_MAX_ENTRIES 16 /* maximum number of entries in SG Table */
#define DPAA_BUFF_RELEASE_MAX 8 /* maximum number of buffers released at once */

#define FSL_DPAA_BPID_INV 0xff
#define FSL_DPAA_ETH_MAX_BUF_COUNT 128
#define FSL_DPAA_ETH_REFILL_THRESHOLD 80

#define DPAA_TX_PRIV_DATA_SIZE 16
#define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
#define DPAA_TIME_STAMP_SIZE 8
#define DPAA_HASH_RESULTS_SIZE 8
#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
                                     dpaa_rx_extra_headroom)

#define DPAA_ETH_PCD_RXQ_NUM 128

#define DPAA_ENQUEUE_RETRIES 100000

enum port_type {RX, TX};

struct fm_port_fqs {
        struct dpaa_fq *tx_defq;
        struct dpaa_fq *tx_errq;
        struct dpaa_fq *rx_defq;
        struct dpaa_fq *rx_errq;
        struct dpaa_fq *rx_pcdq;
};

/* All the dpa bps in use at any moment */
static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];

/* The raw buffer size must be cacheline aligned */
#define DPAA_BP_RAW_SIZE 4096
/* When using more than one buffer pool, the raw sizes are as follows:
 * 1 bp: 4KB
 * 2 bp: 2KB, 4KB
 * 3 bp: 1KB, 2KB, 4KB
 * 4 bp: 1KB, 2KB, 4KB, 8KB
 */
static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
{
        size_t res = DPAA_BP_RAW_SIZE / 4;
        u8 i;

        for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
                res *= 2;
        return res;
}
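
/* Illustrative values only (not part of the original source): the loop
 * above doubles the 1KB base (DPAA_BP_RAW_SIZE / 4) until i reaches
 * 3 + index, starting from min(cnt, 3). With cnt = 4 pools:
 *   bpool_buffer_raw_size(0, 4) = 1024  (1KB)
 *   bpool_buffer_raw_size(1, 4) = 2048  (2KB)
 *   bpool_buffer_raw_size(2, 4) = 4096  (4KB)
 *   bpool_buffer_raw_size(3, 4) = 8192  (8KB)
 * With a single pool (cnt = 1), the loop runs for i = 1, 2, yielding
 * 1024 * 2 * 2 = 4096, which matches the table above.
 */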

/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
 * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
 * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
 * half-page-aligned buffers, so we reserve some more space for start-of-buffer
 * alignment.
 */
#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
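
/* A sketch of the resulting arithmetic, assuming a 4KB raw buffer and
 * 64-byte cachelines (both configuration-dependent):
 *   dpaa_bp_size(4096) = SKB_WITH_OVERHEAD(4096 - 64)
 *                      = 4032 - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 * i.e. the usable frame data size is the raw size minus the start-of-buffer
 * alignment reserve and the skb_shared_info overhead appended by the stack.
 */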

static int dpaa_max_frm;

static int dpaa_rx_extra_headroom;

#define dpaa_get_max_mtu() \
        (dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
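
/* Example (assuming the common FMan default of a 1522-byte maximum frame):
 * dpaa_get_max_mtu() = 1522 - (18 + 4) = 1500, i.e. the standard Ethernet
 * MTU once the VLAN-tagged header (VLAN_ETH_HLEN) and FCS are subtracted.
 */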

static int dpaa_netdev_init(struct net_device *net_dev,
                            const struct net_device_ops *dpaa_ops,
                            u16 tx_timeout)
{
        struct dpaa_priv *priv = netdev_priv(net_dev);
        struct device *dev = net_dev->dev.parent;
        struct dpaa_percpu_priv *percpu_priv;
        const u8 *mac_addr;
        int i, err;

        /* Although we access another CPU's private data here
         * we do it at initialization so it is safe
         */
        for_each_possible_cpu(i) {
                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
                percpu_priv->net_dev = net_dev;
        }

        net_dev->netdev_ops = dpaa_ops;
        mac_addr = priv->mac_dev->addr;

        net_dev->mem_start = priv->mac_dev->res->start;
        net_dev->mem_end = priv->mac_dev->res->end;

        net_dev->min_mtu = ETH_MIN_MTU;
        net_dev->max_mtu = dpaa_get_max_mtu();

        net_dev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                                 NETIF_F_LLTX | NETIF_F_RXHASH);

        net_dev->hw_features |= NETIF_F_SG | NETIF_F_HIGHDMA;
        /* The kernel enables GSO automatically if we declare NETIF_F_SG.
         * For conformity, we'll still declare GSO explicitly.
         */
        net_dev->features |= NETIF_F_GSO;
        net_dev->features |= NETIF_F_RXCSUM;

        net_dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        /* we do not want shared skbs on TX */
        net_dev->priv_flags &= ~IFF_TX_SKB_SHARING;

        net_dev->features |= net_dev->hw_features;
        net_dev->vlan_features = net_dev->features;

        memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
        memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);

        net_dev->ethtool_ops = &dpaa_ethtool_ops;

        net_dev->needed_headroom = priv->tx_headroom;
        net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);

        /* start without the RUNNING flag, phylib controls it later */
        netif_carrier_off(net_dev);

        err = register_netdev(net_dev);
        if (err < 0) {
                dev_err(dev, "register_netdev() = %d\n", err);
                return err;
        }

        return 0;
}

static int dpaa_stop(struct net_device *net_dev)
{
        struct mac_device *mac_dev;
        struct dpaa_priv *priv;
        int i, err, error;

        priv = netdev_priv(net_dev);
        mac_dev = priv->mac_dev;

        netif_tx_stop_all_queues(net_dev);
        /* Allow the Fman (Tx) port to process in-flight frames before we
         * try switching it off.
         */
        usleep_range(5000, 10000);

        err = mac_dev->stop(mac_dev);
        if (err < 0)
                netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
                          err);

        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
                error = fman_port_disable(mac_dev->port[i]);
                if (error)
                        err = error;
        }

        if (net_dev->phydev)
                phy_disconnect(net_dev->phydev);
        net_dev->phydev = NULL;

        return err;
}

static void dpaa_tx_timeout(struct net_device *net_dev)
{
        struct dpaa_percpu_priv *percpu_priv;
        const struct dpaa_priv *priv;

        priv = netdev_priv(net_dev);
        percpu_priv = this_cpu_ptr(priv->percpu_priv);

        netif_crit(priv, timer, net_dev, "Transmit timeout latency: %u ms\n",
                   jiffies_to_msecs(jiffies - dev_trans_start(net_dev)));

        percpu_priv->stats.tx_errors++;
}

/* Calculates the statistics for the given device by adding the statistics
 * collected by each CPU.
 */
static void dpaa_get_stats64(struct net_device *net_dev,
                             struct rtnl_link_stats64 *s)
{
        int numstats = sizeof(struct rtnl_link_stats64) / sizeof(u64);
        struct dpaa_priv *priv = netdev_priv(net_dev);
        struct dpaa_percpu_priv *percpu_priv;
        u64 *netstats = (u64 *)s;
        u64 *cpustats;
        int i, j;

        for_each_possible_cpu(i) {
                percpu_priv = per_cpu_ptr(priv->percpu_priv, i);

                cpustats = (u64 *)&percpu_priv->stats;

                /* add stats from all CPUs */
                for (j = 0; j < numstats; j++)
                        netstats[j] += cpustats[j];
        }
}

static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
                         void *type_data)
{
        struct dpaa_priv *priv = netdev_priv(net_dev);
        struct tc_mqprio_qopt *mqprio = type_data;
        u8 num_tc;
        int i;

        if (type != TC_SETUP_MQPRIO)
                return -EOPNOTSUPP;

        mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
        num_tc = mqprio->num_tc;

        if (num_tc == priv->num_tc)
                return 0;

        if (!num_tc) {
                netdev_reset_tc(net_dev);
                goto out;
        }

        if (num_tc > DPAA_TC_NUM) {
                netdev_err(net_dev, "Too many traffic classes: max %d supported.\n",
                           DPAA_TC_NUM);
                return -EINVAL;
        }

        netdev_set_num_tc(net_dev, num_tc);

        for (i = 0; i < num_tc; i++)
                netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
                                    i * DPAA_TC_TXQ_NUM);

out:
        priv->num_tc = num_tc ? : 1;
        netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
        return 0;
}

static struct mac_device *dpaa_mac_dev_get(struct platform_device *pdev)
{
        struct dpaa_eth_data *eth_data;
        struct device *dpaa_dev;
        struct mac_device *mac_dev;

        dpaa_dev = &pdev->dev;
        eth_data = dpaa_dev->platform_data;
        if (!eth_data) {
                dev_err(dpaa_dev, "eth_data missing\n");
                return ERR_PTR(-ENODEV);
        }
        mac_dev = eth_data->mac_dev;
        if (!mac_dev) {
                dev_err(dpaa_dev, "mac_dev missing\n");
                return ERR_PTR(-EINVAL);
        }

        return mac_dev;
}

static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
{
        const struct dpaa_priv *priv;
        struct mac_device *mac_dev;
        struct sockaddr old_addr;
        int err;

        priv = netdev_priv(net_dev);

        memcpy(old_addr.sa_data, net_dev->dev_addr, ETH_ALEN);

        err = eth_mac_addr(net_dev, addr);
        if (err < 0) {
                netif_err(priv, drv, net_dev, "eth_mac_addr() = %d\n", err);
                return err;
        }

        mac_dev = priv->mac_dev;

        err = mac_dev->change_addr(mac_dev->fman_mac,
                                   (enet_addr_t *)net_dev->dev_addr);
        if (err < 0) {
                netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
                          err);
                /* reverting to previous address */
                eth_mac_addr(net_dev, &old_addr);

                return err;
        }

        return 0;
}

static void dpaa_set_rx_mode(struct net_device *net_dev)
{
        const struct dpaa_priv *priv;
        int err;

        priv = netdev_priv(net_dev);

        if (!!(net_dev->flags & IFF_PROMISC) != priv->mac_dev->promisc) {
                priv->mac_dev->promisc = !priv->mac_dev->promisc;
                err = priv->mac_dev->set_promisc(priv->mac_dev->fman_mac,
                                                 priv->mac_dev->promisc);
                if (err < 0)
                        netif_err(priv, drv, net_dev,
                                  "mac_dev->set_promisc() = %d\n",
                                  err);
        }

        err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
        if (err < 0)
                netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
                          err);
}

static struct dpaa_bp *dpaa_bpid2pool(int bpid)
{
        if (WARN_ON(bpid < 0 || bpid >= BM_MAX_NUM_OF_POOLS))
                return NULL;

        return dpaa_bp_array[bpid];
}

/* checks if this bpool is already allocated */
static bool dpaa_bpid2pool_use(int bpid)
{
        if (dpaa_bpid2pool(bpid)) {
                atomic_inc(&dpaa_bp_array[bpid]->refs);
                return true;
        }

        return false;
}

/* called only once per bpid by dpaa_bp_alloc_pool() */
static void dpaa_bpid2pool_map(int bpid, struct dpaa_bp *dpaa_bp)
{
        dpaa_bp_array[bpid] = dpaa_bp;
        atomic_set(&dpaa_bp->refs, 1);
}

static int dpaa_bp_alloc_pool(struct dpaa_bp *dpaa_bp)
{
        int err;

        if (dpaa_bp->size == 0 || dpaa_bp->config_count == 0) {
                pr_err("%s: Buffer pool is not properly initialized! Missing size or initial number of buffers\n",
                       __func__);
                return -EINVAL;
        }

        /* If the pool is already specified, we only create one per bpid */
        if (dpaa_bp->bpid != FSL_DPAA_BPID_INV &&
            dpaa_bpid2pool_use(dpaa_bp->bpid))
                return 0;

        if (dpaa_bp->bpid == FSL_DPAA_BPID_INV) {
                dpaa_bp->pool = bman_new_pool();
                if (!dpaa_bp->pool) {
                        pr_err("%s: bman_new_pool() failed\n",
                               __func__);
                        return -ENODEV;
                }

                dpaa_bp->bpid = (u8)bman_get_bpid(dpaa_bp->pool);
        }

        if (dpaa_bp->seed_cb) {
                err = dpaa_bp->seed_cb(dpaa_bp);
                if (err)
                        goto pool_seed_failed;
        }

        dpaa_bpid2pool_map(dpaa_bp->bpid, dpaa_bp);

        return 0;

pool_seed_failed:
        pr_err("%s: pool seeding failed\n", __func__);
        bman_free_pool(dpaa_bp->pool);

        return err;
}

/* remove and free all the buffers from the given buffer pool */
static void dpaa_bp_drain(struct dpaa_bp *bp)
{
        u8 num = 8;
        int ret;

        do {
                struct bm_buffer bmb[8];
                int i;

                ret = bman_acquire(bp->pool, bmb, num);
                if (ret < 0) {
                        if (num == 8) {
                                /* we have less than 8 buffers left;
                                 * drain them one by one
                                 */
                                num = 1;
                                ret = 1;
                                continue;
                        } else {
                                /* Pool is fully drained */
                                break;
                        }
                }

                if (bp->free_buf_cb)
                        for (i = 0; i < num; i++)
                                bp->free_buf_cb(bp, &bmb[i]);
        } while (ret > 0);
}

static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
{
        struct dpaa_bp *bp = dpaa_bpid2pool(dpaa_bp->bpid);

        /* the mapping between bpid and dpaa_bp is done very late in the
         * allocation procedure; if something failed before the mapping, the bp
         * was not configured, therefore we don't need the below instructions
         */
        if (!bp)
                return;

        if (!atomic_dec_and_test(&bp->refs))
                return;

        if (bp->free_buf_cb)
                dpaa_bp_drain(bp);

        dpaa_bp_array[bp->bpid] = NULL;
        bman_free_pool(bp->pool);
}

static void dpaa_bps_free(struct dpaa_priv *priv)
{
        int i;

        for (i = 0; i < DPAA_BPS_NUM; i++)
                dpaa_bp_free(priv->dpaa_bps[i]);
}

/* Use multiple WQs for FQ assignment:
 *	- Tx Confirmation queues go to WQ1.
 *	- Rx Error and Tx Error queues go to WQ5 (giving them a better chance
 *	  to be scheduled, in case there are many more FQs in WQ6).
 *	- Rx Default goes to WQ6.
 *	- Tx queues go to different WQs depending on their priority. Equal
 *	  chunks of NR_CPUS queues go to WQ6 (lowest priority), WQ2, WQ1 and
 *	  WQ0 (highest priority).
 * This ensures that Tx-confirmed buffers are released in a timely manner. In
 * particular, it avoids congestion on the Tx Confirm FQs, which can pile up
 * PFDRs if they are greatly outnumbered by other FQs in the system, while
 * dequeue scheduling is round-robin.
 */
static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
{
        switch (fq->fq_type) {
        case FQ_TYPE_TX_CONFIRM:
        case FQ_TYPE_TX_CONF_MQ:
                fq->wq = 1;
                break;
        case FQ_TYPE_RX_ERROR:
        case FQ_TYPE_TX_ERROR:
                fq->wq = 5;
                break;
        case FQ_TYPE_RX_DEFAULT:
        case FQ_TYPE_RX_PCD:
                fq->wq = 6;
                break;
        case FQ_TYPE_TX:
                switch (idx / DPAA_TC_TXQ_NUM) {
                case 0:
                        /* Low priority (best effort) */
                        fq->wq = 6;
                        break;
                case 1:
                        /* Medium priority */
                        fq->wq = 2;
                        break;
                case 2:
                        /* High priority */
                        fq->wq = 1;
                        break;
                case 3:
                        /* Very high priority */
                        fq->wq = 0;
                        break;
                default:
                        WARN(1, "Too many TX FQs: more than %d!\n",
                             DPAA_ETH_TXQ_NUM);
                }
                break;
        default:
                WARN(1, "Invalid FQ type %d for FQID %d!\n",
                     fq->fq_type, fq->fqid);
        }
}

static struct dpaa_fq *dpaa_fq_alloc(struct device *dev,
                                     u32 start, u32 count,
                                     struct list_head *list,
                                     enum dpaa_fq_type fq_type)
{
        struct dpaa_fq *dpaa_fq;
        int i;

        dpaa_fq = devm_kzalloc(dev, sizeof(*dpaa_fq) * count,
                               GFP_KERNEL);
        if (!dpaa_fq)
                return NULL;

        for (i = 0; i < count; i++) {
                dpaa_fq[i].fq_type = fq_type;
                dpaa_fq[i].fqid = start ? start + i : 0;
                list_add_tail(&dpaa_fq[i].list, list);
        }

        for (i = 0; i < count; i++)
                dpaa_assign_wq(dpaa_fq + i, i);

        return dpaa_fq;
}

static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
                              struct fm_port_fqs *port_fqs)
{
        struct dpaa_fq *dpaa_fq;
        u32 fq_base, fq_base_aligned, i;

        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_ERROR);
        if (!dpaa_fq)
                goto fq_alloc_failed;

        port_fqs->rx_errq = &dpaa_fq[0];

        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_RX_DEFAULT);
        if (!dpaa_fq)
                goto fq_alloc_failed;

        port_fqs->rx_defq = &dpaa_fq[0];

        /* the PCD FQIDs range needs to be aligned for correct operation */
        if (qman_alloc_fqid_range(&fq_base, 2 * DPAA_ETH_PCD_RXQ_NUM))
                goto fq_alloc_failed;

        fq_base_aligned = ALIGN(fq_base, DPAA_ETH_PCD_RXQ_NUM);

        for (i = fq_base; i < fq_base_aligned; i++)
                qman_release_fqid(i);

        for (i = fq_base_aligned + DPAA_ETH_PCD_RXQ_NUM;
             i < (fq_base + 2 * DPAA_ETH_PCD_RXQ_NUM); i++)
                qman_release_fqid(i);
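
        /* Worked example of the alignment trick above (hypothetical FQIDs,
         * for illustration only): if qman_alloc_fqid_range() hands back
         * fq_base = 0x5a0 for 2 * 128 = 256 FQIDs (0x5a0..0x69f), then
         * fq_base_aligned = 0x600. The first loop releases the unaligned
         * prefix 0x5a0..0x5ff and the second releases the tail 0x680..0x69f,
         * leaving exactly the 128 aligned FQIDs 0x600..0x67f for the PCD
         * Rx queues.
         */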

        dpaa_fq = dpaa_fq_alloc(dev, fq_base_aligned, DPAA_ETH_PCD_RXQ_NUM,
                                list, FQ_TYPE_RX_PCD);
        if (!dpaa_fq)
                goto fq_alloc_failed;

        port_fqs->rx_pcdq = &dpaa_fq[0];

        if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
                goto fq_alloc_failed;

        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
        if (!dpaa_fq)
                goto fq_alloc_failed;

        port_fqs->tx_errq = &dpaa_fq[0];

        dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_CONFIRM);
        if (!dpaa_fq)
                goto fq_alloc_failed;

        port_fqs->tx_defq = &dpaa_fq[0];

        if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
                goto fq_alloc_failed;

        return 0;

fq_alloc_failed:
        dev_err(dev, "dpaa_fq_alloc() failed\n");
        return -ENOMEM;
}

static u32 rx_pool_channel;
static DEFINE_SPINLOCK(rx_pool_channel_init);

static int dpaa_get_channel(void)
{
        spin_lock(&rx_pool_channel_init);
        if (!rx_pool_channel) {
                u32 pool;
                int ret;

                ret = qman_alloc_pool(&pool);

                if (!ret)
                        rx_pool_channel = pool;
        }
        spin_unlock(&rx_pool_channel_init);
        if (!rx_pool_channel)
                return -ENOMEM;
        return rx_pool_channel;
}

static void dpaa_release_channel(void)
{
        qman_release_pool(rx_pool_channel);
}

static void dpaa_eth_add_channel(u16 channel)
{
        u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
        const cpumask_t *cpus = qman_affine_cpus();
        struct qman_portal *portal;
        int cpu;

        for_each_cpu(cpu, cpus) {
                portal = qman_get_affine_portal(cpu);
                qman_p_static_dequeue_add(portal, pool);
        }
}

/* Congestion group state change notification callback.
 * Stops the device's egress queues while they are congested and
 * wakes them upon exiting congested state.
 * Also updates some CGR-related stats.
 */
static void dpaa_eth_cgscn(struct qman_portal *qm, struct qman_cgr *cgr,
                           int congested)
{
        struct dpaa_priv *priv = (struct dpaa_priv *)container_of(cgr,
                struct dpaa_priv, cgr_data.cgr);

        if (congested) {
                priv->cgr_data.congestion_start_jiffies = jiffies;
                netif_tx_stop_all_queues(priv->net_dev);
                priv->cgr_data.cgr_congested_count++;
        } else {
                priv->cgr_data.congested_jiffies +=
                        (jiffies - priv->cgr_data.congestion_start_jiffies);
                netif_tx_wake_all_queues(priv->net_dev);
        }
}

static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
{
        struct qm_mcc_initcgr initcgr;
        u32 cs_th;
        int err;

        err = qman_alloc_cgrid(&priv->cgr_data.cgr.cgrid);
        if (err < 0) {
                if (netif_msg_drv(priv))
                        pr_err("%s: Error %d allocating CGR ID\n",
                               __func__, err);
                goto out_error;
        }
        priv->cgr_data.cgr.cb = dpaa_eth_cgscn;

        /* Enable Congestion State Change Notifications and CS taildrop */
        memset(&initcgr, 0, sizeof(initcgr));
        initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
        initcgr.cgr.cscn_en = QM_CGR_EN;

        /* Set different thresholds based on the MAC speed.
         * This may turn suboptimal if the MAC is reconfigured at a speed
         * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
         * In such cases, we ought to reconfigure the threshold, too.
         */
        if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
                cs_th = DPAA_CS_THRESHOLD_10G;
        else
                cs_th = DPAA_CS_THRESHOLD_1G;
        qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1);

        initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN);
        initcgr.cgr.cstd_en = QM_CGR_EN;

        err = qman_create_cgr(&priv->cgr_data.cgr, QMAN_CGR_FLAG_USE_INIT,
                              &initcgr);
        if (err < 0) {
                if (netif_msg_drv(priv))
                        pr_err("%s: Error %d creating CGR with ID %d\n",
                               __func__, err, priv->cgr_data.cgr.cgrid);
                qman_release_cgrid(priv->cgr_data.cgr.cgrid);
                goto out_error;
        }
        if (netif_msg_drv(priv))
                pr_debug("Created CGR %d for netdev with hwaddr %pM on QMan channel %d\n",
                         priv->cgr_data.cgr.cgrid, priv->mac_dev->addr,
                         priv->cgr_data.cgr.chan);

out_error:
        return err;
}

static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
                                      struct dpaa_fq *fq,
                                      const struct qman_fq *template)
{
        fq->fq_base = *template;
        fq->net_dev = priv->net_dev;

        fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
        fq->channel = priv->channel;
}

static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
                                     struct dpaa_fq *fq,
                                     struct fman_port *port,
                                     const struct qman_fq *template)
{
        fq->fq_base = *template;
        fq->net_dev = priv->net_dev;

        if (port) {
                fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
                fq->channel = (u16)fman_port_get_qman_channel_id(port);
        } else {
                fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
        }
}

static void dpaa_fq_setup(struct dpaa_priv *priv,
                          const struct dpaa_fq_cbs *fq_cbs,
                          struct fman_port *tx_port)
{
        int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
        const cpumask_t *affine_cpus = qman_affine_cpus();
        u16 channels[NR_CPUS];
        struct dpaa_fq *fq;

        for_each_cpu(cpu, affine_cpus)
                channels[num_portals++] = qman_affine_channel(cpu);

        if (num_portals == 0)
                dev_err(priv->net_dev->dev.parent,
                        "No Qman software (affine) channels found");

        /* Initialize each FQ in the list */
        list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
                switch (fq->fq_type) {
                case FQ_TYPE_RX_DEFAULT:
                        dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
                        break;
                case FQ_TYPE_RX_ERROR:
                        dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
                        break;
                case FQ_TYPE_RX_PCD:
                        if (!num_portals)
                                continue;
                        dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
                        fq->channel = channels[portal_cnt++ % num_portals];
                        break;
                case FQ_TYPE_TX:
                        dpaa_setup_egress(priv, fq, tx_port,
                                          &fq_cbs->egress_ern);
                        /* If we have more Tx queues than the number of cores,
                         * just ignore the extra ones.
                         */
                        if (egress_cnt < DPAA_ETH_TXQ_NUM)
                                priv->egress_fqs[egress_cnt++] = &fq->fq_base;
                        break;
                case FQ_TYPE_TX_CONF_MQ:
                        priv->conf_fqs[conf_cnt++] = &fq->fq_base;
                        /* fall through */
                case FQ_TYPE_TX_CONFIRM:
                        dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
                        break;
                case FQ_TYPE_TX_ERROR:
                        dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
                        break;
                default:
                        dev_warn(priv->net_dev->dev.parent,
                                 "Unknown FQ type detected!\n");
                        break;
                }
        }

        /* Make sure all CPUs receive a corresponding Tx queue. */
        while (egress_cnt < DPAA_ETH_TXQ_NUM) {
                list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
                        if (fq->fq_type != FQ_TYPE_TX)
                                continue;
                        priv->egress_fqs[egress_cnt++] = &fq->fq_base;
                        if (egress_cnt == DPAA_ETH_TXQ_NUM)
                                break;
                }
        }
}

static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
                                   struct qman_fq *tx_fq)
{
        int i;

        for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
                if (priv->egress_fqs[i] == tx_fq)
                        return i;

        return -EINVAL;
}

static int dpaa_fq_init(struct dpaa_fq *dpaa_fq, bool td_enable)
{
        const struct dpaa_priv *priv;
        struct qman_fq *confq = NULL;
        struct qm_mcc_initfq initfq;
        struct device *dev;
        struct qman_fq *fq;
        int queue_id;
        int err;

        priv = netdev_priv(dpaa_fq->net_dev);
        dev = dpaa_fq->net_dev->dev.parent;

        if (dpaa_fq->fqid == 0)
                dpaa_fq->flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;

        dpaa_fq->init = !(dpaa_fq->flags & QMAN_FQ_FLAG_NO_MODIFY);

        err = qman_create_fq(dpaa_fq->fqid, dpaa_fq->flags, &dpaa_fq->fq_base);
        if (err) {
                dev_err(dev, "qman_create_fq() failed\n");
                return err;
        }
        fq = &dpaa_fq->fq_base;

        if (dpaa_fq->init) {
                memset(&initfq, 0, sizeof(initfq));

                initfq.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL);
                /* Note: we may get to keep an empty FQ in cache */
                initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_PREFERINCACHE);

                /* Try to reduce the number of portal interrupts for
                 * Tx Confirmation FQs.
                 */
                if (dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM)
                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_AVOIDBLOCK);

                /* FQ placement */
                initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_DESTWQ);

                qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);

                /* Put all egress queues in a congestion group of their own.
                 * Sensu stricto, the Tx confirmation queues are Rx FQs,
                 * rather than Tx - but they nonetheless account for the
                 * memory footprint on behalf of egress traffic. We therefore
                 * place them in the netdev's CGR, along with the Tx FQs.
                 */
                if (dpaa_fq->fq_type == FQ_TYPE_TX ||
                    dpaa_fq->fq_type == FQ_TYPE_TX_CONFIRM ||
                    dpaa_fq->fq_type == FQ_TYPE_TX_CONF_MQ) {
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
                        initfq.fqd.cgid = (u8)priv->cgr_data.cgr.cgrid;
                        /* Set a fixed overhead accounting, in an attempt to
                         * reduce the impact of fixed-size skb shells and the
                         * driver's needed headroom on system memory. This is
                         * especially the case when the egress traffic is
                         * composed of small datagrams.
                         * Unfortunately, QMan's OAL value is capped to an
                         * insufficient value, but even that is better than
                         * no overhead accounting at all.
                         */
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
                        qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
                        qm_fqd_set_oal(&initfq.fqd,
                                       min(sizeof(struct sk_buff) +
                                           priv->tx_headroom,
                                           (size_t)FSL_QMAN_MAX_OAL));
                }
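
                /* Rough numbers (configuration-dependent, for illustration
                 * only): sizeof(struct sk_buff) is typically over 200 bytes
                 * on 64-bit kernels, so the min() above effectively clamps
                 * the per-frame overhead accounting to FSL_QMAN_MAX_OAL
                 * (127) bytes - hence the "insufficient value" remark in
                 * the comment above.
                 */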

                if (td_enable) {
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_TDTHRESH);
                        qm_fqd_set_taildrop(&initfq.fqd, DPAA_FQ_TD, 1);
                        initfq.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_TDE);
                }

                if (dpaa_fq->fq_type == FQ_TYPE_TX) {
                        queue_id = dpaa_tx_fq_to_id(priv, &dpaa_fq->fq_base);
                        if (queue_id >= 0)
                                confq = priv->conf_fqs[queue_id];
                        if (confq) {
                                initfq.we_mask |=
                                        cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
                                /* ContextA: OVOM=1 (use contextA2 bits instead of ICAD)
                                 *           A2V=1 (contextA A2 field is valid)
                                 *           A0V=1 (contextA A0 field is valid)
                                 *           B0V=1 (contextB field is valid)
                                 * ContextA A2: EBD=1 (deallocate buffers inside FMan)
                                 * ContextB B0(ASPID): 0 (absolute Virtual Storage ID)
                                 */
                                qm_fqd_context_a_set64(&initfq.fqd,
                                                       0x1e00000080000000ULL);
                        }
                }

                /* Put all the ingress queues in our "ingress CGR". */
                if (priv->use_ingress_cgr &&
                    (dpaa_fq->fq_type == FQ_TYPE_RX_DEFAULT ||
                     dpaa_fq->fq_type == FQ_TYPE_RX_ERROR ||
                     dpaa_fq->fq_type == FQ_TYPE_RX_PCD)) {
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CGID);
                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_CGE);
                        initfq.fqd.cgid = (u8)priv->ingress_cgr.cgrid;
                        /* Set a fixed overhead accounting, just like for the
                         * egress CGR.
                         */
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_OAC);
                        qm_fqd_set_oac(&initfq.fqd, QM_OAC_CG);
                        qm_fqd_set_oal(&initfq.fqd,
                                       min(sizeof(struct sk_buff) +
                                           priv->tx_headroom,
                                           (size_t)FSL_QMAN_MAX_OAL));
                }

                /* Initialization common to all ingress queues */
                if (dpaa_fq->flags & QMAN_FQ_FLAG_NO_ENQUEUE) {
                        initfq.we_mask |= cpu_to_be16(QM_INITFQ_WE_CONTEXTA);
                        initfq.fqd.fq_ctrl |= cpu_to_be16(QM_FQCTRL_HOLDACTIVE |
                                                          QM_FQCTRL_CTXASTASHING);
                        initfq.fqd.context_a.stashing.exclusive =
                                QM_STASHING_EXCL_DATA | QM_STASHING_EXCL_CTX |
                                QM_STASHING_EXCL_ANNOTATION;
                        qm_fqd_set_stashing(&initfq.fqd, 1, 2,
                                            DIV_ROUND_UP(sizeof(struct qman_fq),
                                                         64));
                }

                err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
                if (err < 0) {
                        dev_err(dev, "qman_init_fq(%u) = %d\n",
                                qman_fq_fqid(fq), err);
                        qman_destroy_fq(fq);
                        return err;
                }
        }

        dpaa_fq->fqid = qman_fq_fqid(fq);

        return 0;
}

static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
{
        const struct dpaa_priv *priv;
        struct dpaa_fq *dpaa_fq;
        int err, error;

        err = 0;

        dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
        priv = netdev_priv(dpaa_fq->net_dev);

        if (dpaa_fq->init) {
                err = qman_retire_fq(fq, NULL);
                if (err < 0 && netif_msg_drv(priv))
                        dev_err(dev, "qman_retire_fq(%u) = %d\n",
                                qman_fq_fqid(fq), err);

                error = qman_oos_fq(fq);
                if (error < 0 && netif_msg_drv(priv)) {
                        dev_err(dev, "qman_oos_fq(%u) = %d\n",
                                qman_fq_fqid(fq), error);
                        if (err >= 0)
                                err = error;
                }
        }

        qman_destroy_fq(fq);
        list_del(&dpaa_fq->list);

        return err;
}

static int dpaa_fq_free(struct device *dev, struct list_head *list)
{
        struct dpaa_fq *dpaa_fq, *tmp;
        int err, error;

        err = 0;
        list_for_each_entry_safe(dpaa_fq, tmp, list, list) {
                error = dpaa_fq_free_entry(dev, (struct qman_fq *)dpaa_fq);
                if (error < 0 && err >= 0)
                        err = error;
        }

        return err;
}

static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
                                 struct dpaa_fq *defq,
                                 struct dpaa_buffer_layout *buf_layout)
{
        struct fman_buffer_prefix_content buf_prefix_content;
        struct fman_port_params params;
        int err;

        memset(&params, 0, sizeof(params));
        memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));

        buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
        buf_prefix_content.pass_prs_result = true;
        buf_prefix_content.pass_hash_result = true;
        buf_prefix_content.pass_time_stamp = false;
        buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;

        params.specific_params.non_rx_params.err_fqid = errq->fqid;
        params.specific_params.non_rx_params.dflt_fqid = defq->fqid;

        err = fman_port_config(port, &params);
        if (err) {
                pr_err("%s: fman_port_config failed\n", __func__);
                return err;
        }

        err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
        if (err) {
                pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
                       __func__);
                return err;
        }

        err = fman_port_init(port);
        if (err)
                pr_err("%s: fm_port_init failed\n", __func__);

        return err;
}

static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
                                 size_t count, struct dpaa_fq *errq,
                                 struct dpaa_fq *defq, struct dpaa_fq *pcdq,
                                 struct dpaa_buffer_layout *buf_layout)
{
        struct fman_buffer_prefix_content buf_prefix_content;
        struct fman_port_rx_params *rx_p;
        struct fman_port_params params;
        int i, err;

        memset(&params, 0, sizeof(params));
        memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));

        buf_prefix_content.priv_data_size = buf_layout->priv_data_size;
        buf_prefix_content.pass_prs_result = true;
        buf_prefix_content.pass_hash_result = true;
        buf_prefix_content.pass_time_stamp = false;
        buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;

        rx_p = &params.specific_params.rx_params;
        rx_p->err_fqid = errq->fqid;
        rx_p->dflt_fqid = defq->fqid;
        if (pcdq) {
                rx_p->pcd_base_fqid = pcdq->fqid;
                rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
        }

        count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
        rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
        for (i = 0; i < count; i++) {
                rx_p->ext_buf_pools.ext_buf_pool[i].id = bps[i]->bpid;
                rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
        }

        err = fman_port_config(port, &params);
        if (err) {
                pr_err("%s: fman_port_config failed\n", __func__);
                return err;
        }

        err = fman_port_cfg_buf_prefix_content(port, &buf_prefix_content);
        if (err) {
                pr_err("%s: fman_port_cfg_buf_prefix_content failed\n",
                       __func__);
                return err;
        }

        err = fman_port_init(port);
        if (err)
                pr_err("%s: fm_port_init failed\n", __func__);

        return err;
}

static int dpaa_eth_init_ports(struct mac_device *mac_dev,
                               struct dpaa_bp **bps, size_t count,
                               struct fm_port_fqs *port_fqs,
                               struct dpaa_buffer_layout *buf_layout,
                               struct device *dev)
{
        struct fman_port *rxport = mac_dev->port[RX];
        struct fman_port *txport = mac_dev->port[TX];
        int err;

        err = dpaa_eth_init_tx_port(txport, port_fqs->tx_errq,
                                    port_fqs->tx_defq, &buf_layout[TX]);
        if (err)
                return err;

        err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
                                    port_fqs->rx_defq, port_fqs->rx_pcdq,
                                    &buf_layout[RX]);

        return err;
}

static int dpaa_bman_release(const struct dpaa_bp *dpaa_bp,
                             struct bm_buffer *bmb, int cnt)
{
        int err;

        err = bman_release(dpaa_bp->pool, bmb, cnt);
        /* Should never occur, address anyway to avoid leaking the buffers */
        if (unlikely(WARN_ON(err)) && dpaa_bp->free_buf_cb)
                while (cnt-- > 0)
                        dpaa_bp->free_buf_cb(dpaa_bp, &bmb[cnt]);

        return cnt;
}

static void dpaa_release_sgt_members(struct qm_sg_entry *sgt)
{
        struct bm_buffer bmb[DPAA_BUFF_RELEASE_MAX];
        struct dpaa_bp *dpaa_bp;
        int i = 0, j;

        memset(bmb, 0, sizeof(bmb));

        do {
                dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
                if (!dpaa_bp)
                        return;

                j = 0;
                do {
                        WARN_ON(qm_sg_entry_is_ext(&sgt[i]));

                        bm_buffer_set64(&bmb[j], qm_sg_entry_get64(&sgt[i]));

                        j++; i++;
                } while (j < ARRAY_SIZE(bmb) &&
                         !qm_sg_entry_is_final(&sgt[i - 1]) &&
                         sgt[i - 1].bpid == sgt[i].bpid);

                dpaa_bman_release(dpaa_bp, bmb, j);
        } while (!qm_sg_entry_is_final(&sgt[i - 1]));
}

static void dpaa_fd_release(const struct net_device *net_dev,
                            const struct qm_fd *fd)
{
        struct qm_sg_entry *sgt;
        struct dpaa_bp *dpaa_bp;
        struct bm_buffer bmb;
        dma_addr_t addr;
        void *vaddr;

        bmb.data = 0;
        bm_buffer_set64(&bmb, qm_fd_addr(fd));

        dpaa_bp = dpaa_bpid2pool(fd->bpid);
        if (!dpaa_bp)
                return;

        if (qm_fd_get_format(fd) == qm_fd_sg) {
                vaddr = phys_to_virt(qm_fd_addr(fd));
                sgt = vaddr + qm_fd_get_offset(fd);

                dma_unmap_single(dpaa_bp->dev, qm_fd_addr(fd), dpaa_bp->size,
                                 DMA_FROM_DEVICE);

                dpaa_release_sgt_members(sgt);

                addr = dma_map_single(dpaa_bp->dev, vaddr, dpaa_bp->size,
                                      DMA_FROM_DEVICE);
                if (dma_mapping_error(dpaa_bp->dev, addr)) {
                        dev_err(dpaa_bp->dev, "DMA mapping failed");
                        return;
                }
                bm_buffer_set64(&bmb, addr);
        }

        dpaa_bman_release(dpaa_bp, &bmb, 1);
}

static void count_ern(struct dpaa_percpu_priv *percpu_priv,
                      const union qm_mr_entry *msg)
{
        switch (msg->ern.rc & QM_MR_RC_MASK) {
        case QM_MR_RC_CGR_TAILDROP:
                percpu_priv->ern_cnt.cg_tdrop++;
                break;
        case QM_MR_RC_WRED:
                percpu_priv->ern_cnt.wred++;
                break;
        case QM_MR_RC_ERROR:
                percpu_priv->ern_cnt.err_cond++;
                break;
        case QM_MR_RC_ORPWINDOW_EARLY:
                percpu_priv->ern_cnt.early_window++;
                break;
        case QM_MR_RC_ORPWINDOW_LATE:
                percpu_priv->ern_cnt.late_window++;
                break;
        case QM_MR_RC_FQ_TAILDROP:
                percpu_priv->ern_cnt.fq_tdrop++;
                break;
        case QM_MR_RC_ORPWINDOW_RETIRED:
                percpu_priv->ern_cnt.fq_retired++;
                break;
        case QM_MR_RC_ORP_ZERO:
                percpu_priv->ern_cnt.orp_zero++;
                break;
        }
}

/* Turn on HW checksum computation for this outgoing frame.
 * If the current protocol is not something we support in this regard
 * (or if the stack has already computed the SW checksum), we do nothing.
 *
 * Returns 0 if all goes well (or HW csum doesn't apply), and a negative value
 * otherwise.
 *
 * Note that this function may modify the fd->cmd field and the skb data buffer
 * (the Parse Results area).
 */
static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
                               struct sk_buff *skb,
                               struct qm_fd *fd,
                               char *parse_results)
{
        struct fman_prs_result *parse_result;
        u16 ethertype = ntohs(skb->protocol);
        struct ipv6hdr *ipv6h = NULL;
        struct iphdr *iph;
        int retval = 0;
        u8 l4_proto;

        if (skb->ip_summed != CHECKSUM_PARTIAL)
                return 0;

        /* Note: L3 csum seems to be already computed in sw, but we can't choose
         * L4 alone from the FM configuration anyway.
         */

        /* Fill in some fields of the Parse Results array, so the FMan
         * can find them as if they came from the FMan Parser.
         */
        parse_result = (struct fman_prs_result *)parse_results;

        /* If we're dealing with VLAN, get the real Ethernet type */
        if (ethertype == ETH_P_8021Q) {
                /* We can't always assume the MAC header is set correctly
                 * by the stack, so reset to beginning of skb->data
                 */
                skb_reset_mac_header(skb);
                ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
        }

        /* Fill in the relevant L3 parse result fields
         * and read the L4 protocol type
         */
        switch (ethertype) {
        case ETH_P_IP:
                parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV4);
                iph = ip_hdr(skb);
                WARN_ON(!iph);
                l4_proto = iph->protocol;
                break;
        case ETH_P_IPV6:
                parse_result->l3r = cpu_to_be16(FM_L3_PARSE_RESULT_IPV6);
                ipv6h = ipv6_hdr(skb);
                WARN_ON(!ipv6h);
                l4_proto = ipv6h->nexthdr;
                break;
        default:
                /* We shouldn't even be here */
                if (net_ratelimit())
                        netif_alert(priv, tx_err, priv->net_dev,
                                    "Can't compute HW csum for L3 proto 0x%x\n",
                                    ntohs(skb->protocol));
                retval = -EIO;
                goto return_error;
        }

        /* Fill in the relevant L4 parse result fields */
        switch (l4_proto) {
        case IPPROTO_UDP:
                parse_result->l4r = FM_L4_PARSE_RESULT_UDP;
                break;
        case IPPROTO_TCP:
                parse_result->l4r = FM_L4_PARSE_RESULT_TCP;
                break;
        default:
                if (net_ratelimit())
                        netif_alert(priv, tx_err, priv->net_dev,
                                    "Can't compute HW csum for L4 proto 0x%x\n",
                                    l4_proto);
                retval = -EIO;
                goto return_error;
        }

        /* At index 0 is IPOffset_1 as defined in the Parse Results */
        parse_result->ip_off[0] = (u8)skb_network_offset(skb);
        parse_result->l4_off = (u8)skb_transport_offset(skb);

        /* Enable L3 (and L4, if TCP or UDP) HW checksum. */
        fd->cmd |= cpu_to_be32(FM_FD_CMD_RPD | FM_FD_CMD_DTC);

        /* On P1023 and similar platforms fd->cmd interpretation could
         * be disabled by setting CONTEXT_A bit ICMD; currently this bit
         * is not set so we do not need to check; in the future, if/when
         * using context_a we need to check this bit
         */

return_error:
        return retval;
}

static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
{
        struct device *dev = dpaa_bp->dev;
        struct bm_buffer bmb[8];
        dma_addr_t addr;
        void *new_buf;
        u8 i;

        for (i = 0; i < 8; i++) {
                new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
                if (unlikely(!new_buf)) {
                        dev_err(dev, "netdev_alloc_frag() failed, size %zu\n",
                                dpaa_bp->raw_size);
                        goto release_previous_buffs;
                }
                new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);

                addr = dma_map_single(dev, new_buf,
                                      dpaa_bp->size, DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(dev, addr))) {
                        dev_err(dpaa_bp->dev, "DMA map failed");
                        goto release_previous_buffs;
                }

                bmb[i].data = 0;
                bm_buffer_set64(&bmb[i], addr);
        }

release_bufs:
        return dpaa_bman_release(dpaa_bp, bmb, i);

release_previous_buffs:
        WARN_ONCE(1, "dpaa_eth: failed to add buffers on Rx\n");

        bm_buffer_set64(&bmb[i], 0);
        /* Avoid releasing a completely null buffer; bman_release() requires
         * at least one buffer.
         */
        if (likely(i))
                goto release_bufs;

        return 0;
}

static int dpaa_bp_seed(struct dpaa_bp *dpaa_bp)
{
        int i;

        /* Give each CPU an allotment of "config_count" buffers */
        for_each_possible_cpu(i) {
                int *count_ptr = per_cpu_ptr(dpaa_bp->percpu_count, i);
                int j;

                /* Although we access another CPU's counters here
                 * we do it at boot time so it is safe
                 */
                for (j = 0; j < dpaa_bp->config_count; j += 8)
                        *count_ptr += dpaa_bp_add_8_bufs(dpaa_bp);
        }
        return 0;
}
1537 | ||
1538 | /* Add buffers/(pages) for Rx processing whenever bpool count falls below | |
1539 | * REFILL_THRESHOLD. | |
1540 | */ | |
1541 | static int dpaa_eth_refill_bpool(struct dpaa_bp *dpaa_bp, int *countptr) | |
1542 | { | |
1543 | int count = *countptr; | |
1544 | int new_bufs; | |
1545 | ||
1546 | if (unlikely(count < FSL_DPAA_ETH_REFILL_THRESHOLD)) { | |
1547 | do { | |
1548 | new_bufs = dpaa_bp_add_8_bufs(dpaa_bp); | |
1549 | if (unlikely(!new_bufs)) { | |
1550 | /* Avoid looping forever if we've temporarily | |
1551 | * run out of memory. We'll try again at the | |
1552 | * next NAPI cycle. | |
1553 | */ | |
1554 | break; | |
1555 | } | |
1556 | count += new_bufs; | |
1557 | } while (count < FSL_DPAA_ETH_MAX_BUF_COUNT); | |
1558 | ||
1559 | *countptr = count; | |
1560 | if (unlikely(count < FSL_DPAA_ETH_MAX_BUF_COUNT)) | |
1561 | return -ENOMEM; | |
1562 | } | |
1563 | ||
1564 | return 0; | |
1565 | } | |
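/* Illustrative numbers (treat the constants as hypothetical; the real
 * values live in dpaa_eth.h): with FSL_DPAA_ETH_REFILL_THRESHOLD = 80
 * and FSL_DPAA_ETH_MAX_BUF_COUNT = 128, a per-CPU count that has dropped
 * to 64 triggers a refill of 64 buffers, performed as eight batches of
 * eight via dpaa_bp_add_8_bufs().
 */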
1566 | ||
1567 | static int dpaa_eth_refill_bpools(struct dpaa_priv *priv) | |
1568 | { | |
1569 | struct dpaa_bp *dpaa_bp; | |
1570 | int *countptr; | |
1571 | int res, i; | |
1572 | ||
1573 | for (i = 0; i < DPAA_BPS_NUM; i++) { | |
1574 | dpaa_bp = priv->dpaa_bps[i]; | |
1575 | if (!dpaa_bp) | |
1576 | return -EINVAL; | |
1577 | countptr = this_cpu_ptr(dpaa_bp->percpu_count); | |
1578 | res = dpaa_eth_refill_bpool(dpaa_bp, countptr); | |
1579 | if (res) | |
1580 | return res; | |
1581 | } | |
1582 | return 0; | |
1583 | } | |
1584 | ||
1585 | /* Cleanup function for outgoing frame descriptors that were built on the | |
1586 | * Tx path, either contiguous frames or scatter/gather ones. | |
1587 | * Skb freeing is not handled here. | |
1588 | * | |
1589 | * This function may be called on error paths in the Tx function, so guard | |
1590 | * against cases where not all relevant fd fields were filled in. | |
1591 | * | |
1592 | * Return the skb backpointer, since for S/G frames the buffer containing it | |
1593 | * gets freed here. | |
1594 | */ | |
1595 | static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, | |
1596 | const struct qm_fd *fd) | |
1597 | { | |
1598 | const enum dma_data_direction dma_dir = DMA_TO_DEVICE; | |
1599 | struct device *dev = priv->net_dev->dev.parent; | |
1600 | dma_addr_t addr = qm_fd_addr(fd); | |
1601 | const struct qm_sg_entry *sgt; | |
1602 | struct sk_buff **skbh, *skb; | |
1603 | int nr_frags, i; | |
1604 | ||
1605 | skbh = (struct sk_buff **)phys_to_virt(addr); | |
1606 | skb = *skbh; | |
1607 | ||
1608 | if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { | |
1609 | nr_frags = skb_shinfo(skb)->nr_frags; | |
1610 | dma_unmap_single(dev, addr, qm_fd_get_offset(fd) + | |
1611 | sizeof(struct qm_sg_entry) * (1 + nr_frags), | |
1612 | dma_dir); | |
1613 | ||
1614 | /* The sgt buffer has been allocated with netdev_alloc_frag(), | |
1615 | * it's from lowmem. | |
1616 | */ | |
1617 | sgt = phys_to_virt(addr + qm_fd_get_offset(fd)); | |
1618 | ||
1619 | /* sgt[0] is from lowmem, was dma_map_single()-ed */ | |
1620 | dma_unmap_single(dev, qm_sg_addr(&sgt[0]), | |
1621 | qm_sg_entry_get_len(&sgt[0]), dma_dir); | |
1622 | ||
1623 | /* remaining pages were mapped with skb_frag_dma_map() */ | |
1624 | for (i = 1; i < nr_frags; i++) { | |
1625 | WARN_ON(qm_sg_entry_is_ext(&sgt[i])); | |
1626 | ||
1627 | dma_unmap_page(dev, qm_sg_addr(&sgt[i]), | |
1628 | qm_sg_entry_get_len(&sgt[i]), dma_dir); | |
1629 | } | |
1630 | ||
1631 | /* Free the page frag that we allocated on Tx */ | |
1632 | skb_free_frag(phys_to_virt(addr)); | |
1633 | } else { | |
1634 | dma_unmap_single(dev, addr, | |
1635 | skb_tail_pointer(skb) - (u8 *)skbh, dma_dir); | |
1636 | } | |
1637 | ||
1638 | return skb; | |
1639 | } | |
1640 | ||
5accb282 MB |
1641 | static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd) |
1642 | { | |
1643 | /* The parser has run and performed L4 checksum validation. | |
1644 | * We know there were no parser errors (and implicitly no | |
1645 | * L4 csum error), otherwise we wouldn't be here. | |
1646 | */ | |
1647 | if ((priv->net_dev->features & NETIF_F_RXCSUM) && | |
1648 | (be32_to_cpu(fd->status) & FM_FD_STAT_L4CV)) | |
1649 | return CHECKSUM_UNNECESSARY; | |
1650 | ||
1651 | /* We're here because either the parser didn't run or the L4 checksum | |
1652 | * was not verified. This may include the case of a UDP frame with | |
1653 | * checksum zero, or an L4 proto other than TCP/UDP. | |
1654 | */ | |
1655 | return CHECKSUM_NONE; | |
1656 | } | |
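/* Decision table for the above:
 *
 *   NETIF_F_RXCSUM   FM_FD_STAT_L4CV   skb->ip_summed
 *   --------------   ---------------   --------------------
 *   set              set               CHECKSUM_UNNECESSARY
 *   any other combination              CHECKSUM_NONE
 */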
1657 | ||
9ad1a374 MB |
1658 | /* Build a linear skb around the received buffer. |
1659 | * We are guaranteed there is enough room at the end of the data buffer to | |
1660 | * accommodate the shared info area of the skb. | |
1661 | */ | |
1662 | static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv, | |
1663 | const struct qm_fd *fd) | |
1664 | { | |
1665 | ssize_t fd_off = qm_fd_get_offset(fd); | |
1666 | dma_addr_t addr = qm_fd_addr(fd); | |
1667 | struct dpaa_bp *dpaa_bp; | |
1668 | struct sk_buff *skb; | |
1669 | void *vaddr; | |
1670 | ||
1671 | vaddr = phys_to_virt(addr); | |
1672 | WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); | |
1673 | ||
1674 | dpaa_bp = dpaa_bpid2pool(fd->bpid); | |
1675 | if (!dpaa_bp) | |
1676 | goto free_buffer; | |
1677 | ||
1678 | skb = build_skb(vaddr, dpaa_bp->size + | |
1679 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); | |
1680 | if (unlikely(!skb)) { | |
1681 | WARN_ONCE(1, "Build skb failure on Rx\n"); | |
1682 | goto free_buffer; | |
1683 | } | |
1684 | WARN_ON(fd_off != priv->rx_headroom); | |
1685 | skb_reserve(skb, fd_off); | |
1686 | skb_put(skb, qm_fd_get_length(fd)); | |
1687 | ||
5accb282 | 1688 | skb->ip_summed = rx_csum_offload(priv, fd); |
9ad1a374 MB |
1689 | |
1690 | return skb; | |
1691 | ||
1692 | free_buffer: | |
1693 | skb_free_frag(vaddr); | |
1694 | return NULL; | |
1695 | } | |
1696 | ||
1697 | /* Build an skb with the data of the first S/G entry in the linear portion and | |
1698 | * the rest of the frame as skb fragments. | |
1699 | * | |
1700 | * The page fragment holding the S/G Table is recycled here. | |
1701 | */ | |
1702 | static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv, | |
1703 | const struct qm_fd *fd) | |
1704 | { | |
1705 | ssize_t fd_off = qm_fd_get_offset(fd); | |
1706 | dma_addr_t addr = qm_fd_addr(fd); | |
1707 | const struct qm_sg_entry *sgt; | |
1708 | struct page *page, *head_page; | |
1709 | struct dpaa_bp *dpaa_bp; | |
1710 | void *vaddr, *sg_vaddr; | |
1711 | int frag_off, frag_len; | |
1712 | struct sk_buff *skb; | |
1713 | dma_addr_t sg_addr; | |
1714 | int page_offset; | |
1715 | unsigned int sz; | |
1716 | int *count_ptr; | |
1717 | int i; | |
1718 | ||
1719 | vaddr = phys_to_virt(addr); | |
1720 | WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); | |
1721 | ||
1722 | /* Iterate through the SGT entries and add data buffers to the skb */ | |
1723 | sgt = vaddr + fd_off; | |
1724 | for (i = 0; i < DPAA_SGT_MAX_ENTRIES; i++) { | |
1725 | /* Extension bit is not supported */ | |
1726 | WARN_ON(qm_sg_entry_is_ext(&sgt[i])); | |
1727 | ||
1728 | sg_addr = qm_sg_addr(&sgt[i]); | |
1729 | sg_vaddr = phys_to_virt(sg_addr); | |
1730 | WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr, | |
1731 | SMP_CACHE_BYTES)); | |
1732 | ||
1733 | /* We may use multiple Rx pools */ | |
1734 | dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); | |
1735 | if (!dpaa_bp) | |
1736 | goto free_buffers; | |
1737 | ||
1738 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); | |
1739 | dma_unmap_single(dpaa_bp->dev, sg_addr, dpaa_bp->size, | |
1740 | DMA_FROM_DEVICE); | |
1741 | if (i == 0) { | |
1742 | sz = dpaa_bp->size + | |
1743 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | |
1744 | skb = build_skb(sg_vaddr, sz); | |
1745 | if (WARN_ON(unlikely(!skb))) | |
1746 | goto free_buffers; | |
1747 | ||
5accb282 | 1748 | skb->ip_summed = rx_csum_offload(priv, fd); |
9ad1a374 MB |
1749 | |
1750 | /* Make sure forwarded skbs will have enough space | |
1751 | * on Tx, if extra headers are added. | |
1752 | */ | |
1753 | WARN_ON(fd_off != priv->rx_headroom); | |
1754 | skb_reserve(skb, fd_off); | |
1755 | skb_put(skb, qm_sg_entry_get_len(&sgt[i])); | |
1756 | } else { | |
1757 | /* Not the first S/G entry; all data from the buffer will | |
1758 | * be added as an skb fragment; the fragment index is offset | |
1759 | * by one, since the first S/G entry was incorporated into | |
1760 | * the linear part of the skb. | |
1761 | * | |
1762 | * Caution: 'page' may be a tail page. | |
1763 | */ | |
1764 | page = virt_to_page(sg_vaddr); | |
1765 | head_page = virt_to_head_page(sg_vaddr); | |
1766 | ||
1767 | /* Compute offset in (possibly tail) page */ | |
1768 | page_offset = ((unsigned long)sg_vaddr & | |
1769 | (PAGE_SIZE - 1)) + | |
1770 | (page_address(page) - page_address(head_page)); | |
1771 | /* page_offset only refers to the beginning of sgt[i]; | |
1772 | * but the buffer itself may have an internal offset. | |
1773 | */ | |
1774 | frag_off = qm_sg_entry_get_off(&sgt[i]) + page_offset; | |
1775 | frag_len = qm_sg_entry_get_len(&sgt[i]); | |
1776 | /* skb_add_rx_frag() does no checking on the page; if | |
1777 | * we pass it a tail page, we'll end up with | |
1778 | * bad page accounting and eventually with segfaults. | |
1779 | */ | |
1780 | skb_add_rx_frag(skb, i - 1, head_page, frag_off, | |
1781 | frag_len, dpaa_bp->size); | |
1782 | } | |
1783 | /* Update the pool count for the current {cpu x bpool} */ | |
1784 | (*count_ptr)--; | |
1785 | ||
1786 | if (qm_sg_entry_is_final(&sgt[i])) | |
1787 | break; | |
1788 | } | |
1789 | WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n"); | |
1790 | ||
1791 | /* free the SG table buffer */ | |
1792 | skb_free_frag(vaddr); | |
1793 | ||
1794 | return skb; | |
1795 | ||
1796 | free_buffers: | |
1797 | /* compensate sw bpool counter changes */ | |
785f3577 | 1798 | for (i--; i >= 0; i--) { |
9ad1a374 MB |
1799 | dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); |
1800 | if (dpaa_bp) { | |
1801 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); | |
1802 | (*count_ptr)++; | |
1803 | } | |
1804 | } | |
1805 | /* free all the SG entries */ | |
1806 | for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) { | |
1807 | sg_addr = qm_sg_addr(&sgt[i]); | |
1808 | sg_vaddr = phys_to_virt(sg_addr); | |
1809 | skb_free_frag(sg_vaddr); | |
1810 | dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); | |
1811 | if (dpaa_bp) { | |
1812 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); | |
1813 | (*count_ptr)--; | |
1814 | } | |
1815 | ||
1816 | if (qm_sg_entry_is_final(&sgt[i])) | |
1817 | break; | |
1818 | } | |
1819 | /* free the SGT fragment */ | |
1820 | skb_free_frag(vaddr); | |
1821 | ||
1822 | return NULL; | |
1823 | } | |
1824 | ||
1825 | static int skb_to_contig_fd(struct dpaa_priv *priv, | |
1826 | struct sk_buff *skb, struct qm_fd *fd, | |
1827 | int *offset) | |
1828 | { | |
1829 | struct net_device *net_dev = priv->net_dev; | |
1830 | struct device *dev = net_dev->dev.parent; | |
1831 | enum dma_data_direction dma_dir; | |
1832 | unsigned char *buffer_start; | |
1833 | struct sk_buff **skbh; | |
1834 | dma_addr_t addr; | |
1835 | int err; | |
1836 | ||
1837 | /* We are guaranteed to have at least tx_headroom bytes | |
1838 | * available, so just use that for offset. | |
1839 | */ | |
1840 | fd->bpid = FSL_DPAA_BPID_INV; | |
1841 | buffer_start = skb->data - priv->tx_headroom; | |
1842 | dma_dir = DMA_TO_DEVICE; | |
1843 | ||
1844 | skbh = (struct sk_buff **)buffer_start; | |
1845 | *skbh = skb; | |
1846 | ||
1847 | /* Enable L3/L4 hardware checksum computation. | |
1848 | * | |
1849 | * We must do this before dma_map_single(DMA_TO_DEVICE), because we may | |
1850 | * need to write into the skb. | |
1851 | */ | |
1852 | err = dpaa_enable_tx_csum(priv, skb, fd, | |
1853 | ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE); | |
1854 | if (unlikely(err < 0)) { | |
1855 | if (net_ratelimit()) | |
1856 | netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", | |
1857 | err); | |
1858 | return err; | |
1859 | } | |
1860 | ||
1861 | /* Fill in the rest of the FD fields */ | |
1862 | qm_fd_set_contig(fd, priv->tx_headroom, skb->len); | |
7d6f8dc0 | 1863 | fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); |
9ad1a374 MB |
1864 | |
1865 | /* Map the entire buffer size that may be seen by FMan, but no more */ | |
1866 | addr = dma_map_single(dev, skbh, | |
1867 | skb_tail_pointer(skb) - buffer_start, dma_dir); | |
1868 | if (unlikely(dma_mapping_error(dev, addr))) { | |
1869 | if (net_ratelimit()) | |
1870 | netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n"); | |
1871 | return -EINVAL; | |
1872 | } | |
1873 | qm_fd_addr_set64(fd, addr); | |
1874 | ||
1875 | return 0; | |
1876 | } | |
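/* Layout sketch for the contiguous Tx buffer built above (proportions
 * indicative only):
 *
 *   buffer_start (== skbh)                       skb->data
 *   v                                            v
 *   +-----------------+--------------------------+------------------+
 *   | skb backpointer | priv data, parse results | frame data       |
 *   | (one pointer)   | (rest of tx_headroom)    | (skb->len bytes) |
 *   +-----------------+--------------------------+------------------+
 *   |<------------- priv->tx_headroom ---------->|
 *
 * The span from buffer_start to skb_tail_pointer() is DMA-mapped and the
 * FD offset is set to tx_headroom, so FMan starts reading at skb->data.
 */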
1877 | ||
1878 | static int skb_to_sg_fd(struct dpaa_priv *priv, | |
1879 | struct sk_buff *skb, struct qm_fd *fd) | |
1880 | { | |
1881 | const enum dma_data_direction dma_dir = DMA_TO_DEVICE; | |
1882 | const int nr_frags = skb_shinfo(skb)->nr_frags; | |
1883 | struct net_device *net_dev = priv->net_dev; | |
1884 | struct device *dev = net_dev->dev.parent; | |
1885 | struct qm_sg_entry *sgt; | |
1886 | struct sk_buff **skbh; | |
1887 | int i, j, err, sz; | |
1888 | void *buffer_start; | |
1889 | skb_frag_t *frag; | |
1890 | dma_addr_t addr; | |
1891 | size_t frag_len; | |
1892 | void *sgt_buf; | |
1893 | ||
1894 | /* get a page frag to store the SGTable */ | |
1895 | sz = SKB_DATA_ALIGN(priv->tx_headroom + | |
1896 | sizeof(struct qm_sg_entry) * (1 + nr_frags)); | |
1897 | sgt_buf = netdev_alloc_frag(sz); | |
1898 | if (unlikely(!sgt_buf)) { | |
1899 | netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n", | |
1900 | sz); | |
1901 | return -ENOMEM; | |
1902 | } | |
1903 | ||
1904 | /* Enable L3/L4 hardware checksum computation. | |
1905 | * | |
1906 | * We must do this before dma_map_single(DMA_TO_DEVICE), because we may | |
1907 | * need to write into the skb. | |
1908 | */ | |
1909 | err = dpaa_enable_tx_csum(priv, skb, fd, | |
1910 | sgt_buf + DPAA_TX_PRIV_DATA_SIZE); | |
1911 | if (unlikely(err < 0)) { | |
1912 | if (net_ratelimit()) | |
1913 | netif_err(priv, tx_err, net_dev, "HW csum error: %d\n", | |
1914 | err); | |
1915 | goto csum_failed; | |
1916 | } | |
1917 | ||
1918 | sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom); | |
1919 | qm_sg_entry_set_len(&sgt[0], skb_headlen(skb)); | |
1920 | sgt[0].bpid = FSL_DPAA_BPID_INV; | |
1921 | sgt[0].offset = 0; | |
1922 | addr = dma_map_single(dev, skb->data, | |
1923 | skb_headlen(skb), dma_dir); | |
1924 | if (unlikely(dma_mapping_error(dev, addr))) { | |
1925 | dev_err(dev, "DMA mapping failed\n"); | |
1926 | err = -EINVAL; | |
1927 | goto sg0_map_failed; | |
1928 | } | |
1929 | qm_sg_entry_set64(&sgt[0], addr); | |
1930 | ||
1931 | /* populate the rest of SGT entries */ | |
1932 | frag = &skb_shinfo(skb)->frags[0]; | |
1933 | frag_len = frag->size; | |
1934 | for (i = 1; i <= nr_frags; i++, frag++) { | |
1935 | WARN_ON(!skb_frag_page(frag)); | |
1936 | addr = skb_frag_dma_map(dev, frag, 0, | |
1937 | frag_len, dma_dir); | |
1938 | if (unlikely(dma_mapping_error(dev, addr))) { | |
1939 | dev_err(dev, "DMA mapping failed\n"); | |
1940 | err = -EINVAL; | |
1941 | goto sg_map_failed; | |
1942 | } | |
1943 | ||
1944 | qm_sg_entry_set_len(&sgt[i], frag_len); | |
1945 | sgt[i].bpid = FSL_DPAA_BPID_INV; | |
1946 | sgt[i].offset = 0; | |
1947 | ||
1948 | /* keep the offset in the address */ | |
1949 | qm_sg_entry_set64(&sgt[i], addr); | |
1950 | frag_len = frag->size; | |
1951 | } | |
1952 | qm_sg_entry_set_f(&sgt[i - 1], frag_len); | |
1953 | ||
1954 | qm_fd_set_sg(fd, priv->tx_headroom, skb->len); | |
1955 | ||
1956 | /* DMA map the SGT page */ | |
1957 | buffer_start = (void *)sgt - priv->tx_headroom; | |
1958 | skbh = (struct sk_buff **)buffer_start; | |
1959 | *skbh = skb; | |
1960 | ||
1961 | addr = dma_map_single(dev, buffer_start, priv->tx_headroom + | |
1962 | sizeof(struct qm_sg_entry) * (1 + nr_frags), | |
1963 | dma_dir); | |
1964 | if (unlikely(dma_mapping_error(dev, addr))) { | |
1965 | dev_err(dev, "DMA mapping failed\n"); | |
1966 | err = -EINVAL; | |
1967 | goto sgt_map_failed; | |
1968 | } | |
1969 | ||
1970 | fd->bpid = FSL_DPAA_BPID_INV; | |
7d6f8dc0 | 1971 | fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO); |
9ad1a374 MB |
1972 | qm_fd_addr_set64(fd, addr); |
1973 | ||
1974 | return 0; | |
1975 | ||
1976 | sgt_map_failed: | |
1977 | sg_map_failed: | |
1978 | for (j = 0; j < i; j++) | |
1979 | dma_unmap_page(dev, qm_sg_addr(&sgt[j]), | |
1980 | qm_sg_entry_get_len(&sgt[j]), dma_dir); | |
1981 | sg0_map_failed: | |
1982 | csum_failed: | |
1983 | skb_free_frag(sgt_buf); | |
1984 | ||
1985 | return err; | |
1986 | } | |
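/* Layout sketch for the S/G frame built above (indicative only):
 *
 *   sgt_buf                        sgt (== sgt_buf + tx_headroom)
 *   v                              v
 *   +------------------------------+--------+--------+-----+--------+
 *   | backpointer, priv data,      | sgt[0] | sgt[1] | ... | sgt[n] |
 *   | csum parse results           | linear | frag 0 |     | final  |
 *   +------------------------------+--------+--------+-----+--------+
 *
 * sgt[0] points at the skb linear data (mapped with dma_map_single()),
 * the remaining entries at the page fragments (skb_frag_dma_map()), and
 * the final bit is set on the last entry (n == nr_frags). The FD offset
 * is tx_headroom, so FMan finds the table right after the headroom.
 */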
1987 | ||
1988 | static inline int dpaa_xmit(struct dpaa_priv *priv, | |
1989 | struct rtnl_link_stats64 *percpu_stats, | |
1990 | int queue, | |
1991 | struct qm_fd *fd) | |
1992 | { | |
1993 | struct qman_fq *egress_fq; | |
1994 | int err, i; | |
1995 | ||
1996 | egress_fq = priv->egress_fqs[queue]; | |
1997 | if (fd->bpid == FSL_DPAA_BPID_INV) | |
7d6f8dc0 | 1998 | fd->cmd |= cpu_to_be32(qman_fq_fqid(priv->conf_fqs[queue])); |
9ad1a374 | 1999 | |
eb11ddf3 MB |
2000 | /* Trace this Tx fd */ |
2001 | trace_dpaa_tx_fd(priv->net_dev, egress_fq, fd); | |
2002 | ||
9ad1a374 MB |
2003 | for (i = 0; i < DPAA_ENQUEUE_RETRIES; i++) { |
2004 | err = qman_enqueue(egress_fq, fd); | |
2005 | if (err != -EBUSY) | |
2006 | break; | |
2007 | } | |
2008 | ||
2009 | if (unlikely(err < 0)) { | |
2010 | percpu_stats->tx_errors++; | |
2011 | percpu_stats->tx_fifo_errors++; | |
2012 | return err; | |
2013 | } | |
2014 | ||
2015 | percpu_stats->tx_packets++; | |
2016 | percpu_stats->tx_bytes += qm_fd_get_length(fd); | |
2017 | ||
2018 | return 0; | |
2019 | } | |
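/* Enqueue notes: qman_enqueue() returns -EBUSY while the portal's
 * enqueue ring is full, so the loop above retries up to
 * DPAA_ENQUEUE_RETRIES times; a frame that still cannot be enqueued is
 * accounted as a tx_fifo_error and the error is propagated to the
 * caller, which cleans up the FD.
 */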
2020 | ||
2021 | static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | |
2022 | { | |
2023 | const int queue_mapping = skb_get_queue_mapping(skb); | |
2024 | bool nonlinear = skb_is_nonlinear(skb); | |
2025 | struct rtnl_link_stats64 *percpu_stats; | |
2026 | struct dpaa_percpu_priv *percpu_priv; | |
2027 | struct dpaa_priv *priv; | |
2028 | struct qm_fd fd; | |
2029 | int offset = 0; | |
2030 | int err = 0; | |
2031 | ||
2032 | priv = netdev_priv(net_dev); | |
2033 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | |
2034 | percpu_stats = &percpu_priv->stats; | |
2035 | ||
2036 | qm_fd_clear_fd(&fd); | |
2037 | ||
2038 | if (!nonlinear) { | |
2039 | /* We're going to store the skb backpointer at the beginning | |
2040 | * of the data buffer, so we need a privately owned skb. | |
2041 | * | |
2042 | * We've made sure the skb is not shared (via dev->priv_flags); | |
2043 | * we still need to verify that the skb head is not cloned. | |
2044 | */ | |
2045 | if (skb_cow_head(skb, priv->tx_headroom)) | |
2046 | goto enomem; | |
2047 | ||
2048 | WARN_ON(skb_is_nonlinear(skb)); | |
2049 | } | |
2050 | ||
2051 | /* MAX_SKB_FRAGS is equal to or larger than DPAA_SGT_MAX_ENTRIES; | |
2052 | * make sure we don't feed FMan with more fragments than it supports. | |
2053 | */ | |
2054 | if (nonlinear && | |
2055 | likely(skb_shinfo(skb)->nr_frags < DPAA_SGT_MAX_ENTRIES)) { | |
2056 | /* Just create a S/G fd based on the skb */ | |
2057 | err = skb_to_sg_fd(priv, skb, &fd); | |
b0ce0d02 | 2058 | percpu_priv->tx_frag_skbuffs++; |
9ad1a374 MB |
2059 | } else { |
2060 | /* If the egress skb contains more fragments than we support | |
2061 | * we have no choice but to linearize it ourselves. | |
2062 | */ | |
2063 | if (unlikely(nonlinear) && __skb_linearize(skb)) | |
2064 | goto enomem; | |
2065 | ||
2066 | /* Finally, create a contig FD from this skb */ | |
2067 | err = skb_to_contig_fd(priv, skb, &fd, &offset); | |
2068 | } | |
2069 | if (unlikely(err < 0)) | |
2070 | goto skb_to_fd_failed; | |
2071 | ||
2072 | if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0)) | |
2073 | return NETDEV_TX_OK; | |
2074 | ||
2075 | dpaa_cleanup_tx_fd(priv, &fd); | |
2076 | skb_to_fd_failed: | |
2077 | enomem: | |
2078 | percpu_stats->tx_errors++; | |
2079 | dev_kfree_skb(skb); | |
2080 | return NETDEV_TX_OK; | |
2081 | } | |
2082 | ||
2083 | static void dpaa_rx_error(struct net_device *net_dev, | |
2084 | const struct dpaa_priv *priv, | |
2085 | struct dpaa_percpu_priv *percpu_priv, | |
2086 | const struct qm_fd *fd, | |
2087 | u32 fqid) | |
2088 | { | |
2089 | if (net_ratelimit()) | |
2090 | netif_err(priv, hw, net_dev, "Err FD status = 0x%08x\n", | |
7d6f8dc0 | 2091 | be32_to_cpu(fd->status) & FM_FD_STAT_RX_ERRORS); |
9ad1a374 MB |
2092 | |
2093 | percpu_priv->stats.rx_errors++; | |
2094 | ||
7d6f8dc0 | 2095 | if (be32_to_cpu(fd->status) & FM_FD_ERR_DMA) |
b0ce0d02 | 2096 | percpu_priv->rx_errors.dme++; |
7d6f8dc0 | 2097 | if (be32_to_cpu(fd->status) & FM_FD_ERR_PHYSICAL) |
b0ce0d02 | 2098 | percpu_priv->rx_errors.fpe++; |
7d6f8dc0 | 2099 | if (be32_to_cpu(fd->status) & FM_FD_ERR_SIZE) |
b0ce0d02 | 2100 | percpu_priv->rx_errors.fse++; |
7d6f8dc0 | 2101 | if (be32_to_cpu(fd->status) & FM_FD_ERR_PRS_HDR_ERR) |
b0ce0d02 MB |
2102 | percpu_priv->rx_errors.phe++; |
2103 | ||
9ad1a374 MB |
2104 | dpaa_fd_release(net_dev, fd); |
2105 | } | |
2106 | ||
2107 | static void dpaa_tx_error(struct net_device *net_dev, | |
2108 | const struct dpaa_priv *priv, | |
2109 | struct dpaa_percpu_priv *percpu_priv, | |
2110 | const struct qm_fd *fd, | |
2111 | u32 fqid) | |
2112 | { | |
2113 | struct sk_buff *skb; | |
2114 | ||
2115 | if (net_ratelimit()) | |
2116 | netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", | |
7d6f8dc0 | 2117 | be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS); |
9ad1a374 MB |
2118 | |
2119 | percpu_priv->stats.tx_errors++; | |
2120 | ||
2121 | skb = dpaa_cleanup_tx_fd(priv, fd); | |
2122 | dev_kfree_skb(skb); | |
2123 | } | |
2124 | ||
2125 | static int dpaa_eth_poll(struct napi_struct *napi, int budget) | |
2126 | { | |
2127 | struct dpaa_napi_portal *np = | |
2128 | container_of(napi, struct dpaa_napi_portal, napi); | |
2129 | ||
2130 | int cleaned = qman_p_poll_dqrr(np->p, budget); | |
2131 | ||
2132 | if (cleaned < budget) { | |
6ad20165 | 2133 | napi_complete_done(napi, cleaned); |
9ad1a374 MB |
2134 | qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); |
2135 | ||
2136 | } else if (np->down) { | |
2137 | qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); | |
2138 | } | |
2139 | ||
2140 | return cleaned; | |
2141 | } | |
2142 | ||
2143 | static void dpaa_tx_conf(struct net_device *net_dev, | |
2144 | const struct dpaa_priv *priv, | |
2145 | struct dpaa_percpu_priv *percpu_priv, | |
2146 | const struct qm_fd *fd, | |
2147 | u32 fqid) | |
2148 | { | |
2149 | struct sk_buff *skb; | |
2150 | ||
7d6f8dc0 | 2151 | if (unlikely(be32_to_cpu(fd->status) & FM_FD_STAT_TX_ERRORS)) { |
9ad1a374 MB |
2152 | if (net_ratelimit()) |
2153 | netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", | |
7d6f8dc0 CM |
2154 | be32_to_cpu(fd->status) & |
2155 | FM_FD_STAT_TX_ERRORS); | |
9ad1a374 MB |
2156 | |
2157 | percpu_priv->stats.tx_errors++; | |
2158 | } | |
2159 | ||
b0ce0d02 MB |
2160 | percpu_priv->tx_confirm++; |
2161 | ||
9ad1a374 MB |
2162 | skb = dpaa_cleanup_tx_fd(priv, fd); |
2163 | ||
2164 | consume_skb(skb); | |
2165 | } | |
2166 | ||
2167 | static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv, | |
2168 | struct qman_portal *portal) | |
2169 | { | |
2170 | if (unlikely(in_irq() || !in_serving_softirq())) { | |
2171 | /* Disable QMan IRQ and invoke NAPI */ | |
2172 | qman_p_irqsource_remove(portal, QM_PIRQ_DQRI); | |
2173 | ||
2174 | percpu_priv->np.p = portal; | |
2175 | napi_schedule(&percpu_priv->np.napi); | |
b0ce0d02 | 2176 | percpu_priv->in_interrupt++; |
9ad1a374 MB |
2177 | return 1; |
2178 | } | |
2179 | return 0; | |
2180 | } | |
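/* The interrupt/NAPI handshake used above: when a dequeue callback runs
 * in hard-IRQ context, the DQRI interrupt source is removed from the
 * portal and NAPI is scheduled instead; dpaa_eth_poll() then drains the
 * portal's DQRR via qman_p_poll_dqrr() and re-adds QM_PIRQ_DQRI once it
 * polls fewer frames than its budget (or when the interface is down).
 */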
2181 | ||
2182 | static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal, | |
2183 | struct qman_fq *fq, | |
2184 | const struct qm_dqrr_entry *dq) | |
2185 | { | |
2186 | struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base); | |
2187 | struct dpaa_percpu_priv *percpu_priv; | |
2188 | struct net_device *net_dev; | |
2189 | struct dpaa_bp *dpaa_bp; | |
2190 | struct dpaa_priv *priv; | |
2191 | ||
2192 | net_dev = dpaa_fq->net_dev; | |
2193 | priv = netdev_priv(net_dev); | |
2194 | dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); | |
2195 | if (!dpaa_bp) | |
2196 | return qman_cb_dqrr_consume; | |
2197 | ||
2198 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | |
2199 | ||
2200 | if (dpaa_eth_napi_schedule(percpu_priv, portal)) | |
2201 | return qman_cb_dqrr_stop; | |
2202 | ||
2203 | if (dpaa_eth_refill_bpools(priv)) | |
2204 | /* Unable to refill the buffer pool due to insufficient | |
2205 | * system memory. Just release the frame back into the pool, | |
2206 | * otherwise we'll soon end up with an empty buffer pool. | |
2207 | */ | |
2208 | dpaa_fd_release(net_dev, &dq->fd); | |
2209 | else | |
2210 | dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); | |
2211 | ||
2212 | return qman_cb_dqrr_consume; | |
2213 | } | |
2214 | ||
2215 | static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal, | |
2216 | struct qman_fq *fq, | |
2217 | const struct qm_dqrr_entry *dq) | |
2218 | { | |
2219 | struct rtnl_link_stats64 *percpu_stats; | |
2220 | struct dpaa_percpu_priv *percpu_priv; | |
2221 | const struct qm_fd *fd = &dq->fd; | |
2222 | dma_addr_t addr = qm_fd_addr(fd); | |
2223 | enum qm_fd_format fd_format; | |
2224 | struct net_device *net_dev; | |
056057e2 | 2225 | u32 fd_status, hash_offset; |
9ad1a374 MB |
2226 | struct dpaa_bp *dpaa_bp; |
2227 | struct dpaa_priv *priv; | |
2228 | unsigned int skb_len; | |
2229 | struct sk_buff *skb; | |
2230 | int *count_ptr; | |
056057e2 | 2231 | void *vaddr; |
9ad1a374 | 2232 | |
7d6f8dc0 CM |
2233 | fd_status = be32_to_cpu(fd->status); |
2234 | fd_format = qm_fd_get_format(fd); | |
9ad1a374 MB |
2235 | net_dev = ((struct dpaa_fq *)fq)->net_dev; |
2236 | priv = netdev_priv(net_dev); | |
2237 | dpaa_bp = dpaa_bpid2pool(dq->fd.bpid); | |
2238 | if (!dpaa_bp) | |
2239 | return qman_cb_dqrr_consume; | |
2240 | ||
eb11ddf3 MB |
2241 | /* Trace the Rx fd */ |
2242 | trace_dpaa_rx_fd(net_dev, fq, &dq->fd); | |
2243 | ||
9ad1a374 MB |
2244 | percpu_priv = this_cpu_ptr(priv->percpu_priv); |
2245 | percpu_stats = &percpu_priv->stats; | |
2246 | ||
2247 | if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal))) | |
2248 | return qman_cb_dqrr_stop; | |
2249 | ||
2250 | /* Make sure we didn't run out of buffers */ | |
2251 | if (unlikely(dpaa_eth_refill_bpools(priv))) { | |
2252 | /* Unable to refill the buffer pool due to insufficient | |
2253 | * system memory. Just release the frame back into the pool, | |
2254 | * otherwise we'll soon end up with an empty buffer pool. | |
2255 | */ | |
2256 | dpaa_fd_release(net_dev, &dq->fd); | |
2257 | return qman_cb_dqrr_consume; | |
2258 | } | |
2259 | ||
2260 | if (unlikely(fd_status & FM_FD_STAT_RX_ERRORS)) { | |
2261 | if (net_ratelimit()) | |
2262 | netif_warn(priv, hw, net_dev, "FD status = 0x%08x\n", | |
2263 | fd_status & FM_FD_STAT_RX_ERRORS); | |
2264 | ||
2265 | percpu_stats->rx_errors++; | |
2266 | dpaa_fd_release(net_dev, fd); | |
2267 | return qman_cb_dqrr_consume; | |
2268 | } | |
2269 | ||
2270 | dpaa_bp = dpaa_bpid2pool(fd->bpid); | |
2271 | if (!dpaa_bp) | |
2272 | return qman_cb_dqrr_consume; | |
2273 | ||
2274 | dma_unmap_single(dpaa_bp->dev, addr, dpaa_bp->size, DMA_FROM_DEVICE); | |
2275 | ||
2276 | /* prefetch the first 64 bytes of the frame or the SGT start */ | |
056057e2 MB |
2277 | vaddr = phys_to_virt(addr); |
2278 | prefetch(vaddr + qm_fd_get_offset(fd)); | |
9ad1a374 MB |
2279 | |
2280 | fd_format = qm_fd_get_format(fd); | |
2281 | /* The only FD types that we may receive are contig and S/G */ | |
2282 | WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg)); | |
2283 | ||
2284 | /* Account for either the contig buffer or the SGT buffer (depending on | |
2285 | * which case we were in) having been removed from the pool. | |
2286 | */ | |
2287 | count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); | |
2288 | (*count_ptr)--; | |
2289 | ||
2290 | if (likely(fd_format == qm_fd_contig)) | |
2291 | skb = contig_fd_to_skb(priv, fd); | |
2292 | else | |
2293 | skb = sg_fd_to_skb(priv, fd); | |
2294 | if (!skb) | |
2295 | return qman_cb_dqrr_consume; | |
2296 | ||
2297 | skb->protocol = eth_type_trans(skb, net_dev); | |
2298 | ||
056057e2 MB |
2299 | if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use && |
2300 | !fman_port_get_hash_result_offset(priv->mac_dev->port[RX], | |
2301 | &hash_offset)) { | |
2302 | enum pkt_hash_types type; | |
2303 | ||
2304 | /* if L4 exists, it was used in the hash generation */ | |
2305 | type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ? | |
2306 | PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3; | |
2307 | skb_set_hash(skb, be32_to_cpu(*(u32 *)(vaddr + hash_offset)), | |
2308 | type); | |
2309 | } | |
2310 | ||
9ad1a374 MB |
2311 | skb_len = skb->len; |
2312 | ||
2313 | if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) | |
2314 | return qman_cb_dqrr_consume; | |
2315 | ||
2316 | percpu_stats->rx_packets++; | |
2317 | percpu_stats->rx_bytes += skb_len; | |
2318 | ||
2319 | return qman_cb_dqrr_consume; | |
2320 | } | |
2321 | ||
2322 | static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal, | |
2323 | struct qman_fq *fq, | |
2324 | const struct qm_dqrr_entry *dq) | |
2325 | { | |
2326 | struct dpaa_percpu_priv *percpu_priv; | |
2327 | struct net_device *net_dev; | |
2328 | struct dpaa_priv *priv; | |
2329 | ||
2330 | net_dev = ((struct dpaa_fq *)fq)->net_dev; | |
2331 | priv = netdev_priv(net_dev); | |
2332 | ||
2333 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | |
2334 | ||
2335 | if (dpaa_eth_napi_schedule(percpu_priv, portal)) | |
2336 | return qman_cb_dqrr_stop; | |
2337 | ||
2338 | dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); | |
2339 | ||
2340 | return qman_cb_dqrr_consume; | |
2341 | } | |
2342 | ||
2343 | static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal, | |
2344 | struct qman_fq *fq, | |
2345 | const struct qm_dqrr_entry *dq) | |
2346 | { | |
2347 | struct dpaa_percpu_priv *percpu_priv; | |
2348 | struct net_device *net_dev; | |
2349 | struct dpaa_priv *priv; | |
2350 | ||
2351 | net_dev = ((struct dpaa_fq *)fq)->net_dev; | |
2352 | priv = netdev_priv(net_dev); | |
2353 | ||
eb11ddf3 MB |
2354 | /* Trace the fd */ |
2355 | trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd); | |
2356 | ||
9ad1a374 MB |
2357 | percpu_priv = this_cpu_ptr(priv->percpu_priv); |
2358 | ||
2359 | if (dpaa_eth_napi_schedule(percpu_priv, portal)) | |
2360 | return qman_cb_dqrr_stop; | |
2361 | ||
2362 | dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid); | |
2363 | ||
2364 | return qman_cb_dqrr_consume; | |
2365 | } | |
2366 | ||
2367 | static void egress_ern(struct qman_portal *portal, | |
2368 | struct qman_fq *fq, | |
2369 | const union qm_mr_entry *msg) | |
2370 | { | |
2371 | const struct qm_fd *fd = &msg->ern.fd; | |
2372 | struct dpaa_percpu_priv *percpu_priv; | |
2373 | const struct dpaa_priv *priv; | |
2374 | struct net_device *net_dev; | |
2375 | struct sk_buff *skb; | |
2376 | ||
2377 | net_dev = ((struct dpaa_fq *)fq)->net_dev; | |
2378 | priv = netdev_priv(net_dev); | |
2379 | percpu_priv = this_cpu_ptr(priv->percpu_priv); | |
2380 | ||
2381 | percpu_priv->stats.tx_dropped++; | |
2382 | percpu_priv->stats.tx_fifo_errors++; | |
b0ce0d02 | 2383 | count_ern(percpu_priv, msg); |
9ad1a374 MB |
2384 | |
2385 | skb = dpaa_cleanup_tx_fd(priv, fd); | |
2386 | dev_kfree_skb_any(skb); | |
2387 | } | |
2388 | ||
2389 | static const struct dpaa_fq_cbs dpaa_fq_cbs = { | |
2390 | .rx_defq = { .cb = { .dqrr = rx_default_dqrr } }, | |
2391 | .tx_defq = { .cb = { .dqrr = conf_dflt_dqrr } }, | |
2392 | .rx_errq = { .cb = { .dqrr = rx_error_dqrr } }, | |
2393 | .tx_errq = { .cb = { .dqrr = conf_error_dqrr } }, | |
2394 | .egress_ern = { .cb = { .ern = egress_ern } } | |
2395 | }; | |
2396 | ||
2397 | static void dpaa_eth_napi_enable(struct dpaa_priv *priv) | |
2398 | { | |
2399 | struct dpaa_percpu_priv *percpu_priv; | |
2400 | int i; | |
2401 | ||
2402 | for_each_possible_cpu(i) { | |
2403 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); | |
2404 | ||
2405 | percpu_priv->np.down = 0; | |
2406 | napi_enable(&percpu_priv->np.napi); | |
2407 | } | |
2408 | } | |
2409 | ||
2410 | static void dpaa_eth_napi_disable(struct dpaa_priv *priv) | |
2411 | { | |
2412 | struct dpaa_percpu_priv *percpu_priv; | |
2413 | int i; | |
2414 | ||
2415 | for_each_possible_cpu(i) { | |
2416 | percpu_priv = per_cpu_ptr(priv->percpu_priv, i); | |
2417 | ||
2418 | percpu_priv->np.down = 1; | |
2419 | napi_disable(&percpu_priv->np.napi); | |
2420 | } | |
2421 | } | |
2422 | ||
3c38ec67 MB |
2423 | static void dpaa_adjust_link(struct net_device *net_dev) |
2424 | { | |
2425 | struct mac_device *mac_dev; | |
2426 | struct dpaa_priv *priv; | |
2427 | ||
2428 | priv = netdev_priv(net_dev); | |
2429 | mac_dev = priv->mac_dev; | |
2430 | mac_dev->adjust_link(mac_dev); | |
2431 | } | |
2432 | ||
2433 | static int dpaa_phy_init(struct net_device *net_dev) | |
2434 | { | |
2435 | struct mac_device *mac_dev; | |
2436 | struct phy_device *phy_dev; | |
2437 | struct dpaa_priv *priv; | |
2438 | ||
2439 | priv = netdev_priv(net_dev); | |
2440 | mac_dev = priv->mac_dev; | |
2441 | ||
2442 | phy_dev = of_phy_connect(net_dev, mac_dev->phy_node, | |
2443 | &dpaa_adjust_link, 0, | |
2444 | mac_dev->phy_if); | |
2445 | if (!phy_dev) { | |
2446 | netif_err(priv, ifup, net_dev, "init_phy() failed\n"); | |
2447 | return -ENODEV; | |
2448 | } | |
2449 | ||
2450 | /* Remove any features not supported by the controller */ | |
2451 | phy_dev->supported &= mac_dev->if_support; | |
3c38ec67 MB |
2452 | phy_dev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause); |
2453 | phy_dev->advertising = phy_dev->supported; | |
2454 | ||
2455 | mac_dev->phy_dev = phy_dev; | |
2456 | net_dev->phydev = phy_dev; | |
2457 | ||
2458 | return 0; | |
2459 | } | |
2460 | ||
9ad1a374 MB |
2461 | static int dpaa_open(struct net_device *net_dev) |
2462 | { | |
2463 | struct mac_device *mac_dev; | |
2464 | struct dpaa_priv *priv; | |
2465 | int err, i; | |
2466 | ||
2467 | priv = netdev_priv(net_dev); | |
2468 | mac_dev = priv->mac_dev; | |
2469 | dpaa_eth_napi_enable(priv); | |
2470 | ||
a35c52b7 | 2471 | err = dpaa_phy_init(net_dev); |
2472 | if (err) | |
3fe61f09 | 2473 | goto phy_init_failed; |
9ad1a374 MB |
2474 | |
2475 | for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) { | |
2476 | err = fman_port_enable(mac_dev->port[i]); | |
2477 | if (err) | |
2478 | goto mac_start_failed; | |
2479 | } | |
2480 | ||
2481 | err = priv->mac_dev->start(mac_dev); | |
2482 | if (err < 0) { | |
2483 | netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err); | |
2484 | goto mac_start_failed; | |
2485 | } | |
2486 | ||
2487 | netif_tx_start_all_queues(net_dev); | |
2488 | ||
2489 | return 0; | |
2490 | ||
2491 | mac_start_failed: | |
2492 | for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) | |
2493 | fman_port_disable(mac_dev->port[i]); | |
2494 | ||
3fe61f09 | 2495 | phy_init_failed: |
9ad1a374 MB |
2496 | dpaa_eth_napi_disable(priv); |
2497 | ||
2498 | return err; | |
2499 | } | |
2500 | ||
2501 | static int dpaa_eth_stop(struct net_device *net_dev) | |
2502 | { | |
2503 | struct dpaa_priv *priv; | |
2504 | int err; | |
2505 | ||
2506 | err = dpaa_stop(net_dev); | |
2507 | ||
2508 | priv = netdev_priv(net_dev); | |
2509 | dpaa_eth_napi_disable(priv); | |
2510 | ||
2511 | return err; | |
2512 | } | |
2513 | ||
1763413a MW |
2514 | static int dpaa_ioctl(struct net_device *net_dev, struct ifreq *rq, int cmd) |
2515 | { | |
2516 | if (!net_dev->phydev) | |
2517 | return -EINVAL; | |
2518 | return phy_mii_ioctl(net_dev->phydev, rq, cmd); | |
2519 | } | |
2520 | ||
9ad1a374 MB |
2521 | static const struct net_device_ops dpaa_ops = { |
2522 | .ndo_open = dpaa_open, | |
2523 | .ndo_start_xmit = dpaa_start_xmit, | |
2524 | .ndo_stop = dpaa_eth_stop, | |
2525 | .ndo_tx_timeout = dpaa_tx_timeout, | |
2526 | .ndo_get_stats64 = dpaa_get_stats64, | |
2527 | .ndo_set_mac_address = dpaa_set_mac_address, | |
2528 | .ndo_validate_addr = eth_validate_addr, | |
2529 | .ndo_set_rx_mode = dpaa_set_rx_mode, | |
1763413a | 2530 | .ndo_do_ioctl = dpaa_ioctl, |
2ea08f82 | 2531 | .ndo_setup_tc = dpaa_setup_tc, |
9ad1a374 MB |
2532 | }; |
2533 | ||
2534 | static int dpaa_napi_add(struct net_device *net_dev) | |
2535 | { | |
2536 | struct dpaa_priv *priv = netdev_priv(net_dev); | |
2537 | struct dpaa_percpu_priv *percpu_priv; | |
2538 | int cpu; | |
2539 | ||
2540 | for_each_possible_cpu(cpu) { | |
2541 | percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); | |
2542 | ||
2543 | netif_napi_add(net_dev, &percpu_priv->np.napi, | |
2544 | dpaa_eth_poll, NAPI_POLL_WEIGHT); | |
2545 | } | |
2546 | ||
2547 | return 0; | |
2548 | } | |
2549 | ||
2550 | static void dpaa_napi_del(struct net_device *net_dev) | |
2551 | { | |
2552 | struct dpaa_priv *priv = netdev_priv(net_dev); | |
2553 | struct dpaa_percpu_priv *percpu_priv; | |
2554 | int cpu; | |
2555 | ||
2556 | for_each_possible_cpu(cpu) { | |
2557 | percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu); | |
2558 | ||
2559 | netif_napi_del(&percpu_priv->np.napi); | |
2560 | } | |
2561 | } | |
2562 | ||
2563 | static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp, | |
2564 | struct bm_buffer *bmb) | |
2565 | { | |
2566 | dma_addr_t addr = bm_buf_addr(bmb); | |
2567 | ||
2568 | dma_unmap_single(bp->dev, addr, bp->size, DMA_FROM_DEVICE); | |
2569 | ||
2570 | skb_free_frag(phys_to_virt(addr)); | |
2571 | } | |
2572 | ||
2573 | /* Alloc the dpaa_bp struct and configure default values */ | |
2574 | static struct dpaa_bp *dpaa_bp_alloc(struct device *dev) | |
2575 | { | |
2576 | struct dpaa_bp *dpaa_bp; | |
2577 | ||
2578 | dpaa_bp = devm_kzalloc(dev, sizeof(*dpaa_bp), GFP_KERNEL); | |
2579 | if (!dpaa_bp) | |
2580 | return ERR_PTR(-ENOMEM); | |
2581 | ||
2582 | dpaa_bp->bpid = FSL_DPAA_BPID_INV; | |
2583 | dpaa_bp->percpu_count = devm_alloc_percpu(dev, *dpaa_bp->percpu_count); | |
52600dcc MB |
2584 | if (!dpaa_bp->percpu_count) |
2585 | return ERR_PTR(-ENOMEM); | |
2586 | ||
9ad1a374 MB |
2587 | dpaa_bp->config_count = FSL_DPAA_ETH_MAX_BUF_COUNT; |
2588 | ||
2589 | dpaa_bp->seed_cb = dpaa_bp_seed; | |
2590 | dpaa_bp->free_buf_cb = dpaa_bp_free_pf; | |
2591 | ||
2592 | return dpaa_bp; | |
2593 | } | |
2594 | ||
2595 | /* Place all ingress FQs (Rx Default, Rx Error) in a dedicated CGR. | |
2596 | * We won't be sending congestion notifications to FMan; for now, we just use | |
2597 | * this CGR to generate enqueue rejections to FMan in order to drop the frames | |
2598 | * before they reach our ingress queues and eat up memory. | |
2599 | */ | |
2600 | static int dpaa_ingress_cgr_init(struct dpaa_priv *priv) | |
2601 | { | |
2602 | struct qm_mcc_initcgr initcgr; | |
2603 | u32 cs_th; | |
2604 | int err; | |
2605 | ||
2606 | err = qman_alloc_cgrid(&priv->ingress_cgr.cgrid); | |
2607 | if (err < 0) { | |
2608 | if (netif_msg_drv(priv)) | |
2609 | pr_err("Error %d allocating CGR ID\n", err); | |
2610 | goto out_error; | |
2611 | } | |
2612 | ||
2613 | /* Enable CS TD, but disable Congestion State Change Notifications. */ | |
0fbb0f24 | 2614 | memset(&initcgr, 0, sizeof(initcgr)); |
7d6f8dc0 | 2615 | initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES); |
9ad1a374 MB |
2616 | initcgr.cgr.cscn_en = QM_CGR_EN; |
2617 | cs_th = DPAA_INGRESS_CS_THRESHOLD; | |
2618 | qm_cgr_cs_thres_set64(&initcgr.cgr.cs_thres, cs_th, 1); | |
2619 | ||
7d6f8dc0 | 2620 | initcgr.we_mask |= cpu_to_be16(QM_CGR_WE_CSTD_EN); |
9ad1a374 MB |
2621 | initcgr.cgr.cstd_en = QM_CGR_EN; |
2622 | ||
2623 | /* This CGR will be associated with the SWP affined to the current CPU. | |
2624 | * However, we'll place all our ingress FQs in it. | |
2625 | */ | |
2626 | err = qman_create_cgr(&priv->ingress_cgr, QMAN_CGR_FLAG_USE_INIT, | |
2627 | &initcgr); | |
2628 | if (err < 0) { | |
2629 | if (netif_msg_drv(priv)) | |
2630 | pr_err("Error %d creating ingress CGR with ID %d\n", | |
2631 | err, priv->ingress_cgr.cgrid); | |
2632 | qman_release_cgrid(priv->ingress_cgr.cgrid); | |
2633 | goto out_error; | |
2634 | } | |
2635 | if (netif_msg_drv(priv)) | |
2636 | pr_debug("Created ingress CGR %d for netdev with hwaddr %pM\n", | |
2637 | priv->ingress_cgr.cgrid, priv->mac_dev->addr); | |
2638 | ||
2639 | priv->use_ingress_cgr = true; | |
2640 | ||
2641 | out_error: | |
2642 | return err; | |
2643 | } | |
2644 | ||
2645 | static const struct of_device_id dpaa_match[]; | |
2646 | ||
2647 | static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl) | |
2648 | { | |
2649 | u16 headroom; | |
2650 | ||
2651 | /* The frame headroom must accommodate: | |
2652 | * - the driver private data area | |
2653 | * - parse results, hash results, timestamp if selected | |
2654 | * If either hash results or timestamp is selected, both will | |
2655 | * be copied to/from the frame headroom, as the TS is located between | |
2656 | * PR and HR in the IC, and the IC copy size has a granularity of | |
2657 | * 16 bytes (see the FMBM_RICP and FMBM_TICP registers in DPAARM). | |
2658 | * | |
2659 | * Also make sure the headroom is a multiple of data_align bytes | |
2660 | */ | |
2661 | headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE + | |
2662 | DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE); | |
2663 | ||
2664 | return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom, | |
2665 | DPAA_FD_DATA_ALIGNMENT) : | |
2666 | headroom; | |
2667 | } | |
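/* Worked example (all sizes hypothetical): with priv_data_size = 16,
 * DPAA_PARSE_RESULTS_SIZE = 64, DPAA_TIME_STAMP_SIZE = 8 and
 * DPAA_HASH_RESULTS_SIZE = 8, the raw headroom is 96 bytes; with
 * DPAA_FD_DATA_ALIGNMENT = 16 this is already aligned, so
 * dpaa_get_headroom() returns 96.
 */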
2668 | ||
2669 | static int dpaa_eth_probe(struct platform_device *pdev) | |
2670 | { | |
2671 | struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL}; | |
9ad1a374 MB |
2672 | struct net_device *net_dev = NULL; |
2673 | struct dpaa_fq *dpaa_fq, *tmp; | |
2674 | struct dpaa_priv *priv = NULL; | |
2675 | struct fm_port_fqs port_fqs; | |
2676 | struct mac_device *mac_dev; | |
2677 | int err = 0, i, channel; | |
2678 | struct device *dev; | |
2679 | ||
c6e26ea8 MB |
2680 | /* device used for DMA mapping */ |
2681 | dev = pdev->dev.parent; | |
2682 | err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(40)); | |
2683 | if (err) { | |
2684 | dev_err(dev, "dma_coerce_mask_and_coherent() failed\n"); | |
2685 | return err; | |
2686 | } | |
9ad1a374 MB |
2687 | |
2688 | /* Allocate this early, so we can store relevant information in | |
2689 | * the private area | |
2690 | */ | |
2691 | net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM); | |
2692 | if (!net_dev) { | |
2693 | dev_err(dev, "alloc_etherdev_mq() failed\n"); | |
8b9b5a2c | 2694 | return -ENOMEM; |
9ad1a374 MB |
2695 | } |
2696 | ||
2697 | /* Do this here, so we can be verbose early */ | |
2698 | SET_NETDEV_DEV(net_dev, dev); | |
2699 | dev_set_drvdata(dev, net_dev); | |
2700 | ||
2701 | priv = netdev_priv(net_dev); | |
2702 | priv->net_dev = net_dev; | |
2703 | ||
2704 | priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT); | |
2705 | ||
2706 | mac_dev = dpaa_mac_dev_get(pdev); | |
2707 | if (IS_ERR(mac_dev)) { | |
2708 | dev_err(dev, "dpaa_mac_dev_get() failed\n"); | |
2709 | err = PTR_ERR(mac_dev); | |
8b9b5a2c | 2710 | goto free_netdev; |
9ad1a374 MB |
2711 | } |
2712 | ||
2713 | /* If fsl_fm_max_frm is set to a value higher than the common 1500, | |
2714 | * we choose conservatively and let the user explicitly set a higher | |
2715 | * MTU via ifconfig. Otherwise, the user may end up with different MTUs | |
2716 | * in the same LAN. | |
2717 | * If on the other hand fsl_fm_max_frm has been chosen below 1500, | |
2718 | * start with the maximum allowed. | |
2719 | */ | |
2720 | net_dev->mtu = min(dpaa_get_max_mtu(), ETH_DATA_LEN); | |
2721 | ||
2722 | netdev_dbg(net_dev, "Setting initial MTU on net device: %d\n", | |
2723 | net_dev->mtu); | |
2724 | ||
2725 | priv->buf_layout[RX].priv_data_size = DPAA_RX_PRIV_DATA_SIZE; /* Rx */ | |
2726 | priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */ | |
2727 | ||
9ad1a374 MB |
2728 | /* bp init */ |
2729 | for (i = 0; i < DPAA_BPS_NUM; i++) { | |
2730 | int err; | |
2731 | ||
2732 | dpaa_bps[i] = dpaa_bp_alloc(dev); | |
2733 | if (IS_ERR(dpaa_bps[i])) | |
8b9b5a2c | 2734 | goto free_dpaa_bps; |
9ad1a374 MB |
2735 | /* the raw size of the buffers used for reception */ |
2736 | dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM); | |
2737 | /* avoid runtime computations by keeping the usable size here */ | |
2738 | dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size); | |
2739 | dpaa_bps[i]->dev = dev; | |
2740 | ||
2741 | err = dpaa_bp_alloc_pool(dpaa_bps[i]); | |
8b9b5a2c MB |
2742 | if (err < 0) |
2743 | goto free_dpaa_bps; | |
9ad1a374 MB |
2744 | priv->dpaa_bps[i] = dpaa_bps[i]; |
2745 | } | |
2746 | ||
2747 | INIT_LIST_HEAD(&priv->dpaa_fq_list); | |
2748 | ||
2749 | memset(&port_fqs, 0, sizeof(port_fqs)); | |
2750 | ||
2751 | err = dpaa_alloc_all_fqs(dev, &priv->dpaa_fq_list, &port_fqs); | |
2752 | if (err < 0) { | |
2753 | dev_err(dev, "dpaa_alloc_all_fqs() failed\n"); | |
8b9b5a2c | 2754 | goto free_dpaa_bps; |
9ad1a374 MB |
2755 | } |
2756 | ||
2757 | priv->mac_dev = mac_dev; | |
2758 | ||
2759 | channel = dpaa_get_channel(); | |
2760 | if (channel < 0) { | |
2761 | dev_err(dev, "dpaa_get_channel() failed\n"); | |
2762 | err = channel; | |
8b9b5a2c | 2763 | goto free_dpaa_bps; |
9ad1a374 MB |
2764 | } |
2765 | ||
2766 | priv->channel = (u16)channel; | |
2767 | ||
2768 | /* Start a thread that will walk the CPUs with affine portals | |
2769 | * and add this pool channel to each CPU's dequeue mask. | |
2770 | */ | |
2771 | dpaa_eth_add_channel(priv->channel); | |
2772 | ||
2773 | dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); | |
2774 | ||
2775 | /* Create a congestion group for this netdev, with | |
2776 | * dynamically-allocated CGR ID. | |
2777 | * Must be executed after probing the MAC, but before | |
2778 | * assigning the egress FQs to the CGRs. | |
2779 | */ | |
2780 | err = dpaa_eth_cgr_init(priv); | |
2781 | if (err < 0) { | |
2782 | dev_err(dev, "Error initializing CGR\n"); | |
8b9b5a2c | 2783 | goto free_dpaa_bps; |
9ad1a374 MB |
2784 | } |
2785 | ||
2786 | err = dpaa_ingress_cgr_init(priv); | |
2787 | if (err < 0) { | |
2788 | dev_err(dev, "Error initializing ingress CGR\n"); | |
8b9b5a2c | 2789 | goto delete_egress_cgr; |
9ad1a374 MB |
2790 | } |
2791 | ||
2792 | /* Add the FQs to the interface, and make them active */ | |
2793 | list_for_each_entry_safe(dpaa_fq, tmp, &priv->dpaa_fq_list, list) { | |
2794 | err = dpaa_fq_init(dpaa_fq, false); | |
2795 | if (err < 0) | |
8b9b5a2c | 2796 | goto free_dpaa_fqs; |
9ad1a374 MB |
2797 | } |
2798 | ||
2799 | priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]); | |
2800 | priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]); | |
2801 | ||
2802 | /* All real interfaces need their ports initialized */ | |
7f8a6a1b MB |
2803 | err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs, |
2804 | &priv->buf_layout[0], dev); | |
2805 | if (err) | |
8b9b5a2c | 2806 | goto free_dpaa_fqs; |
9ad1a374 | 2807 | |
056057e2 MB |
2808 | /* Rx traffic distribution based on keygen hashing defaults to on */ |
2809 | priv->keygen_in_use = true; | |
2810 | ||
9ad1a374 MB |
2811 | priv->percpu_priv = devm_alloc_percpu(dev, *priv->percpu_priv); |
2812 | if (!priv->percpu_priv) { | |
2813 | dev_err(dev, "devm_alloc_percpu() failed\n"); | |
2814 | err = -ENOMEM; | |
8b9b5a2c | 2815 | goto free_dpaa_fqs; |
9ad1a374 | 2816 | } |
9ad1a374 | 2817 | |
c44efa1d CG |
2818 | priv->num_tc = 1; |
2819 | netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM); | |
2820 | ||
9ad1a374 MB |
2821 | /* Initialize NAPI */ |
2822 | err = dpaa_napi_add(net_dev); | |
2823 | if (err < 0) | |
8b9b5a2c | 2824 | goto delete_dpaa_napi; |
9ad1a374 MB |
2825 | |
2826 | err = dpaa_netdev_init(net_dev, &dpaa_ops, tx_timeout); | |
2827 | if (err < 0) | |
8b9b5a2c | 2828 | goto delete_dpaa_napi; |
9ad1a374 | 2829 | |
846a86e2 MB |
2830 | dpaa_eth_sysfs_init(&net_dev->dev); |
2831 | ||
9ad1a374 MB |
2832 | netif_info(priv, probe, net_dev, "Probed interface %s\n", |
2833 | net_dev->name); | |
2834 | ||
2835 | return 0; | |
2836 | ||
8b9b5a2c | 2837 | delete_dpaa_napi: |
9ad1a374 | 2838 | dpaa_napi_del(net_dev); |
8b9b5a2c | 2839 | free_dpaa_fqs: |
9ad1a374 | 2840 | dpaa_fq_free(dev, &priv->dpaa_fq_list); |
9ad1a374 MB |
2841 | qman_delete_cgr_safe(&priv->ingress_cgr); |
2842 | qman_release_cgrid(priv->ingress_cgr.cgrid); | |
8b9b5a2c | 2843 | delete_egress_cgr: |
9ad1a374 MB |
2844 | qman_delete_cgr_safe(&priv->cgr_data.cgr); |
2845 | qman_release_cgrid(priv->cgr_data.cgr.cgrid); | |
8b9b5a2c | 2846 | free_dpaa_bps: |
9ad1a374 | 2847 | dpaa_bps_free(priv); |
8b9b5a2c | 2848 | free_netdev: |
9ad1a374 MB |
2849 | dev_set_drvdata(dev, NULL); |
2850 | free_netdev(net_dev); | |
8b9b5a2c | 2851 | |
9ad1a374 MB |
2852 | return err; |
2853 | } | |
2854 | ||
2855 | static int dpaa_remove(struct platform_device *pdev) | |
2856 | { | |
2857 | struct net_device *net_dev; | |
2858 | struct dpaa_priv *priv; | |
2859 | struct device *dev; | |
2860 | int err; | |
2861 | ||
2862 | dev = &pdev->dev; | |
2863 | net_dev = dev_get_drvdata(dev); | |
2864 | ||
2865 | priv = netdev_priv(net_dev); | |
2866 | ||
846a86e2 MB |
2867 | dpaa_eth_sysfs_remove(dev); |
2868 | ||
9ad1a374 MB |
2869 | dev_set_drvdata(dev, NULL); |
2870 | unregister_netdev(net_dev); | |
2871 | ||
2872 | err = dpaa_fq_free(dev, &priv->dpaa_fq_list); | |
2873 | ||
2874 | qman_delete_cgr_safe(&priv->ingress_cgr); | |
2875 | qman_release_cgrid(priv->ingress_cgr.cgrid); | |
2876 | qman_delete_cgr_safe(&priv->cgr_data.cgr); | |
2877 | qman_release_cgrid(priv->cgr_data.cgr.cgrid); | |
2878 | ||
2879 | dpaa_napi_del(net_dev); | |
2880 | ||
2881 | dpaa_bps_free(priv); | |
2882 | ||
2883 | free_netdev(net_dev); | |
2884 | ||
2885 | return err; | |
2886 | } | |
2887 | ||
bef0fed4 | 2888 | static const struct platform_device_id dpaa_devtype[] = { |
9ad1a374 MB |
2889 | { |
2890 | .name = "dpaa-ethernet", | |
2891 | .driver_data = 0, | |
2892 | }, { | |
2893 | } | |
2894 | }; | |
2895 | MODULE_DEVICE_TABLE(platform, dpaa_devtype); | |
2896 | ||
2897 | static struct platform_driver dpaa_driver = { | |
2898 | .driver = { | |
2899 | .name = KBUILD_MODNAME, | |
2900 | }, | |
2901 | .id_table = dpaa_devtype, | |
2902 | .probe = dpaa_eth_probe, | |
2903 | .remove = dpaa_remove | |
2904 | }; | |
2905 | ||
2906 | static int __init dpaa_load(void) | |
2907 | { | |
2908 | int err; | |
2909 | ||
2910 | pr_debug("FSL DPAA Ethernet driver\n"); | |
2911 | ||
2912 | /* initialize dpaa_eth mirror values */ | |
2913 | dpaa_rx_extra_headroom = fman_get_rx_extra_headroom(); | |
2914 | dpaa_max_frm = fman_get_max_frm(); | |
2915 | ||
2916 | err = platform_driver_register(&dpaa_driver); | |
2917 | if (err < 0) | |
2918 | pr_err("Error, platform_driver_register() = %d\n", err); | |
2919 | ||
2920 | return err; | |
2921 | } | |
2922 | module_init(dpaa_load); | |
2923 | ||
2924 | static void __exit dpaa_unload(void) | |
2925 | { | |
2926 | platform_driver_unregister(&dpaa_driver); | |
2927 | ||
2928 | /* Only one channel is used and needs to be released after all | |
2929 | * interfaces are removed | |
2930 | */ | |
2931 | dpaa_release_channel(); | |
2932 | } | |
2933 | module_exit(dpaa_unload); | |
2934 | ||
2935 | MODULE_LICENSE("Dual BSD/GPL"); | |
2936 | MODULE_DESCRIPTION("FSL DPAA Ethernet driver"); |