// SPDX-License-Identifier: GPL-2.0
/*
 * Lantiq / Intel PMAC driver for XRX200 SoCs
 *
 * Copyright (C) 2010 Lantiq Deutschland
 * Copyright (C) 2012 John Crispin <john@phrozen.org>
 * Copyright (C) 2017 - 2018 Hauke Mehrtens <hauke@hauke-m.de>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <linux/if_vlan.h>

#include <linux/of_net.h>
#include <linux/of_platform.h>

#include <xway_dma.h>

/* DMA */
#define XRX200_DMA_DATA_LEN		(SZ_64K - 1)
#define XRX200_DMA_RX			0
#define XRX200_DMA_TX			1
#define XRX200_DMA_BURST_LEN		8

#define XRX200_DMA_PACKET_COMPLETE	0
#define XRX200_DMA_PACKET_IN_PROGRESS	1

/* cpu port mac */
#define PMAC_RX_IPG		0x0024
#define PMAC_RX_IPG_MASK	0xf

#define PMAC_HD_CTL		0x0000
/* Add Ethernet header to packets from DMA to PMAC */
#define PMAC_HD_CTL_ADD		BIT(0)
/* Add VLAN tag to packets from DMA to PMAC */
#define PMAC_HD_CTL_TAG		BIT(1)
/* Add CRC to packets from DMA to PMAC */
#define PMAC_HD_CTL_AC		BIT(2)
/* Add status header to packets from PMAC to DMA */
#define PMAC_HD_CTL_AS		BIT(3)
/* Remove CRC from packets from PMAC to DMA */
#define PMAC_HD_CTL_RC		BIT(4)
/* Remove Layer-2 header from packets from PMAC to DMA */
#define PMAC_HD_CTL_RL2		BIT(5)
/* Status header is present from DMA to PMAC */
#define PMAC_HD_CTL_RXSH	BIT(6)
/* Add special tag from PMAC to switch */
#define PMAC_HD_CTL_AST		BIT(7)
/* Remove special tag from PMAC to DMA */
#define PMAC_HD_CTL_RST		BIT(8)
/* Check CRC from DMA to PMAC */
#define PMAC_HD_CTL_CCRC	BIT(9)
/* Enable reaction to Pause frames in the PMAC */
#define PMAC_HD_CTL_FC		BIT(10)

struct xrx200_chan {
	int tx_free;

	struct napi_struct napi;
	struct ltq_dma_channel dma;

	union {
		struct sk_buff *skb[LTQ_DESC_NUM];
		void *rx_buff[LTQ_DESC_NUM];
	};

	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;

	struct xrx200_priv *priv;
};

struct xrx200_priv {
	struct clk *clk;

	struct xrx200_chan chan_tx;
	struct xrx200_chan chan_rx;

	u16 rx_buf_size;
	u16 rx_skb_size;

	struct net_device *net_dev;
	struct device *dev;

	__iomem void *pmac_reg;
};

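/* PMAC register helpers: raw MMIO read/write accessors plus a
 * read-modify-write helper for updating individual bit fields.
 */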
static u32 xrx200_pmac_r32(struct xrx200_priv *priv, u32 offset)
{
	return __raw_readl(priv->pmac_reg + offset);
}

static void xrx200_pmac_w32(struct xrx200_priv *priv, u32 val, u32 offset)
{
	__raw_writel(val, priv->pmac_reg + offset);
}

static void xrx200_pmac_mask(struct xrx200_priv *priv, u32 clear, u32 set,
			     u32 offset)
{
	u32 val = xrx200_pmac_r32(priv, offset);

	val &= ~(clear);
	val |= set;
	xrx200_pmac_w32(priv, val, offset);
}

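/* RX sizing helpers: the DMA buffer covers a VLAN-tagged Ethernet frame for
 * the given MTU, rounded up to a multiple of the DMA burst size (4 * 8
 * bytes). The backing allocation adds NET_SKB_PAD + NET_IP_ALIGN headroom
 * and room for struct skb_shared_info so the buffer can later be wrapped
 * with build_skb().
 */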
static int xrx200_max_frame_len(int mtu)
{
	return VLAN_ETH_HLEN + mtu;
}

static int xrx200_buffer_size(int mtu)
{
	return round_up(xrx200_max_frame_len(mtu), 4 * XRX200_DMA_BURST_LEN);
}

static int xrx200_skb_size(u16 buf_size)
{
	return SKB_DATA_ALIGN(buf_size + NET_SKB_PAD + NET_IP_ALIGN) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

/* drop all the packets from the DMA ring */
static void xrx200_flush_dma(struct xrx200_chan *ch)
{
	int i;

	for (i = 0; i < LTQ_DESC_NUM; i++) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
			break;

		desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
			    ch->priv->rx_buf_size;
		ch->dma.desc++;
		ch->dma.desc %= LTQ_DESC_NUM;
	}
}

static int xrx200_open(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	napi_enable(&priv->chan_tx.napi);
	ltq_dma_open(&priv->chan_tx.dma);
	ltq_dma_enable_irq(&priv->chan_tx.dma);

	napi_enable(&priv->chan_rx.napi);
	ltq_dma_open(&priv->chan_rx.dma);
	/* The boot loader does not always deactivate frame reception on the
	 * ports, so some packets can queue up in the PPE buffers. They have
	 * already passed the PMAC and therefore do not carry the tags
	 * configured here. Read these packets and drop them. The HW should
	 * have written them into memory after 10us.
	 */
	usleep_range(20, 40);
	xrx200_flush_dma(&priv->chan_rx);
	ltq_dma_enable_irq(&priv->chan_rx.dma);

	netif_wake_queue(net_dev);

	return 0;
}

static int xrx200_close(struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);

	netif_stop_queue(net_dev);

	napi_disable(&priv->chan_rx.napi);
	ltq_dma_close(&priv->chan_rx.dma);

	napi_disable(&priv->chan_tx.napi);
	ltq_dma_close(&priv->chan_tx.dma);

	return 0;
}

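/* Swap the buffer behind the current RX descriptor for a freshly allocated
 * page fragment and hand the descriptor back to the hardware. On allocation
 * or mapping failure the old buffer is kept in place so the ring never
 * points at a missing buffer.
 */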
static int xrx200_alloc_buf(struct xrx200_chan *ch, void *(*alloc)(unsigned int size))
{
	void *buf = ch->rx_buff[ch->dma.desc];
	struct xrx200_priv *priv = ch->priv;
	dma_addr_t mapping;
	int ret = 0;

	ch->rx_buff[ch->dma.desc] = alloc(priv->rx_skb_size);
	if (!ch->rx_buff[ch->dma.desc]) {
		ch->rx_buff[ch->dma.desc] = buf;
		ret = -ENOMEM;
		goto skip;
	}

	mapping = dma_map_single(priv->dev, ch->rx_buff[ch->dma.desc],
				 priv->rx_buf_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping))) {
		skb_free_frag(ch->rx_buff[ch->dma.desc]);
		ch->rx_buff[ch->dma.desc] = buf;
		ret = -ENOMEM;
		goto skip;
	}

	ch->dma.desc_base[ch->dma.desc].addr = mapping + NET_SKB_PAD + NET_IP_ALIGN;
	/* Make sure the address is written before we give it to HW */
	wmb();
skip:
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | priv->rx_buf_size;

	return ret;
}

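/* Take one completed descriptor off the RX ring, wrap its buffer in an skb
 * with build_skb() and chain the pieces of multi-descriptor frames through
 * skb->frag_list; the frame is passed up the stack once the EOP descriptor
 * is seen.
 */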
static int xrx200_hw_receive(struct xrx200_chan *ch)
{
	struct xrx200_priv *priv = ch->priv;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	void *buf = ch->rx_buff[ch->dma.desc];
	u32 ctl = desc->ctl;
	int len = (ctl & LTQ_DMA_SIZE_MASK);
	struct net_device *net_dev = priv->net_dev;
	struct sk_buff *skb;
	int ret;

	ret = xrx200_alloc_buf(ch, napi_alloc_frag);

	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;

	if (ret) {
		net_dev->stats.rx_dropped++;
		netdev_err(net_dev, "failed to allocate new rx buffer\n");
		return ret;
	}

	skb = build_skb(buf, priv->rx_skb_size);
	if (!skb) {
		skb_free_frag(buf);
		net_dev->stats.rx_dropped++;
		return -ENOMEM;
	}

	skb_reserve(skb, NET_SKB_PAD);
	skb_put(skb, len);

	/* add buffers to skb via skb->frag_list */
	if (ctl & LTQ_DMA_SOP) {
		ch->skb_head = skb;
		ch->skb_tail = skb;
		skb_reserve(skb, NET_IP_ALIGN);
	} else if (ch->skb_head) {
		if (ch->skb_head == ch->skb_tail)
			skb_shinfo(ch->skb_tail)->frag_list = skb;
		else
			ch->skb_tail->next = skb;
		ch->skb_tail = skb;
		ch->skb_head->len += skb->len;
		ch->skb_head->data_len += skb->len;
		ch->skb_head->truesize += skb->truesize;
	}

	if (ctl & LTQ_DMA_EOP) {
		ch->skb_head->protocol = eth_type_trans(ch->skb_head, net_dev);
		net_dev->stats.rx_packets++;
		net_dev->stats.rx_bytes += ch->skb_head->len;
		netif_receive_skb(ch->skb_head);
		ch->skb_head = NULL;
		ch->skb_tail = NULL;
		ret = XRX200_DMA_PACKET_COMPLETE;
	} else {
		ret = XRX200_DMA_PACKET_IN_PROGRESS;
	}

	return ret;
}

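/* NAPI RX poll: handle completed descriptors until the budget is spent or
 * the ring runs dry. Descriptors that merely continue an in-progress
 * multi-descriptor frame do not count against the budget.
 */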
static int xrx200_poll_rx(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	int rx = 0;
	int ret;

	while (rx < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			ret = xrx200_hw_receive(ch);
			if (ret == XRX200_DMA_PACKET_IN_PROGRESS)
				continue;
			if (ret != XRX200_DMA_PACKET_COMPLETE)
				break;
			rx++;
		} else {
			break;
		}
	}

	if (rx < budget) {
		if (napi_complete_done(&ch->napi, rx))
			ltq_dma_enable_irq(&ch->dma);
	}

	return rx;
}

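/* NAPI TX completion: reclaim transmitted skbs, update stats and the BQL
 * counters, and wake the queue if it was stopped because the ring was full.
 */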
static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget)
{
	struct xrx200_chan *ch = container_of(napi,
				struct xrx200_chan, napi);
	struct net_device *net_dev = ch->priv->net_dev;
	int pkts = 0;
	int bytes = 0;

	netif_tx_lock(net_dev);
	while (pkts < budget) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->tx_free];

		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			struct sk_buff *skb = ch->skb[ch->tx_free];

			pkts++;
			bytes += skb->len;
			ch->skb[ch->tx_free] = NULL;
			consume_skb(skb);
			memset(&ch->dma.desc_base[ch->tx_free], 0,
			       sizeof(struct ltq_dma_desc));
			ch->tx_free++;
			ch->tx_free %= LTQ_DESC_NUM;
		} else {
			break;
		}
	}

	net_dev->stats.tx_packets += pkts;
	net_dev->stats.tx_bytes += bytes;
	netdev_completed_queue(ch->priv->net_dev, pkts, bytes);

	netif_tx_unlock(net_dev);
	if (netif_queue_stopped(net_dev))
		netif_wake_queue(net_dev);

	if (pkts < budget) {
		if (napi_complete_done(&ch->napi, pkts))
			ltq_dma_enable_irq(&ch->dma);
	}

	return pkts;
}

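/* Transmit path: each frame is padded to ETH_ZLEN, mapped for DMA and placed
 * in a single descriptor. The DMA engine requires burst-aligned start
 * addresses, so the descriptor carries the aligned address plus a byte
 * offset into it.
 */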
static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
				     struct net_device *net_dev)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch = &priv->chan_tx;
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	u32 byte_offset;
	dma_addr_t mapping;
	int len;

	skb->dev = net_dev;
	if (skb_put_padto(skb, ETH_ZLEN)) {
		net_dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	len = skb->len;

	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		netdev_err(net_dev, "tx ring full\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	ch->skb[ch->dma.desc] = skb;

	mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, mapping)))
		goto err_drop;

	/* dma needs to start on a burst length value aligned address */
	byte_offset = mapping % (XRX200_DMA_BURST_LEN * 4);

	desc->addr = mapping - byte_offset;
	/* Make sure the address is written before we give it to HW */
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	if (ch->dma.desc == ch->tx_free)
		netif_stop_queue(net_dev);

	netdev_sent_queue(net_dev, len);

	return NETDEV_TX_OK;

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

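/* MTU changes only require work when the new MTU is larger than the old one:
 * the RX ring is drained and every descriptor gets a buffer big enough for
 * the new frame size; on allocation failure the old MTU and buffer sizes are
 * restored.
 */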
static int
xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
{
	struct xrx200_priv *priv = netdev_priv(net_dev);
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	int old_mtu = net_dev->mtu;
	bool running = false;
	void *buff;
	int curr_desc;
	int ret = 0;

	net_dev->mtu = new_mtu;
	priv->rx_buf_size = xrx200_buffer_size(new_mtu);
	priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);

	if (new_mtu <= old_mtu)
		return ret;

	running = netif_running(net_dev);
	if (running) {
		napi_disable(&ch_rx->napi);
		ltq_dma_close(&ch_rx->dma);
	}

	xrx200_poll_rx(&ch_rx->napi, LTQ_DESC_NUM);
	curr_desc = ch_rx->dma.desc;

	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		buff = ch_rx->rx_buff[ch_rx->dma.desc];
		ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
		if (ret) {
			net_dev->mtu = old_mtu;
			priv->rx_buf_size = xrx200_buffer_size(old_mtu);
			priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
			break;
		}
		skb_free_frag(buff);
	}

	ch_rx->dma.desc = curr_desc;
	if (running) {
		napi_enable(&ch_rx->napi);
		ltq_dma_open(&ch_rx->dma);
		ltq_dma_enable_irq(&ch_rx->dma);
	}

	return ret;
}

static const struct net_device_ops xrx200_netdev_ops = {
	.ndo_open		= xrx200_open,
	.ndo_stop		= xrx200_close,
	.ndo_start_xmit		= xrx200_start_xmit,
	.ndo_change_mtu		= xrx200_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

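/* DMA interrupt handler shared by the RX and TX channels: mask the channel
 * interrupt and schedule NAPI, then acknowledge the interrupt.
 */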
static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
{
	struct xrx200_chan *ch = ptr;

	if (napi_schedule_prep(&ch->napi)) {
		ltq_dma_disable_irq(&ch->dma);
		__napi_schedule(&ch->napi);
	}

	ltq_dma_ack_irq(&ch->dma);

	return IRQ_HANDLED;
}

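/* Set up the ETOP DMA port, allocate the RX and TX descriptor rings, fill
 * the RX ring with buffers and request the per-channel interrupts.
 */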
static int xrx200_dma_init(struct xrx200_priv *priv)
{
	struct xrx200_chan *ch_rx = &priv->chan_rx;
	struct xrx200_chan *ch_tx = &priv->chan_tx;
	int ret = 0;
	int i;

	ltq_dma_init_port(DMA_PORT_ETOP, XRX200_DMA_BURST_LEN,
			  XRX200_DMA_BURST_LEN);

	ch_rx->dma.nr = XRX200_DMA_RX;
	ch_rx->dma.dev = priv->dev;
	ch_rx->priv = priv;

	ltq_dma_alloc_rx(&ch_rx->dma);
	for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
	     ch_rx->dma.desc++) {
		ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
		if (ret)
			goto rx_free;
	}
	ch_rx->dma.desc = 0;
	ret = devm_request_irq(priv->dev, ch_rx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_rx", &priv->chan_rx);
	if (ret) {
		dev_err(priv->dev, "failed to request RX irq %d\n",
			ch_rx->dma.irq);
		goto rx_ring_free;
	}

	ch_tx->dma.nr = XRX200_DMA_TX;
	ch_tx->dma.dev = priv->dev;
	ch_tx->priv = priv;

	ltq_dma_alloc_tx(&ch_tx->dma);
	ret = devm_request_irq(priv->dev, ch_tx->dma.irq, xrx200_dma_irq, 0,
			       "xrx200_net_tx", &priv->chan_tx);
	if (ret) {
		dev_err(priv->dev, "failed to request TX irq %d\n",
			ch_tx->dma.irq);
		goto tx_free;
	}

	return ret;

tx_free:
	ltq_dma_free(&ch_tx->dma);

rx_ring_free:
	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++) {
		if (priv->chan_rx.skb[i])
			skb_free_frag(priv->chan_rx.rx_buff[i]);
	}

rx_free:
	ltq_dma_free(&ch_rx->dma);
	return ret;
}

static void xrx200_hw_cleanup(struct xrx200_priv *priv)
{
	int i;

	ltq_dma_free(&priv->chan_tx.dma);
	ltq_dma_free(&priv->chan_rx.dma);

	/* free the allocated RX ring */
	for (i = 0; i < LTQ_DESC_NUM; i++)
		skb_free_frag(priv->chan_rx.rx_buff[i]);
}

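/* Probe: allocate the netdev, map the PMAC registers, fetch IRQs, clock and
 * MAC address, initialize DMA, configure the PMAC (IPG, status header, CRC
 * handling) and register the network device.
 */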
static int xrx200_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct xrx200_priv *priv;
	struct net_device *net_dev;
	int err;

	/* alloc the network device */
	net_dev = devm_alloc_etherdev(dev, sizeof(struct xrx200_priv));
	if (!net_dev)
		return -ENOMEM;

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;
	priv->dev = dev;

	net_dev->netdev_ops = &xrx200_netdev_ops;
	SET_NETDEV_DEV(net_dev, dev);
	net_dev->min_mtu = ETH_ZLEN;
	net_dev->max_mtu = XRX200_DMA_DATA_LEN - xrx200_max_frame_len(0);
	priv->rx_buf_size = xrx200_buffer_size(ETH_DATA_LEN);
	priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);

	/* load the memory ranges */
	priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(priv->pmac_reg))
		return PTR_ERR(priv->pmac_reg);

	priv->chan_rx.dma.irq = platform_get_irq_byname(pdev, "rx");
	if (priv->chan_rx.dma.irq < 0)
		return -ENOENT;
	priv->chan_tx.dma.irq = platform_get_irq_byname(pdev, "tx");
	if (priv->chan_tx.dma.irq < 0)
		return -ENOENT;

	/* get the clock */
	priv->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(priv->clk);
	}

	err = of_get_ethdev_address(np, net_dev);
	if (err)
		eth_hw_addr_random(net_dev);

	/* bring up the dma engine and IP core */
	err = xrx200_dma_init(priv);
	if (err)
		return err;

	/* enable clock gate */
	err = clk_prepare_enable(priv->clk);
	if (err)
		goto err_uninit_dma;

	/* set IPG to 12 */
	xrx200_pmac_mask(priv, PMAC_RX_IPG_MASK, 0xb, PMAC_RX_IPG);

	/* enable status header, enable CRC */
	xrx200_pmac_mask(priv, 0,
			 PMAC_HD_CTL_RST | PMAC_HD_CTL_AST | PMAC_HD_CTL_RXSH |
			 PMAC_HD_CTL_AS | PMAC_HD_CTL_AC | PMAC_HD_CTL_RC,
			 PMAC_HD_CTL);

	/* setup NAPI */
	netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx);
	netif_napi_add_tx(net_dev, &priv->chan_tx.napi,
			  xrx200_tx_housekeeping);

	platform_set_drvdata(pdev, priv);

	err = register_netdev(net_dev);
	if (err)
		goto err_unprepare_clk;

	return 0;

err_unprepare_clk:
	clk_disable_unprepare(priv->clk);

err_uninit_dma:
	xrx200_hw_cleanup(priv);

	return err;
}

static int xrx200_remove(struct platform_device *pdev)
{
	struct xrx200_priv *priv = platform_get_drvdata(pdev);
	struct net_device *net_dev = priv->net_dev;

	/* free stack related instances */
	netif_stop_queue(net_dev);
	netif_napi_del(&priv->chan_tx.napi);
	netif_napi_del(&priv->chan_rx.napi);

	/* remove the actual device */
	unregister_netdev(net_dev);

	/* release the clock */
	clk_disable_unprepare(priv->clk);

	/* shut down hardware */
	xrx200_hw_cleanup(priv);

	return 0;
}

static const struct of_device_id xrx200_match[] = {
	{ .compatible = "lantiq,xrx200-net" },
	{},
};
MODULE_DEVICE_TABLE(of, xrx200_match);

static struct platform_driver xrx200_driver = {
	.probe = xrx200_probe,
	.remove = xrx200_remove,
	.driver = {
		.name = "lantiq,xrx200-net",
		.of_match_table = xrx200_match,
	},
};

module_platform_driver(xrx200_driver);

MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Lantiq SoC XRX200 ethernet");
MODULE_LICENSE("GPL");