Staging: octeon: constify of_device_id array
[linux-2.6-block.git] / drivers / staging / octeon / ethernet.c
CommitLineData
80ff0fd3
DD
1/**********************************************************************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2007 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26**********************************************************************/
df9244c5 27#include <linux/platform_device.h>
80ff0fd3 28#include <linux/kernel.h>
80ff0fd3
DD
29#include <linux/module.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
f6ed1b3b 32#include <linux/phy.h>
5a0e3ad6 33#include <linux/slab.h>
dc890df0 34#include <linux/interrupt.h>
df9244c5 35#include <linux/of_net.h>
80ff0fd3
DD
36
37#include <net/dst.h>
38
39#include <asm/octeon/octeon.h>
40
41#include "ethernet-defines.h"
a620c163 42#include "octeon-ethernet.h"
80ff0fd3
DD
43#include "ethernet-mem.h"
44#include "ethernet-rx.h"
45#include "ethernet-tx.h"
f696a108 46#include "ethernet-mdio.h"
80ff0fd3 47#include "ethernet-util.h"
80ff0fd3 48
af866496
DD
49#include <asm/octeon/cvmx-pip.h>
50#include <asm/octeon/cvmx-pko.h>
51#include <asm/octeon/cvmx-fau.h>
52#include <asm/octeon/cvmx-ipd.h>
53#include <asm/octeon/cvmx-helper.h>
80ff0fd3 54
af866496
DD
55#include <asm/octeon/cvmx-gmxx-defs.h>
56#include <asm/octeon/cvmx-smix-defs.h>
80ff0fd3 57
/*
 * Module parameters. The POW (Packet Order/Work unit) group parameters
 * control how received work is routed between hardware and software.
 * pow_receive_group/pow_send_group/always_use_pow/pow_send_list are
 * non-static — presumably referenced from other files of this driver
 * (TODO: confirm against ethernet-rx.c/ethernet-tx.c).
 */
static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used.\n");

int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
13c5939e 104
/**
 * cvm_oct_poll_queue - Workqueue for polling operations.
 */
struct workqueue_struct *cvm_oct_poll_queue;

/**
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/**
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

/*
 * TX cleanup poll interval; set once in cvm_oct_probe() to the number
 * of core clock cycles corresponding to 150 us.
 */
u64 cvm_oct_tx_poll_interval;
124
f8c26486
DD
/* Forward declaration so the delayed work can be declared before the body. */
static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

/*
 * cvm_oct_rx_refill_worker - periodic safety net that tops up FPA pool 0.
 * Re-queues itself every HZ jiffies until cvm_oct_poll_queue_stopping is set.
 */
static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * FPA 0 may have been drained, try to refill it if we need
	 * more than num_packet_buffers / 2, otherwise normal receive
	 * processing will refill it. If it were drained, no packets
	 * could be received so cvm_oct_napi_poll would never be
	 * invoked to do the refill.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	/* Reschedule unless the driver is being torn down. */
	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
				   &cvm_oct_rx_refill_work, HZ);
}
143
/*
 * cvm_oct_periodic_worker - per-port periodic maintenance.
 *
 * Runs the port's optional poll callback (e.g. link handling) and
 * refreshes its statistics, then re-queues itself every HZ jiffies
 * until cvm_oct_poll_queue_stopping is set.
 */
static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	/* Pull the hardware counters into priv->stats periodically. */
	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
	    cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
				   &priv->port_periodic_work, HZ);
}
80ff0fd3 160
/*
 * cvm_oct_configure_common_hw - one-time setup of hardware shared by
 * all ports: enables the FPA and fills the packet, work-queue-entry
 * and (if distinct) output buffer pools, then optionally configures
 * RED congestion avoidance.
 */
static void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);

	/* RED thresholds: pass at 1/4 of the pool, drop at 1/8. */
	if (USE_RED)
		cvmx_helper_setup_red(num_packet_buffers / 4,
				      num_packet_buffers / 8);
}
178
179/**
ec977c5b
DD
180 * cvm_oct_free_work- Free a work queue entry
181 *
182 * @work_queue_entry: Work queue entry to free
80ff0fd3 183 *
80ff0fd3
DD
184 * Returns Zero on success, Negative on failure.
185 */
186int cvm_oct_free_work(void *work_queue_entry)
187{
188 cvmx_wqe_t *work = work_queue_entry;
189
190 int segments = work->word2.s.bufs;
191 union cvmx_buf_ptr segment_ptr = work->packet_ptr;
192
193 while (segments--) {
194 union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
195 cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
196 if (unlikely(!segment_ptr.s.i))
197 cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
198 segment_ptr.s.pool,
199 DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE /
200 128));
201 segment_ptr = next_ptr;
202 }
203 cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));
204
205 return 0;
206}
207EXPORT_SYMBOL(cvm_oct_free_work);
208
/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev: Device to get the statistics from
 *
 * Reads the hardware RX/TX counters and accumulates them into the
 * cached priv->stats (the values read are deltas — presumably the
 * second argument of the cvmx_*_get_port_status() calls clears the
 * hardware counters; TODO confirm against the CVMX API docs).
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;

		/*
		 * The drop counter must be incremented atomically
		 * since the RX tasklet also increments it.
		 */
#ifdef CONFIG_64BIT
		atomic64_add(rx_status.dropped_packets,
			     (atomic64_t *)&priv->stats.rx_dropped);
#else
		atomic_add(rx_status.dropped_packets,
			   (atomic_t *)&priv->stats.rx_dropped);
#endif
	}

	return &priv->stats;
}
254
/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Validates the requested MTU against the hardware frame-size limits
 * and programs the GMX/PIP frame-length registers accordingly.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	int vlan_bytes = 4;
#else
	int vlan_bytes = 0;
#endif

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 65535 bytes.
	 * (14 = ethernet header, 4 = FCS, per the comment below.)
	 */
	if ((new_mtu + 14 + 4 + vlan_bytes < 64)
	    || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
		pr_err("MTU must be between %d and %d.\n",
		       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
		return -EINVAL;
	}
	dev->mtu = new_mtu;

	/* SPI interfaces and interfaces >= 2 have no GMX frame registers. */
	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + 14 + 4 + vlan_bytes;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
		    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller the 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = 64;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}
319
/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev: Device to work on
 *
 * Programs the GMX address-filter CTL/CAM registers to match the
 * device's IFF_ALLMULTI/IFF_PROMISC flags and multicast list state.
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	/* SPI interfaces and interfaces >= 2 have no GMX filter registers. */
	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		/*
		 * Clear bit 0 of PRTX_CFG while updating the filter —
		 * NOTE(review): presumably the port-enable bit; confirm
		 * against the GMX register documentation.
		 */
		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		/* Restore the original PRTX_CFG value. */
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}
375
/**
 * cvm_oct_set_mac_filter - program the hardware with dev->dev_addr
 * @dev: The device in question.
 *
 * Writes the device's current MAC address into the GMX source-MAC and
 * address-CAM registers, then reapplies the multicast filter settings.
 * (Header previously documented cvm_oct_common_set_mac_address; fixed.)
 *
 * Returns Zero on success
 */
static int cvm_oct_set_mac_filter(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	/* SPI interfaces and interfaces >= 2 have no GMX MAC registers. */
	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		uint8_t *ptr = dev->dev_addr;
		uint64_t mac = 0;

		/* Pack the 6 address bytes big-endian into one 64-bit word. */
		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (uint64_t)ptr[i];

		/* Clear bit 0 of PRTX_CFG while updating, restore after. */
		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[5]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}
424
df9244c5
DD
/*
 * cvm_oct_common_set_mac_address - ndo_set_mac_address handler.
 * Updates dev->dev_addr via eth_mac_addr() and, on success, pushes the
 * new address into the hardware filter. Returns zero on success.
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int ret = eth_mac_addr(dev, addr);

	return ret ? ret : cvm_oct_set_mac_filter(dev);
}
433
/**
 * cvm_oct_common_init - per network device initialization
 * @dev: Device to initialize
 *
 * Sets the MAC address (from the device tree when available, random
 * otherwise), configures offload features, programs the hardware MAC
 * filter and MTU, and zeroes the statistics.
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	const u8 *mac = NULL;

	if (priv->of_node)
		mac = of_get_mac_address(priv->of_node);

	if (mac)
		ether_addr_copy(dev->dev_addr, mac);
	else
		eth_hw_addr_random(dev);

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1)
	    && (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	/* Hardware checksum/SG are only usable on a real PKO queue. */
	if (priv->queue != -1) {
		dev->features |= NETIF_F_SG;
		if (USE_HW_TCPUDP_CHECKSUM)
			dev->features |= NETIF_F_IP_CSUM;
	}

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	dev->ethtool_ops = &cvm_oct_ethtool_ops;

	cvm_oct_set_mac_filter(dev);
	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	return 0;
}
483
/*
 * cvm_oct_common_uninit - per network device teardown.
 * Disconnects the PHY if one was attached during init.
 */
void cvm_oct_common_uninit(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->phydev)
		phy_disconnect(priv->phydev);
}
491
/* Ops for NPI ports (also assigned to LOOP ports in cvm_oct_probe). */
static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_uninit = cvm_oct_common_uninit,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

/* Ops for XAUI ports. */
static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init = cvm_oct_xaui_init,
	.ndo_uninit = cvm_oct_xaui_uninit,
	.ndo_open = cvm_oct_xaui_open,
	.ndo_stop = cvm_oct_xaui_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

/* Ops for SGMII ports. */
static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init = cvm_oct_sgmii_init,
	.ndo_uninit = cvm_oct_sgmii_uninit,
	.ndo_open = cvm_oct_sgmii_open,
	.ndo_stop = cvm_oct_sgmii_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

/* Ops for SPI ports (no open/stop). */
static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init = cvm_oct_spi_init,
	.ndo_uninit = cvm_oct_spi_uninit,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

/* Ops for RGMII and GMII ports. */
static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init = cvm_oct_rgmii_init,
	.ndo_uninit = cvm_oct_rgmii_uninit,
	.ndo_open = cvm_oct_rgmii_open,
	.ndo_stop = cvm_oct_rgmii_stop,
	.ndo_start_xmit = cvm_oct_xmit,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

/* Ops for the virtual POW-only device (transmit goes to a POW group). */
static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init = cvm_oct_common_init,
	.ndo_start_xmit = cvm_oct_xmit_pow,
	.ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address = cvm_oct_common_set_mac_address,
	.ndo_do_ioctl = cvm_oct_ioctl,
	.ndo_change_mtu = cvm_oct_common_change_mtu,
	.ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cvm_oct_poll_controller,
#endif
};
575
f6ed1b3b
DD
576extern void octeon_mdiobus_force_mod_depencency(void);
577
b186410d
NH
578static struct device_node *cvm_oct_of_get_child(
579 const struct device_node *parent, int reg_val)
df9244c5
DD
580{
581 struct device_node *node = NULL;
582 int size;
583 const __be32 *addr;
584
585 for (;;) {
586 node = of_get_next_child(parent, node);
587 if (!node)
588 break;
589 addr = of_get_property(node, "reg", &size);
590 if (addr && (be32_to_cpu(*addr) == reg_val))
591 break;
592 }
593 return node;
594}
595
4f240906 596static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
b186410d 597 int interface, int port)
df9244c5
DD
598{
599 struct device_node *ni, *np;
600
601 ni = cvm_oct_of_get_child(pip, interface);
602 if (!ni)
603 return NULL;
604
605 np = cvm_oct_of_get_child(ni, port);
606 of_node_put(ni);
607
608 return np;
609}
610
/**
 * cvm_oct_probe - platform driver probe: bring up the whole driver.
 * @pdev: the "pip" platform device from the device tree.
 *
 * Configures the shared hardware (FPA, packet I/O, POW group routing),
 * creates the polling workqueue, then allocates and registers one
 * net_device per physical port (plus an optional POW-only device),
 * and finally starts the RX/TX machinery and the periodic workers.
 *
 * Returns Zero on success, negative errno on failure.
 */
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;

	octeon_mdiobus_force_mod_depencency();
	pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
	if (cvm_oct_poll_queue == NULL) {
		pr_err("octeon-ethernet: Cannot create workqueue");
		return -ENOMEM;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	/* Optionally create the virtual pow%d device for POW-only access. */
	if ((pow_send_group != -1)) {
		struct net_device *dev;

		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	/* Create one net_device per physical port on every interface. */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			/* Reserve one 32-bit FAU counter per PKO queue. */
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			/* Pick netdev ops and name template by interface mode. */
			switch (priv->imode) {

			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			if (!dev->netdev_ops) {
				/* Unsupported mode — drop the device. */
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(uint32_t);
				queue_delayed_work(cvm_oct_poll_queue,
						   &priv->port_periodic_work,
						   HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

	return 0;
}
809
/**
 * cvm_oct_remove - platform driver remove: tear everything down.
 * @pdev: the platform device being removed.
 *
 * Teardown order matters: stop hardware input and interrupts first,
 * then stop the self-rescheduling workers (via the stopping flag),
 * then unregister/free the net_devices, and finally drain the
 * hardware pools.
 *
 * Returns Zero.
 */
static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	/* Disable POW interrupt */
	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

	cvmx_ipd_disable();

	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

	/* Prevent the refill/periodic workers from re-queueing themselves. */
	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	destroy_workqueue(cvm_oct_poll_queue);

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}
861
/* Device-tree match table: binds this driver to the PIP node. */
static const struct of_device_id cvm_oct_match[] = {
	{
		.compatible = "cavium,octeon-3860-pip",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
	.probe = cvm_oct_probe,
	.remove = cvm_oct_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = cvm_oct_match,
	},
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");