/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_net.h>

#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>

static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used.\n");

int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");

/**
 * cvm_oct_poll_queue - Workqueue for polling operations.
 */
struct workqueue_struct *cvm_oct_poll_queue;

/**
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/**
 * cvm_oct_device - Array of every ethernet device owned by this driver,
 *                  indexed by the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

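/**
 * cvm_oct_rx_refill_worker - periodically refill the packet buffer pool
 * @work: Work queue entry
 */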
static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * FPA 0 may have been drained, try to refill it if we need
	 * more than num_packet_buffers / 2, otherwise normal receive
	 * processing will refill it.  If it were drained, no packets
	 * could be received so cvm_oct_napi_poll would never be
	 * invoked to do the refill.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
				   &cvm_oct_rx_refill_work, HZ);
}

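/**
 * cvm_oct_periodic_worker - per-port periodic maintenance
 * @work: Work queue entry
 *
 * Runs the port's link poll callback (if any), refreshes the device
 * statistics, and reschedules itself one second later.
 */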
static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
						cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
				   &priv->port_periodic_work, HZ);
}

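/**
 * cvm_oct_configure_common_hw - setup hardware shared by all ports
 *
 * Fills the FPA packet, work queue entry and output buffer pools and
 * configures the RED drop thresholds from num_packet_buffers.
 */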
static void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 1024);

#ifdef __LITTLE_ENDIAN
	{
		union cvmx_ipd_ctl_status ipd_ctl_status;

		ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
		ipd_ctl_status.s.pkt_lend = 1;
		ipd_ctl_status.s.wqe_lend = 1;
		cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
	}
#endif

	cvmx_helper_setup_red(num_packet_buffers / 4, num_packet_buffers / 8);
}

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

	while (segments--) {
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      CVMX_FPA_PACKET_POOL_SIZE / 128);
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;

		/*
		 * The drop counter must be incremented atomically
		 * since the RX tasklet also increments it.
		 */
#ifdef CONFIG_64BIT
		atomic64_add(rx_status.dropped_packets,
			     (atomic64_t *)&priv->stats.rx_dropped);
#else
		atomic_add(rx_status.dropped_packets,
			   (atomic_t *)&priv->stats.rx_dropped);
#endif
	}

	return &priv->stats;
}

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	int vlan_bytes = 4;
#else
	int vlan_bytes = 0;
#endif

	/*
	 * Limit the MTU to make sure the ethernet frames are between
	 * 64 bytes and 65392 bytes.
	 */
	if ((new_mtu + 14 + 4 + vlan_bytes < 64)
	    || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
		pr_err("MTU must be between %d and %d.\n",
		       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
		return -EINVAL;
	}
	dev->mtu = new_mtu;

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + 14 + 4 + vlan_bytes;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
		    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;

			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = 64;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;

		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}

/**
 * cvm_oct_set_mac_filter - program the hardware with the MAC address
 * @dev:    The device in question.
 *
 * Returns Zero on success
 */
static int cvm_oct_set_mac_filter(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		uint8_t *ptr = dev->dev_addr;
		uint64_t mac = 0;

		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (uint64_t)ptr[i];

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[0]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[1]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[5]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}

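/**
 * cvm_oct_common_set_mac_address - set the MAC address for the device
 * @dev:    The device in question.
 * @addr:   Socket address containing the new MAC address.
 *
 * Returns Zero on success
 */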
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	int r = eth_mac_addr(dev, addr);

	if (r)
		return r;
	return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev:    Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	const u8 *mac = NULL;

	if (priv->of_node)
		mac = of_get_mac_address(priv->of_node);

	if (mac)
		ether_addr_copy(dev->dev_addr, mac);
	else
		eth_hw_addr_random(dev);

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1)
	    && (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

	if (priv->queue != -1)
		dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	dev->ethtool_ops = &cvm_oct_ethtool_ops;

	cvm_oct_set_mac_filter(dev);
	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	if (dev->netdev_ops->ndo_stop)
		dev->netdev_ops->ndo_stop(dev);

	return 0;
}

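/**
 * cvm_oct_common_uninit - undo cvm_oct_common_init
 * @dev:    Device being uninitialized
 *
 * Disconnects from the PHY, if one was attached.
 */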
void cvm_oct_common_uninit(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->phydev)
		phy_disconnect(priv->phydev);
}

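/**
 * cvm_oct_common_open - enable a port and bring the link up
 * @dev:       Device to open
 * @link_poll: Function used to poll the link state when no PHY is attached
 * @poll_now:  Poll the link state immediately when set
 *
 * Returns Zero on success, negative if the PHY setup fails
 */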
int cvm_oct_common_open(struct net_device *dev,
			void (*link_poll)(struct net_device *), bool poll_now)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
	cvmx_helper_link_info_t link_info;
	int rv;

	rv = cvm_oct_phy_setup_device(dev);
	if (rv)
		return rv;

	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmx_cfg.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);

	if (octeon_is_simulation())
		return 0;

	if (priv->phydev) {
		int r = phy_read_status(priv->phydev);

		if (r == 0 && priv->phydev->link == 0)
			netif_carrier_off(dev);
		cvm_oct_adjust_link(dev);
	} else {
		link_info = cvmx_helper_link_get(priv->port);
		if (!link_info.s.link_up)
			netif_carrier_off(dev);
		priv->poll = link_poll;
		if (poll_now)
			link_poll(dev);
	}

	return 0;
}

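/**
 * cvm_oct_link_poll - poll the link state of a port
 * @dev:    Device to poll
 *
 * Updates the carrier state when the link state reported by the
 * hardware differs from the cached value.
 */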
void cvm_oct_link_poll(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;

	link_info = cvmx_helper_link_get(priv->port);
	if (link_info.u64 == priv->link_info)
		return;

	link_info = cvmx_helper_link_autoconf(priv->port);
	priv->link_info = link_info.u64;

	if (link_info.s.link_up) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else if (netif_carrier_ok(dev)) {
		netif_carrier_off(dev);
	}
	cvm_oct_note_carrier(priv, link_info);
}

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init		= cvm_oct_xaui_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_xaui_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init		= cvm_oct_sgmii_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_open		= cvm_oct_sgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init		= cvm_oct_spi_init,
	.ndo_uninit		= cvm_oct_spi_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init		= cvm_oct_rgmii_init,
	.ndo_uninit		= cvm_oct_rgmii_uninit,
	.ndo_open		= cvm_oct_rgmii_open,
	.ndo_stop		= cvm_oct_common_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_start_xmit		= cvm_oct_xmit_pow,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

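/**
 * cvm_oct_of_get_child - find a child node by its "reg" property
 * @parent:  Device node whose children are searched
 * @reg_val: Value the child's "reg" property must match
 *
 * Returns the matching node or NULL.  The caller must call
 * of_node_put() on the result.
 */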
static struct device_node *cvm_oct_of_get_child(
				const struct device_node *parent, int reg_val)
{
	struct device_node *node = NULL;
	int size;
	const __be32 *addr;

	for (;;) {
		node = of_get_next_child(parent, node);
		if (!node)
			break;
		addr = of_get_property(node, "reg", &size);
		if (addr && (be32_to_cpu(*addr) == reg_val))
			break;
	}
	return node;
}

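/**
 * cvm_oct_node_for_port - find the device tree node for a port
 * @pip:       The PIP device node
 * @interface: Interface index
 * @port:      Port index within the interface
 *
 * Returns the port's node or NULL.
 */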
static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
						 int interface, int port)
{
	struct device_node *ni, *np;

	ni = cvm_oct_of_get_child(pip, interface);
	if (!ni)
		return NULL;

	np = cvm_oct_of_get_child(ni, port);
	of_node_put(ni);

	return np;
}

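/**
 * cvm_oct_probe - initialize the hardware and register the network devices
 * @pdev: Platform device for the PIP block
 *
 * Returns Zero on success, negative on failure.
 */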
static int cvm_oct_probe(struct platform_device *pdev)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;
	struct device_node *pip;

	octeon_mdiobus_force_mod_depencency();

	pip = pdev->dev.of_node;
	if (!pip) {
		pr_err("Error: No 'pip' in /aliases\n");
		return -EINVAL;
	}

	cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
	if (cvm_oct_poll_queue == NULL) {
		pr_err("octeon-ethernet: Cannot create workqueue");
		return -ENOMEM;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;

			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if (pow_send_group != -1) {
		struct net_device *dev;

		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		int port_index;

		for (port_index = 0,
		     port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port_index++, port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n",
				       port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);
			priv->netdev = dev;
			priv->of_node = cvm_oct_node_for_port(pip, interface,
							      port_index);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {

			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(uint32_t);
				queue_delayed_work(cvm_oct_poll_queue,
						   &priv->port_periodic_work, HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

	return 0;
}

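/**
 * cvm_oct_remove - shut down the hardware and free all resources
 * @pdev: Platform device being removed
 *
 * Returns Zero
 */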
static int cvm_oct_remove(struct platform_device *pdev)
{
	int port;

	/* Disable POW interrupt */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group), 0);
	else
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

	cvmx_ipd_disable();

	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	destroy_workqueue(cvm_oct_poll_queue);

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
	return 0;
}

static const struct of_device_id cvm_oct_match[] = {
	{
		.compatible = "cavium,octeon-3860-pip",
	},
	{},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
	.probe		= cvm_oct_probe,
	.remove		= cvm_oct_remove,
	.driver		= {
		.name	= KBUILD_MODNAME,
		.of_match_table = cvm_oct_match,
	},
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");