/**********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 **********************************************************************/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>

#if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \
	&& CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
int num_packet_buffers = CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS;
#else
int num_packet_buffers = 1024;
#endif
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
	"\tNumber of packet buffers to allocate and store in the\n"
	"\tFPA. By default, 1024 packet buffers are used unless\n"
	"\tCONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS is defined.");

int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
	"\tPOW group to receive packets from. All ethernet hardware\n"
	"\twill be configured to send incoming packets to this POW\n"
	"\tgroup. Also any other software can submit packets to this\n"
	"\tgroup for the kernel to process.");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
	"\tPOW group to send packets to other software on. This\n"
	"\tcontrols the creation of the virtual device pow0.\n"
	"\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
	"\tWhen set, always send to the pow group. This will cause\n"
	"\tpackets sent to real ethernet devices to be sent to the\n"
	"\tPOW group instead of the hardware. Unless some other\n"
	"\tapplication changes the config, packets will still be\n"
	"\treceived from the low level hardware. Use this option\n"
	"\tto allow a CVMX app to intercept all packets from the\n"
	"\tlinux kernel. You must specify pow_send_group along with\n"
	"\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
	"\tComma separated list of ethernet devices that should use the\n"
	"\tPOW for transmit instead of the actual ethernet hardware. This\n"
	"\tis a per port version of always_use_pow. always_use_pow takes\n"
	"\tprecedence over this list. For example, setting this to\n"
	"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
	"\tusing the pow_send_group.");

int max_rx_cpus = -1;
module_param(max_rx_cpus, int, 0444);
MODULE_PARM_DESC(max_rx_cpus, "\n"
	"\t\tThe maximum number of CPUs to use for packet reception.\n"
	"\t\tUse -1 to use all available CPUs.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");

/*
 * The offset from mac_addr_base that should be used for the next port
 * that is configured. By convention, if any mgmt ports exist on the
 * chip, they get the first mac addresses.  The ports controlled by
 * this driver are numbered sequentially following any mgmt addresses
 * that may exist.
 */
static unsigned int cvm_oct_mac_addr_offset;

/**
 * cvm_oct_poll_queue - Workqueue for polling operations.
 */
struct workqueue_struct *cvm_oct_poll_queue;

/**
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/**
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

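/*
 * Interval between TX cleanup polls, expressed in core clock cycles
 * (set in cvm_oct_init_module()).
 */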
u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
	/*
	 * FPA 0 may have been drained, try to refill it if we need
	 * more than num_packet_buffers / 2, otherwise normal receive
	 * processing will refill it.  If it were drained, no packets
	 * could be received so cvm_oct_napi_poll would never be
	 * invoked to do the refill.
	 */
	cvm_oct_rx_refill_pool(num_packet_buffers / 2);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue,
				   &cvm_oct_rx_refill_work, HZ);
}

static void cvm_oct_periodic_worker(struct work_struct *work)
{
	struct octeon_ethernet *priv = container_of(work,
						    struct octeon_ethernet,
						    port_periodic_work.work);

	if (priv->poll)
		priv->poll(cvm_oct_device[priv->port]);

	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(cvm_oct_device[priv->port]);

	if (!atomic_read(&cvm_oct_poll_queue_stopping))
		queue_delayed_work(cvm_oct_poll_queue, &priv->port_periodic_work, HZ);
}

static __init void cvm_oct_configure_common_hw(void)
{
	/* Setup the FPA */
	cvmx_fpa_enable();
	cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			     num_packet_buffers);
	cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			     num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);

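	/*
	 * cvmx_helper_setup_red() arms Random Early Discard so the
	 * hardware starts dropping incoming packets as the free buffer
	 * pool runs low; the two arguments are the pass and drop
	 * thresholds (here 1/4 and 1/8 of the packet buffer pool).
	 */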
	if (USE_RED)
		cvmx_helper_setup_red(num_packet_buffers / 4,
				      num_packet_buffers / 8);

}

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
	cvmx_wqe_t *work = work_queue_entry;

	int segments = work->word2.s.bufs;
	union cvmx_buf_ptr segment_ptr = work->packet_ptr;

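	/*
	 * Walk the chained buffers that make up the packet.  The
	 * pointer to the next buffer is stored in the 8 bytes just
	 * before the data each segment_ptr.s.addr points at, and a
	 * set "i" bit marks a buffer that must not be returned to the
	 * FPA pool.
	 */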
	while (segments--) {
		union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
			cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
		if (unlikely(!segment_ptr.s.i))
			cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
				      segment_ptr.s.pool,
				      DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE /
						     128));
		segment_ptr = next_ptr;
	}
	cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));

	return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
	cvmx_pip_port_status_t rx_status;
	cvmx_pko_port_status_t tx_status;
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (octeon_is_simulation()) {
			/* The simulator doesn't support statistics */
			memset(&rx_status, 0, sizeof(rx_status));
			memset(&tx_status, 0, sizeof(tx_status));
		} else {
			cvmx_pip_get_port_status(priv->port, 1, &rx_status);
			cvmx_pko_get_port_status(priv->port, 1, &tx_status);
		}

		priv->stats.rx_packets += rx_status.inb_packets;
		priv->stats.tx_packets += tx_status.packets;
		priv->stats.rx_bytes += rx_status.inb_octets;
		priv->stats.tx_bytes += tx_status.octets;
		priv->stats.multicast += rx_status.multicast_packets;
		priv->stats.rx_crc_errors += rx_status.inb_errors;
		priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;

		/*
		 * The drop counter must be incremented atomically
		 * since the RX tasklet also increments it.
		 */
#ifdef CONFIG_64BIT
		atomic64_add(rx_status.dropped_packets,
			     (atomic64_t *)&priv->stats.rx_dropped);
#else
		atomic_add(rx_status.dropped_packets,
			   (atomic_t *)&priv->stats.rx_dropped);
#endif
	}

	return &priv->stats;
}

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	int vlan_bytes = 4;
#else
	int vlan_bytes = 0;
#endif

	/*
	 * Limit the MTU to make sure the ethernet packets are between
	 * 64 bytes and 65392 bytes.
	 */
	if ((new_mtu + 14 + 4 + vlan_bytes < 64)
	    || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
		pr_err("MTU must be between %d and %d.\n",
		       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
		return -EINVAL;
	}
	dev->mtu = new_mtu;

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		/* Add ethernet header and FCS, and VLAN if configured. */
		int max_packet = new_mtu + 14 + 4 + vlan_bytes;

		if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
		    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
			/* Signal errors on packets larger than the MTU */
			cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
				       max_packet);
		} else {
			/*
			 * Set the hardware to truncate packets larger
			 * than the MTU and smaller than 64 bytes.
			 */
			union cvmx_pip_frm_len_chkx frm_len_chk;
			frm_len_chk.u64 = 0;
			frm_len_chk.s.minlen = 64;
			frm_len_chk.s.maxlen = max_packet;
			cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
				       frm_len_chk.u64);
		}
		/*
		 * Set the hardware to truncate packets larger than
		 * the MTU. The jabber register must be set to a
		 * multiple of 8 bytes, so round up.
		 */
		cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
			       (max_packet + 7) & ~7u);
	}
	return 0;
}

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

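	/*
	 * Only GMX-backed ports (interfaces 0 and 1 when not running
	 * in SPI mode) have an address filter CAM to program; other
	 * port types are left untouched.
	 */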
	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		union cvmx_gmxx_rxx_adr_ctl control;
		control.u64 = 0;
		control.s.bcst = 1;	/* Allow broadcast MAC addresses */

		if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
		    (dev->flags & IFF_PROMISC))
			/* Force accept multicast packets */
			control.s.mcst = 2;
		else
			/* Force reject multicast packets */
			control.s.mcst = 1;

		if (dev->flags & IFF_PROMISC)
			/*
			 * Reject matches if promisc. Since CAM is
			 * shut off, should accept everything.
			 */
			control.s.cam_mode = 0;
		else
			/* Filter packets based on the CAM */
			control.s.cam_mode = 1;

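		/*
		 * Bit 0 of GMXX_PRTX_CFG is the port enable; clear it
		 * while the address filter is being rewritten, then
		 * restore the saved configuration afterwards.
		 */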
		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
			       control.u64);
		if (dev->flags & IFF_PROMISC)
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 0);
		else
			cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
				       (index, interface), 1);

		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
}

/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:  The device in question.
 * @addr: Address structure to change it to.
 *
 * Returns Zero on success
 */
static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	int interface = INTERFACE(priv->port);
	int index = INDEX(priv->port);

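	/*
	 * addr points at a struct sockaddr, so skip the two-byte
	 * sa_family field and copy the six bytes of sa_data holding
	 * the MAC address.
	 */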
	memcpy(dev->dev_addr, addr + 2, 6);

	if ((interface < 2)
	    && (cvmx_helper_interface_get_mode(interface) !=
		CVMX_HELPER_INTERFACE_MODE_SPI)) {
		int i;
		uint8_t *ptr = addr;
		uint64_t mac = 0;
		for (i = 0; i < 6; i++)
			mac = (mac << 8) | (uint64_t)(ptr[i + 2]);

		gmx_cfg.u64 =
		    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64 & ~1ull);

		cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
			       ptr[2]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
			       ptr[3]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
			       ptr[4]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
			       ptr[5]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
			       ptr[6]);
		cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
			       ptr[7]);
		cvm_oct_common_set_multicast_list(dev);
		cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
			       gmx_cfg.u64);
	}
	return 0;
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev: Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	struct sockaddr sa;
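	/* Assemble the 48-bit base MAC address supplied by the bootloader. */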
	u64 mac = ((u64)(octeon_bootinfo->mac_addr_base[0] & 0xff) << 40) |
		((u64)(octeon_bootinfo->mac_addr_base[1] & 0xff) << 32) |
		((u64)(octeon_bootinfo->mac_addr_base[2] & 0xff) << 24) |
		((u64)(octeon_bootinfo->mac_addr_base[3] & 0xff) << 16) |
		((u64)(octeon_bootinfo->mac_addr_base[4] & 0xff) << 8) |
		(u64)(octeon_bootinfo->mac_addr_base[5] & 0xff);

	mac += cvm_oct_mac_addr_offset;
	sa.sa_data[0] = (mac >> 40) & 0xff;
	sa.sa_data[1] = (mac >> 32) & 0xff;
	sa.sa_data[2] = (mac >> 24) & 0xff;
	sa.sa_data[3] = (mac >> 16) & 0xff;
	sa.sa_data[4] = (mac >> 8) & 0xff;
	sa.sa_data[5] = mac & 0xff;

	if (cvm_oct_mac_addr_offset >= octeon_bootinfo->mac_addr_count)
		printk(KERN_DEBUG "%s: Using MAC outside of the assigned range:"
		       " %pM\n", dev->name, sa.sa_data);
	cvm_oct_mac_addr_offset++;

	/*
	 * Force the interface to use the POW send if always_use_pow
	 * was specified or it is in the pow send list.
	 */
	if ((pow_send_group != -1)
	    && (always_use_pow || strstr(pow_send_list, dev->name)))
		priv->queue = -1;

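	/*
	 * A queue of -1 means this device transmits via the POW
	 * (software) path instead of a hardware PKO queue, so the
	 * scatter-gather and checksum offloads below only apply when
	 * a real PKO queue is assigned.
	 */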
	if (priv->queue != -1) {
		dev->features |= NETIF_F_SG;
		if (USE_HW_TCPUDP_CHECKSUM)
			dev->features |= NETIF_F_IP_CSUM;
	}

	/* We do our own locking, Linux doesn't need to */
	dev->features |= NETIF_F_LLTX;
	SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);

	cvm_oct_phy_setup_device(dev);
	dev->netdev_ops->ndo_set_mac_address(dev, &sa);
	dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

	/*
	 * Zero out stats for port so we won't mistakenly show
	 * counters from the bootloader.
	 */
	memset(dev->netdev_ops->ndo_get_stats(dev), 0,
	       sizeof(struct net_device_stats));

	return 0;
}

void cvm_oct_common_uninit(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);

	if (priv->phydev)
		phy_disconnect(priv->phydev);
}

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_uninit		= cvm_oct_common_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
	.ndo_init		= cvm_oct_xaui_init,
	.ndo_uninit		= cvm_oct_xaui_uninit,
	.ndo_open		= cvm_oct_xaui_open,
	.ndo_stop		= cvm_oct_xaui_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
	.ndo_init		= cvm_oct_sgmii_init,
	.ndo_uninit		= cvm_oct_sgmii_uninit,
	.ndo_open		= cvm_oct_sgmii_open,
	.ndo_stop		= cvm_oct_sgmii_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
static const struct net_device_ops cvm_oct_spi_netdev_ops = {
	.ndo_init		= cvm_oct_spi_init,
	.ndo_uninit		= cvm_oct_spi_uninit,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
	.ndo_init		= cvm_oct_rgmii_init,
	.ndo_uninit		= cvm_oct_rgmii_uninit,
	.ndo_open		= cvm_oct_rgmii_open,
	.ndo_stop		= cvm_oct_rgmii_stop,
	.ndo_start_xmit		= cvm_oct_xmit,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};
static const struct net_device_ops cvm_oct_pow_netdev_ops = {
	.ndo_init		= cvm_oct_common_init,
	.ndo_start_xmit		= cvm_oct_xmit_pow,
	.ndo_set_rx_mode	= cvm_oct_common_set_multicast_list,
	.ndo_set_mac_address	= cvm_oct_common_set_mac_address,
	.ndo_do_ioctl		= cvm_oct_ioctl,
	.ndo_change_mtu		= cvm_oct_common_change_mtu,
	.ndo_get_stats		= cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cvm_oct_poll_controller,
#endif
};

extern void octeon_mdiobus_force_mod_depencency(void);

static int __init cvm_oct_init_module(void)
{
	int num_interfaces;
	int interface;
	int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
	int qos;

	octeon_mdiobus_force_mod_depencency();
	pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);

	if (OCTEON_IS_MODEL(OCTEON_CN52XX))
		cvm_oct_mac_addr_offset = 2; /* First two are the mgmt ports. */
	else if (OCTEON_IS_MODEL(OCTEON_CN56XX))
		cvm_oct_mac_addr_offset = 1; /* First one is the mgmt port. */
	else
		cvm_oct_mac_addr_offset = 0;

	cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
	if (cvm_oct_poll_queue == NULL) {
		pr_err("octeon-ethernet: Cannot create workqueue\n");
		return -ENOMEM;
	}

	cvm_oct_configure_common_hw();

	cvmx_helper_initialize_packet_io_global();

	/* Change the input group for all ports before input is enabled */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			union cvmx_pip_prt_tagx pip_prt_tagx;
			pip_prt_tagx.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
			pip_prt_tagx.s.grp = pow_receive_group;
			cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
				       pip_prt_tagx.u64);
		}
	}

	cvmx_helper_ipd_and_packet_input_enable();

	memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

	/*
	 * Initialize the FAU used for counting packet buffers that
	 * need to be freed.
	 */
	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	/* Initialize the FAU used for counting tx SKBs that need to be freed */
	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

	if ((pow_send_group != -1)) {
		struct net_device *dev;
		pr_info("\tConfiguring device for POW only access\n");
		dev = alloc_etherdev(sizeof(struct octeon_ethernet));
		if (dev) {
			/* Initialize the device private structure. */
			struct octeon_ethernet *priv = netdev_priv(dev);

			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
			priv->queue = -1;
			strcpy(dev->name, "pow%d");
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);

			if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device for POW\n");
				free_netdev(dev);
			} else {
				cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
				pr_info("%s: POW send group %d, receive group %d\n",
					dev->name, pow_send_group,
					pow_receive_group);
			}
		} else {
			pr_err("Failed to allocate ethernet device for POW\n");
		}
	}

	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		cvmx_helper_interface_mode_t imode =
		    cvmx_helper_interface_get_mode(interface);
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;

		for (port = cvmx_helper_get_ipd_port(interface, 0);
		     port < cvmx_helper_get_ipd_port(interface, num_ports);
		     port++) {
			struct octeon_ethernet *priv;
			struct net_device *dev =
			    alloc_etherdev(sizeof(struct octeon_ethernet));
			if (!dev) {
				pr_err("Failed to allocate ethernet device for port %d\n", port);
				continue;
			}

			/* Initialize the device private structure. */
			priv = netdev_priv(dev);

			INIT_DELAYED_WORK(&priv->port_periodic_work,
					  cvm_oct_periodic_worker);
			priv->imode = imode;
			priv->port = port;
			priv->queue = cvmx_pko_get_base_queue(priv->port);
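			/*
			 * Each PKO queue for this port gets its own
			 * 32-bit FAU counter, carved out downward from
			 * FAU_NUM_PACKET_BUFFERS_TO_FREE; the running
			 * "fau" cursor is only advanced once the
			 * device registers successfully.
			 */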
			priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
			for (qos = 0; qos < 16; qos++)
				skb_queue_head_init(&priv->tx_free_list[qos]);
			for (qos = 0; qos < cvmx_pko_get_num_queues(port);
			     qos++)
				cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

			switch (priv->imode) {

			/* These types don't support ports to IPD/PKO */
			case CVMX_HELPER_INTERFACE_MODE_DISABLED:
			case CVMX_HELPER_INTERFACE_MODE_PCIE:
			case CVMX_HELPER_INTERFACE_MODE_PICMG:
				break;

			case CVMX_HELPER_INTERFACE_MODE_NPI:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "npi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_XAUI:
				dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
				strcpy(dev->name, "xaui%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_LOOP:
				dev->netdev_ops = &cvm_oct_npi_netdev_ops;
				strcpy(dev->name, "loop%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SGMII:
				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_SPI:
				dev->netdev_ops = &cvm_oct_spi_netdev_ops;
				strcpy(dev->name, "spi%d");
				break;

			case CVMX_HELPER_INTERFACE_MODE_RGMII:
			case CVMX_HELPER_INTERFACE_MODE_GMII:
				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
				strcpy(dev->name, "eth%d");
				break;
			}

			if (!dev->netdev_ops) {
				free_netdev(dev);
			} else if (register_netdev(dev) < 0) {
				pr_err("Failed to register ethernet device "
				       "for interface %d, port %d\n",
				       interface, priv->port);
				free_netdev(dev);
			} else {
				cvm_oct_device[priv->port] = dev;
				fau -=
				    cvmx_pko_get_num_queues(priv->port) *
				    sizeof(uint32_t);
				queue_delayed_work(cvm_oct_poll_queue,
						   &priv->port_periodic_work, HZ);
			}
		}
	}

	cvm_oct_tx_initialize();
	cvm_oct_rx_initialize();

	/*
	 * 150 uS: about 10 1500-byte packets at 1GE.
	 */
	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

	queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

	return 0;
}

static void __exit cvm_oct_cleanup_module(void)
{
	int port;

	/* Disable POW interrupt */
	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

	cvmx_ipd_disable();

	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

	atomic_inc_return(&cvm_oct_poll_queue_stopping);
	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

	cvm_oct_rx_shutdown();
	cvm_oct_tx_shutdown();

	cvmx_pko_disable();

	/* Free the ethernet devices */
	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);
			cancel_delayed_work_sync(&priv->port_periodic_work);

			cvm_oct_tx_shutdown_dev(dev);
			unregister_netdev(dev);
			free_netdev(dev);
			cvm_oct_device[port] = NULL;
		}
	}

	destroy_workqueue(cvm_oct_poll_queue);

	cvmx_pko_shutdown();

	cvmx_ipd_free_ptr();

	/* Free the HW pools */
	cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
			      num_packet_buffers);
	cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
			      num_packet_buffers);
	if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
		cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");
module_init(cvm_oct_init_module);
module_exit(cvm_oct_cleanup_module);