/**********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2007 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 **********************************************************************/
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_net.h>

#include <net/dst.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "ethernet-tx.h"
#include "ethernet-mdio.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-ipd.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>
#include <asm/octeon/cvmx-smix-defs.h>
static int num_packet_buffers = 1024;
module_param(num_packet_buffers, int, 0444);
MODULE_PARM_DESC(num_packet_buffers, "\n"
        "\tNumber of packet buffers to allocate and store in the\n"
        "\tFPA. By default, 1024 packet buffers are used.\n");

int pow_receive_group = 15;
module_param(pow_receive_group, int, 0444);
MODULE_PARM_DESC(pow_receive_group, "\n"
        "\tPOW group to receive packets from. All ethernet hardware\n"
        "\twill be configured to send incoming packets to this POW\n"
        "\tgroup. Also any other software can submit packets to this\n"
        "\tgroup for the kernel to process.");

int pow_send_group = -1;
module_param(pow_send_group, int, 0644);
MODULE_PARM_DESC(pow_send_group, "\n"
        "\tPOW group to send packets to other software on. This\n"
        "\tcontrols the creation of the virtual device pow0.\n"
        "\talways_use_pow also depends on this value.");

int always_use_pow;
module_param(always_use_pow, int, 0444);
MODULE_PARM_DESC(always_use_pow, "\n"
        "\tWhen set, always send to the pow group. This will cause\n"
        "\tpackets sent to real ethernet devices to be sent to the\n"
        "\tPOW group instead of the hardware. Unless some other\n"
        "\tapplication changes the config, packets will still be\n"
        "\treceived from the low level hardware. Use this option\n"
        "\tto allow a CVMX app to intercept all packets from the\n"
        "\tlinux kernel. You must specify pow_send_group along with\n"
        "\tthis option.");

char pow_send_list[128] = "";
module_param_string(pow_send_list, pow_send_list, sizeof(pow_send_list), 0444);
MODULE_PARM_DESC(pow_send_list, "\n"
        "\tComma separated list of ethernet devices that should use the\n"
        "\tPOW for transmit instead of the actual ethernet hardware. This\n"
        "\tis a per port version of always_use_pow. always_use_pow takes\n"
        "\tprecedence over this list. For example, setting this to\n"
        "\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
        "\tusing the pow_send_group.");

int max_rx_cpus = -1;
module_param(max_rx_cpus, int, 0444);
MODULE_PARM_DESC(max_rx_cpus, "\n"
        "\t\tThe maximum number of CPUs to use for packet reception.\n"
        "\t\tUse -1 to use all available CPUs.");

int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
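
/*
 * Example usage (the values here are illustrative, not recommendations):
 * the 0444 parameters above can only be set at load time, e.g.
 * "modprobe octeon-ethernet num_packet_buffers=2048 max_rx_cpus=2", while
 * pow_send_group (mode 0644) should also be writable at runtime through
 * /sys/module/octeon_ethernet/parameters/pow_send_group.
 */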

/**
 * cvm_oct_poll_queue - Workqueue for polling operations.
 */
struct workqueue_struct *cvm_oct_poll_queue;

/**
 * cvm_oct_poll_queue_stopping - flag to indicate polling should stop.
 *
 * Set to one right before cvm_oct_poll_queue is destroyed.
 */
atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);

/**
 * Array of every ethernet device owned by this driver indexed by
 * the ipd input port number.
 */
struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];

u64 cvm_oct_tx_poll_interval;

static void cvm_oct_rx_refill_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);

static void cvm_oct_rx_refill_worker(struct work_struct *work)
{
        /*
         * FPA 0 may have been drained, try to refill it if we need
         * more than num_packet_buffers / 2, otherwise normal receive
         * processing will refill it.  If it were drained, no packets
         * could be received so cvm_oct_napi_poll would never be
         * invoked to do the refill.
         */
        cvm_oct_rx_refill_pool(num_packet_buffers / 2);

        if (!atomic_read(&cvm_oct_poll_queue_stopping))
                queue_delayed_work(cvm_oct_poll_queue,
                                   &cvm_oct_rx_refill_work, HZ);
}
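
/*
 * The refill work above re-arms itself once per second.  Tear-down (see
 * cvm_oct_remove()) first bumps cvm_oct_poll_queue_stopping and then calls
 * cancel_delayed_work_sync(); the check above keeps a worker that is
 * already running from re-queueing itself after the cancel.
 * cvm_oct_periodic_worker() below follows the same pattern.
 */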

static void cvm_oct_periodic_worker(struct work_struct *work)
{
        struct octeon_ethernet *priv = container_of(work,
                                                    struct octeon_ethernet,
                                                    port_periodic_work.work);

        if (priv->poll)
                priv->poll(cvm_oct_device[priv->port]);

        cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(
                                                cvm_oct_device[priv->port]);

        if (!atomic_read(&cvm_oct_poll_queue_stopping))
                queue_delayed_work(cvm_oct_poll_queue,
                                   &priv->port_periodic_work, HZ);
}

static void cvm_oct_configure_common_hw(void)
{
        /* Setup the FPA */
        cvmx_fpa_enable();
        cvm_oct_mem_fill_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
                             num_packet_buffers);
        cvm_oct_mem_fill_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
                             num_packet_buffers);
        if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
                cvm_oct_mem_fill_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
                                     CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);

        if (USE_RED)
                cvmx_helper_setup_red(num_packet_buffers / 4,
                                      num_packet_buffers / 8);
}
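
/*
 * Rough buffer budget implied above, assuming the default
 * num_packet_buffers of 1024: the packet and WQE pools each receive 1024
 * buffers, the separate PKO output pool (when configured) receives 128,
 * and with USE_RED set, Random Early Discard is armed with pass/drop
 * thresholds of num_packet_buffers / 4 and / 8 (256 and 128 free buffers).
 */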

/**
 * cvm_oct_free_work - Free a work queue entry
 *
 * @work_queue_entry: Work queue entry to free
 *
 * Returns Zero on success, Negative on failure.
 */
int cvm_oct_free_work(void *work_queue_entry)
{
        cvmx_wqe_t *work = work_queue_entry;

        int segments = work->word2.s.bufs;
        union cvmx_buf_ptr segment_ptr = work->packet_ptr;

        while (segments--) {
                union cvmx_buf_ptr next_ptr = *(union cvmx_buf_ptr *)
                        cvmx_phys_to_ptr(segment_ptr.s.addr - 8);
                if (unlikely(!segment_ptr.s.i))
                        cvmx_fpa_free(cvm_oct_get_buffer_ptr(segment_ptr),
                                      segment_ptr.s.pool,
                                      DONT_WRITEBACK(CVMX_FPA_PACKET_POOL_SIZE /
                                                     128));
                segment_ptr = next_ptr;
        }
        cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, DONT_WRITEBACK(1));

        return 0;
}
EXPORT_SYMBOL(cvm_oct_free_work);
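
/*
 * Buffer-chain layout assumed by cvm_oct_free_work(): the address of the
 * next segment is stored as a cvmx_buf_ptr in the 8 bytes immediately
 * before each segment's data (hence the read at segment_ptr.s.addr - 8),
 * and a segment with the "i" bit set is skipped instead of being returned
 * to its FPA pool.
 */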

/**
 * cvm_oct_common_get_stats - get the low level ethernet statistics
 * @dev:    Device to get the statistics from
 *
 * Returns Pointer to the statistics
 */
static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
{
        cvmx_pip_port_status_t rx_status;
        cvmx_pko_port_status_t tx_status;
        struct octeon_ethernet *priv = netdev_priv(dev);

        if (priv->port < CVMX_PIP_NUM_INPUT_PORTS) {
                if (octeon_is_simulation()) {
                        /* The simulator doesn't support statistics */
                        memset(&rx_status, 0, sizeof(rx_status));
                        memset(&tx_status, 0, sizeof(tx_status));
                } else {
                        cvmx_pip_get_port_status(priv->port, 1, &rx_status);
                        cvmx_pko_get_port_status(priv->port, 1, &tx_status);
                }

                priv->stats.rx_packets += rx_status.inb_packets;
                priv->stats.tx_packets += tx_status.packets;
                priv->stats.rx_bytes += rx_status.inb_octets;
                priv->stats.tx_bytes += tx_status.octets;
                priv->stats.multicast += rx_status.multicast_packets;
                priv->stats.rx_crc_errors += rx_status.inb_errors;
                priv->stats.rx_frame_errors += rx_status.fcs_align_err_packets;

                /*
                 * The drop counter must be incremented atomically
                 * since the RX tasklet also increments it.
                 */
#ifdef CONFIG_64BIT
                atomic64_add(rx_status.dropped_packets,
                             (atomic64_t *)&priv->stats.rx_dropped);
#else
                atomic_add(rx_status.dropped_packets,
                           (atomic_t *)&priv->stats.rx_dropped);
#endif
        }

        return &priv->stats;
}

/**
 * cvm_oct_common_change_mtu - change the link MTU
 * @dev:     Device to change
 * @new_mtu: The new MTU
 *
 * Returns Zero on success
 */
static int cvm_oct_common_change_mtu(struct net_device *dev, int new_mtu)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);
        int index = INDEX(priv->port);
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
        int vlan_bytes = 4;
#else
        int vlan_bytes = 0;
#endif

        /*
         * Limit the MTU to make sure the ethernet packets are between
         * 64 bytes and 65535 bytes.
         */
        if ((new_mtu + 14 + 4 + vlan_bytes < 64)
            || (new_mtu + 14 + 4 + vlan_bytes > 65392)) {
                pr_err("MTU must be between %d and %d.\n",
                       64 - 14 - 4 - vlan_bytes, 65392 - 14 - 4 - vlan_bytes);
                return -EINVAL;
        }
        dev->mtu = new_mtu;

        if ((interface < 2)
            && (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                /* Add ethernet header and FCS, and VLAN if configured. */
                int max_packet = new_mtu + 14 + 4 + vlan_bytes;

                if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
                    || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
                        /* Signal errors on packets larger than the MTU */
                        cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(index, interface),
                                       max_packet);
                } else {
                        /*
                         * Set the hardware to truncate packets larger
                         * than the MTU and smaller than 64 bytes.
                         */
                        union cvmx_pip_frm_len_chkx frm_len_chk;

                        frm_len_chk.u64 = 0;
                        frm_len_chk.s.minlen = 64;
                        frm_len_chk.s.maxlen = max_packet;
                        cvmx_write_csr(CVMX_PIP_FRM_LEN_CHKX(interface),
                                       frm_len_chk.u64);
                }
                /*
                 * Set the hardware to truncate packets larger than
                 * the MTU. The jabber register must be set to a
                 * multiple of 8 bytes, so round up.
                 */
                cvmx_write_csr(CVMX_GMXX_RXX_JABBER(index, interface),
                               (max_packet + 7) & ~7u);
        }
        return 0;
}
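
/*
 * Worked example of the bounds enforced above: the wire frame adds a
 * 14-byte Ethernet header, a 4-byte FCS and, when 802.1Q support is built
 * in, 4 VLAN bytes.  With vlan_bytes == 4 the permitted MTU range is
 * 64 - 14 - 4 - 4 = 42 up to 65392 - 14 - 4 - 4 = 65370 bytes; without
 * VLAN support it is 46 to 65374.
 */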

/**
 * cvm_oct_common_set_multicast_list - set the multicast list
 * @dev:    Device to work on
 */
static void cvm_oct_common_set_multicast_list(struct net_device *dev)
{
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        struct octeon_ethernet *priv = netdev_priv(dev);
        int interface = INTERFACE(priv->port);
        int index = INDEX(priv->port);

        if ((interface < 2)
            && (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                union cvmx_gmxx_rxx_adr_ctl control;

                control.u64 = 0;
                control.s.bcst = 1;     /* Allow broadcast MAC addresses */

                if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
                    (dev->flags & IFF_PROMISC))
                        /* Force accept multicast packets */
                        control.s.mcst = 2;
                else
                        /* Force reject multicast packets */
                        control.s.mcst = 1;

                if (dev->flags & IFF_PROMISC)
                        /*
                         * Reject matches if promisc. Since CAM is
                         * shut off, should accept everything.
                         */
                        control.s.cam_mode = 0;
                else
                        /* Filter packets based on the CAM */
                        control.s.cam_mode = 1;

                gmx_cfg.u64 =
                    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64 & ~1ull);

                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CTL(index, interface),
                               control.u64);
                if (dev->flags & IFF_PROMISC)
                        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
                                       (index, interface), 0);
                else
                        cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM_EN
                                       (index, interface), 1);

                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64);
        }
}
/**
 * cvm_oct_common_set_mac_address - set the hardware MAC address for a device
 * @dev:  The device in question.
 * @addr: Address structure to change it to.
 *
 * Returns Zero on success
 */
static int cvm_oct_set_mac_filter(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        union cvmx_gmxx_prtx_cfg gmx_cfg;
        int interface = INTERFACE(priv->port);
        int index = INDEX(priv->port);

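        /*
         * Sequence used below: clear bit 0 of GMXX_PRTX_CFG (which appears
         * to act as the port enable) to quiesce the port, fold the six
         * dev_addr bytes big-endian into one u64 for the SMAC register,
         * write them byte-wise into CAM0..CAM5, refresh the multicast
         * filter, then restore the saved GMXX_PRTX_CFG value.
         */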
        if ((interface < 2)
            && (cvmx_helper_interface_get_mode(interface) !=
                CVMX_HELPER_INTERFACE_MODE_SPI)) {
                int i;
                uint8_t *ptr = dev->dev_addr;
                uint64_t mac = 0;

                for (i = 0; i < 6; i++)
                        mac = (mac << 8) | (uint64_t)ptr[i];

                gmx_cfg.u64 =
                    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64 & ~1ull);

                cvmx_write_csr(CVMX_GMXX_SMACX(index, interface), mac);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM0(index, interface),
                               ptr[0]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM1(index, interface),
                               ptr[1]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM2(index, interface),
                               ptr[2]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM3(index, interface),
                               ptr[3]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM4(index, interface),
                               ptr[4]);
                cvmx_write_csr(CVMX_GMXX_RXX_ADR_CAM5(index, interface),
                               ptr[5]);
                cvm_oct_common_set_multicast_list(dev);
                cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
                               gmx_cfg.u64);
        }
        return 0;
}

static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
{
        int r = eth_mac_addr(dev, addr);

        if (r)
                return r;
        return cvm_oct_set_mac_filter(dev);
}

/**
 * cvm_oct_common_init - per network device initialization
 * @dev: Device to initialize
 *
 * Returns Zero on success
 */
int cvm_oct_common_init(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);
        const u8 *mac = NULL;

        if (priv->of_node)
                mac = of_get_mac_address(priv->of_node);

        if (mac)
                memcpy(dev->dev_addr, mac, ETH_ALEN);
        else
                eth_hw_addr_random(dev);

        /*
         * Force the interface to use the POW send if always_use_pow
         * was specified or it is in the pow send list.
         */
        if ((pow_send_group != -1)
            && (always_use_pow || strstr(pow_send_list, dev->name)))
                priv->queue = -1;

        if (priv->queue != -1) {
                dev->features |= NETIF_F_SG;
                if (USE_HW_TCPUDP_CHECKSUM)
                        dev->features |= NETIF_F_IP_CSUM;
        }

        /* We do our own locking, Linux doesn't need to */
        dev->features |= NETIF_F_LLTX;
        dev->ethtool_ops = &cvm_oct_ethtool_ops;

        cvm_oct_phy_setup_device(dev);
        cvm_oct_set_mac_filter(dev);
        dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);

        /*
         * Zero out stats for port so we won't mistakenly show
         * counters from the bootloader.
         */
        memset(dev->netdev_ops->ndo_get_stats(dev), 0,
               sizeof(struct net_device_stats));

        return 0;
}

void cvm_oct_common_uninit(struct net_device *dev)
{
        struct octeon_ethernet *priv = netdev_priv(dev);

        if (priv->phydev)
                phy_disconnect(priv->phydev);
}

static const struct net_device_ops cvm_oct_npi_netdev_ops = {
        .ndo_init = cvm_oct_common_init,
        .ndo_uninit = cvm_oct_common_uninit,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_xaui_netdev_ops = {
        .ndo_init = cvm_oct_xaui_init,
        .ndo_uninit = cvm_oct_xaui_uninit,
        .ndo_open = cvm_oct_xaui_open,
        .ndo_stop = cvm_oct_xaui_stop,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_sgmii_netdev_ops = {
        .ndo_init = cvm_oct_sgmii_init,
        .ndo_uninit = cvm_oct_sgmii_uninit,
        .ndo_open = cvm_oct_sgmii_open,
        .ndo_stop = cvm_oct_sgmii_stop,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_spi_netdev_ops = {
        .ndo_init = cvm_oct_spi_init,
        .ndo_uninit = cvm_oct_spi_uninit,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_rgmii_netdev_ops = {
        .ndo_init = cvm_oct_rgmii_init,
        .ndo_uninit = cvm_oct_rgmii_uninit,
        .ndo_open = cvm_oct_rgmii_open,
        .ndo_stop = cvm_oct_rgmii_stop,
        .ndo_start_xmit = cvm_oct_xmit,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

static const struct net_device_ops cvm_oct_pow_netdev_ops = {
        .ndo_init = cvm_oct_common_init,
        .ndo_start_xmit = cvm_oct_xmit_pow,
        .ndo_set_rx_mode = cvm_oct_common_set_multicast_list,
        .ndo_set_mac_address = cvm_oct_common_set_mac_address,
        .ndo_do_ioctl = cvm_oct_ioctl,
        .ndo_change_mtu = cvm_oct_common_change_mtu,
        .ndo_get_stats = cvm_oct_common_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = cvm_oct_poll_controller,
#endif
};

extern void octeon_mdiobus_force_mod_depencency(void);

static struct device_node *cvm_oct_of_get_child(
        const struct device_node *parent, int reg_val)
{
        struct device_node *node = NULL;
        int size;
        const __be32 *addr;

        for (;;) {
                node = of_get_next_child(parent, node);
                if (!node)
                        break;
                addr = of_get_property(node, "reg", &size);
                if (addr && (be32_to_cpu(*addr) == reg_val))
                        break;
        }
        return node;
}
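
/*
 * Illustrative (not from any real board) shape of the device tree walked
 * by this helper and cvm_oct_node_for_port() below: the platform device's
 * of_node is the pip node, each child is an interface selected by its
 * "reg" value, and each grandchild is a port, again selected by "reg":
 *
 *      pip@11800a0000000 {
 *              compatible = "cavium,octeon-3860-pip";
 *              interface@0 {
 *                      reg = <0>;
 *                      ethernet@0 {
 *                              reg = <0>;
 *                              local-mac-address = [ 00 00 00 00 00 00 ];
 *                      };
 *              };
 *      };
 */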

static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
                                                 int interface, int port)
{
        struct device_node *ni, *np;

        ni = cvm_oct_of_get_child(pip, interface);
        if (!ni)
                return NULL;

        np = cvm_oct_of_get_child(ni, port);
        of_node_put(ni);

        return np;
}

static int cvm_oct_probe(struct platform_device *pdev)
{
        int num_interfaces;
        int interface;
        int fau = FAU_NUM_PACKET_BUFFERS_TO_FREE;
        int qos;
        struct device_node *pip;

        octeon_mdiobus_force_mod_depencency();
        pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);

        pip = pdev->dev.of_node;
        if (!pip) {
                pr_err("Error: No 'pip' in /aliases\n");
                return -EINVAL;
        }

        cvm_oct_poll_queue = create_singlethread_workqueue("octeon-ethernet");
        if (cvm_oct_poll_queue == NULL) {
                pr_err("octeon-ethernet: Cannot create workqueue\n");
                return -ENOMEM;
        }

        cvm_oct_configure_common_hw();

        cvmx_helper_initialize_packet_io_global();

        /* Change the input group for all ports before input is enabled */
        num_interfaces = cvmx_helper_get_number_of_interfaces();
        for (interface = 0; interface < num_interfaces; interface++) {
                int num_ports = cvmx_helper_ports_on_interface(interface);
                int port;

                for (port = cvmx_helper_get_ipd_port(interface, 0);
                     port < cvmx_helper_get_ipd_port(interface, num_ports);
                     port++) {
                        union cvmx_pip_prt_tagx pip_prt_tagx;

                        pip_prt_tagx.u64 =
                            cvmx_read_csr(CVMX_PIP_PRT_TAGX(port));
                        pip_prt_tagx.s.grp = pow_receive_group;
                        cvmx_write_csr(CVMX_PIP_PRT_TAGX(port),
                                       pip_prt_tagx.u64);
                }
        }

        cvmx_helper_ipd_and_packet_input_enable();

        memset(cvm_oct_device, 0, sizeof(cvm_oct_device));

        /*
         * Initialize the FAU used for counting packet buffers that
         * need to be freed.
         */
        cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

        /* Initialize the FAU used for counting tx SKBs that need to be freed */
        cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);

        if (pow_send_group != -1) {
                struct net_device *dev;

                pr_info("\tConfiguring device for POW only access\n");
                dev = alloc_etherdev(sizeof(struct octeon_ethernet));
                if (dev) {
                        /* Initialize the device private structure. */
                        struct octeon_ethernet *priv = netdev_priv(dev);

                        dev->netdev_ops = &cvm_oct_pow_netdev_ops;
                        priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
                        priv->port = CVMX_PIP_NUM_INPUT_PORTS;
                        priv->queue = -1;
                        strcpy(dev->name, "pow%d");
                        for (qos = 0; qos < 16; qos++)
                                skb_queue_head_init(&priv->tx_free_list[qos]);

                        if (register_netdev(dev) < 0) {
                                pr_err("Failed to register ethernet device for POW\n");
                                free_netdev(dev);
                        } else {
                                cvm_oct_device[CVMX_PIP_NUM_INPUT_PORTS] = dev;
                                pr_info("%s: POW send group %d, receive group %d\n",
                                        dev->name, pow_send_group,
                                        pow_receive_group);
                        }
                } else {
                        pr_err("Failed to allocate ethernet device for POW\n");
                }
        }

        num_interfaces = cvmx_helper_get_number_of_interfaces();
        for (interface = 0; interface < num_interfaces; interface++) {
                cvmx_helper_interface_mode_t imode =
                    cvmx_helper_interface_get_mode(interface);
                int num_ports = cvmx_helper_ports_on_interface(interface);
                int port;
                int port_index;

                for (port_index = 0,
                     port = cvmx_helper_get_ipd_port(interface, 0);
                     port < cvmx_helper_get_ipd_port(interface, num_ports);
                     port_index++, port++) {
                        struct octeon_ethernet *priv;
                        struct net_device *dev =
                            alloc_etherdev(sizeof(struct octeon_ethernet));

                        if (!dev) {
                                pr_err("Failed to allocate ethernet device for port %d\n",
                                       port);
                                continue;
                        }

                        /* Initialize the device private structure. */
                        priv = netdev_priv(dev);
                        priv->of_node = cvm_oct_node_for_port(pip, interface,
                                                              port_index);

                        INIT_DELAYED_WORK(&priv->port_periodic_work,
                                          cvm_oct_periodic_worker);
                        priv->imode = imode;
                        priv->port = port;
                        priv->queue = cvmx_pko_get_base_queue(priv->port);
                        priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
                        for (qos = 0; qos < 16; qos++)
                                skb_queue_head_init(&priv->tx_free_list[qos]);
                        for (qos = 0; qos < cvmx_pko_get_num_queues(port);
                             qos++)
                                cvmx_fau_atomic_write32(priv->fau + qos * 4, 0);

                        switch (priv->imode) {
                        /* These types don't support ports to IPD/PKO */
                        case CVMX_HELPER_INTERFACE_MODE_DISABLED:
                        case CVMX_HELPER_INTERFACE_MODE_PCIE:
                        case CVMX_HELPER_INTERFACE_MODE_PICMG:
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_NPI:
                                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                                strcpy(dev->name, "npi%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_XAUI:
                                dev->netdev_ops = &cvm_oct_xaui_netdev_ops;
                                strcpy(dev->name, "xaui%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_LOOP:
                                dev->netdev_ops = &cvm_oct_npi_netdev_ops;
                                strcpy(dev->name, "loop%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_SGMII:
                                dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_SPI:
                                dev->netdev_ops = &cvm_oct_spi_netdev_ops;
                                strcpy(dev->name, "spi%d");
                                break;

                        case CVMX_HELPER_INTERFACE_MODE_RGMII:
                        case CVMX_HELPER_INTERFACE_MODE_GMII:
                                dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
                                break;
                        }

                        if (!dev->netdev_ops) {
                                free_netdev(dev);
                        } else if (register_netdev(dev) < 0) {
                                pr_err("Failed to register ethernet device for interface %d, port %d\n",
                                       interface, priv->port);
                                free_netdev(dev);
                        } else {
                                cvm_oct_device[priv->port] = dev;
                                fau -=
                                    cvmx_pko_get_num_queues(priv->port) *
                                    sizeof(uint32_t);
                                queue_delayed_work(cvm_oct_poll_queue,
                                                   &priv->port_periodic_work,
                                                   HZ);
                        }
                }
        }
        cvm_oct_tx_initialize();
        cvm_oct_rx_initialize();

        /*
         * 150 uS: about 10 1500-byte packets at 1GE.  The interval is
         * converted from microseconds to core clock cycles.
         */
        cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);

        queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);

        return 0;
}

static int cvm_oct_remove(struct platform_device *pdev)
{
        int port;

        /* Disable POW interrupt */
        cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), 0);

        cvmx_ipd_disable();

        /* Free the interrupt handler */
        free_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group, cvm_oct_device);

        atomic_inc_return(&cvm_oct_poll_queue_stopping);
        cancel_delayed_work_sync(&cvm_oct_rx_refill_work);

        cvm_oct_rx_shutdown();
        cvm_oct_tx_shutdown();

        cvmx_pko_disable();

        /* Free the ethernet devices */
        for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
                if (cvm_oct_device[port]) {
                        struct net_device *dev = cvm_oct_device[port];
                        struct octeon_ethernet *priv = netdev_priv(dev);

                        cancel_delayed_work_sync(&priv->port_periodic_work);

                        cvm_oct_tx_shutdown_dev(dev);
                        unregister_netdev(dev);
                        free_netdev(dev);
                        cvm_oct_device[port] = NULL;
                }
        }

        destroy_workqueue(cvm_oct_poll_queue);

        cvmx_pko_shutdown();

        cvmx_ipd_free_ptr();

        /* Free the HW pools */
        cvm_oct_mem_empty_fpa(CVMX_FPA_PACKET_POOL, CVMX_FPA_PACKET_POOL_SIZE,
                              num_packet_buffers);
        cvm_oct_mem_empty_fpa(CVMX_FPA_WQE_POOL, CVMX_FPA_WQE_POOL_SIZE,
                              num_packet_buffers);
        if (CVMX_FPA_OUTPUT_BUFFER_POOL != CVMX_FPA_PACKET_POOL)
                cvm_oct_mem_empty_fpa(CVMX_FPA_OUTPUT_BUFFER_POOL,
                                      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE, 128);
        return 0;
}

static struct of_device_id cvm_oct_match[] = {
        {
                .compatible = "cavium,octeon-3860-pip",
        },
        {},
};
MODULE_DEVICE_TABLE(of, cvm_oct_match);

static struct platform_driver cvm_oct_driver = {
        .probe = cvm_oct_probe,
        .remove = cvm_oct_remove,
        .driver = {
                .owner = THIS_MODULE,
                .name = KBUILD_MODNAME,
                .of_match_table = cvm_oct_match,
        },
};

module_platform_driver(cvm_oct_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
MODULE_DESCRIPTION("Cavium Networks Octeon ethernet driver.");