// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include "qlge.h"

struct ql_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

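/* Resolve the size and byte offset of a statistic within struct ql_adapter;
 * these feed the ql_gstrings_stats[] table below.
 */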
#define QL_SIZEOF(m) FIELD_SIZEOF(struct ql_adapter, m)
#define QL_OFF(m) offsetof(struct ql_adapter, m)

static const struct ql_stats ql_gstrings_stats[] = {
	{"tx_pkts", QL_SIZEOF(nic_stats.tx_pkts), QL_OFF(nic_stats.tx_pkts)},
	{"tx_bytes", QL_SIZEOF(nic_stats.tx_bytes), QL_OFF(nic_stats.tx_bytes)},
	{"tx_mcast_pkts", QL_SIZEOF(nic_stats.tx_mcast_pkts),
		QL_OFF(nic_stats.tx_mcast_pkts)},
	{"tx_bcast_pkts", QL_SIZEOF(nic_stats.tx_bcast_pkts),
		QL_OFF(nic_stats.tx_bcast_pkts)},
	{"tx_ucast_pkts", QL_SIZEOF(nic_stats.tx_ucast_pkts),
		QL_OFF(nic_stats.tx_ucast_pkts)},
	{"tx_ctl_pkts", QL_SIZEOF(nic_stats.tx_ctl_pkts),
		QL_OFF(nic_stats.tx_ctl_pkts)},
	{"tx_pause_pkts", QL_SIZEOF(nic_stats.tx_pause_pkts),
		QL_OFF(nic_stats.tx_pause_pkts)},
	{"tx_64_pkts", QL_SIZEOF(nic_stats.tx_64_pkt),
		QL_OFF(nic_stats.tx_64_pkt)},
	{"tx_65_to_127_pkts", QL_SIZEOF(nic_stats.tx_65_to_127_pkt),
		QL_OFF(nic_stats.tx_65_to_127_pkt)},
	{"tx_128_to_255_pkts", QL_SIZEOF(nic_stats.tx_128_to_255_pkt),
		QL_OFF(nic_stats.tx_128_to_255_pkt)},
	{"tx_256_511_pkts", QL_SIZEOF(nic_stats.tx_256_511_pkt),
		QL_OFF(nic_stats.tx_256_511_pkt)},
	{"tx_512_to_1023_pkts", QL_SIZEOF(nic_stats.tx_512_to_1023_pkt),
		QL_OFF(nic_stats.tx_512_to_1023_pkt)},
	{"tx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.tx_1024_to_1518_pkt),
		QL_OFF(nic_stats.tx_1024_to_1518_pkt)},
	{"tx_1519_to_max_pkts", QL_SIZEOF(nic_stats.tx_1519_to_max_pkt),
		QL_OFF(nic_stats.tx_1519_to_max_pkt)},
	{"tx_undersize_pkts", QL_SIZEOF(nic_stats.tx_undersize_pkt),
		QL_OFF(nic_stats.tx_undersize_pkt)},
	{"tx_oversize_pkts", QL_SIZEOF(nic_stats.tx_oversize_pkt),
		QL_OFF(nic_stats.tx_oversize_pkt)},
	{"rx_bytes", QL_SIZEOF(nic_stats.rx_bytes), QL_OFF(nic_stats.rx_bytes)},
	{"rx_bytes_ok", QL_SIZEOF(nic_stats.rx_bytes_ok),
		QL_OFF(nic_stats.rx_bytes_ok)},
	{"rx_pkts", QL_SIZEOF(nic_stats.rx_pkts), QL_OFF(nic_stats.rx_pkts)},
	{"rx_pkts_ok", QL_SIZEOF(nic_stats.rx_pkts_ok),
		QL_OFF(nic_stats.rx_pkts_ok)},
	{"rx_bcast_pkts", QL_SIZEOF(nic_stats.rx_bcast_pkts),
		QL_OFF(nic_stats.rx_bcast_pkts)},
	{"rx_mcast_pkts", QL_SIZEOF(nic_stats.rx_mcast_pkts),
		QL_OFF(nic_stats.rx_mcast_pkts)},
	{"rx_ucast_pkts", QL_SIZEOF(nic_stats.rx_ucast_pkts),
		QL_OFF(nic_stats.rx_ucast_pkts)},
	{"rx_undersize_pkts", QL_SIZEOF(nic_stats.rx_undersize_pkts),
		QL_OFF(nic_stats.rx_undersize_pkts)},
	{"rx_oversize_pkts", QL_SIZEOF(nic_stats.rx_oversize_pkts),
		QL_OFF(nic_stats.rx_oversize_pkts)},
	{"rx_jabber_pkts", QL_SIZEOF(nic_stats.rx_jabber_pkts),
		QL_OFF(nic_stats.rx_jabber_pkts)},
	{"rx_undersize_fcerr_pkts",
		QL_SIZEOF(nic_stats.rx_undersize_fcerr_pkts),
		QL_OFF(nic_stats.rx_undersize_fcerr_pkts)},
	{"rx_drop_events", QL_SIZEOF(nic_stats.rx_drop_events),
		QL_OFF(nic_stats.rx_drop_events)},
	{"rx_fcerr_pkts", QL_SIZEOF(nic_stats.rx_fcerr_pkts),
		QL_OFF(nic_stats.rx_fcerr_pkts)},
	{"rx_align_err", QL_SIZEOF(nic_stats.rx_align_err),
		QL_OFF(nic_stats.rx_align_err)},
	{"rx_symbol_err", QL_SIZEOF(nic_stats.rx_symbol_err),
		QL_OFF(nic_stats.rx_symbol_err)},
	{"rx_mac_err", QL_SIZEOF(nic_stats.rx_mac_err),
		QL_OFF(nic_stats.rx_mac_err)},
	{"rx_ctl_pkts", QL_SIZEOF(nic_stats.rx_ctl_pkts),
		QL_OFF(nic_stats.rx_ctl_pkts)},
	{"rx_pause_pkts", QL_SIZEOF(nic_stats.rx_pause_pkts),
		QL_OFF(nic_stats.rx_pause_pkts)},
	{"rx_64_pkts", QL_SIZEOF(nic_stats.rx_64_pkts),
		QL_OFF(nic_stats.rx_64_pkts)},
	{"rx_65_to_127_pkts", QL_SIZEOF(nic_stats.rx_65_to_127_pkts),
		QL_OFF(nic_stats.rx_65_to_127_pkts)},
	{"rx_128_255_pkts", QL_SIZEOF(nic_stats.rx_128_255_pkts),
		QL_OFF(nic_stats.rx_128_255_pkts)},
	{"rx_256_511_pkts", QL_SIZEOF(nic_stats.rx_256_511_pkts),
		QL_OFF(nic_stats.rx_256_511_pkts)},
	{"rx_512_to_1023_pkts", QL_SIZEOF(nic_stats.rx_512_to_1023_pkts),
		QL_OFF(nic_stats.rx_512_to_1023_pkts)},
	{"rx_1024_to_1518_pkts", QL_SIZEOF(nic_stats.rx_1024_to_1518_pkts),
		QL_OFF(nic_stats.rx_1024_to_1518_pkts)},
	{"rx_1519_to_max_pkts", QL_SIZEOF(nic_stats.rx_1519_to_max_pkts),
		QL_OFF(nic_stats.rx_1519_to_max_pkts)},
	{"rx_len_err_pkts", QL_SIZEOF(nic_stats.rx_len_err_pkts),
		QL_OFF(nic_stats.rx_len_err_pkts)},
	{"rx_code_err", QL_SIZEOF(nic_stats.rx_code_err),
		QL_OFF(nic_stats.rx_code_err)},
	{"rx_oversize_err", QL_SIZEOF(nic_stats.rx_oversize_err),
		QL_OFF(nic_stats.rx_oversize_err)},
	{"rx_undersize_err", QL_SIZEOF(nic_stats.rx_undersize_err),
		QL_OFF(nic_stats.rx_undersize_err)},
	{"rx_preamble_err", QL_SIZEOF(nic_stats.rx_preamble_err),
		QL_OFF(nic_stats.rx_preamble_err)},
	{"rx_frame_len_err", QL_SIZEOF(nic_stats.rx_frame_len_err),
		QL_OFF(nic_stats.rx_frame_len_err)},
	{"rx_crc_err", QL_SIZEOF(nic_stats.rx_crc_err),
		QL_OFF(nic_stats.rx_crc_err)},
	{"rx_err_count", QL_SIZEOF(nic_stats.rx_err_count),
		QL_OFF(nic_stats.rx_err_count)},
	{"tx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames0),
		QL_OFF(nic_stats.tx_cbfc_pause_frames0)},
	{"tx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames1),
		QL_OFF(nic_stats.tx_cbfc_pause_frames1)},
	{"tx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames2),
		QL_OFF(nic_stats.tx_cbfc_pause_frames2)},
	{"tx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames3),
		QL_OFF(nic_stats.tx_cbfc_pause_frames3)},
	{"tx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames4),
		QL_OFF(nic_stats.tx_cbfc_pause_frames4)},
	{"tx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames5),
		QL_OFF(nic_stats.tx_cbfc_pause_frames5)},
	{"tx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames6),
		QL_OFF(nic_stats.tx_cbfc_pause_frames6)},
	{"tx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.tx_cbfc_pause_frames7),
		QL_OFF(nic_stats.tx_cbfc_pause_frames7)},
	{"rx_cbfc_pause_frames0", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames0),
		QL_OFF(nic_stats.rx_cbfc_pause_frames0)},
	{"rx_cbfc_pause_frames1", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames1),
		QL_OFF(nic_stats.rx_cbfc_pause_frames1)},
	{"rx_cbfc_pause_frames2", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames2),
		QL_OFF(nic_stats.rx_cbfc_pause_frames2)},
	{"rx_cbfc_pause_frames3", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames3),
		QL_OFF(nic_stats.rx_cbfc_pause_frames3)},
	{"rx_cbfc_pause_frames4", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames4),
		QL_OFF(nic_stats.rx_cbfc_pause_frames4)},
	{"rx_cbfc_pause_frames5", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames5),
		QL_OFF(nic_stats.rx_cbfc_pause_frames5)},
	{"rx_cbfc_pause_frames6", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames6),
		QL_OFF(nic_stats.rx_cbfc_pause_frames6)},
	{"rx_cbfc_pause_frames7", QL_SIZEOF(nic_stats.rx_cbfc_pause_frames7),
		QL_OFF(nic_stats.rx_cbfc_pause_frames7)},
	{"rx_nic_fifo_drop", QL_SIZEOF(nic_stats.rx_nic_fifo_drop),
		QL_OFF(nic_stats.rx_nic_fifo_drop)},
};

static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
	"Loopback test (offline)"
};
#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
#define QLGE_RCV_MAC_ERR_STATS 7

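/* Reprogram the completion queue initialization control block (CQICB) of
 * each outbound (TX completion) and inbound (RSS) queue whose interrupt
 * coalescing parameters have changed.
 */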
static int ql_update_ring_coalescing(struct ql_adapter *qdev)
{
	int i, status = 0;
	struct rx_ring *rx_ring;
	struct cqicb *cqicb;

	if (!netif_running(qdev->ndev))
		return status;

	/* Skip the default queue, and update the outbound handler
	 * queues if they changed.
	 */
	cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
	if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
	    le16_to_cpu(cqicb->pkt_delay) != qdev->tx_max_coalesced_frames) {
		for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
			rx_ring = &qdev->rx_ring[i];
			cqicb = (struct cqicb *)rx_ring;
			cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
			cqicb->pkt_delay =
				cpu_to_le16(qdev->tx_max_coalesced_frames);
			cqicb->flags = FLAGS_LI;
			status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
					      CFG_LCQ, rx_ring->cq_id);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed to load CQICB.\n");
				goto exit;
			}
		}
	}

	/* Update the inbound (RSS) handler queues if they changed. */
	cqicb = (struct cqicb *)&qdev->rx_ring[0];
	if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
	    le16_to_cpu(cqicb->pkt_delay) != qdev->rx_max_coalesced_frames) {
		for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
			rx_ring = &qdev->rx_ring[i];
			cqicb = (struct cqicb *)rx_ring;
			cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
			cqicb->pkt_delay =
				cpu_to_le16(qdev->rx_max_coalesced_frames);
			cqicb->flags = FLAGS_LI;
			status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
					      CFG_LCQ, rx_ring->cq_id);
			if (status) {
				netif_err(qdev, ifup, qdev->ndev,
					  "Failed to load CQICB.\n");
				goto exit;
			}
		}
	}
exit:
	return status;
}

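/* Pull the 64-bit XGMAC hardware counters into qdev->nic_stats.  The
 * register ranges below are read in the same order as the fields of the
 * nic_stats structure, so *iter advances one field per successful read.
 */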
static void ql_update_stats(struct ql_adapter *qdev)
{
	u32 i;
	u64 data;
	u64 *iter = &qdev->nic_stats.tx_pkts;

	spin_lock(&qdev->stats_lock);
	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get xgmac sem.\n");
		goto quit;
	}
	/*
	 * Get TX statistics.
	 */
	for (i = 0x200; i < 0x280; i += 8) {
		if (ql_read_xgmac_reg64(qdev, i, &data)) {
			netif_err(qdev, drv, qdev->ndev,
				  "Error reading status register 0x%.04x.\n",
				  i);
			goto end;
		}
		*iter = data;
		iter++;
	}

	/*
	 * Get RX statistics.
	 */
	for (i = 0x300; i < 0x3d0; i += 8) {
		if (ql_read_xgmac_reg64(qdev, i, &data)) {
			netif_err(qdev, drv, qdev->ndev,
				  "Error reading status register 0x%.04x.\n",
				  i);
			goto end;
		}
		*iter = data;
		iter++;
	}

	/* Update receive mac error statistics */
	iter += QLGE_RCV_MAC_ERR_STATS;

	/*
	 * Get Per-priority TX pause frame counter statistics.
	 */
	for (i = 0x500; i < 0x540; i += 8) {
		if (ql_read_xgmac_reg64(qdev, i, &data)) {
			netif_err(qdev, drv, qdev->ndev,
				  "Error reading status register 0x%.04x.\n",
				  i);
			goto end;
		}
		*iter = data;
		iter++;
	}

	/*
	 * Get Per-priority RX pause frame counter statistics.
	 */
	for (i = 0x568; i < 0x5a8; i += 8) {
		if (ql_read_xgmac_reg64(qdev, i, &data)) {
			netif_err(qdev, drv, qdev->ndev,
				  "Error reading status register 0x%.04x.\n",
				  i);
			goto end;
		}
		*iter = data;
		iter++;
	}

	/*
	 * Get RX NIC FIFO DROP statistics.
	 */
	if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
		netif_err(qdev, drv, qdev->ndev,
			  "Error reading status register 0x%.04x.\n", 0x5b8);
		goto end;
	}
	*iter = data;
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
quit:
	spin_unlock(&qdev->stats_lock);

	QL_DUMP_STAT(qdev);
}

static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	int index;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(buf, *ql_gstrings_test, QLGE_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (index = 0; index < QLGE_STATS_LEN; index++) {
			memcpy(buf + index * ETH_GSTRING_LEN,
			       ql_gstrings_stats[index].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

static int ql_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return QLGE_TEST_LEN;
	case ETH_SS_STATS:
		return QLGE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void
ql_get_ethtool_stats(struct net_device *ndev,
		     struct ethtool_stats *stats, u64 *data)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	int index, length;

	length = QLGE_STATS_LEN;
	ql_update_stats(qdev);

	for (index = 0; index < length; index++) {
		char *p = (char *)qdev +
			ql_gstrings_stats[index].stat_offset;
		*data++ = (ql_gstrings_stats[index].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : (*(u32 *)p);
	}
}

static int ql_get_link_ksettings(struct net_device *ndev,
				 struct ethtool_link_ksettings *ecmd)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	u32 supported, advertising;

	supported = SUPPORTED_10000baseT_Full;
	advertising = ADVERTISED_10000baseT_Full;

	if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
	    STS_LINK_TYPE_10GBASET) {
		supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
		advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
		ecmd->base.port = PORT_TP;
		ecmd->base.autoneg = AUTONEG_ENABLE;
	} else {
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		ecmd->base.port = PORT_FIBRE;
	}

	ecmd->base.speed = SPEED_10000;
	ecmd->base.duplex = DUPLEX_FULL;

	ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
						advertising);

	return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
			   struct ethtool_drvinfo *drvinfo)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, qlge_driver_version,
		sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "v%d.%d.%d",
		 (qdev->fw_rev_id & 0x00ff0000) >> 16,
		 (qdev->fw_rev_id & 0x0000ff00) >> 8,
		 (qdev->fw_rev_id & 0x000000ff));
	strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
		sizeof(drvinfo->bus_info));
}

static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	unsigned short ssys_dev = qdev->pdev->subsystem_device;

	/* WOL is only supported for mezz card. */
	if (ssys_dev == QLGE_MEZZ_SSYS_ID_068 ||
	    ssys_dev == QLGE_MEZZ_SSYS_ID_180) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = qdev->wol;
	}
}

static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct ql_adapter *qdev = netdev_priv(ndev);
	unsigned short ssys_dev = qdev->pdev->subsystem_device;

	/* WOL is only supported for mezz card. */
	if (ssys_dev != QLGE_MEZZ_SSYS_ID_068 &&
	    ssys_dev != QLGE_MEZZ_SSYS_ID_180) {
		netif_info(qdev, drv, qdev->ndev,
			   "WOL is only supported for mezz card\n");
		return -EOPNOTSUPP;
	}
	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
	qdev->wol = wol->wolopts;

	netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
	return 0;
}

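/* Blink the port LED in response to the ethtool "identify adapter"
 * (ethtool -p) request, restoring the original LED configuration when done.
 */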
static int ql_set_phys_id(struct net_device *ndev,
			  enum ethtool_phys_id_state state)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* Save the current LED settings */
		if (ql_mb_get_led_cfg(qdev))
			return -EIO;

		/* Start blinking */
		ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
		return 0;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		if (ql_mb_set_led_cfg(qdev, qdev->led_config))
			return -EIO;
		return 0;

	default:
		return -EINVAL;
	}
}

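/* Loopback self-test support: put the MAC into PCS loopback, transmit a
 * batch of patterned frames, and count how many come back intact.
 */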
static int ql_start_loopback(struct ql_adapter *qdev)
{
	if (netif_carrier_ok(qdev->ndev)) {
		set_bit(QL_LB_LINK_UP, &qdev->flags);
		netif_carrier_off(qdev->ndev);
	} else {
		clear_bit(QL_LB_LINK_UP, &qdev->flags);
	}
	qdev->link_config |= CFG_LOOPBACK_PCS;
	return ql_mb_set_port_cfg(qdev);
}

static void ql_stop_loopback(struct ql_adapter *qdev)
{
	qdev->link_config &= ~CFG_LOOPBACK_PCS;
	ql_mb_set_port_cfg(qdev);
	if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
		netif_carrier_on(qdev->ndev);
		clear_bit(QL_LB_LINK_UP, &qdev->flags);
	}
}

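/* Fill a loopback test frame with a recognizable pattern: 0xFF in the front
 * half, 0xAA in the back half, plus 0xBE/0xAF marker bytes that
 * ql_check_lb_frame() looks for when the frame is received.
 */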
static void ql_create_lb_frame(struct sk_buff *skb,
			       unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size &= ~1;
	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
}

void ql_check_lb_frame(struct ql_adapter *qdev,
		       struct sk_buff *skb)
{
	unsigned int frame_size = skb->len;

	if ((*(skb->data + 3) == 0xFF) &&
	    (*(skb->data + frame_size / 2 + 10) == 0xBE) &&
	    (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
		atomic_dec(&qdev->lb_count);
		return;
	}
}

static int ql_run_loopback_test(struct ql_adapter *qdev)
{
	int i;
	netdev_tx_t rc;
	struct sk_buff *skb;
	unsigned int size = SMALL_BUF_MAP_SIZE;

	for (i = 0; i < 64; i++) {
		skb = netdev_alloc_skb(qdev->ndev, size);
		if (!skb)
			return -ENOMEM;

		skb->queue_mapping = 0;
		skb_put(skb, size);
		ql_create_lb_frame(skb, size);
		rc = ql_lb_send(skb, qdev->ndev);
		if (rc != NETDEV_TX_OK)
			return -EPIPE;
		atomic_inc(&qdev->lb_count);
	}
	/* Give queue time to settle before testing results. */
	msleep(2);
	ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
	return atomic_read(&qdev->lb_count) ? -EIO : 0;
}

static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
{
	*data = ql_start_loopback(qdev);
	if (*data)
		goto out;
	*data = ql_run_loopback_test(qdev);
out:
	ql_stop_loopback(qdev);
	return *data;
}

static void ql_self_test(struct net_device *ndev,
			 struct ethtool_test *eth_test, u64 *data)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	memset(data, 0, sizeof(u64) * QLGE_TEST_LEN);

	if (netif_running(ndev)) {
		set_bit(QL_SELFTEST, &qdev->flags);
		if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
			/* Offline tests */
			if (ql_loopback_test(qdev, &data[0]))
				eth_test->flags |= ETH_TEST_FL_FAILED;

		} else {
			/* Online tests */
			data[0] = 0;
		}
		clear_bit(QL_SELFTEST, &qdev->flags);
		/* Give link time to come up after
		 * port configuration changes.
		 */
		msleep_interruptible(4 * 1000);
	} else {
		netif_err(qdev, drv, qdev->ndev,
			  "is down, Loopback test will fail.\n");
		eth_test->flags |= ETH_TEST_FL_FAILED;
	}
}

static int ql_get_regs_len(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
		return sizeof(struct ql_mpi_coredump);
	else
		return sizeof(struct ql_reg_dump);
}

static void ql_get_regs(struct net_device *ndev,
			struct ethtool_regs *regs, void *p)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	ql_get_dump(qdev, p);
	qdev->core_is_dumped = 0;
	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
		regs->len = sizeof(struct ql_mpi_coredump);
	else
		regs->len = sizeof(struct ql_reg_dump);
}

static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct ql_adapter *qdev = netdev_priv(dev);

	c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
	c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;

	/* This chip coalesces as follows:
	 * If a packet arrives, hold off interrupts until
	 * cqicb->int_delay expires, but if no other packets arrive don't
	 * wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a
	 * timer to coalesce on a frame basis. So, we have to take ethtool's
	 * max_coalesced_frames value and convert it to a delay in
	 * microseconds. We do this by assuming a baseline throughput of
	 * 1,000,000 frames per second @ (1024 bytes), which works out to
	 * one frame per usec, i.e. a simple one-to-one ratio.
	 */
	c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
	c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;

	return 0;
}

static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	/* Validate user parameters. */
	if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
		return -EINVAL;
	/* Don't wait more than 10 usec. */
	if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
		return -EINVAL;
	if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
		return -EINVAL;
	if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
		return -EINVAL;

	/* Verify a change took place before updating the hardware. */
	if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
	    qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
	    qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
	    qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
		return 0;

	qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
	qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
	qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
	qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;

	return ql_update_ring_coalescing(qdev);
}

static void ql_get_pauseparam(struct net_device *netdev,
			      struct ethtool_pauseparam *pause)
{
	struct ql_adapter *qdev = netdev_priv(netdev);

	ql_mb_get_port_cfg(qdev);
	if (qdev->link_config & CFG_PAUSE_STD) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

static int ql_set_pauseparam(struct net_device *netdev,
			     struct ethtool_pauseparam *pause)
{
	struct ql_adapter *qdev = netdev_priv(netdev);
	int status = 0;

	if ((pause->rx_pause) && (pause->tx_pause))
		qdev->link_config |= CFG_PAUSE_STD;
	else if (!pause->rx_pause && !pause->tx_pause)
		qdev->link_config &= ~CFG_PAUSE_STD;
	else
		return -EINVAL;

	status = ql_mb_set_port_cfg(qdev);
	return status;
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	qdev->msg_enable = value;
}

const struct ethtool_ops qlge_ethtool_ops = {
	.get_drvinfo = ql_get_drvinfo,
	.get_wol = ql_get_wol,
	.set_wol = ql_set_wol,
	.get_regs_len = ql_get_regs_len,
	.get_regs = ql_get_regs,
	.get_msglevel = ql_get_msglevel,
	.set_msglevel = ql_set_msglevel,
	.get_link = ethtool_op_get_link,
	.set_phys_id = ql_set_phys_id,
	.self_test = ql_self_test,
	.get_pauseparam = ql_get_pauseparam,
	.set_pauseparam = ql_set_pauseparam,
	.get_coalesce = ql_get_coalesce,
	.set_coalesce = ql_set_coalesce,
	.get_sset_count = ql_get_sset_count,
	.get_strings = ql_get_strings,
	.get_ethtool_stats = ql_get_ethtool_stats,
	.get_link_ksettings = ql_get_link_ksettings,
};