/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pow.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-scratch.h>

#include <asm/octeon/cvmx-gmxx-defs.h>

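/* Set once RX initialization has finished; netpoll checks it before polling. */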
static atomic_t oct_rx_ready = ATOMIC_INIT(0);

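/*
 * Per-group RX state: each enabled POW group gets its own IRQ and NAPI
 * instance. The POW/SSO provides up to 16 work groups.
 */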
static struct oct_rx_group {
	int irq;
	int group;
	struct napi_struct napi;
} oct_rx_group[16];

/**
 * cvm_oct_do_interrupt - interrupt handler.
 * @irq: Interrupt number.
 * @napi_id: Cookie to identify the NAPI instance.
 *
 * The interrupt occurs whenever the POW has packets in our group.
 */
static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
{
	/* Disable the IRQ and start napi_poll. */
	disable_irq_nosync(irq);
	napi_schedule(napi_id);

	return IRQ_HANDLED;
}

/**
 * cvm_oct_check_rcv_error - process receive errors
 * @work: Work queue entry pointing to the packet.
 *
 * Returns non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	int port;

	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		port = work->word0.pip.cn68xx.pknd;
	else
		port = work->word1.cn38xx.ipprt;

	if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS. Note these packets still get
		 * counted as frame errors.
		 */
	} else if (work->word2.snoip.err_code == 5 ||
		   work->word2.snoip.err_code == 7) {
		/*
		 * We received a packet with either an alignment error
		 * or an FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non-spec
		 * preamble and generate a correct packet.
		 */
		int interface = cvmx_helper_get_interface_num(port);
		int index = cvmx_helper_get_interface_index_num(port);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;

		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			u8 *ptr =
			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			while (i < work->word1.len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/* Port received 0xd5 preamble */
				work->packet_ptr.s.addr += i + 1;
				work->word1.len -= i + 5;
			} else if ((*ptr & 0xf) == 0xd) {
				/* Port received 0xd preamble */
				work->packet_ptr.s.addr += i;
				work->word1.len -= i + 4;
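				/*
				 * The frame is misaligned by half a
				 * byte: rebuild each byte from its own
				 * high nibble and the low nibble of the
				 * byte that follows it.
				 */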
				for (i = 0; i < work->word1.len; i++) {
					*ptr =
					    ((*ptr & 0xf0) >> 4) |
					    ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
			} else {
				printk_ratelimited("Port %d unknown preamble, packet dropped\n",
						   port);
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		printk_ratelimited("Port %d receive error code %d, packet dropped\n",
				   port, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}

static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
{
	const int coreid = cvmx_get_core_num();
	u64 old_group_mask;
	u64 old_scratch;
	int rx_count = 0;
	int did_work_request = 0;
	int packet_not_copied;

	/* Prefetch cvm_oct_device since we know we need it soon */
	prefetch(cvm_oct_device);

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}

	/* Only allow work for our group (and preserve priorities) */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
			       BIT(rx_group->group));
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
			       (old_group_mask & ~0xFFFFull) |
			       BIT(rx_group->group));
	}

	if (USE_ASYNC_IOBDMA) {
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
		did_work_request = 1;
	}

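	/*
	 * Pull work entries from the POW until the budget is spent or
	 * the queue is empty.
	 */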
	while (rx_count < budget) {
		struct sk_buff *skb = NULL;
		struct sk_buff **pskb = NULL;
		int skb_in_hw;
		cvmx_wqe_t *work;
		int port;

		if (USE_ASYNC_IOBDMA && did_work_request)
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		else
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

		prefetch(work);
		did_work_request = 0;
		if (!work) {
			if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
				cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
					       BIT(rx_group->group));
				cvmx_write_csr(CVMX_SSO_WQ_INT,
					       BIT(rx_group->group));
			} else {
				union cvmx_pow_wq_int wq_int;

				wq_int.u64 = 0;
				wq_int.s.iq_dis = BIT(rx_group->group);
				wq_int.s.wq_int = BIT(rx_group->group);
				cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
			}
			break;
		}
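		/*
		 * When the FPA pool is filled with skbuffs, the skb
		 * address is stored in the word just before the packet
		 * data (see ethernet-mem.c), so it can be recovered
		 * here.
		 */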
		pskb = (struct sk_buff **)
			(cvm_oct_get_buffer_ptr(work->packet_ptr) -
			sizeof(void *));
		prefetch(pskb);

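		/*
		 * Ask for the next work entry now so the request can
		 * overlap with processing of the current packet.
		 */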
		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
							    CVMX_POW_NO_WAIT);
			did_work_request = 1;
		}
		rx_count++;

		skb_in_hw = work->word2.s.bufs == 1;
		if (likely(skb_in_hw)) {
			skb = *pskb;
			prefetch(&skb->head);
			prefetch(&skb->len);
		}

		if (octeon_has_feature(OCTEON_FEATURE_PKND))
			port = work->word0.pip.cn68xx.pknd;
		else
			port = work->word1.cn38xx.ipprt;

		prefetch(cvm_oct_device[port]);

		/* Immediately throw away all packets with receive errors */
		if (unlikely(work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/*
		 * We can only use the zero copy path if skbuffs are
		 * in the FPA pool and the packet fits in a single
		 * buffer.
		 */
		if (likely(skb_in_hw)) {
			skb->data = skb->head + work->packet_ptr.s.addr -
				cvmx_ptr_to_phys(skb->head);
			prefetch(skb->data);
			skb->len = work->word1.len;
			skb_set_tail_pointer(skb, skb->len);
			packet_not_copied = 1;
		} else {
			/*
			 * We have to copy the packet. First allocate
			 * an skbuff for it.
			 */
			skb = dev_alloc_skb(work->word1.len);
			if (!skb) {
				cvm_oct_free_work(work);
				continue;
			}

			/*
			 * Check if we've received a packet that was
			 * entirely stored in the work entry.
			 */
			if (unlikely(work->word2.s.bufs == 0)) {
				u8 *ptr = work->packet_data;

				if (likely(!work->word2.s.not_IP)) {
					/*
					 * The beginning of the packet
					 * moves for IP packets.
					 */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				memcpy(skb_put(skb, work->word1.len), ptr,
				       work->word1.len);
				/* No packet buffers to free */
			} else {
				int segments = work->word2.s.bufs;
				union cvmx_buf_ptr segment_ptr =
					work->packet_ptr;
				int len = work->word1.len;

				while (segments--) {
					union cvmx_buf_ptr next_ptr =
						*(union cvmx_buf_ptr *)
						cvmx_phys_to_ptr(
						segment_ptr.s.addr - 8);

					/*
					 * Octeon Errata PKI-100: The segment size is
					 * wrong. Until it is fixed, calculate the
					 * segment size based on the packet pool
					 * buffer size. When it is fixed, the
					 * following line should be replaced with this
					 * one: int segment_size =
					 * segment_ptr.s.size;
					 */
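					/*
					 * segment_ptr.s.back counts
					 * 128-byte cache lines from the
					 * fragment address back to the
					 * start of the FPA buffer, so
					 * the space left in this
					 * segment is the pool buffer
					 * size minus the fragment's
					 * offset into the buffer.
					 */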
					int segment_size =
						CVMX_FPA_PACKET_POOL_SIZE -
						(segment_ptr.s.addr -
						 (((segment_ptr.s.addr >> 7) -
						   segment_ptr.s.back) << 7));
					/*
					 * Don't copy more than what
					 * is left in the packet.
					 */
					if (segment_size > len)
						segment_size = len;
					/* Copy the data into the packet */
					memcpy(skb_put(skb, segment_size),
					       cvmx_phys_to_ptr(
					       segment_ptr.s.addr),
					       segment_size);
					len -= segment_size;
					segment_ptr = next_ptr;
				}
			}
			packet_not_copied = 0;
		}
		if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
			   cvm_oct_device[port])) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			/*
			 * Only accept packets for devices that are
			 * currently up.
			 */
			if (likely(dev->flags & IFF_UP)) {
				skb->protocol = eth_type_trans(skb, dev);
				skb->dev = dev;

				if (unlikely(work->word2.s.not_IP ||
					     work->word2.s.IP_exc ||
					     work->word2.s.L4_error ||
					     !work->word2.s.tcp_or_udp))
					skb->ip_summed = CHECKSUM_NONE;
				else
					skb->ip_summed = CHECKSUM_UNNECESSARY;

				/* Increment RX stats for virtual ports */
				if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
					priv->stats.rx_packets++;
					priv->stats.rx_bytes += skb->len;
				}
				netif_receive_skb(skb);
			} else {
				/*
				 * Drop any packet received for a device that
				 * isn't up.
				 */
				priv->stats.rx_dropped++;
				dev_kfree_skb_irq(skb);
			}
		} else {
			/*
			 * Drop any packet received for a device that
			 * doesn't exist.
			 */
			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
					   port);
			dev_kfree_skb_irq(skb);
		}
		/*
		 * Check to see if the skbuff and work share the same
		 * packet buffer.
		 */
		if (likely(packet_not_copied)) {
			/*
			 * This buffer needs to be replaced, increment
			 * the number of buffers we need to free by
			 * one.
			 */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      1);

			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
		} else {
			cvm_oct_free_work(work);
		}
	}
	/* Restore the original POW group mask */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	}

	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}
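	/* Replenish the receive buffer pool with any buffers we consumed. */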
	cvm_oct_rx_refill_pool(0);

	return rx_count;
}

/**
 * cvm_oct_napi_poll - the NAPI poll function.
 * @napi: The NAPI instance.
 * @budget: Maximum number of packets to receive.
 *
 * Returns the number of packets processed.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
	struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group,
						     napi);
	int rx_count;

	rx_count = cvm_oct_poll(rx_group, budget);

	if (rx_count < budget) {
		/* No more work */
		napi_complete(napi);
		enable_irq(rx_group->irq);
	}
	return rx_count;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * cvm_oct_poll_controller - poll for receive packets (netpoll)
 * @dev: Device to poll. Unused.
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
	int i;

	if (!atomic_read(&oct_rx_ready))
		return;

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		cvm_oct_poll(&oct_rx_group[i], 16);
	}
}
#endif

void cvm_oct_rx_initialize(void)
{
	int i;
	struct net_device *dev_for_napi = NULL;

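	/*
	 * NAPI instances must be attached to a net_device; use the
	 * first one that was allocated for all of the RX groups.
	 */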
	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i]) {
			dev_for_napi = cvm_oct_device[i];
			break;
		}
	}

	if (!dev_for_napi)
		panic("No net_devices were allocated.");

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		int ret;

		if (!(pow_receive_groups & BIT(i)))
			continue;

		netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
			       cvm_oct_napi_poll, rx_napi_weight);
		napi_enable(&oct_rx_group[i].napi);

		oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
		oct_rx_group[i].group = i;

		/* Register an IRQ handler to receive POW interrupts */
		ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
				  "Ethernet", &oct_rx_group[i].napi);
		if (ret)
			panic("Could not acquire Ethernet IRQ %d\n",
			      oct_rx_group[i].irq);

		disable_irq_nosync(oct_rx_group[i].irq);

		/* Enable POW interrupt when our port has at least one packet */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			union cvmx_sso_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);

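			/*
			 * Also arm the periodic interrupt counter as a
			 * backstop for the threshold interrupt.
			 */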
			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
		} else {
			union cvmx_pow_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);

			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
		}

		/* Schedule NAPI now. This will indirectly enable the
		 * interrupt.
		 */
		napi_schedule(&oct_rx_group[i].napi);
	}
	atomic_inc(&oct_rx_ready);
}

void cvm_oct_rx_shutdown(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		/* Disable POW interrupt */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX))
			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
		else
			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);

		/* Free the interrupt handler */
		free_irq(oct_rx_group[i].irq, &oct_rx_group[i].napi);

		netif_napi_del(&oct_rx_group[i].napi);
	}
}