/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pow.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-scratch.h>

#include <asm/octeon/cvmx-gmxx-defs.h>

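/*
 * Becomes nonzero once cvm_oct_rx_initialize() has brought up the
 * receive groups; the netpoll path checks it before touching
 * oct_rx_group[].
 */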
static atomic_t oct_rx_ready = ATOMIC_INIT(0);

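/*
 * Per-POW-group receive context: the IRQ that signals pending work for
 * the group and the NAPI instance that drains it.
 */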
static struct oct_rx_group {
	int irq;
	int group;
	struct napi_struct napi;
} oct_rx_group[16];

/**
 * cvm_oct_do_interrupt - interrupt handler.
 * @irq: Interrupt number.
 * @napi_id: Cookie to identify the NAPI instance.
 *
 * The interrupt occurs whenever the POW has packets in our group.
 *
 */
static irqreturn_t cvm_oct_do_interrupt(int irq, void *napi_id)
{
	/* Disable the IRQ and start napi_poll. */
	disable_irq_nosync(irq);
	napi_schedule(napi_id);

	return IRQ_HANDLED;
}

/**
 * cvm_oct_check_rcv_error - process receive errors
 * @work: Work queue entry pointing to the packet.
 *
 * Returns non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	int port;

	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		port = work->word0.pip.cn68xx.pknd;
	else
		port = work->word1.cn38xx.ipprt;

	if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS. Note these packets still get
		 * counted as frame errors.
		 */
	} else if (work->word2.snoip.err_code == 5 ||
		   work->word2.snoip.err_code == 7) {
		/*
		 * We received a packet with either an alignment error
		 * or a FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non spec
		 * preamble and generate a correct packet.
		 */
		int interface = cvmx_helper_get_interface_num(port);
		int index = cvmx_helper_get_interface_index_num(port);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;

		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			u8 *ptr =
			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

			while (i < work->word1.len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/* Port received 0xd5 preamble */
				work->packet_ptr.s.addr += i + 1;
				work->word1.len -= i + 5;
			} else if ((*ptr & 0xf) == 0xd) {
				/* Port received 0xd preamble */
				work->packet_ptr.s.addr += i;
				work->word1.len -= i + 4;
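				/*
				 * The frame is offset by half a byte:
				 * rebuild each byte from its own high
				 * nibble and the next byte's low nibble
				 * to realign the data.
				 */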
				for (i = 0; i < work->word1.len; i++) {
					*ptr =
					    ((*ptr & 0xf0) >> 4) |
					    ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
			} else {
				printk_ratelimited("Port %d unknown preamble, packet dropped\n",
						   port);
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		printk_ratelimited("Port %d receive error code %d, packet dropped\n",
				   port, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}

static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
{
	const int coreid = cvmx_get_core_num();
	u64 old_group_mask;
	u64 old_scratch;
	int rx_count = 0;
	int did_work_request = 0;
	int packet_not_copied;

	/* Prefetch cvm_oct_device since we know we need it soon */
	prefetch(cvm_oct_device);

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}

	/* Only allow work for our group (and preserve priorities) */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
			       BIT(rx_group->group));
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
			       (old_group_mask & ~0xFFFFull) |
			       BIT(rx_group->group));
	}

	if (USE_ASYNC_IOBDMA) {
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
		did_work_request = 1;
	}

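	/*
	 * Pull work entries from the POW until the NAPI budget is spent
	 * or the hardware has no more work for this group.
	 */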
	while (rx_count < budget) {
		struct sk_buff *skb = NULL;
		struct sk_buff **pskb = NULL;
		int skb_in_hw;
		cvmx_wqe_t *work;
		int port;

		if (USE_ASYNC_IOBDMA && did_work_request)
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		else
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

		prefetch(work);
		did_work_request = 0;
		if (!work) {
			if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
				cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
					       BIT(rx_group->group));
				cvmx_write_csr(CVMX_SSO_WQ_INT,
					       BIT(rx_group->group));
			} else {
				union cvmx_pow_wq_int wq_int;

				wq_int.u64 = 0;
				wq_int.s.iq_dis = BIT(rx_group->group);
				wq_int.s.wq_int = BIT(rx_group->group);
				cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
			}
			break;
		}
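		/*
		 * The skb pointer for a hardware-owned buffer is stashed
		 * in the word just before the packet data (set up when
		 * the buffer was seeded into the FPA pool in
		 * ethernet-mem.c).
		 */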
		pskb = (struct sk_buff **)
			(cvm_oct_get_buffer_ptr(work->packet_ptr) -
			sizeof(void *));
		prefetch(pskb);

		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
							    CVMX_POW_NO_WAIT);
			did_work_request = 1;
		}
		rx_count++;

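		/*
		 * bufs == 1 means the entire packet landed in a single
		 * FPA buffer that already lives inside an skb, so the
		 * zero-copy path below can hand it straight to the
		 * stack.
		 */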
		skb_in_hw = work->word2.s.bufs == 1;
		if (likely(skb_in_hw)) {
			skb = *pskb;
			prefetch(&skb->head);
			prefetch(&skb->len);
		}

		if (octeon_has_feature(OCTEON_FEATURE_PKND))
			port = work->word0.pip.cn68xx.pknd;
		else
			port = work->word1.cn38xx.ipprt;

		prefetch(cvm_oct_device[port]);

		/* Immediately throw away all packets with receive errors */
		if (unlikely(work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/*
		 * We can only use the zero copy path if skbuffs are
		 * in the FPA pool and the packet fits in a single
		 * buffer.
		 */
		if (likely(skb_in_hw)) {
			skb->data = skb->head + work->packet_ptr.s.addr -
				cvmx_ptr_to_phys(skb->head);
			prefetch(skb->data);
			skb->len = work->word1.len;
			skb_set_tail_pointer(skb, skb->len);
			packet_not_copied = 1;
		} else {
			/*
			 * We have to copy the packet. First allocate
			 * an skbuff for it.
			 */
			skb = dev_alloc_skb(work->word1.len);
			if (!skb) {
				cvm_oct_free_work(work);
				continue;
			}

			/*
			 * Check if we've received a packet that was
			 * entirely stored in the work entry.
			 */
			if (unlikely(work->word2.s.bufs == 0)) {
				u8 *ptr = work->packet_data;

				if (likely(!work->word2.s.not_IP)) {
					/*
					 * The beginning of the packet
					 * moves for IP packets.
					 */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				memcpy(skb_put(skb, work->word1.len), ptr,
				       work->word1.len);
				/* No packet buffers to free */
			} else {
				int segments = work->word2.s.bufs;
				union cvmx_buf_ptr segment_ptr =
					work->packet_ptr;
				int len = work->word1.len;

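				/*
				 * Walk the hardware buffer chain,
				 * copying each segment into the newly
				 * allocated skb.
				 */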
				while (segments--) {
					union cvmx_buf_ptr next_ptr =
						*(union cvmx_buf_ptr *)
						cvmx_phys_to_ptr(
						segment_ptr.s.addr - 8);

					/*
					 * Octeon Errata PKI-100: The segment size is
					 * wrong. Until it is fixed, calculate the
					 * segment size based on the packet pool
					 * buffer size. When it is fixed, the
					 * following line should be replaced with this
					 * one: int segment_size =
					 * segment_ptr.s.size;
					 */
					int segment_size =
					    CVMX_FPA_PACKET_POOL_SIZE -
					    (segment_ptr.s.addr -
					    (((segment_ptr.s.addr >> 7) -
					    segment_ptr.s.back) << 7));
					/*
					 * Don't copy more than what
					 * is left in the packet.
					 */
					if (segment_size > len)
						segment_size = len;
					/* Copy the data into the packet */
					memcpy(skb_put(skb, segment_size),
					       cvmx_phys_to_ptr(
					       segment_ptr.s.addr),
					       segment_size);
					len -= segment_size;
					segment_ptr = next_ptr;
				}
			}
			packet_not_copied = 0;
		}
		if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
			   cvm_oct_device[port])) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			/*
			 * Only accept packets for devices that are
			 * currently up.
			 */
			if (likely(dev->flags & IFF_UP)) {
				skb->protocol = eth_type_trans(skb, dev);
				skb->dev = dev;

				if (unlikely(work->word2.s.not_IP ||
					     work->word2.s.IP_exc ||
					     work->word2.s.L4_error ||
					     !work->word2.s.tcp_or_udp))
					skb->ip_summed = CHECKSUM_NONE;
				else
					skb->ip_summed = CHECKSUM_UNNECESSARY;

				/* Increment RX stats for virtual ports */
				if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
					priv->stats.rx_packets++;
					priv->stats.rx_bytes += skb->len;
				}
				netif_receive_skb(skb);
			} else {
				/*
				 * Drop any packet received for a device that
				 * isn't up.
				 */
				priv->stats.rx_dropped++;
				dev_kfree_skb_irq(skb);
			}
		} else {
			/*
			 * Drop any packet received for a device that
			 * doesn't exist.
			 */
			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
					   port);
			dev_kfree_skb_irq(skb);
		}
		/*
		 * Check to see if the skbuff and work share the same
		 * packet buffer.
		 */
		if (likely(packet_not_copied)) {
			/*
			 * This buffer needs to be replaced, increment
			 * the number of buffers we need to free by
			 * one.
			 */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      1);

			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
		} else {
			cvm_oct_free_work(work);
		}
	}
	/* Restore the original POW group mask */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	}

	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}
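	/* Replenish the packet buffer pool to replace the skbs consumed. */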
	cvm_oct_rx_refill_pool(0);

	return rx_count;
}

/**
 * cvm_oct_napi_poll - the NAPI poll function.
 * @napi: The NAPI instance.
 * @budget: Maximum number of packets to receive.
 *
 * Returns the number of packets processed.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
	struct oct_rx_group *rx_group = container_of(napi, struct oct_rx_group,
						     napi);
	int rx_count;

	rx_count = cvm_oct_poll(rx_group, budget);

	if (rx_count < budget) {
		/* No more work */
		napi_complete(napi);
		enable_irq(rx_group->irq);
	}
	return rx_count;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * cvm_oct_poll_controller - poll for receive packets on a device.
 * @dev: Device to poll. Unused.
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
	int i;

	if (!atomic_read(&oct_rx_ready))
		return;

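	/* Drain up to 16 packets from each active receive group. */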
	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		cvm_oct_poll(&oct_rx_group[i], 16);
	}
}
#endif

void cvm_oct_rx_initialize(void)
{
	int i;
	struct net_device *dev_for_napi = NULL;

	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i]) {
			dev_for_napi = cvm_oct_device[i];
			break;
		}
	}

	if (!dev_for_napi)
		panic("No net_devices were allocated.");

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		int ret;

		if (!(pow_receive_groups & BIT(i)))
			continue;

		netif_napi_add(dev_for_napi, &oct_rx_group[i].napi,
			       cvm_oct_napi_poll, rx_napi_weight);
		napi_enable(&oct_rx_group[i].napi);

		oct_rx_group[i].irq = OCTEON_IRQ_WORKQ0 + i;
		oct_rx_group[i].group = i;

		/* Register an IRQ handler to receive POW interrupts */
		ret = request_irq(oct_rx_group[i].irq, cvm_oct_do_interrupt, 0,
				  "Ethernet", &oct_rx_group[i].napi);
		if (ret)
			panic("Could not acquire Ethernet IRQ %d\n",
			      oct_rx_group[i].irq);

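		/*
		 * Keep the IRQ masked for now; cvm_oct_napi_poll()
		 * re-enables it once the NAPI pass scheduled below runs
		 * out of work.
		 */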
		disable_irq_nosync(oct_rx_group[i].irq);

		/* Enable POW interrupt when our port has at least one packet */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			union cvmx_sso_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), int_thr.u64);

			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
		} else {
			union cvmx_pow_wq_int_thrx int_thr;
			union cvmx_pow_wq_int_pc int_pc;

			int_thr.u64 = 0;
			int_thr.s.tc_en = 1;
			int_thr.s.tc_thr = 1;
			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), int_thr.u64);

			int_pc.u64 = 0;
			int_pc.s.pc_thr = 5;
			cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
		}

		/* Schedule NAPI now. This will indirectly enable the
		 * interrupt.
		 */
		napi_schedule(&oct_rx_group[i].napi);
	}
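	/* Mark RX as ready for the netpoll path. */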
	atomic_inc(&oct_rx_ready);
}

void cvm_oct_rx_shutdown(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(oct_rx_group); i++) {
		if (!(pow_receive_groups & BIT(i)))
			continue;

		/* Disable POW interrupt */
		if (OCTEON_IS_MODEL(OCTEON_CN68XX))
			cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(i), 0);
		else
			cvmx_write_csr(CVMX_POW_WQ_INT_THRX(i), 0);

		/* Free the interrupt handler */
		free_irq(oct_rx_group[i].irq, cvm_oct_device);

		netif_napi_del(&oct_rx_group[i].napi);
	}
}