/**********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
**********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <linux/atomic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pow.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-scratch.h>

#include <asm/octeon/cvmx-gmxx-defs.h>

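/*
 * Each CPU gets its own NAPI context so that receive processing can be
 * spread over several cores; the wrapper keeps every instance on its
 * own cache line.
 */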
struct cvm_napi_wrapper {
	struct napi_struct napi;
} ____cacheline_aligned_in_smp;

static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp;

struct cvm_oct_core_state {
	int baseline_cores;
	/*
	 * The number of additional cores that could be processing
	 * input packets.
	 */
	atomic_t available_cores;
	cpumask_t cpu_state;
} ____cacheline_aligned_in_smp;

static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;

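/*
 * CPU that most recently took the POW receive interrupt.  That CPU is
 * the one that re-enables the interrupt once its NAPI poll runs dry.
 */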
static int cvm_irq_cpu;

static void cvm_oct_enable_napi(void *_)
{
	int cpu = smp_processor_id();
	napi_schedule(&cvm_oct_napi[cpu].napi);
}

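/*
 * Wake up NAPI processing on one additional CPU, if the configured
 * limit of receive cores allows it.
 */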
static void cvm_oct_enable_one_cpu(void)
{
	int v;
	int cpu;

	/* Check to see if more CPUs are available for receive processing... */
	v = atomic_sub_if_positive(1, &core_state.available_cores);
	if (v < 0)
		return;

	/* ... if a CPU is available, turn on NAPI polling for that CPU. */
	for_each_online_cpu(cpu) {
		if (!cpu_test_and_set(cpu, core_state.cpu_state)) {
			v = smp_call_function_single(cpu, cvm_oct_enable_napi,
						     NULL, 0);
			if (v)
				panic("Can't enable NAPI.");
			break;
		}
	}
}

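/*
 * Called when a CPU finishes its NAPI poll with no work left.  The CPU
 * that took the interrupt re-enables it; any other CPU removes itself
 * from the active set and returns to the pool of available cores.
 */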
static void cvm_oct_no_more_work(void)
{
	int cpu = smp_processor_id();

	if (cpu == cvm_irq_cpu) {
		enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
		return;
	}

	cpu_clear(cpu, core_state.cpu_state);
	atomic_add(1, &core_state.available_cores);
}

/**
 * cvm_oct_do_interrupt - interrupt handler.
 *
 * The interrupt occurs whenever the POW has packets in our group.
 */
static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
	/* Disable the IRQ and start napi_poll. */
	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
	cvm_irq_cpu = smp_processor_id();
	cvm_oct_enable_napi(NULL);

	return IRQ_HANDLED;
}

/**
 * cvm_oct_check_rcv_error - process receive errors
 * @work: Work queue entry pointing to the packet.
 *
 * Returns non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS.  Note these packets still get
		 * counted as frame errors.
		 */
	} else if (USE_10MBPS_PREAMBLE_WORKAROUND
		   && ((work->word2.snoip.err_code == 5)
		       || (work->word2.snoip.err_code == 7))) {
		/*
		 * We received a packet with either an alignment error
		 * or an FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non-spec
		 * preamble and generate a correct packet.
		 */
		int interface = cvmx_helper_get_interface_num(work->ipprt);
		int index = cvmx_helper_get_interface_index_num(work->ipprt);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;

		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			uint8_t *ptr =
			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

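			/*
			 * Skip over the 0x55 preamble bytes at the
			 * start of the frame.
			 */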
			while (i < work->len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/*
				  printk_ratelimited("Port %d received 0xd5 preamble\n", work->ipprt);
				*/
				work->packet_ptr.s.addr += i + 1;
				work->len -= i + 5;
			} else if ((*ptr & 0xf) == 0xd) {
				/*
				  printk_ratelimited("Port %d received 0x?d preamble\n", work->ipprt);
				*/
				work->packet_ptr.s.addr += i;
				work->len -= i + 4;
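				/*
				 * The data is offset by half a byte:
				 * rebuild each byte from the upper
				 * nibble of this byte and the lower
				 * nibble of the next one, shifting the
				 * packet by four bits.
				 */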
				for (i = 0; i < work->len; i++) {
					*ptr =
					    ((*ptr & 0xf0) >> 4) |
					    ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
			} else {
				printk_ratelimited("Port %d unknown preamble, packet dropped\n",
						   work->ipprt);
				/*
				  cvmx_helper_dump_packet(work);
				*/
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		printk_ratelimited("Port %d receive error code %d, packet dropped\n",
				   work->ipprt, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}

/**
 * cvm_oct_napi_poll - the NAPI poll function.
 * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller
 * @budget: Maximum number of packets to receive.
 *
 * Returns the number of packets processed.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
	const int coreid = cvmx_get_core_num();
	uint64_t old_group_mask;
	uint64_t old_scratch;
	int rx_count = 0;
	int did_work_request = 0;
	int packet_not_copied;

	/* Prefetch cvm_oct_device since we know we need it soon */
	prefetch(cvm_oct_device);

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}

	/* Only allow work for our group (and preserve priorities) */
	old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
		       (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);

	if (USE_ASYNC_IOBDMA) {
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
		did_work_request = 1;
	}

	while (rx_count < budget) {
		struct sk_buff *skb = NULL;
		struct sk_buff **pskb = NULL;
		int skb_in_hw;
		cvmx_wqe_t *work;

		if (USE_ASYNC_IOBDMA && did_work_request)
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		else
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

		prefetch(work);
		did_work_request = 0;
		if (work == NULL) {
			union cvmx_pow_wq_int wq_int;
			wq_int.u64 = 0;
			wq_int.s.iq_dis = 1 << pow_receive_group;
			wq_int.s.wq_int = 1 << pow_receive_group;
			cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
			break;
		}
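		/*
		 * When the hardware fills skbuffs directly, the pointer
		 * to the struct sk_buff is stored in the memory just in
		 * front of the packet buffer.
		 */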
		pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
		prefetch(pskb);

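		/*
		 * Ask the POW for the next work entry now so that the
		 * request overlaps with processing of this packet.
		 */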
		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
			did_work_request = 1;
		}

		if (rx_count == 0) {
			/*
			 * First time through, see if there is enough
			 * work waiting to merit waking another
			 * CPU.
			 */
			union cvmx_pow_wq_int_cntx counts;
			int backlog;
			int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores);
			counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group));
			backlog = counts.s.iq_cnt + counts.s.ds_cnt;
			if (backlog > budget * cores_in_use && napi != NULL)
				cvm_oct_enable_one_cpu();
		}
		rx_count++;

		skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
		if (likely(skb_in_hw)) {
			skb = *pskb;
			prefetch(&skb->head);
			prefetch(&skb->len);
		}
		prefetch(cvm_oct_device[work->ipprt]);

		/* Immediately throw away all packets with receive errors */
		if (unlikely(work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/*
		 * We can only use the zero copy path if skbuffs are
		 * in the FPA pool and the packet fits in a single
		 * buffer.
		 */
		if (likely(skb_in_hw)) {
			skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head);
			prefetch(skb->data);
			skb->len = work->len;
			skb_set_tail_pointer(skb, skb->len);
			packet_not_copied = 1;
		} else {
			/*
			 * We have to copy the packet. First allocate
			 * an skbuff for it.
			 */
			skb = dev_alloc_skb(work->len);
			if (!skb) {
				cvm_oct_free_work(work);
				continue;
			}

			/*
			 * Check if we've received a packet that was
			 * entirely stored in the work entry.
			 */
			if (unlikely(work->word2.s.bufs == 0)) {
				uint8_t *ptr = work->packet_data;

				if (likely(!work->word2.s.not_IP)) {
					/*
					 * The beginning of the packet
					 * moves for IP packets.
					 */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				memcpy(skb_put(skb, work->len), ptr, work->len);
				/* No packet buffers to free */
			} else {
				int segments = work->word2.s.bufs;
				union cvmx_buf_ptr segment_ptr = work->packet_ptr;
				int len = work->len;

				while (segments--) {
					union cvmx_buf_ptr next_ptr =
					    *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

					/*
					 * Octeon Errata PKI-100: The segment size is
					 * wrong. Until it is fixed, calculate the
					 * segment size based on the packet pool
					 * buffer size. When it is fixed, the
					 * following line should be replaced with this
					 * one: int segment_size =
					 * segment_ptr.s.size;
					 */
					int segment_size = CVMX_FPA_PACKET_POOL_SIZE -
						(segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
					/*
					 * Don't copy more than what
					 * is left in the packet.
					 */
					if (segment_size > len)
						segment_size = len;
					/* Copy the data into the packet */
					memcpy(skb_put(skb, segment_size),
					       cvmx_phys_to_ptr(segment_ptr.s.addr),
					       segment_size);
					len -= segment_size;
					segment_ptr = next_ptr;
				}
			}
			packet_not_copied = 0;
		}

		if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) &&
			   cvm_oct_device[work->ipprt])) {
			struct net_device *dev = cvm_oct_device[work->ipprt];
			struct octeon_ethernet *priv = netdev_priv(dev);

			/*
			 * Only accept packets for devices that are
			 * currently up.
			 */
			if (likely(dev->flags & IFF_UP)) {
				skb->protocol = eth_type_trans(skb, dev);
				skb->dev = dev;

				if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc ||
					     work->word2.s.L4_error || !work->word2.s.tcp_or_udp))
					skb->ip_summed = CHECKSUM_NONE;
				else
					skb->ip_summed = CHECKSUM_UNNECESSARY;

				/* Increment RX stats for virtual ports */
				if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
#ifdef CONFIG_64BIT
					atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
					atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
#else
					atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
					atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
#endif
				}
				netif_receive_skb(skb);
			} else {
				/* Drop any packet received for a device that isn't up */
				/*
				  printk_ratelimited("%s: Device not up, packet dropped\n",
				  dev->name);
				*/
#ifdef CONFIG_64BIT
				atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
#else
				atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
#endif
				dev_kfree_skb_irq(skb);
			}
		} else {
			/*
			 * Drop any packet received for a device that
			 * doesn't exist.
			 */
			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
					   work->ipprt);
			dev_kfree_skb_irq(skb);
		}
		/*
		 * Check to see if the skbuff and work share the same
		 * packet buffer.
		 */
		if (USE_SKBUFFS_IN_HW && likely(packet_not_copied)) {
			/*
			 * This buffer needs to be replaced, increment
			 * the number of buffers we need to free by
			 * one.
			 */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      1);

			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL,
				      DONT_WRITEBACK(1));
		} else {
			cvm_oct_free_work(work);
		}
	}
	/* Restore the original POW group mask */
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}
	cvm_oct_rx_refill_pool(0);

	if (rx_count < budget && napi != NULL) {
		/* No more work */
		napi_complete(napi);
		cvm_oct_no_more_work();
	}
	return rx_count;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * cvm_oct_poll_controller - poll for receive packets on a device
 *
 * @dev: Device to poll. Unused
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
	cvm_oct_napi_poll(NULL, 16);
}
#endif

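/*
 * cvm_oct_rx_initialize - set up receive processing: create a NAPI
 * instance for every possible CPU, install the POW interrupt handler
 * and schedule NAPI on one CPU to get things going.
 */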
void cvm_oct_rx_initialize(void)
{
	int i;
	struct net_device *dev_for_napi = NULL;
	union cvmx_pow_wq_int_thrx int_thr;
	union cvmx_pow_wq_int_pc int_pc;

	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i]) {
			dev_for_napi = cvm_oct_device[i];
			break;
		}
	}

	if (NULL == dev_for_napi)
		panic("No net_devices were allocated.");

	if (max_rx_cpus >= 1 && max_rx_cpus < num_online_cpus())
		atomic_set(&core_state.available_cores, max_rx_cpus);
	else
		atomic_set(&core_state.available_cores, num_online_cpus());
	core_state.baseline_cores = atomic_read(&core_state.available_cores);

	core_state.cpu_state = CPU_MASK_NONE;
	for_each_possible_cpu(i) {
		netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
			       cvm_oct_napi_poll, rx_napi_weight);
		napi_enable(&cvm_oct_napi[i].napi);
	}
	/* Register an IRQ handler to receive POW interrupts */
	i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
			cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);

	if (i)
		panic("Could not acquire Ethernet IRQ %d\n",
		      OCTEON_IRQ_WORKQ0 + pow_receive_group);

	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);

	int_thr.u64 = 0;
	int_thr.s.tc_en = 1;
	int_thr.s.tc_thr = 1;
	/* Enable POW interrupt when our port has at least one packet */
	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64);

	int_pc.u64 = 0;
	int_pc.s.pc_thr = 5;
	cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);

	/* Schedule NAPI now. This will indirectly enable interrupts. */
	cvm_oct_enable_one_cpu();
}

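/*
 * cvm_oct_rx_shutdown - undo cvm_oct_rx_initialize by removing the
 * per-CPU NAPI instances.
 */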
void cvm_oct_rx_shutdown(void)
{
	int i;
	/* Shutdown all of the NAPIs */
	for_each_possible_cpu(i)
		netif_napi_del(&cvm_oct_napi[i].napi);
}