/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <linux/atomic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pow.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-scratch.h>

#include <asm/octeon/cvmx-gmxx-defs.h>

static struct napi_struct cvm_oct_napi;

/**
 * cvm_oct_do_interrupt - interrupt handler.
 * @cpl: Interrupt number. Unused.
 * @dev_id: Cookie to identify the device. Unused.
 *
 * The interrupt occurs whenever the POW has packets in our group.
 */
static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
	/* Disable the IRQ and start napi_poll. */
	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
	napi_schedule(&cvm_oct_napi);

	return IRQ_HANDLED;
}

/**
 * cvm_oct_check_rcv_error - process receive errors
 * @work: Work queue entry pointing to the packet.
 *
 * Returns non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	int port;

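	/*
	 * Chips with the PKND feature report a port kind in word0;
	 * older chips carry the IPD port number in word1.
	 */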
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		port = work->word0.pip.cn68xx.pknd;
	else
		port = work->word1.cn38xx.ipprt;

	if ((work->word2.snoip.err_code == 10) && (work->word1.len <= 64)) {
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS. Note these packets still get
		 * counted as frame errors.
		 */
	} else if (work->word2.snoip.err_code == 5 ||
		   work->word2.snoip.err_code == 7) {
		/*
		 * We received a packet with either an alignment error
		 * or a FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non spec
		 * preamble and generate a correct packet.
		 */
		int interface = cvmx_helper_get_interface_num(port);
		int index = cvmx_helper_get_interface_index_num(port);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;

		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			uint8_t *ptr =
			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

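			/*
			 * Skip over any 0x55 preamble bytes; the loop
			 * stops one byte short of the end so the SFD
			 * check below cannot read past the packet.
			 */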
			while (i < work->word1.len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/*
				printk_ratelimited("Port %d received 0xd5 preamble\n",
						   port);
				*/
				work->packet_ptr.s.addr += i + 1;
				work->word1.len -= i + 5;
			} else if ((*ptr & 0xf) == 0xd) {
				/*
				printk_ratelimited("Port %d received 0x?d preamble\n",
						   port);
				*/
				work->packet_ptr.s.addr += i;
				work->word1.len -= i + 4;
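				/*
				 * The extra preamble ends on a nibble
				 * boundary, so shift the rest of the
				 * packet left by four bits to realign
				 * it.
				 */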
				for (i = 0; i < work->word1.len; i++) {
					*ptr =
					    ((*ptr & 0xf0) >> 4) |
					    ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
			} else {
				printk_ratelimited("Port %d unknown preamble, packet dropped\n",
						   port);
				/*
				cvmx_helper_dump_packet(work);
				*/
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		printk_ratelimited("Port %d receive error code %d, packet dropped\n",
				   port, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}

/**
 * cvm_oct_napi_poll - the NAPI poll function.
 * @napi: The NAPI instance, or null if called from cvm_oct_poll_controller
 * @budget: Maximum number of packets to receive.
 *
 * Returns the number of packets processed.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
	const int coreid = cvmx_get_core_num();
	uint64_t old_group_mask;
	uint64_t old_scratch;
	int rx_count = 0;
	int did_work_request = 0;
	int packet_not_copied;

	/* Prefetch cvm_oct_device since we know we need it soon */
	prefetch(cvm_oct_device);

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}

	/* Only allow work for our group (and preserve priorities) */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
			       1ull << pow_receive_group);
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
			       (old_group_mask & ~0xFFFFull) |
			       1 << pow_receive_group);
	}

	if (USE_ASYNC_IOBDMA) {
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
		did_work_request = 1;
	}

	while (rx_count < budget) {
		struct sk_buff *skb = NULL;
		struct sk_buff **pskb = NULL;
		int skb_in_hw;
		cvmx_wqe_t *work;
		int port;

		if (USE_ASYNC_IOBDMA && did_work_request)
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		else
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

		prefetch(work);
		did_work_request = 0;
		if (work == NULL) {
			if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
				cvmx_write_csr(CVMX_SSO_WQ_IQ_DIS,
					       1ull << pow_receive_group);
				cvmx_write_csr(CVMX_SSO_WQ_INT,
					       1ull << pow_receive_group);
			} else {
				union cvmx_pow_wq_int wq_int;

				wq_int.u64 = 0;
				wq_int.s.iq_dis = 1 << pow_receive_group;
				wq_int.s.wq_int = 1 << pow_receive_group;
				cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
			}
			break;
		}
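		/*
		 * The skb that owns this hardware buffer was stored
		 * immediately before the packet data when the buffer
		 * was seeded into the FPA pool (see ethernet-mem.c).
		 */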
		pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) -
			sizeof(void *));
		prefetch(pskb);

		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
							    CVMX_POW_NO_WAIT);
			did_work_request = 1;
		}
		rx_count++;

		skb_in_hw = work->word2.s.bufs == 1;
		if (likely(skb_in_hw)) {
			skb = *pskb;
			prefetch(&skb->head);
			prefetch(&skb->len);
		}

		if (octeon_has_feature(OCTEON_FEATURE_PKND))
			port = work->word0.pip.cn68xx.pknd;
		else
			port = work->word1.cn38xx.ipprt;

		prefetch(cvm_oct_device[port]);

		/* Immediately throw away all packets with receive errors */
		if (unlikely(work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/*
		 * We can only use the zero copy path if skbuffs are
		 * in the FPA pool and the packet fits in a single
		 * buffer.
		 */
		if (likely(skb_in_hw)) {
			skb->data = skb->head + work->packet_ptr.s.addr -
				cvmx_ptr_to_phys(skb->head);
			prefetch(skb->data);
			skb->len = work->word1.len;
			skb_set_tail_pointer(skb, skb->len);
			packet_not_copied = 1;
		} else {
			/*
			 * We have to copy the packet. First allocate
			 * an skbuff for it.
			 */
			skb = dev_alloc_skb(work->word1.len);
			if (!skb) {
				cvm_oct_free_work(work);
				continue;
			}

			/*
			 * Check if we've received a packet that was
			 * entirely stored in the work entry.
			 */
			if (unlikely(work->word2.s.bufs == 0)) {
				uint8_t *ptr = work->packet_data;

				if (likely(!work->word2.s.not_IP)) {
					/*
					 * The beginning of the packet
					 * moves for IP packets.
					 */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				memcpy(skb_put(skb, work->word1.len), ptr,
				       work->word1.len);
				/* No packet buffers to free */
			} else {
				int segments = work->word2.s.bufs;
				union cvmx_buf_ptr segment_ptr =
					work->packet_ptr;
				int len = work->word1.len;

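				/*
				 * Walk the chain of hardware buffers,
				 * copying each segment into the new
				 * skb. The pointer to the next buffer
				 * is stored eight bytes before the
				 * current segment's data.
				 */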
				while (segments--) {
					union cvmx_buf_ptr next_ptr =
					    *(union cvmx_buf_ptr *)cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

					/*
					 * Octeon Errata PKI-100: The segment size is
					 * wrong. Until it is fixed, calculate the
					 * segment size based on the packet pool
					 * buffer size. When it is fixed, the
					 * following line should be replaced with this
					 * one: int segment_size =
					 *	segment_ptr.s.size;
					 */
					int segment_size =
					    CVMX_FPA_PACKET_POOL_SIZE -
					    (segment_ptr.s.addr -
					     (((segment_ptr.s.addr >> 7) -
					       segment_ptr.s.back) << 7));
					/*
					 * Don't copy more than what
					 * is left in the packet.
					 */
					if (segment_size > len)
						segment_size = len;
					/* Copy the data into the packet */
					memcpy(skb_put(skb, segment_size),
					       cvmx_phys_to_ptr(segment_ptr.s.addr),
					       segment_size);
					len -= segment_size;
					segment_ptr = next_ptr;
				}
			}
			packet_not_copied = 0;
		}
		if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
			   cvm_oct_device[port])) {
			struct net_device *dev = cvm_oct_device[port];
			struct octeon_ethernet *priv = netdev_priv(dev);

			/*
			 * Only accept packets for devices that are
			 * currently up.
			 */
			if (likely(dev->flags & IFF_UP)) {
				skb->protocol = eth_type_trans(skb, dev);
				skb->dev = dev;

				if (unlikely(work->word2.s.not_IP ||
					     work->word2.s.IP_exc ||
					     work->word2.s.L4_error ||
					     !work->word2.s.tcp_or_udp))
					skb->ip_summed = CHECKSUM_NONE;
				else
					skb->ip_summed = CHECKSUM_UNNECESSARY;

				/* Increment RX stats for virtual ports */
				if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
#ifdef CONFIG_64BIT
					atomic64_add(1,
						     (atomic64_t *)&priv->stats.rx_packets);
					atomic64_add(skb->len,
						     (atomic64_t *)&priv->stats.rx_bytes);
#else
					atomic_add(1,
						   (atomic_t *)&priv->stats.rx_packets);
					atomic_add(skb->len,
						   (atomic_t *)&priv->stats.rx_bytes);
#endif
				}
				netif_receive_skb(skb);
			} else {
				/*
				 * Drop any packet received for a
				 * device that isn't up.
				 */
				/*
				printk_ratelimited("%s: Device not up, packet dropped\n",
						   dev->name);
				*/
#ifdef CONFIG_64BIT
				atomic64_add(1,
					     (atomic64_t *)&priv->stats.rx_dropped);
#else
				atomic_add(1,
					   (atomic_t *)&priv->stats.rx_dropped);
#endif
				dev_kfree_skb_irq(skb);
			}
		} else {
			/*
			 * Drop any packet received for a device that
			 * doesn't exist.
			 */
			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
					   port);
			dev_kfree_skb_irq(skb);
		}
		/*
		 * Check to see if the skbuff and work share the same
		 * packet buffer.
		 */
		if (likely(packet_not_copied)) {
			/*
			 * This buffer needs to be replaced, increment
			 * the number of buffers we need to free by
			 * one.
			 */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      1);

			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
		} else {
			cvm_oct_free_work(work);
		}
	}
	/* Restore the original POW group mask */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid), old_group_mask);
		cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
	} else {
		cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	}

	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}
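	/*
	 * Replenish the receive packet buffer pool to replace the
	 * skbuffs handed up to the stack above.
	 */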
	cvm_oct_rx_refill_pool(0);

	if (rx_count < budget && napi != NULL) {
		/* No more work */
		napi_complete(napi);
		enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
	}
	return rx_count;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * cvm_oct_poll_controller - poll for receive packets
 * @dev: Device to poll. Unused
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
	cvm_oct_napi_poll(NULL, 16);
}
#endif

void cvm_oct_rx_initialize(void)
{
	int i;
	struct net_device *dev_for_napi = NULL;

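	/*
	 * NAPI polling is shared by all ports, but the instance must
	 * be registered against some net_device, so use the first one
	 * that was allocated.
	 */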
	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i]) {
			dev_for_napi = cvm_oct_device[i];
			break;
		}
	}

	if (NULL == dev_for_napi)
		panic("No net_devices were allocated.");

	netif_napi_add(dev_for_napi, &cvm_oct_napi, cvm_oct_napi_poll,
		       rx_napi_weight);
	napi_enable(&cvm_oct_napi);

	/* Register an IRQ handler to receive POW interrupts */
	i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
			cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);

	if (i)
		panic("Could not acquire Ethernet IRQ %d\n",
		      OCTEON_IRQ_WORKQ0 + pow_receive_group);

	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);

	/* Enable POW interrupt when our port has at least one packet */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		union cvmx_sso_wq_int_thrx int_thr;
		union cvmx_pow_wq_int_pc int_pc;

		int_thr.u64 = 0;
		int_thr.s.tc_en = 1;
		int_thr.s.tc_thr = 1;
		cvmx_write_csr(CVMX_SSO_WQ_INT_THRX(pow_receive_group),
			       int_thr.u64);

		int_pc.u64 = 0;
		int_pc.s.pc_thr = 5;
		cvmx_write_csr(CVMX_SSO_WQ_INT_PC, int_pc.u64);
	} else {
		union cvmx_pow_wq_int_thrx int_thr;
		union cvmx_pow_wq_int_pc int_pc;

		int_thr.u64 = 0;
		int_thr.s.tc_en = 1;
		int_thr.s.tc_thr = 1;
		cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group),
			       int_thr.u64);

		int_pc.u64 = 0;
		int_pc.s.pc_thr = 5;
		cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
	}

	/* Schedule NAPI now. This will indirectly enable the interrupt. */
	napi_schedule(&cvm_oct_napi);
}

void cvm_oct_rx_shutdown(void)
{
	netif_napi_del(&cvm_oct_napi);
}