/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *          Santiago Leon <santil@linux.vnet.ibm.com>
 *          Brian King <brking@linux.vnet.ibm.com>
 *          Robert Jennings <rcj@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;


static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.05"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");

static bool old_large_send __read_mostly;
module_param(old_large_send, bool, S_IRUGO);
MODULE_PARM_DESC(old_large_send,
	"Use old large send method on firmware that supports the new method");

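/*
 * The parameters above can be set at module load time, e.g. (illustrative
 * values only) "modprobe ibmveth tx_copybreak=256 rx_flush=1".  Those
 * declared with mode 0644 are also exposed read/write under
 * /sys/module/ibmveth/parameters/, while old_large_send is read-only.
 */
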
struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))

struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
	{ "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
	{ "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
	{ "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
};

/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}

/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}

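/*
 * Flush a buffer from the CPU data cache back to memory one cache line at
 * a time (dcbfl), so the hypervisor sees current data.  Only used when the
 * rx_flush module parameter is set.
 */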
static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}

/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
						adapter->netdev->mtu +
						IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
				 pool->dma_addr[index], pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}

/*
 * The final 8 bytes of the buffer list is a counter of frames dropped
 * because there was not a buffer in the buffer list capable of holding
 * the frame.
 */
static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
{
	__be64 *p = adapter->buffer_list_addr + 4096 - 8;

	adapter->rx_no_buffer = be64_to_cpup(p);
}

/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	ibmveth_update_rx_no_buffer(adapter);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					    u64 correlator)
{
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	BUG_ON(skb == NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue */
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	int ret = 1;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		goto out;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
		ret = 0;
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}

out:
	return ret;
}

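/*
 * Harvest the current rx queue entry: unmap its buffer and mark the pool
 * slot free (no replacement buffer is handed back to firmware), then
 * advance the rx queue index, flipping the toggle when the queue wraps.
 */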
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;
	struct device *dev = &adapter->vdev->dev;

	if (adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if (adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if (adapter->rx_queue.queue_addr != NULL) {
		dma_free_coherent(dev, adapter->rx_queue.queue_len,
				  adapter->rx_queue.queue_addr,
				  adapter->rx_queue.queue_dma);
		adapter->rx_queue.queue_addr = NULL;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	if (adapter->bounce_buffer != NULL) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->bounce_buffer_dma,
					adapter->netdev->mtu + IBMVETH_BUFF_OH,
					DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
	union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}

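/* Pack the 6-byte Ethernet address into the low-order bytes of a u64,
 * which is the form the logical LAN hcalls expect.
 */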
static u64 ibmveth_encode_mac_addr(u8 *mac)
{
	int i;
	u64 encoded = 0;

	for (i = 0; i < ETH_ALEN; i++)
		encoded = (encoded << 8) | mac[i];

	return encoded;
}

static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	dev = &adapter->vdev->dev;

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr =
		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
	if (!adapter->rx_queue.queue_addr) {
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
			   adapter->buffer_list_dma,
			   adapter->filter_list_dma,
			   rxq_desc.desc,
			   mac_address);
		rc = -ENONET;
		goto err_out;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto err_out;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto err_out;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		rc = -ENOMEM;
		goto err_out_free_irq;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

err_out_free_irq:
	free_irq(netdev->irq, netdev);
err_out:
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return rc;
}

static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long lpar_rc;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	ibmveth_update_rx_no_buffer(adapter);

	ibmveth_cleanup(adapter);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
				SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
				ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
	strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
}

static netdev_features_t ibmveth_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since the ibmveth firmware interface does not have the
	 * concept of separate tx/rx checksum offload enable, if rx
	 * checksum is disabled we also have to disable tx checksum
	 * offload. Once we disable rx checksum offload, we are no
	 * longer allowed to send tx buffers that are not properly
	 * checksummed.
	 */

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_ALL_CSUM;

	return features;
}

static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret4, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret4 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret4);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IP_CSUM;

		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret6);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr6, clr_attr6, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IPV6_CSUM;

		} else
			adapter->fw_ipv6_csum_support = data;

		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
				" %d rc=%ld ret_attr=%lx\n", data, ret,
				ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_tso(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	long ret1, ret2;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;

	if (data)
		set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
	else
		clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;

	ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
	    !old_large_send) {
		ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret2 != H_SUCCESS) {
			netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
				   data, ret2);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
			rc1 = -EIO;

		} else {
			adapter->fw_large_send_support = data;
			adapter->large_send = data;
		}
	} else {
		/* Older firmware version of large send offload does not
		 * support tcp6/ipv6
		 */
		if (data == 1) {
			dev->features &= ~NETIF_F_TSO6;
			netdev_info(dev, "TSO feature requires all partitions to have updated driver");
		}
		adapter->large_send = data;
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rx_csum = !!(features & NETIF_F_RXCSUM);
	int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
	int rc1 = 0, rc2 = 0;

	if (rx_csum != adapter->rx_csum) {
		rc1 = ibmveth_set_csum_offload(dev, rx_csum);
		if (rc1 && !adapter->rx_csum)
			dev->features =
				features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
	}

	if (large_send != adapter->large_send) {
		rc2 = ibmveth_set_tso(dev, large_send);
		if (rc2 && !adapter->large_send)
			dev->features =
				features & ~(NETIF_F_TSO | NETIF_F_TSO6);
	}

	return rc1 ? rc1 : rc2;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo = netdev_get_drvinfo,
	.get_settings = netdev_get_settings,
	.get_link = ethtool_op_get_link,
	.get_strings = ibmveth_get_strings,
	.get_sset_count = ibmveth_get_sset_count,
	.get_ethtool_stats = ibmveth_get_ethtool_stats,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))

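/*
 * Hand up to six buffer descriptors to the hypervisor with
 * h_send_logical_lan, retrying while the hcall returns H_BUSY.
 * Returns 0 on success (H_SUCCESS or H_DROPPED), 1 otherwise.
 */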
static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs, unsigned long mss)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					 descs[0].desc, descs[1].desc,
					 descs[2].desc, descs[3].desc,
					 descs[4].desc, descs[5].desc,
					 correlator, &correlator, mss,
					 adapter->fw_large_send_support);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}

static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;
	dma_addr_t dma_addr;
	unsigned long mss = 0;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb_is_gso(skb) && adapter->fw_large_send_support)
		desc_flags |= IBMVETH_BUF_LRG_SND;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the rx threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs, 0)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
	descs[0].fields.address = dma_addr;

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
					    skb_frag_size(frag), DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
		descs[i+1].fields.address = dma_addr;
	}

	if (skb_is_gso(skb)) {
		if (adapter->fw_large_send_support) {
			mss = (unsigned long)skb_shinfo(skb)->gso_size;
			adapter->tx_large_packets++;
		} else if (!skb_is_gso_v6(skb)) {
			/* Put -1 in the IP checksum to tell phyp it
			 * is a largesend packet. Put the mss in
			 * the TCP checksum.
			 */
			ip_hdr(skb)->check = 0xffff;
			tcp_hdr(skb)->check =
				cpu_to_be16(skb_shinfo(skb)->gso_size);
			adapter->tx_large_packets++;
		}
	}

	if (ibmveth_send(adapter, descs, mss)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	last = i+1;
	for (i = 0; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	skb_linearize(skb);
	force_bounce = 1;
	goto retry_bounce;
}

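/*
 * NAPI poll routine: drain up to @budget frames from the receive queue,
 * recycling or harvesting each buffer, then trigger a replenish of the
 * buffer pools.
 */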
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;
	struct iphdr *iph;

restart_poll:
	while (frames_processed < budget) {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
					iph = (struct iphdr *)skb->data;

					/* If the IP checksum is not offloaded and if the packet
					 * is large send, the checksum must be rebuilt.
					 */
					if (iph->check == 0xffff) {
						iph->check = 0;
						iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
						adapter->rx_large_packets++;
					}
				}
			}

			napi_gro_receive(napi, skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	}

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		napi_complete(napi);

		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}

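/* Device interrupt handler: disable further VIO interrupts and hand the
 * receive work off to NAPI via __napi_schedule().
 */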
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		BUG_ON(lpar_rc != H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

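/* Push the current multicast state to the hypervisor: either disable
 * filtering entirely (promiscuous mode or filter table overflow), or
 * clear and rebuild the multicast filter table entry by entry.
 */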
static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "entering promisc mode\n", lpar_rc);
		}
	} else {
		struct netdev_hw_addr *ha;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "attempting to clear filter table\n",
				   lpar_rc);
		}
		/* add the addresses to the filter table */
		netdev_for_each_mc_addr(ha, netdev) {
			/* add the multicast address to the filter table */
			u64 mcast_addr;
			mcast_addr = ibmveth_encode_mac_addr(ha->addr);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if (lpar_rc != H_SUCCESS) {
				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
					   "when adding an entry to the filter "
					   "table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "enabling filtering\n", lpar_rc);
		}
	}
}

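/* MTU changes may require a different set of receive buffer pools, so the
 * device is closed if running, pools large enough for the new MTU are
 * activated, the CMO IO entitlement is updated, and the device is reopened.
 */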
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct vio_dev *viodev = adapter->vdev;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i, rc;
	int need_restart = 0;

	if (new_mtu < IBMVETH_MIN_MTU)
		return -EINVAL;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IBMVETH_NUM_BUFF_POOLS)
		return -EINVAL;

	/* Deactivate all the buffer pools so that the next loop can activate
	   only the buffer pools necessary to hold the new MTU */
	if (netif_running(adapter->netdev)) {
		need_restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(adapter->netdev);
		adapter->pool_config = 0;
	}

	/* Look for an active buffer pool that can hold the new MTU */
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
						(viodev));
			if (need_restart) {
				return ibmveth_open(adapter->netdev);
			}
			return 0;
		}
	}

	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
		return rc;

	return -EINVAL;
}

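/* netpoll hook: replenish the receive pools and call the interrupt handler
 * directly, since normal interrupt delivery may not be available in this
 * context.
 */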
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(netdev_priv(dev));
	ibmveth_interrupt(dev->irq, dev);
}
#endif

/**
 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmveth_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret;
	int i;
	int rxqentries = 1;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below*/
	if (netdev == NULL)
		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
	ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		/* add the size of the active receive buffers */
		if (adapter->rx_buff_pool[i].active)
			ret +=
			    adapter->rx_buff_pool[i].size *
			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
					     buff_size, tbl);
		rxqentries += adapter->rx_buff_pool[i].size;
	}
	/* add the size of the receive queue entries */
	ret += IOMMU_PAGE_ALIGN(
		rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);

	return ret;
}

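/* Set a new MAC address by asking the hypervisor to update the logical LAN;
 * the netdev copy is only updated once the h-call succeeds.
 */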
static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct sockaddr *addr = p;
	u64 mac_address;
	int rc;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	mac_address = ibmveth_encode_mac_addr(addr->sa_data);
	rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
	if (rc) {
		netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
		return rc;
	}

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}

static const struct net_device_ops ibmveth_netdev_ops = {
	.ndo_open		= ibmveth_open,
	.ndo_stop		= ibmveth_close,
	.ndo_start_xmit		= ibmveth_start_xmit,
	.ndo_set_rx_mode	= ibmveth_set_multicast_list,
	.ndo_do_ioctl		= ibmveth_ioctl,
	.ndo_change_mtu		= ibmveth_change_mtu,
	.ndo_fix_features	= ibmveth_fix_features,
	.ndo_set_features	= ibmveth_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ibmveth_set_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmveth_poll_controller,
#endif
};

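/* Probe one l-lan VIO device: read the MAC address and multicast filter
 * size from the device tree, allocate and configure the netdev, enable
 * checksum offload (and TSO when the firmware advertises large-send
 * support), create the per-pool sysfs kobjects, and register the device.
 */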
static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i, mac_len;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;
	long ret;
	unsigned long ret_attr;

	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
							&mac_len);
	if (!mac_addr_p) {
		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
		return -EINVAL;
	}
	/* Workaround for old/broken pHyp */
	if (mac_len == 8)
		mac_addr_p += 2;
	else if (mac_len != 6) {
		dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
			mac_len);
		return -EINVAL;
	}

	mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if (!mcastFilterSize_p) {
		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
			"attribute\n");
		return -EINVAL;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmveth_netdev_ops;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);
	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->features |= netdev->hw_features;

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	/* If running older firmware, TSO should not be enabled by default */
	if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
	    !old_large_send) {
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		netdev->features |= netdev->hw_features;
	} else {
		netdev->hw_features |= NETIF_F_TSO;
	}

	memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);

	if (firmware_has_feature(FW_FEATURE_CMO))
		memcpy(pool_count, pool_count_cmo, sizeof(pool_count));

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		int error;

		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		error = kobject_init_and_add(kobj, &ktype_veth_pool,
					     &dev->dev.kobj, "pool%d", i);
		if (!error)
			kobject_uevent(kobj, KOBJ_ADD);
	}

	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	netdev_dbg(netdev, "registering netdev...\n");

	ibmveth_set_features(netdev, netdev->features);

	rc = register_netdev(netdev);

	if (rc) {
		netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	netdev_dbg(netdev, "registered\n");

	return 0;
}

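/* Tear down in the reverse order of probe: drop the buffer-pool kobjects,
 * unregister and free the netdev, and clear the driver data pointer.
 */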
static int ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		kobject_put(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

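/* sysfs show handler shared by the per-pool "active", "num" and "size"
 * attributes created under each pool's kobject.
 */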
static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}

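/* sysfs store handler for the per-pool attributes. Changing a value on a
 * running interface closes and reopens the device so the pools can be
 * reallocated. As a rough illustration (the exact path depends on the VIO
 * unit address of the adapter on a given system), something like:
 *
 *	echo 1    > /sys/devices/vio/30000002/pool3/active
 *	echo 768  > /sys/devices/vio/30000002/pool3/num
 *	echo 9216 > /sys/devices/vio/30000002/pool3/size
 *
 * would activate and resize pool 3; the unit address 30000002 is only an
 * example.
 */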
static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev = dev_get_drvdata(
			container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					netdev_err(netdev,
						   "unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->active = 1;
			}
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IBMVETH_NUM_BUFF_POOLS) {
				netdev_err(netdev, "no active pool >= MTU\n");
				return -EPERM;
			}

			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				pool->active = 0;
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->size = value;
			}
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->buff_size = value;
			}
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}

#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static const struct sysfs_ops veth_pool_ops = {
	.show   = veth_pool_show,
	.store  = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release        = NULL,
	.sysfs_ops      = &veth_pool_ops,
	.default_attrs  = veth_pool_attrs,
};

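/* PM resume hook: poke the interrupt handler so NAPI is scheduled and any
 * frames that arrived while the partition was suspended get processed.
 */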
static int ibmveth_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	ibmveth_interrupt(netdev->irq, netdev);
	return 0;
}

static struct vio_device_id ibmveth_device_table[] = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct dev_pm_ops ibmveth_pm_ops = {
	.resume = ibmveth_resume
};

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.get_desired_dma = ibmveth_get_desired_dma,
	.name		= ibmveth_driver_name,
	.pm		= &ibmveth_pm_ops,
};

static int __init ibmveth_module_init(void)
{
	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
	       ibmveth_driver_string, ibmveth_driver_version);

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);