/**************************************************************************/
/*                                                                        */
/* IBM System i and System p Virtual NIC Device Driver                    */
/* Copyright (C) 2014 IBM Corp.                                           */
/* Santiago Leon (santi_leon@yahoo.com)                                   */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                            */
/* John Allen (jallen@linux.vnet.ibm.com)                                 */
/*                                                                        */
/* This program is free software; you can redistribute it and/or modify   */
/* it under the terms of the GNU General Public License as published by   */
/* the Free Software Foundation; either version 2 of the License, or      */
/* (at your option) any later version.                                    */
/*                                                                        */
/* This program is distributed in the hope that it will be useful,        */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of         */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the           */
/* GNU General Public License for more details.                           */
/*                                                                        */
/* You should have received a copy of the GNU General Public License      */
/* along with this program.                                               */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/
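
/* A rough sketch of the message flow implemented below (a summary of the
 * functions in this file, not an additional API):
 *
 *   init:      CRQ commands negotiate capabilities and log in to the server
 *   rx setup:  alloc_long_term_buff()/send_request_map() register long term
 *              buffers; replenish_rx_pool() posts rx_add descriptors over an
 *              sCRQ with send_subcrq()
 *   tx:        ibmvnic_xmit() copies each skb into a long term buffer slice
 *              and posts a tx descriptor over an sCRQ
 */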

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/utsname.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
static int ibmvnic_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

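/* Register a sub-CRQ with the hypervisor. On success, the queue number
 * (remote handle) and the assigned interrupt source are returned in the
 * hcall return buffer.
 */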
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

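/* Allocate a long term buffer and register it with the VNIC server. The
 * REQUEST_MAP command completes asynchronously; the CRQ handler signals
 * adapter->fw_done and leaves the result in adapter->fw_done_rc.
 */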
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
			adapter->fw_done_rc);
		return -1;
	}
	return 0;
}

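/* Free a long term buffer. For failover and mobility resets the unmap
 * request is skipped, since the mapping is not expected to survive the
 * transition to the new host.
 */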
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	if (!ltb->buff)
		return;

	if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
	    adapter->reset_reason != VNIC_RESET_MOBILITY)
		send_request_unmap(adapter, ltb->map_id);
	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
}

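/* Zero a long term buffer and re-register it with the server. If the
 * re-registration fails, fall back to freeing and reallocating the buffer.
 */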
static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	memset(ltb->buff, 0, ltb->size);

	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);

	if (adapter->fw_done_rc) {
		dev_info(&adapter->vdev->dev,
			 "Reset failed, attempting to free and reallocate buffer\n");
		free_long_term_buff(adapter, ltb);
		return alloc_long_term_buff(adapter, ltb, ltb->size);
	}
	return 0;
}

static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		adapter->rx_pool[i].active = 0;
}

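/* Post receive buffers for every free slot in an rx pool. Each buffer is
 * backed by a slice of the pool's long term buffer and advertised to the
 * server with an rx_add sub-CRQ descriptor.
 */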
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	if (!pool->active)
		return;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);

	if (lpar_rc == H_CLOSED) {
		/* Disable buffer pool replenishment and report carrier off if
		 * queue is closed. Firmware guarantees that a signal will
		 * be sent to the driver, triggering a reset.
		 */
		deactivate_rx_pools(adapter);
		netif_carrier_off(adapter->netdev);
	}
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void release_stats_buffers(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->tx_stats_buffers);
	kfree(adapter->rx_stats_buffers);
	adapter->tx_stats_buffers = NULL;
	adapter->rx_stats_buffers = NULL;
}

static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
	adapter->tx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_tx_queue_stats),
					GFP_KERNEL);
	if (!adapter->tx_stats_buffers)
		return -ENOMEM;

	adapter->rx_stats_buffers =
				kcalloc(IBMVNIC_MAX_QUEUES,
					sizeof(struct ibmvnic_rx_queue_stats),
					GFP_KERNEL);
	if (!adapter->rx_stats_buffers)
		return -ENOMEM;

	return 0;
}

static void release_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	if (!adapter->stats_token)
		return;

	dma_unmap_single(dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics),
			 DMA_FROM_DEVICE);
	adapter->stats_token = 0;
}

static int init_stats_token(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t stok;

	stok = dma_map_single(dev, &adapter->stats,
			      sizeof(struct ibmvnic_statistics),
			      DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, stok)) {
		dev_err(dev, "Couldn't map stats buffer\n");
		return -1;
	}

	adapter->stats_token = stok;
	netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
	return 0;
}

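/* Reset all rx pools after a reset. If the server advertised a new buffer
 * size in the login response, the long term buffer is reallocated;
 * otherwise it is simply zeroed and re-registered.
 */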
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int rx_scrqs;
	int i, j, rc;
	u64 *size_array;

	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);

		if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
			free_long_term_buff(adapter, &rx_pool->long_term_buff);
			rx_pool->buff_size = be64_to_cpu(size_array[i]);
			rc = alloc_long_term_buff(adapter,
						  &rx_pool->long_term_buff,
						  rx_pool->size *
						  rx_pool->buff_size);
		} else {
			rc = reset_long_term_buff(adapter,
						  &rx_pool->long_term_buff);
		}

		if (rc)
			return rc;

		for (j = 0; j < rx_pool->size; j++)
			rx_pool->free_map[j] = j;

		memset(rx_pool->rx_buff, 0,
		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
		rx_pool->active = 1;
	}

	return 0;
}

static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	int i, j;

	if (!adapter->rx_pool)
		return;

	for (i = 0; i < adapter->num_active_rx_pools; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);

		kfree(rx_pool->free_map);
		free_long_term_buff(adapter, &rx_pool->long_term_buff);

		if (!rx_pool->rx_buff)
			continue;

		for (j = 0; j < rx_pool->size; j++) {
			if (rx_pool->rx_buff[j].skb) {
				dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
				rx_pool->rx_buff[j].skb = NULL;
			}
		}

		kfree(rx_pool->rx_buff);
	}

	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
	adapter->num_active_rx_pools = 0;
}

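/* Allocate one rx pool per rx-add sub-CRQ, sized from the login response,
 * including the free map, the buffer tracking array and the long term
 * buffer backing the pool.
 */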
static int init_rx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_rx_pool *rx_pool;
	int rxadd_subcrqs;
	u64 *size_array;
	int i, j;

	rxadd_subcrqs =
		be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));

	adapter->rx_pool = kcalloc(rxadd_subcrqs,
				   sizeof(struct ibmvnic_rx_pool),
				   GFP_KERNEL);
	if (!adapter->rx_pool) {
		dev_err(dev, "Failed to allocate rx pools\n");
		return -1;
	}

	adapter->num_active_rx_pools = rxadd_subcrqs;

	for (i = 0; i < rxadd_subcrqs; i++) {
		rx_pool = &adapter->rx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
			   i, adapter->req_rx_add_entries_per_subcrq,
			   be64_to_cpu(size_array[i]));

		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
		rx_pool->index = i;
		rx_pool->buff_size = be64_to_cpu(size_array[i]);
		rx_pool->active = 1;

		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
					    GFP_KERNEL);
		if (!rx_pool->free_map) {
			release_rx_pools(adapter);
			return -1;
		}

		rx_pool->rx_buff = kcalloc(rx_pool->size,
					   sizeof(struct ibmvnic_rx_buff),
					   GFP_KERNEL);
		if (!rx_pool->rx_buff) {
			dev_err(dev, "Couldn't alloc rx buffers\n");
			release_rx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
					 rx_pool->size * rx_pool->buff_size)) {
			release_rx_pools(adapter);
			return -1;
		}

		for (j = 0; j < rx_pool->size; ++j)
			rx_pool->free_map[j] = j;

		atomic_set(&rx_pool->available, 0);
		rx_pool->next_alloc = 0;
		rx_pool->next_free = 0;
	}

	return 0;
}

static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
			     struct ibmvnic_tx_pool *tx_pool)
{
	int rc, i;

	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
	if (rc)
		return rc;

	memset(tx_pool->tx_buff, 0,
	       tx_pool->num_buffers *
	       sizeof(struct ibmvnic_tx_buff));

	for (i = 0; i < tx_pool->num_buffers; i++)
		tx_pool->free_map[i] = i;

	tx_pool->consumer_index = 0;
	tx_pool->producer_index = 0;

	return 0;
}

static int reset_tx_pools(struct ibmvnic_adapter *adapter)
{
	int tx_scrqs;
	int i, rc;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	for (i = 0; i < tx_scrqs; i++) {
		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
		if (rc)
			return rc;
		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
		if (rc)
			return rc;
	}

	return 0;
}

static void release_vpd_data(struct ibmvnic_adapter *adapter)
{
	if (!adapter->vpd)
		return;

	kfree(adapter->vpd->buff);
	kfree(adapter->vpd);

	adapter->vpd = NULL;
}

static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	int i;

	if (!adapter->tx_pool)
		return;

	for (i = 0; i < adapter->num_active_tx_pools; i++) {
		netdev_dbg(adapter->netdev, "Releasing tx_pool[%d]\n", i);
		tx_pool = &adapter->tx_pool[i];
		kfree(tx_pool->tx_buff);
		free_long_term_buff(adapter, &tx_pool->long_term_buff);
		free_long_term_buff(adapter, &tx_pool->tso_ltb);
		kfree(tx_pool->free_map);
	}

	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
	adapter->num_active_tx_pools = 0;
}

static int init_tx_pools(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	int tx_subcrqs;
	int i, j;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	adapter->tx_pool = kcalloc(tx_subcrqs,
				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		return -1;

	adapter->num_active_tx_pools = tx_subcrqs;

	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];

		netdev_dbg(adapter->netdev,
			   "Initializing tx_pool[%d], %lld buffs\n",
			   i, adapter->req_tx_entries_per_subcrq);

		tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
					   sizeof(struct ibmvnic_tx_buff),
					   GFP_KERNEL);
		if (!tx_pool->tx_buff) {
			dev_err(dev, "tx pool buffer allocation failed\n");
			release_tx_pools(adapter);
			return -1;
		}

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->req_tx_entries_per_subcrq *
					 (adapter->req_mtu + VLAN_HLEN))) {
			release_tx_pools(adapter);
			return -1;
		}

		/* alloc TSO ltb */
		if (alloc_long_term_buff(adapter, &tx_pool->tso_ltb,
					 IBMVNIC_TSO_BUFS *
					 IBMVNIC_TSO_BUF_SZ)) {
			release_tx_pools(adapter);
			return -1;
		}

		tx_pool->tso_index = 0;

		tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
					    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map) {
			release_tx_pools(adapter);
			return -1;
		}

		for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}

	return 0;
}

static void release_error_buffers(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
		list_del(&error_buff->list);
		dma_unmap_single(dev, error_buff->dma, error_buff->len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
	}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);
}

static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_enable(&adapter->napi[i]);

	adapter->napi_enabled = true;
}

static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi_enabled)
		return;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
		napi_disable(&adapter->napi[i]);
	}

	adapter->napi_enabled = false;
}

static int init_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		return -ENOMEM;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
		netif_napi_add(adapter->netdev, &adapter->napi[i],
			       ibmvnic_poll, NAPI_POLL_WEIGHT);
	}

	adapter->num_active_rx_napi = adapter->req_rx_queues;
	return 0;
}

static void release_napi(struct ibmvnic_adapter *adapter)
{
	int i;

	if (!adapter->napi)
		return;

	for (i = 0; i < adapter->num_active_rx_napi; i++) {
		netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
		netif_napi_del(&adapter->napi[i]);
	}

	kfree(adapter->napi);
	adapter->napi = NULL;
	adapter->num_active_rx_napi = 0;
}

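/* Log in to the VNIC server. If the server requests renegotiation, the
 * sub-CRQs are released and capabilities are re-queried before the login
 * is retried.
 */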
static int ibmvnic_login(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long timeout = msecs_to_jiffies(30000);
	struct device *dev = &adapter->vdev->dev;
	int rc;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs(adapter, 1);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Capabilities query timeout\n");
				return -1;
			}
			rc = init_sub_crqs(adapter);
			if (rc) {
				dev_err(dev,
					"Initialization of SCRQ's failed\n");
				return -1;
			}
			rc = init_sub_crq_irqs(adapter);
			if (rc) {
				dev_err(dev,
					"Initialization of SCRQ's irqs failed\n");
				return -1;
			}
		}

		reinit_completion(&adapter->init_done);
		rc = send_login(adapter);
		if (rc) {
			dev_err(dev, "Unable to attempt device login\n");
			return rc;
		} else if (!wait_for_completion_timeout(&adapter->init_done,
							timeout)) {
			dev_err(dev, "Login timeout\n");
			return -1;
		}
	} while (adapter->renegotiate);

	/* handle pending MAC address changes after successful login */
	if (adapter->mac_change_pending) {
		__ibmvnic_set_mac(netdev, &adapter->desired.mac);
		adapter->mac_change_pending = false;
	}

	return 0;
}

static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}

static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
	kfree(adapter->login_rsp_buf);
	adapter->login_rsp_buf = NULL;
}

static void release_resources(struct ibmvnic_adapter *adapter)
{
	release_vpd_data(adapter);

	release_tx_pools(adapter);
	release_rx_pools(adapter);

	release_error_buffers(adapter);
	release_napi(adapter);
	release_login_rsp_buffer(adapter);
}

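/* Request a logical link state change and wait for the server to respond.
 * A return code of 1 indicates partial success; the request is then
 * delayed and resent.
 */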
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	bool resend;
	int rc;

	netdev_dbg(netdev, "setting link state %d\n", link_state);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = link_state;

	do {
		resend = false;

		reinit_completion(&adapter->init_done);
		rc = ibmvnic_send_crq(adapter, &crq);
		if (rc) {
			netdev_err(netdev, "Failed to set link state\n");
			return rc;
		}

		if (!wait_for_completion_timeout(&adapter->init_done,
						 timeout)) {
			netdev_err(netdev, "timeout setting link state\n");
			return -1;
		}

		if (adapter->init_done_rc == 1) {
			/* Partial success, delay and re-send */
			mdelay(1000);
			resend = true;
		}
	} while (resend);

	return 0;
}

static int set_real_num_queues(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
		   adapter->req_tx_queues, adapter->req_rx_queues);

	rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
	if (rc) {
		netdev_err(netdev, "failed to set the number of tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
	if (rc)
		netdev_err(netdev, "failed to set the number of rx queues\n");

	return rc;
}

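/* Retrieve Vital Product Data from the server: query the VPD size first,
 * then size the buffer accordingly and issue GET_VPD with its DMA address.
 */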
static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int len = 0;

	if (adapter->vpd->buff)
		len = adapter->vpd->len;

	init_completion(&adapter->fw_done);
	crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd_size.cmd = GET_VPD_SIZE;
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);

	if (!adapter->vpd->len)
		return -ENODATA;

	if (!adapter->vpd->buff)
		adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
	else if (adapter->vpd->len != len)
		adapter->vpd->buff =
			krealloc(adapter->vpd->buff,
				 adapter->vpd->len, GFP_KERNEL);

	if (!adapter->vpd->buff) {
		dev_err(dev, "Could not allocate VPD buffer\n");
		return -ENOMEM;
	}

	adapter->vpd->dma_addr =
		dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
			       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
		dev_err(dev, "Could not map VPD buffer\n");
		kfree(adapter->vpd->buff);
		adapter->vpd->buff = NULL;
		return -ENOMEM;
	}

	reinit_completion(&adapter->fw_done);
	crq.get_vpd.first = IBMVNIC_CRQ_CMD;
	crq.get_vpd.cmd = GET_VPD;
	crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
	crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);

	return 0;
}

static int init_resources(struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int rc;

	rc = set_real_num_queues(netdev);
	if (rc)
		return rc;

	adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
	if (!adapter->vpd)
		return -ENOMEM;

	/* Vital Product Data (VPD) */
	rc = ibmvnic_get_vpd(adapter);
	if (rc) {
		netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
		return rc;
	}

	adapter->map_id = 1;

	rc = init_napi(adapter);
	if (rc)
		return rc;

	send_map_query(adapter);

	rc = init_rx_pools(netdev);
	if (rc)
		return rc;

	rc = init_tx_pools(netdev);
	return rc;
}

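/* Bring the interface up: replenish the rx pools, enable napi and the
 * sub-CRQ interrupts, and set the logical link state to up.
 */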
static int __ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	enum vnic_state prev_state = adapter->state;
	int i, rc;

	adapter->state = VNIC_OPENING;
	replenish_pools(adapter);
	ibmvnic_napi_enable(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->rx_scrq[i]->irq);
		else
			enable_scrq_irq(adapter, adapter->rx_scrq[i]);
	}

	for (i = 0; i < adapter->req_tx_queues; i++) {
		netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
		if (prev_state == VNIC_CLOSED)
			enable_irq(adapter->tx_scrq[i]->irq);
		else
			enable_scrq_irq(adapter, adapter->tx_scrq[i]);
	}

	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
	if (rc) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_disable(&adapter->napi[i]);
		release_resources(adapter);
		return rc;
	}

	netif_tx_start_all_queues(netdev);

	if (prev_state == VNIC_CLOSED) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			napi_schedule(&adapter->napi[i]);
	}

	adapter->state = VNIC_OPEN;
	return rc;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	mutex_lock(&adapter->reset_lock);

	if (adapter->state != VNIC_CLOSED) {
		rc = ibmvnic_login(netdev);
		if (rc) {
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}

		rc = init_resources(adapter);
		if (rc) {
			netdev_err(netdev, "failed to initialize resources\n");
			release_resources(adapter);
			mutex_unlock(&adapter->reset_lock);
			return rc;
		}
	}

	rc = __ibmvnic_open(netdev);
	netif_carrier_on(netdev);

	mutex_unlock(&adapter->reset_lock);

	return rc;
}

static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_rx_pool *rx_pool;
	struct ibmvnic_rx_buff *rx_buff;
	u64 rx_entries;
	int rx_scrqs;
	int i, j;

	if (!adapter->rx_pool)
		return;

	rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	rx_entries = adapter->req_rx_add_entries_per_subcrq;

	/* Free any remaining skbs in the rx buffer pools */
	for (i = 0; i < rx_scrqs; i++) {
		rx_pool = &adapter->rx_pool[i];
		if (!rx_pool || !rx_pool->rx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
		for (j = 0; j < rx_entries; j++) {
			rx_buff = &rx_pool->rx_buff[j];
			if (rx_buff && rx_buff->skb) {
				dev_kfree_skb_any(rx_buff->skb);
				rx_buff->skb = NULL;
			}
		}
	}
}

static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_tx_pool *tx_pool;
	struct ibmvnic_tx_buff *tx_buff;
	u64 tx_entries;
	int tx_scrqs;
	int i, j;

	if (!adapter->tx_pool)
		return;

	tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	tx_entries = adapter->req_tx_entries_per_subcrq;

	/* Free any remaining skbs in the tx buffer pools */
	for (i = 0; i < tx_scrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		if (!tx_pool || !tx_pool->tx_buff)
			continue;

		netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
		for (j = 0; j < tx_entries; j++) {
			tx_buff = &tx_pool->tx_buff[j];
			if (tx_buff && tx_buff->skb) {
				dev_kfree_skb_any(tx_buff->skb);
				tx_buff->skb = NULL;
			}
		}
	}
}

static void ibmvnic_cleanup(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* ensure that transmissions are stopped if called by do_reset */
	if (adapter->resetting)
		netif_tx_disable(netdev);
	else
		netif_tx_stop_all_queues(netdev);

	ibmvnic_napi_disable(adapter);

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling tx_scrq[%d] irq\n", i);
				disable_irq(adapter->tx_scrq[i]->irq);
			}
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (adapter->rx_scrq[i]->irq) {
				netdev_dbg(netdev,
					   "Disabling rx_scrq[%d] irq\n", i);
				disable_irq(adapter->rx_scrq[i]->irq);
			}
		}
	}
	clean_rx_pools(adapter);
	clean_tx_pools(adapter);
}

static int __ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	adapter->state = VNIC_CLOSING;
	rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
	if (rc)
		return rc;
	ibmvnic_cleanup(netdev);
	adapter->state = VNIC_CLOSED;
	return 0;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int rc;

	mutex_lock(&adapter->reset_lock);
	rc = __ibmvnic_close(netdev);
	mutex_unlock(&adapter->reset_lock);

	return rc;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths to be filled in
 * @hdr_data - buffer that receives the header data
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and returns the total length of the data.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
		hdr_len[0] = sizeof(struct vlan_ethhdr);
	else
		hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_ARP)) {
		hdr_len[1] = arp_hdr_len(skb->dev);
		hdr_len[2] = 0;
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */

static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			    union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	int num_descs = 0;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
		num_descs++;
	}

	return num_descs;
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer containing the skb and the indirect descriptor array
 * @num_entries - number of descriptors to be sent
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */

static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	*num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
					 txbuff->indir_arr + 1);
}

static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
				    struct net_device *netdev)
{
	/* For some backing devices, mishandling of small packets
	 * can result in a loss of connection or TX stall. Device
	 * architects recommend that no packet should be smaller
	 * than the minimum MTU value provided to the driver, so
	 * pad any packets to that length
	 */
	if (skb->len < netdev->min_mtu)
		return skb_put_padto(skb, netdev->min_mtu);

	return 0;
}

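/* Transmit path. The skb is copied into a slice of the queue's long term
 * buffer (or the TSO buffer for GSO skbs) and described by a tx_crq
 * descriptor. If L2/L3/L4 headers must be passed to firmware, an indirect
 * descriptor array is built and sent with send_subcrq_indirect().
 */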
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_sub_crq_queue *tx_scrq;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	u8 proto = 0;
	int ret = 0;

	if (adapter->resetting) {
		if (!netif_subqueue_stopped(netdev, skb))
			netif_stop_subqueue(netdev, queue_num);
		dev_kfree_skb_any(skb);

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	if (ibmvnic_xmit_workarounds(skb, netdev)) {
		tx_dropped++;
		tx_send_failed++;
		ret = NETDEV_TX_OK;
		goto out;
	}

	tx_pool = &adapter->tx_pool[queue_num];
	tx_scrq = adapter->tx_scrq[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
		be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));

	index = tx_pool->free_map[tx_pool->consumer_index];

	if (skb_is_gso(skb)) {
		offset = tx_pool->tso_index * IBMVNIC_TSO_BUF_SZ;
		dst = tx_pool->tso_ltb.buff + offset;
		memset(dst, 0, IBMVNIC_TSO_BUF_SZ);
		data_dma_addr = tx_pool->tso_ltb.addr + offset;
		tx_pool->tso_index++;
		if (tx_pool->tso_index == IBMVNIC_TSO_BUFS)
			tx_pool->tso_index = 0;
	} else {
		offset = index * (adapter->req_mtu + VLAN_HLEN);
		dst = tx_pool->long_term_buff.buff + offset;
		memset(dst, 0, adapter->req_mtu + VLAN_HLEN);
		data_dma_addr = tx_pool->long_term_buff.addr + offset;
	}

	if (skb_shinfo(skb)->nr_frags) {
		int cur, i;

		/* Copy the head */
		skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
		cur = skb_headlen(skb);

		/* Copy the frags */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			memcpy(dst + cur,
			       page_address(skb_frag_page(frag)) +
			       frag->page_offset, skb_frag_size(frag));
			cur += skb_frag_size(frag);
		}
	} else {
		skb_copy_from_linear_data(skb, dst, skb->len);
	}

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
		adapter->req_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	if (skb_is_gso(skb))
		tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->tso_ltb.map_id);
	else
		tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		proto = ip_hdr(skb)->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		proto = ipv6_hdr(skb)->nexthdr;
	}

	if (proto == IPPROTO_TCP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
	else if (proto == IPPROTO_UDP)
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	if (skb_is_gso(skb)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
		tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->num_entries = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			dev_kfree_skb_any(skb);
			tx_buff->skb = NULL;
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001519 ret = NETDEV_TX_OK;
Thomas Falconad7775d2016-04-01 17:20:34 -05001520 goto out;
1521 }
John Allen498cd8e2016-04-06 11:49:55 -05001522 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
Thomas Falconad7775d2016-04-01 17:20:34 -05001523 (u64)tx_buff->indir_dma,
1524 (u64)num_entries);
1525 } else {
Thomas Falconecba6162018-02-26 18:10:55 -06001526 tx_buff->num_entries = num_entries;
John Allen498cd8e2016-04-06 11:49:55 -05001527 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
1528 &tx_crq);
Thomas Falconad7775d2016-04-01 17:20:34 -05001529 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001530 if (lpar_rc != H_SUCCESS) {
1531 dev_err(dev, "tx failed with code %ld\n", lpar_rc);
1532
1533 if (tx_pool->consumer_index == 0)
1534 tx_pool->consumer_index =
Thomas Falcon068d9f92017-03-05 12:18:42 -06001535 adapter->req_tx_entries_per_subcrq - 1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001536 else
1537 tx_pool->consumer_index--;
1538
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001539 dev_kfree_skb_any(skb);
1540 tx_buff->skb = NULL;
1541
Thomas Falconb8c80b82017-05-26 10:30:42 -04001542 if (lpar_rc == H_CLOSED) {
1543 /* Disable TX and report carrier off if queue is closed.
1544 * Firmware guarantees that a signal will be sent to the
1545 * driver, triggering a reset or some other action.
1546 */
1547 netif_tx_stop_all_queues(netdev);
1548 netif_carrier_off(netdev);
1549 }
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001550
Thomas Falcon032c5e82015-12-21 11:26:06 -06001551 tx_send_failed++;
1552 tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001553 ret = NETDEV_TX_OK;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001554 goto out;
1555 }
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001556
Thomas Falconffc385b2018-02-18 10:08:41 -06001557 if (atomic_add_return(num_entries, &tx_scrq->used)
Brian King58c8c0c2017-04-19 13:44:47 -04001558 >= adapter->req_tx_entries_per_subcrq) {
Thomas Falcon0aecb132018-02-26 18:10:58 -06001559 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001560 netif_stop_subqueue(netdev, queue_num);
1561 }
1562
Thomas Falcon032c5e82015-12-21 11:26:06 -06001563 tx_packets++;
1564 tx_bytes += skb->len;
1565 txq->trans_start = jiffies;
1566 ret = NETDEV_TX_OK;
1567
1568out:
1569 netdev->stats.tx_dropped += tx_dropped;
1570 netdev->stats.tx_bytes += tx_bytes;
1571 netdev->stats.tx_packets += tx_packets;
1572 adapter->tx_send_failed += tx_send_failed;
1573 adapter->tx_map_failed += tx_map_failed;
John Allen3d52b592017-08-02 16:44:14 -05001574 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1575 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1576 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001577
1578 return ret;
1579}
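/* Illustrative sketch, not part of the driver: for a linear, non-GSO
 * skb the fast path above reduces to roughly the following sequence
 * (build_v1_desc() is a hypothetical helper standing in for the
 * tx_crq.v1 field assignments):
 *
 *	index = tx_pool->free_map[tx_pool->consumer_index];
 *	dst = tx_pool->long_term_buff.buff +
 *	      index * (adapter->req_mtu + VLAN_HLEN);
 *	skb_copy_from_linear_data(skb, dst, skb->len);
 *	build_v1_desc(&tx_crq, index, tx_pool->long_term_buff.map_id,
 *		      skb->len, data_dma_addr);
 *	lpar_rc = send_subcrq(adapter, handle_array[queue_num], &tx_crq);
 *
 * On hcall failure the consumer index is rolled back and the skb is
 * freed; every error path still returns NETDEV_TX_OK because the skb
 * has been consumed either way.
 */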
1580
1581static void ibmvnic_set_multi(struct net_device *netdev)
1582{
1583 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1584 struct netdev_hw_addr *ha;
1585 union ibmvnic_crq crq;
1586
1587 memset(&crq, 0, sizeof(crq));
1588 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1589 crq.request_capability.cmd = REQUEST_CAPABILITY;
1590
1591 if (netdev->flags & IFF_PROMISC) {
1592 if (!adapter->promisc_supported)
1593 return;
1594 } else {
1595 if (netdev->flags & IFF_ALLMULTI) {
1596 /* Accept all multicast */
1597 memset(&crq, 0, sizeof(crq));
1598 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1599 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1600 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1601 ibmvnic_send_crq(adapter, &crq);
1602 } else if (netdev_mc_empty(netdev)) {
1603 /* Reject all multicast */
1604 memset(&crq, 0, sizeof(crq));
1605 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1606 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1607 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1608 ibmvnic_send_crq(adapter, &crq);
1609 } else {
1610 /* Accept one or more multicast(s) */
1611 netdev_for_each_mc_addr(ha, netdev) {
1612 memset(&crq, 0, sizeof(crq));
1613 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1614 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1615 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1616 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1617 ha->addr);
1618 ibmvnic_send_crq(adapter, &crq);
1619 }
1620 }
1621 }
1622}
1623
John Allenc26eba02017-10-26 16:23:25 -05001624static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p)
Thomas Falcon032c5e82015-12-21 11:26:06 -06001625{
1626 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1627 struct sockaddr *addr = p;
1628 union ibmvnic_crq crq;
1629
1630 if (!is_valid_ether_addr(addr->sa_data))
1631 return -EADDRNOTAVAIL;
1632
1633 memset(&crq, 0, sizeof(crq));
1634 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1635 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1636 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
Thomas Falconf8136142018-01-29 13:45:05 -06001637
1638 init_completion(&adapter->fw_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001639 ibmvnic_send_crq(adapter, &crq);
Thomas Falconf8136142018-01-29 13:45:05 -06001640 wait_for_completion(&adapter->fw_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001641 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
Thomas Falconf8136142018-01-29 13:45:05 -06001642 return adapter->fw_done_rc ? -EIO : 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001643}
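/* CRQ commands such as CHANGE_MAC_ADDR are asynchronous. The pattern
 * used above, sketched here for reference (the response side lives
 * elsewhere in this file):
 *
 *	init_completion(&adapter->fw_done);
 *	ibmvnic_send_crq(adapter, &crq);
 *	wait_for_completion(&adapter->fw_done);
 *	return adapter->fw_done_rc ? -EIO : 0;
 *
 * The CRQ interrupt path is expected to record the firmware return
 * code in fw_done_rc and call complete(&adapter->fw_done) when the
 * CHANGE_MAC_ADDR response arrives.
 */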
1644
John Allenc26eba02017-10-26 16:23:25 -05001645static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1646{
1647 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1648 struct sockaddr *addr = p;
Thomas Falconf8136142018-01-29 13:45:05 -06001649 int rc;
John Allenc26eba02017-10-26 16:23:25 -05001650
Thomas Falcon3d166132018-01-10 19:39:52 -06001651 if (adapter->state == VNIC_PROBED) {
John Allenc26eba02017-10-26 16:23:25 -05001652 memcpy(&adapter->desired.mac, addr, sizeof(struct sockaddr));
1653 adapter->mac_change_pending = true;
1654 return 0;
1655 }
1656
Thomas Falconf8136142018-01-29 13:45:05 -06001657 rc = __ibmvnic_set_mac(netdev, addr);
John Allenc26eba02017-10-26 16:23:25 -05001658
Thomas Falconf8136142018-01-29 13:45:05 -06001659 return rc;
John Allenc26eba02017-10-26 16:23:25 -05001660}
1661
Nathan Fontenoted651a12017-05-03 14:04:38 -04001662/**
1663 * do_reset returns zero if we are able to keep processing reset events, or
1664 * non-zero if we hit a fatal error and must halt.
1665 */
1666static int do_reset(struct ibmvnic_adapter *adapter,
1667 struct ibmvnic_rwi *rwi, u32 reset_state)
1668{
John Allen896d8692018-01-18 16:26:31 -06001669 u64 old_num_rx_queues, old_num_tx_queues;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001670 struct net_device *netdev = adapter->netdev;
1671 int i, rc;
1672
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001673 netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
1674 rwi->reset_reason);
1675
Nathan Fontenoted651a12017-05-03 14:04:38 -04001676 netif_carrier_off(netdev);
1677 adapter->reset_reason = rwi->reset_reason;
1678
John Allen896d8692018-01-18 16:26:31 -06001679 old_num_rx_queues = adapter->req_rx_queues;
1680 old_num_tx_queues = adapter->req_tx_queues;
1681
Nathan Fontenoted651a12017-05-03 14:04:38 -04001682 if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
1683 rc = ibmvnic_reenable_crq_queue(adapter);
1684 if (rc)
1685 return 0;
Thomas Falcon18b8d6b2018-03-07 17:51:47 -06001686 ibmvnic_cleanup(netdev);
1687 } else if (rwi->reset_reason == VNIC_RESET_FAILOVER) {
1688 ibmvnic_cleanup(netdev);
1689 } else {
1690 rc = __ibmvnic_close(netdev);
1691 if (rc)
1692 return rc;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001693 }
1694
John Allenc26eba02017-10-26 16:23:25 -05001695 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
1696 adapter->wait_for_reset) {
1697 release_resources(adapter);
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06001698 release_sub_crqs(adapter, 1);
John Allenc26eba02017-10-26 16:23:25 -05001699 release_crq_queue(adapter);
1700 }
1701
John Allen8cb31cf2017-05-26 10:30:37 -04001702 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
1703 /* remove the closed state so when we call open it appears
1704 * we are coming from the probed state.
1705 */
Nathan Fontenoted651a12017-05-03 14:04:38 -04001706 adapter->state = VNIC_PROBED;
John Allen8cb31cf2017-05-26 10:30:37 -04001707
John Allen8cb31cf2017-05-26 10:30:37 -04001708 rc = ibmvnic_init(adapter);
1709 if (rc)
John Allen2a1bf512017-10-26 16:24:15 -05001710 return IBMVNIC_INIT_FAILED;
John Allen8cb31cf2017-05-26 10:30:37 -04001711
1712 /* If the adapter was in PROBE state prior to the reset,
1713 * exit here.
1714 */
1715 if (reset_state == VNIC_PROBED)
1716 return 0;
1717
1718 rc = ibmvnic_login(netdev);
1719 if (rc) {
1720 adapter->state = VNIC_PROBED;
1721 return 0;
1722 }
1723
John Allenc26eba02017-10-26 16:23:25 -05001724 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
1725 adapter->wait_for_reset) {
1726 rc = init_resources(adapter);
1727 if (rc)
1728 return rc;
John Allen896d8692018-01-18 16:26:31 -06001729 } else if (adapter->req_rx_queues != old_num_rx_queues ||
1730 adapter->req_tx_queues != old_num_tx_queues) {
Thomas Falconfaefaa92018-02-09 11:41:09 -06001731 adapter->map_id = 1;
John Allen896d8692018-01-18 16:26:31 -06001732 release_rx_pools(adapter);
1733 release_tx_pools(adapter);
1734 init_rx_pools(netdev);
1735 init_tx_pools(netdev);
Nathan Fontenotd9043c12018-02-19 13:30:14 -06001736
Nathan Fontenot86f669b2018-02-19 13:30:39 -06001737 release_napi(adapter);
1738 init_napi(adapter);
John Allenc26eba02017-10-26 16:23:25 -05001739 } else {
1740 rc = reset_tx_pools(adapter);
1741 if (rc)
1742 return rc;
Nathan Fontenot8c0543a2017-05-26 10:31:06 -04001743
John Allenc26eba02017-10-26 16:23:25 -05001744 rc = reset_rx_pools(adapter);
1745 if (rc)
1746 return rc;
John Allenc26eba02017-10-26 16:23:25 -05001747 }
Nathan Fontenoted651a12017-05-03 14:04:38 -04001748 }
1749
John Allene676d812018-03-14 10:41:29 -05001750 adapter->state = VNIC_CLOSED;
1751
1752 if (reset_state == VNIC_CLOSED)
1753 return 0;
1754
Nathan Fontenoted651a12017-05-03 14:04:38 -04001755 rc = __ibmvnic_open(netdev);
1756 if (rc) {
1757 if (list_empty(&adapter->rwi_list))
1758 adapter->state = VNIC_CLOSED;
1759 else
1760 adapter->state = reset_state;
1761
1762 return 0;
1763 }
1764
Nathan Fontenoted651a12017-05-03 14:04:38 -04001765 /* kick napi */
1766 for (i = 0; i < adapter->req_rx_queues; i++)
1767 napi_schedule(&adapter->napi[i]);
1768
Nathan Fontenot61d3e1d2017-06-12 20:47:45 -04001769 if (adapter->reset_reason != VNIC_RESET_FAILOVER)
1770 netdev_notify_peers(netdev);
1771
Thomas Falconcc85c022018-02-13 15:32:50 -06001772 netif_carrier_on(netdev);
1773
Nathan Fontenoted651a12017-05-03 14:04:38 -04001774 return 0;
1775}
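/* Rough summary of the teardown/bring-up performed above per reset
 * reason (illustrative; the code above is authoritative):
 *
 *	MOBILITY:      reenable CRQ + ibmvnic_cleanup(), then re-init
 *	FAILOVER:      ibmvnic_cleanup(), then re-init
 *	CHANGE_PARAM:  __ibmvnic_close(), release resources/sub-CRQs/CRQ,
 *	               then re-init with the new parameters
 *	NON_FATAL:     __ibmvnic_close(), then __ibmvnic_open() only
 *
 * A reset taken while the adapter was still in VNIC_PROBED stops after
 * ibmvnic_init(), since there is nothing to log in to or reopen yet.
 */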
1776
1777static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
1778{
1779 struct ibmvnic_rwi *rwi;
1780
1781 mutex_lock(&adapter->rwi_lock);
1782
1783 if (!list_empty(&adapter->rwi_list)) {
1784 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
1785 list);
1786 list_del(&rwi->list);
1787 } else {
1788 rwi = NULL;
1789 }
1790
1791 mutex_unlock(&adapter->rwi_lock);
1792 return rwi;
1793}
1794
1795static void free_all_rwi(struct ibmvnic_adapter *adapter)
1796{
1797 struct ibmvnic_rwi *rwi;
1798
1799 rwi = get_next_rwi(adapter);
1800 while (rwi) {
1801 kfree(rwi);
1802 rwi = get_next_rwi(adapter);
1803 }
1804}
1805
1806static void __ibmvnic_reset(struct work_struct *work)
1807{
1808 struct ibmvnic_rwi *rwi;
1809 struct ibmvnic_adapter *adapter;
1810 struct net_device *netdev;
1811 u32 reset_state;
John Allenc26eba02017-10-26 16:23:25 -05001812 int rc = 0;
Nathan Fontenoted651a12017-05-03 14:04:38 -04001813
1814 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
1815 netdev = adapter->netdev;
1816
1817 mutex_lock(&adapter->reset_lock);
1818 adapter->resetting = true;
1819 reset_state = adapter->state;
1820
1821 rwi = get_next_rwi(adapter);
1822 while (rwi) {
1823 rc = do_reset(adapter, rwi, reset_state);
1824 kfree(rwi);
John Allen2a1bf512017-10-26 16:24:15 -05001825 if (rc && rc != IBMVNIC_INIT_FAILED)
Nathan Fontenoted651a12017-05-03 14:04:38 -04001826 break;
1827
1828 rwi = get_next_rwi(adapter);
1829 }
1830
John Allenc26eba02017-10-26 16:23:25 -05001831 if (adapter->wait_for_reset) {
1832 adapter->wait_for_reset = false;
1833 adapter->reset_done_rc = rc;
1834 complete(&adapter->reset_done);
1835 }
1836
Nathan Fontenoted651a12017-05-03 14:04:38 -04001837 if (rc) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001838 netdev_dbg(adapter->netdev, "Reset failed\n");
Nathan Fontenoted651a12017-05-03 14:04:38 -04001839 free_all_rwi(adapter);
Wei Yongjun6d0af072017-05-18 15:24:52 +00001840 mutex_unlock(&adapter->reset_lock);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001841 return;
1842 }
1843
1844 adapter->resetting = false;
1845 mutex_unlock(&adapter->reset_lock);
1846}
1847
1848static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
1849 enum ibmvnic_reset_reason reason)
1850{
1851 struct ibmvnic_rwi *rwi, *tmp;
1852 struct net_device *netdev = adapter->netdev;
1853 struct list_head *entry;
1854
1855 if (adapter->state == VNIC_REMOVING ||
1856 adapter->state == VNIC_REMOVED) {
1857 netdev_dbg(netdev, "Adapter removing, skipping reset\n");
1858 return;
1859 }
1860
Nathan Fontenot6a2fb0e2017-06-15 14:48:09 -04001861 if (adapter->state == VNIC_PROBING) {
1862 netdev_warn(netdev, "Adapter reset during probe\n");
1863 adapter->init_done_rc = EAGAIN;
1864 return;
1865 }
1866
Nathan Fontenoted651a12017-05-03 14:04:38 -04001867 mutex_lock(&adapter->rwi_lock);
1868
1869 list_for_each(entry, &adapter->rwi_list) {
1870 tmp = list_entry(entry, struct ibmvnic_rwi, list);
1871 if (tmp->reset_reason == reason) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001872 netdev_dbg(netdev, "Skipping matching reset\n");
Nathan Fontenoted651a12017-05-03 14:04:38 -04001873 mutex_unlock(&adapter->rwi_lock);
1874 return;
1875 }
1876 }
1877
1878 rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
1879 if (!rwi) {
1880 mutex_unlock(&adapter->rwi_lock);
1881 ibmvnic_close(netdev);
1882 return;
1883 }
1884
1885 rwi->reset_reason = reason;
1886 list_add_tail(&rwi->list, &adapter->rwi_list);
1887 mutex_unlock(&adapter->rwi_lock);
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05001888
1889 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001890 schedule_work(&adapter->ibmvnic_reset);
1891}
1892
Thomas Falcon032c5e82015-12-21 11:26:06 -06001893static void ibmvnic_tx_timeout(struct net_device *dev)
1894{
1895 struct ibmvnic_adapter *adapter = netdev_priv(dev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001896
Nathan Fontenoted651a12017-05-03 14:04:38 -04001897 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001898}
1899
1900static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
1901 struct ibmvnic_rx_buff *rx_buff)
1902{
1903 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
1904
1905 rx_buff->skb = NULL;
1906
1907 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
1908 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
1909
1910 atomic_dec(&pool->available);
1911}
1912
1913static int ibmvnic_poll(struct napi_struct *napi, int budget)
1914{
1915 struct net_device *netdev = napi->dev;
1916 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1917 int scrq_num = (int)(napi - adapter->napi);
1918 int frames_processed = 0;
Nathan Fontenot152ce472017-05-26 10:30:54 -04001919
Thomas Falcon032c5e82015-12-21 11:26:06 -06001920restart_poll:
1921 while (frames_processed < budget) {
1922 struct sk_buff *skb;
1923 struct ibmvnic_rx_buff *rx_buff;
1924 union sub_crq *next;
1925 u32 length;
1926 u16 offset;
1927 u8 flags = 0;
1928
John Allen34686562018-02-06 16:21:49 -06001929 if (unlikely(adapter->resetting &&
1930 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
Thomas Falcon21ecba62017-06-14 23:50:09 -05001931 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1932 napi_complete_done(napi, frames_processed);
1933 return frames_processed;
1934 }
1935
Thomas Falcon032c5e82015-12-21 11:26:06 -06001936 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
1937 break;
1938 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
1939 rx_buff =
1940 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
1941 rx_comp.correlator);
1942 /* do error checking */
1943 if (next->rx_comp.rc) {
John Allene1cea2e2017-08-07 15:42:30 -05001944 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
1945 be16_to_cpu(next->rx_comp.rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06001946 /* free the entry */
1947 next->rx_comp.first = 0;
Thomas Falcon4b9b0f02018-02-13 18:23:42 -06001948 dev_kfree_skb_any(rx_buff->skb);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001949 remove_buff_from_pool(adapter, rx_buff);
Nathan Fontenotca05e312017-05-03 14:05:14 -04001950 continue;
Thomas Falconabe27a82018-02-19 20:12:57 -06001951 } else if (!rx_buff->skb) {
1952 /* free the entry */
1953 next->rx_comp.first = 0;
1954 remove_buff_from_pool(adapter, rx_buff);
1955 continue;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001956 }
1957
1958 length = be32_to_cpu(next->rx_comp.len);
1959 offset = be16_to_cpu(next->rx_comp.off_frame_data);
1960 flags = next->rx_comp.flags;
1961 skb = rx_buff->skb;
1962 skb_copy_to_linear_data(skb, rx_buff->data + offset,
1963 length);
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04001964
1965 /* VLAN Header has been stripped by the system firmware and
1966 * needs to be inserted by the driver
1967 */
1968 if (adapter->rx_vlan_header_insertion &&
1969 (flags & IBMVNIC_VLAN_STRIPPED))
1970 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1971 ntohs(next->rx_comp.vlan_tci));
1972
Thomas Falcon032c5e82015-12-21 11:26:06 -06001973 /* free the entry */
1974 next->rx_comp.first = 0;
1975 remove_buff_from_pool(adapter, rx_buff);
1976
1977 skb_put(skb, length);
1978 skb->protocol = eth_type_trans(skb, netdev);
Thomas Falcon94ca3052017-05-03 14:05:20 -04001979 skb_record_rx_queue(skb, scrq_num);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001980
1981 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
1982 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
1983 skb->ip_summed = CHECKSUM_UNNECESSARY;
1984 }
1985
1986 length = skb->len;
1987 napi_gro_receive(napi, skb); /* send it up */
1988 netdev->stats.rx_packets++;
1989 netdev->stats.rx_bytes += length;
John Allen3d52b592017-08-02 16:44:14 -05001990 adapter->rx_stats_buffers[scrq_num].packets++;
1991 adapter->rx_stats_buffers[scrq_num].bytes += length;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001992 frames_processed++;
1993 }
Nathan Fontenot152ce472017-05-26 10:30:54 -04001994
1995 if (adapter->state != VNIC_CLOSING)
1996 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001997
1998 if (frames_processed < budget) {
1999 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
Eric Dumazet6ad20162017-01-30 08:22:01 -08002000 napi_complete_done(napi, frames_processed);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002001 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2002 napi_reschedule(napi)) {
2003 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2004 goto restart_poll;
2005 }
2006 }
2007 return frames_processed;
2008}
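/* Condensed form of the NAPI contract followed above (illustrative
 * only):
 *
 *	while (frames_processed < budget && work is pending)
 *		process one rx completion;
 *	if (frames_processed < budget) {
 *		enable_scrq_irq(adapter, scrq);
 *		napi_complete_done(napi, frames_processed);
 *		if (pending_scrq(adapter, scrq) &&
 *		    napi_reschedule(napi)) {
 *			disable_scrq_irq(adapter, scrq);
 *			goto restart_poll;
 *		}
 *	}
 *	return frames_processed;
 *
 * Re-checking pending_scrq() after napi_complete_done() closes the
 * window in which a completion can land between the final poll
 * iteration and the interrupt being re-enabled.
 */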
2009
2010#ifdef CONFIG_NET_POLL_CONTROLLER
2011static void ibmvnic_netpoll_controller(struct net_device *dev)
2012{
2013 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2014 int i;
2015
2016 replenish_pools(netdev_priv(dev));
2017 for (i = 0; i < adapter->req_rx_queues; i++)
2018 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
2019 adapter->rx_scrq[i]);
2020}
2021#endif
2022
John Allenc26eba02017-10-26 16:23:25 -05002023static int wait_for_reset(struct ibmvnic_adapter *adapter)
2024{
2025 adapter->fallback.mtu = adapter->req_mtu;
2026 adapter->fallback.rx_queues = adapter->req_rx_queues;
2027 adapter->fallback.tx_queues = adapter->req_tx_queues;
2028 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2029 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2030
2031 init_completion(&adapter->reset_done);
2032 ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2033 adapter->wait_for_reset = true;
2034 wait_for_completion(&adapter->reset_done);
2035
2036 if (adapter->reset_done_rc) {
2037 adapter->desired.mtu = adapter->fallback.mtu;
2038 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2039 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2040 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2041 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2042
2043 init_completion(&adapter->reset_done);
2044 ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2045 wait_for_completion(&adapter->reset_done);
2046 }
2047 adapter->wait_for_reset = false;
2048
2049 return adapter->reset_done_rc;
2050}
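/* Example of the fallback handling above, with illustrative numbers:
 * the device is running with 4 rx queues and the caller asks for 8.
 *
 *	fallback.rx_queues = req_rx_queues;      // 4, saved
 *	desired.rx_queues  = 8;                  // set by the caller
 *	CHANGE_PARAM reset -> fails, reset_done_rc != 0
 *	desired.rx_queues  = fallback.rx_queues; // back to 4
 *	CHANGE_PARAM reset -> restores the old geometry
 *
 * so a rejected renegotiation degrades to the previous configuration
 * rather than leaving the interface down.
 */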
2051
John Allen3a807b72017-06-06 16:55:52 -05002052static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2053{
John Allenc26eba02017-10-26 16:23:25 -05002054 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2055
2056 adapter->desired.mtu = new_mtu + ETH_HLEN;
2057
2058 return wait_for_reset(adapter);
John Allen3a807b72017-06-06 16:55:52 -05002059}
2060
Thomas Falconf10b09e2018-03-12 11:51:05 -05002061static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2062 struct net_device *dev,
2063 netdev_features_t features)
2064{
2065 /* Some backing hardware adapters cannot
2066 * handle packets with an MSS less than 224
2067 * or with only one segment.
2068 */
2069 if (skb_is_gso(skb)) {
2070 if (skb_shinfo(skb)->gso_size < 224 ||
2071 skb_shinfo(skb)->gso_segs == 1)
2072 features &= ~NETIF_F_GSO_MASK;
2073 }
2074
2075 return features;
2076}
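/* For example (numbers illustrative): a TSO skb with gso_size 200, or
 * one that resolves to a single segment, loses the GSO features here
 * and is segmented in software by the core before re-entering
 * ibmvnic_xmit() as ordinary packets; a 1400-byte MSS flow with
 * multiple segments is passed through for hardware LSO unchanged.
 */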
2077
Thomas Falcon032c5e82015-12-21 11:26:06 -06002078static const struct net_device_ops ibmvnic_netdev_ops = {
2079 .ndo_open = ibmvnic_open,
2080 .ndo_stop = ibmvnic_close,
2081 .ndo_start_xmit = ibmvnic_xmit,
2082 .ndo_set_rx_mode = ibmvnic_set_multi,
2083 .ndo_set_mac_address = ibmvnic_set_mac,
2084 .ndo_validate_addr = eth_validate_addr,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002085 .ndo_tx_timeout = ibmvnic_tx_timeout,
2086#ifdef CONFIG_NET_POLL_CONTROLLER
2087 .ndo_poll_controller = ibmvnic_netpoll_controller,
2088#endif
John Allen3a807b72017-06-06 16:55:52 -05002089 .ndo_change_mtu = ibmvnic_change_mtu,
Thomas Falconf10b09e2018-03-12 11:51:05 -05002090 .ndo_features_check = ibmvnic_features_check,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002091};
2092
2093/* ethtool functions */
2094
Philippe Reynes8a433792017-01-07 22:37:29 +01002095static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2096 struct ethtool_link_ksettings *cmd)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002097{
Philippe Reynes8a433792017-01-07 22:37:29 +01002098 u32 supported, advertising;
2099
2100 supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
Thomas Falcon032c5e82015-12-21 11:26:06 -06002101 SUPPORTED_FIBRE);
Philippe Reynes8a433792017-01-07 22:37:29 +01002102 advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
Thomas Falcon032c5e82015-12-21 11:26:06 -06002103 ADVERTISED_FIBRE);
Philippe Reynes8a433792017-01-07 22:37:29 +01002104 cmd->base.speed = SPEED_1000;
2105 cmd->base.duplex = DUPLEX_FULL;
2106 cmd->base.port = PORT_FIBRE;
2107 cmd->base.phy_address = 0;
2108 cmd->base.autoneg = AUTONEG_ENABLE;
2109
2110 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
2111 supported);
2112 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
2113 advertising);
2114
Thomas Falcon032c5e82015-12-21 11:26:06 -06002115 return 0;
2116}
2117
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002118static void ibmvnic_get_drvinfo(struct net_device *netdev,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002119 struct ethtool_drvinfo *info)
2120{
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002121 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2122
Thomas Falcon032c5e82015-12-21 11:26:06 -06002123 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2124 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02002125 strlcpy(info->fw_version, adapter->fw_version,
2126 sizeof(info->fw_version));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002127}
2128
2129static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2130{
2131 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2132
2133 return adapter->msg_enable;
2134}
2135
2136static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2137{
2138 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2139
2140 adapter->msg_enable = data;
2141}
2142
2143static u32 ibmvnic_get_link(struct net_device *netdev)
2144{
2145 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2146
2147 /* Don't need to send a query because we request a logical link up at
2148 * init and then we wait for link state indications
2149 */
2150 return adapter->logical_link_state;
2151}
2152
2153static void ibmvnic_get_ringparam(struct net_device *netdev,
2154 struct ethtool_ringparam *ring)
2155{
John Allenbc131b32017-08-02 16:46:30 -05002156 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2157
2158 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2159 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002160 ring->rx_mini_max_pending = 0;
2161 ring->rx_jumbo_max_pending = 0;
John Allenbc131b32017-08-02 16:46:30 -05002162 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2163 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002164 ring->rx_mini_pending = 0;
2165 ring->rx_jumbo_pending = 0;
2166}
2167
John Allenc26eba02017-10-26 16:23:25 -05002168static int ibmvnic_set_ringparam(struct net_device *netdev,
2169 struct ethtool_ringparam *ring)
2170{
2171 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2172
2173 if (ring->rx_pending > adapter->max_rx_add_entries_per_subcrq ||
2174 ring->tx_pending > adapter->max_tx_entries_per_subcrq) {
2175 netdev_err(netdev, "Invalid request.\n");
2176 netdev_err(netdev, "Max tx buffers = %llu\n",
2177 adapter->max_tx_entries_per_subcrq);
2178 netdev_err(netdev, "Max rx buffers = %llu\n",
2179 adapter->max_rx_add_entries_per_subcrq);
2180 return -EINVAL;
2181 }
2182
2183 adapter->desired.rx_entries = ring->rx_pending;
2184 adapter->desired.tx_entries = ring->tx_pending;
2185
2186 return wait_for_reset(adapter);
2187}
2188
John Allenc2dbeb62017-08-02 16:47:17 -05002189static void ibmvnic_get_channels(struct net_device *netdev,
2190 struct ethtool_channels *channels)
2191{
2192 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2193
2194 channels->max_rx = adapter->max_rx_queues;
2195 channels->max_tx = adapter->max_tx_queues;
2196 channels->max_other = 0;
2197 channels->max_combined = 0;
2198 channels->rx_count = adapter->req_rx_queues;
2199 channels->tx_count = adapter->req_tx_queues;
2200 channels->other_count = 0;
2201 channels->combined_count = 0;
2202}
2203
John Allenc26eba02017-10-26 16:23:25 -05002204static int ibmvnic_set_channels(struct net_device *netdev,
2205 struct ethtool_channels *channels)
2206{
2207 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2208
2209 adapter->desired.rx_queues = channels->rx_count;
2210 adapter->desired.tx_queues = channels->tx_count;
2211
2212 return wait_for_reset(adapter);
2213}
2214
Thomas Falcon032c5e82015-12-21 11:26:06 -06002215static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2216{
John Allen3d52b592017-08-02 16:44:14 -05002217 struct ibmvnic_adapter *adapter = netdev_priv(dev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002218 int i;
2219
2220 if (stringset != ETH_SS_STATS)
2221 return;
2222
2223 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
2224 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
John Allen3d52b592017-08-02 16:44:14 -05002225
2226 for (i = 0; i < adapter->req_tx_queues; i++) {
2227 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2228 data += ETH_GSTRING_LEN;
2229
2230 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2231 data += ETH_GSTRING_LEN;
2232
2233 snprintf(data, ETH_GSTRING_LEN, "tx%d_dropped_packets", i);
2234 data += ETH_GSTRING_LEN;
2235 }
2236
2237 for (i = 0; i < adapter->req_rx_queues; i++) {
2238 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2239 data += ETH_GSTRING_LEN;
2240
2241 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2242 data += ETH_GSTRING_LEN;
2243
2244 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2245 data += ETH_GSTRING_LEN;
2246 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002247}
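/* Resulting string table, e.g. with req_tx_queues = 2 and
 * req_rx_queues = 1 (illustrative):
 *
 *	<ARRAY_SIZE(ibmvnic_stats) global stat names>
 *	"tx0_packets", "tx0_bytes", "tx0_dropped_packets",
 *	"tx1_packets", "tx1_bytes", "tx1_dropped_packets",
 *	"rx0_packets", "rx0_bytes", "rx0_interrupts"
 *
 * ibmvnic_get_ethtool_stats() must fill data[] in exactly this order,
 * and ibmvnic_get_sset_count() must report the matching total, i.e.
 * ARRAY_SIZE(ibmvnic_stats) + 2 * NUM_TX_STATS + 1 * NUM_RX_STATS.
 */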
2248
2249static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2250{
John Allen3d52b592017-08-02 16:44:14 -05002251 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2252
Thomas Falcon032c5e82015-12-21 11:26:06 -06002253 switch (sset) {
2254 case ETH_SS_STATS:
John Allen3d52b592017-08-02 16:44:14 -05002255 return ARRAY_SIZE(ibmvnic_stats) +
2256 adapter->req_tx_queues * NUM_TX_STATS +
2257 adapter->req_rx_queues * NUM_RX_STATS;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002258 default:
2259 return -EOPNOTSUPP;
2260 }
2261}
2262
2263static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2264 struct ethtool_stats *stats, u64 *data)
2265{
2266 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2267 union ibmvnic_crq crq;
John Allen3d52b592017-08-02 16:44:14 -05002268 int i, j;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002269
2270 memset(&crq, 0, sizeof(crq));
2271 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2272 crq.request_statistics.cmd = REQUEST_STATISTICS;
2273 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2274 crq.request_statistics.len =
2275 cpu_to_be32(sizeof(struct ibmvnic_statistics));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002276
2277 /* Wait for data to be written */
2278 init_completion(&adapter->stats_done);
Nathan Fontenotdb5d0b52017-02-10 13:45:05 -05002279 ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002280 wait_for_completion(&adapter->stats_done);
2281
2282 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
John Allen52da5c12017-08-02 16:45:28 -05002283 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2284 ibmvnic_stats[i].offset));
John Allen3d52b592017-08-02 16:44:14 -05002285
2286 for (j = 0; j < adapter->req_tx_queues; j++) {
2287 data[i] = adapter->tx_stats_buffers[j].packets;
2288 i++;
2289 data[i] = adapter->tx_stats_buffers[j].bytes;
2290 i++;
2291 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2292 i++;
2293 }
2294
2295 for (j = 0; j < adapter->req_rx_queues; j++) {
2296 data[i] = adapter->rx_stats_buffers[j].packets;
2297 i++;
2298 data[i] = adapter->rx_stats_buffers[j].bytes;
2299 i++;
2300 data[i] = adapter->rx_stats_buffers[j].interrupts;
2301 i++;
2302 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002303}
2304
2305static const struct ethtool_ops ibmvnic_ethtool_ops = {
Thomas Falcon032c5e82015-12-21 11:26:06 -06002306 .get_drvinfo = ibmvnic_get_drvinfo,
2307 .get_msglevel = ibmvnic_get_msglevel,
2308 .set_msglevel = ibmvnic_set_msglevel,
2309 .get_link = ibmvnic_get_link,
2310 .get_ringparam = ibmvnic_get_ringparam,
John Allenc26eba02017-10-26 16:23:25 -05002311 .set_ringparam = ibmvnic_set_ringparam,
John Allenc2dbeb62017-08-02 16:47:17 -05002312 .get_channels = ibmvnic_get_channels,
John Allenc26eba02017-10-26 16:23:25 -05002313 .set_channels = ibmvnic_set_channels,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002314 .get_strings = ibmvnic_get_strings,
2315 .get_sset_count = ibmvnic_get_sset_count,
2316 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
Philippe Reynes8a433792017-01-07 22:37:29 +01002317 .get_link_ksettings = ibmvnic_get_link_ksettings,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002318};
2319
2320/* Routines for managing CRQs/sCRQs */
2321
Nathan Fontenot57a49432017-05-26 10:31:12 -04002322static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2323 struct ibmvnic_sub_crq_queue *scrq)
2324{
2325 int rc;
2326
2327 if (scrq->irq) {
2328 free_irq(scrq->irq, scrq);
2329 irq_dispose_mapping(scrq->irq);
2330 scrq->irq = 0;
2331 }
2332
Thomas Falconc8b2ad02017-06-14 23:50:07 -05002333 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
Nathan Fontenot57a49432017-05-26 10:31:12 -04002334 scrq->cur = 0;
2335
2336 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2337 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2338 return rc;
2339}
2340
2341static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2342{
2343 int i, rc;
2344
2345 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002346 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
Nathan Fontenot57a49432017-05-26 10:31:12 -04002347 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2348 if (rc)
2349 return rc;
2350 }
2351
2352 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002353 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
Nathan Fontenot57a49432017-05-26 10:31:12 -04002354 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2355 if (rc)
2356 return rc;
2357 }
2358
Nathan Fontenot57a49432017-05-26 10:31:12 -04002359 return rc;
2360}
2361
Thomas Falcon032c5e82015-12-21 11:26:06 -06002362static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002363 struct ibmvnic_sub_crq_queue *scrq,
2364 bool do_h_free)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002365{
2366 struct device *dev = &adapter->vdev->dev;
2367 long rc;
2368
2369 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2370
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002371 if (do_h_free) {
2372 /* Close the sub-crqs */
2373 do {
2374 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2375 adapter->vdev->unit_address,
2376 scrq->crq_num);
2377 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002378
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002379 if (rc) {
2380 netdev_err(adapter->netdev,
2381 "Failed to release sub-CRQ %16lx, rc = %ld\n",
2382 scrq->crq_num, rc);
2383 }
Thomas Falconffa73852017-04-19 13:44:29 -04002384 }
2385
Thomas Falcon032c5e82015-12-21 11:26:06 -06002386 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2387 DMA_BIDIRECTIONAL);
2388 free_pages((unsigned long)scrq->msgs, 2);
2389 kfree(scrq);
2390}
2391
2392static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2393 *adapter)
2394{
2395 struct device *dev = &adapter->vdev->dev;
2396 struct ibmvnic_sub_crq_queue *scrq;
2397 int rc;
2398
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04002399 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002400 if (!scrq)
2401 return NULL;
2402
Nathan Fontenot7f7adc52017-04-19 13:45:16 -04002403 scrq->msgs =
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04002404 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002405 if (!scrq->msgs) {
2406 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2407 goto zero_page_failed;
2408 }
2409
2410 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2411 DMA_BIDIRECTIONAL);
2412 if (dma_mapping_error(dev, scrq->msg_token)) {
2413 dev_warn(dev, "Couldn't map crq queue messages page\n");
2414 goto map_failed;
2415 }
2416
2417 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2418 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2419
2420 if (rc == H_RESOURCE)
2421 rc = ibmvnic_reset_crq(adapter);
2422
2423 if (rc == H_CLOSED) {
2424 dev_warn(dev, "Partner adapter not ready, waiting.\n");
2425 } else if (rc) {
2426 dev_warn(dev, "Error %d registering sub-crq\n", rc);
2427 goto reg_failed;
2428 }
2429
Thomas Falcon032c5e82015-12-21 11:26:06 -06002430 scrq->adapter = adapter;
2431 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002432 spin_lock_init(&scrq->lock);
2433
2434 netdev_dbg(adapter->netdev,
2435 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2436 scrq->crq_num, scrq->hw_irq, scrq->irq);
2437
2438 return scrq;
2439
Thomas Falcon032c5e82015-12-21 11:26:06 -06002440reg_failed:
2441 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2442 DMA_BIDIRECTIONAL);
2443map_failed:
2444 free_pages((unsigned long)scrq->msgs, 2);
2445zero_page_failed:
2446 kfree(scrq);
2447
2448 return NULL;
2449}
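/* Queue geometry note: each sub-CRQ ring is four pages of 32-byte
 * entries, so with 4 KB pages (illustrative arithmetic)
 *
 *	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs)
 *	           = 16384 / 32 = 512 entries.
 *
 * The same 4 * PAGE_SIZE length is used for the DMA mapping, the
 * H_REG_SUB_CRQ registration, and the memset in
 * reset_one_sub_crq_queue(), and must stay in sync with the order-2
 * page allocation above.
 */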
2450
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002451static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002452{
2453 int i;
2454
2455 if (adapter->tx_scrq) {
Nathan Fontenot82e3be32018-02-21 21:33:56 -06002456 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04002457 if (!adapter->tx_scrq[i])
2458 continue;
2459
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002460 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2461 i);
Nathan Fontenotb5108882017-03-30 02:49:18 -04002462 if (adapter->tx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06002463 free_irq(adapter->tx_scrq[i]->irq,
2464 adapter->tx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05002465 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04002466 adapter->tx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002467 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04002468
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002469 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
2470 do_h_free);
Nathan Fontenotb5108882017-03-30 02:49:18 -04002471 }
2472
Nathan Fontenot9501df32017-03-15 23:38:07 -04002473 kfree(adapter->tx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002474 adapter->tx_scrq = NULL;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06002475 adapter->num_active_tx_scrqs = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002476 }
2477
2478 if (adapter->rx_scrq) {
Nathan Fontenot82e3be32018-02-21 21:33:56 -06002479 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
Nathan Fontenotb5108882017-03-30 02:49:18 -04002480 if (!adapter->rx_scrq[i])
2481 continue;
2482
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002483 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2484 i);
Nathan Fontenotb5108882017-03-30 02:49:18 -04002485 if (adapter->rx_scrq[i]->irq) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06002486 free_irq(adapter->rx_scrq[i]->irq,
2487 adapter->rx_scrq[i]);
Thomas Falcon88eb98a2016-07-06 15:35:16 -05002488 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
Nathan Fontenotb5108882017-03-30 02:49:18 -04002489 adapter->rx_scrq[i]->irq = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002490 }
Nathan Fontenotb5108882017-03-30 02:49:18 -04002491
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002492 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
2493 do_h_free);
Nathan Fontenotb5108882017-03-30 02:49:18 -04002494 }
2495
Nathan Fontenot9501df32017-03-15 23:38:07 -04002496 kfree(adapter->rx_scrq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002497 adapter->rx_scrq = NULL;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06002498 adapter->num_active_rx_scrqs = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002499 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002500}
2501
2502static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2503 struct ibmvnic_sub_crq_queue *scrq)
2504{
2505 struct device *dev = &adapter->vdev->dev;
2506 unsigned long rc;
2507
2508 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2509 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2510 if (rc)
2511 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
2512 scrq->hw_irq, rc);
2513 return rc;
2514}
2515
2516static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2517 struct ibmvnic_sub_crq_queue *scrq)
2518{
2519 struct device *dev = &adapter->vdev->dev;
2520 unsigned long rc;
2521
2522 if (scrq->hw_irq > 0x100000000ULL) {
2523 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2524 return 1;
2525 }
2526
2527 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2528 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2529 if (rc)
2530 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
2531 scrq->hw_irq, rc);
2532 return rc;
2533}
2534
2535static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2536 struct ibmvnic_sub_crq_queue *scrq)
2537{
2538 struct device *dev = &adapter->vdev->dev;
2539 struct ibmvnic_tx_buff *txbuff;
2540 union sub_crq *next;
2541 int index;
2542 int i, j;
Thomas Falconad7775d2016-04-01 17:20:34 -05002543 u8 first;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002544
2545restart_loop:
2546 while (pending_scrq(adapter, scrq)) {
2547 unsigned int pool = scrq->pool_index;
Thomas Falconffc385b2018-02-18 10:08:41 -06002548 int num_entries = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002549
2550 next = ibmvnic_next_scrq(adapter, scrq);
2551 for (i = 0; i < next->tx_comp.num_comps; i++) {
2552 if (next->tx_comp.rcs[i]) {
2553 dev_err(dev, "tx error %x\n",
2554 next->tx_comp.rcs[i]);
2555 continue;
2556 }
2557 index = be32_to_cpu(next->tx_comp.correlators[i]);
2558 txbuff = &adapter->tx_pool[pool].tx_buff[index];
2559
2560 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
2561 if (!txbuff->data_dma[j])
2562 continue;
2563
2564 txbuff->data_dma[j] = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002565 }
Thomas Falconad7775d2016-04-01 17:20:34 -05002566 /* if sub_crq was sent indirectly */
2567 first = txbuff->indir_arr[0].generic.first;
2568 if (first == IBMVNIC_CRQ_CMD) {
2569 dma_unmap_single(dev, txbuff->indir_dma,
2570 sizeof(txbuff->indir_arr),
2571 DMA_TO_DEVICE);
2572 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002573
Thomas Falcon142c0ac2017-03-05 12:18:41 -06002574 if (txbuff->last_frag) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06002575 dev_kfree_skb_any(txbuff->skb);
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04002576 txbuff->skb = NULL;
Thomas Falcon142c0ac2017-03-05 12:18:41 -06002577 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002578
Thomas Falconffc385b2018-02-18 10:08:41 -06002579 num_entries += txbuff->num_entries;
2580
Thomas Falcon032c5e82015-12-21 11:26:06 -06002581 adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
2582 producer_index] = index;
2583 adapter->tx_pool[pool].producer_index =
2584 (adapter->tx_pool[pool].producer_index + 1) %
Thomas Falcon068d9f92017-03-05 12:18:42 -06002585 adapter->req_tx_entries_per_subcrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002586 }
2587 /* remove tx_comp scrq */
2588 next->tx_comp.first = 0;
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04002589
Thomas Falconffc385b2018-02-18 10:08:41 -06002590 if (atomic_sub_return(num_entries, &scrq->used) <=
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04002591 (adapter->req_tx_entries_per_subcrq / 2) &&
2592 __netif_subqueue_stopped(adapter->netdev,
2593 scrq->pool_index)) {
2594 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
Thomas Falcon0aecb132018-02-26 18:10:58 -06002595 netdev_dbg(adapter->netdev, "Started queue %d\n",
2596 scrq->pool_index);
Nathan Fontenot7c3e7de2017-05-03 14:05:25 -04002597 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002598 }
2599
2600 enable_scrq_irq(adapter, scrq);
2601
2602 if (pending_scrq(adapter, scrq)) {
2603 disable_scrq_irq(adapter, scrq);
2604 goto restart_loop;
2605 }
2606
2607 return 0;
2608}
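/* The used-count bookkeeping here pairs with ibmvnic_xmit(): the
 * subqueue is stopped once scrq->used reaches
 * req_tx_entries_per_subcrq and is only woken when completions drain
 * it to half. With req_tx_entries_per_subcrq = 128 (illustrative):
 *
 *	ibmvnic_xmit():        stop when used >= 128
 *	ibmvnic_complete_tx(): wake when used <= 64
 *
 * The gap between the two thresholds provides hysteresis so the queue
 * does not flap between stopped and started on every completion.
 */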
2609
2610static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
2611{
2612 struct ibmvnic_sub_crq_queue *scrq = instance;
2613 struct ibmvnic_adapter *adapter = scrq->adapter;
2614
2615 disable_scrq_irq(adapter, scrq);
2616 ibmvnic_complete_tx(adapter, scrq);
2617
2618 return IRQ_HANDLED;
2619}
2620
2621static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
2622{
2623 struct ibmvnic_sub_crq_queue *scrq = instance;
2624 struct ibmvnic_adapter *adapter = scrq->adapter;
2625
Nathan Fontenot09fb35e2018-01-10 10:40:09 -06002626 /* When booting a kdump kernel we can hit pending interrupts
2627 * prior to completing driver initialization.
2628 */
2629 if (unlikely(adapter->state != VNIC_OPEN))
2630 return IRQ_NONE;
2631
John Allen3d52b592017-08-02 16:44:14 -05002632 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
2633
Thomas Falcon032c5e82015-12-21 11:26:06 -06002634 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
2635 disable_scrq_irq(adapter, scrq);
2636 __napi_schedule(&adapter->napi[scrq->scrq_num]);
2637 }
2638
2639 return IRQ_HANDLED;
2640}
2641
Thomas Falconea22d512016-07-06 15:35:17 -05002642static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
2643{
2644 struct device *dev = &adapter->vdev->dev;
2645 struct ibmvnic_sub_crq_queue *scrq;
2646 int i = 0, j = 0;
2647 int rc = 0;
2648
2649 for (i = 0; i < adapter->req_tx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002650 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
2651 i);
Thomas Falconea22d512016-07-06 15:35:17 -05002652 scrq = adapter->tx_scrq[i];
2653 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
2654
Michael Ellerman99c17902016-09-10 19:59:05 +10002655 if (!scrq->irq) {
Thomas Falconea22d512016-07-06 15:35:17 -05002656 rc = -EINVAL;
2657 dev_err(dev, "Error mapping irq\n");
2658 goto req_tx_irq_failed;
2659 }
2660
2661 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
2662 0, "ibmvnic_tx", scrq);
2663
2664 if (rc) {
2665 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
2666 scrq->irq, rc);
2667 irq_dispose_mapping(scrq->irq);
Nathan Fontenotaf9090c2018-02-20 11:04:18 -06002668 goto req_tx_irq_failed;
Thomas Falconea22d512016-07-06 15:35:17 -05002669 }
2670 }
2671
2672 for (i = 0; i < adapter->req_rx_queues; i++) {
Nathan Fontenotd1cf33d2017-08-08 15:24:05 -05002673 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
2674 i);
Thomas Falconea22d512016-07-06 15:35:17 -05002675 scrq = adapter->rx_scrq[i];
2676 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
Michael Ellerman99c17902016-09-10 19:59:05 +10002677 if (!scrq->irq) {
Thomas Falconea22d512016-07-06 15:35:17 -05002678 rc = -EINVAL;
2679 dev_err(dev, "Error mapping irq\n");
2680 goto req_rx_irq_failed;
2681 }
2682 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
2683 0, "ibmvnic_rx", scrq);
2684 if (rc) {
2685 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
2686 scrq->irq, rc);
2687 irq_dispose_mapping(scrq->irq);
2688 goto req_rx_irq_failed;
2689 }
2690 }
2691 return rc;
2692
2693req_rx_irq_failed:
Thomas Falcon8bf371e2016-10-27 12:28:52 -05002694 for (j = 0; j < i; j++) {
Thomas Falconea22d512016-07-06 15:35:17 -05002695 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
2696 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
Thomas Falcon8bf371e2016-10-27 12:28:52 -05002697 }
Thomas Falconea22d512016-07-06 15:35:17 -05002698 i = adapter->req_tx_queues;
2699req_tx_irq_failed:
Thomas Falcon8bf371e2016-10-27 12:28:52 -05002700 for (j = 0; j < i; j++) {
Thomas Falconea22d512016-07-06 15:35:17 -05002701 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
2702 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
Thomas Falcon8bf371e2016-10-27 12:28:52 -05002703 }
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002704 release_sub_crqs(adapter, 1);
Thomas Falconea22d512016-07-06 15:35:17 -05002705 return rc;
2706}
2707
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04002708static int init_sub_crqs(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002709{
2710 struct device *dev = &adapter->vdev->dev;
2711 struct ibmvnic_sub_crq_queue **allqueues;
2712 int registered_queues = 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002713 int total_queues;
2714 int more = 0;
Thomas Falconea22d512016-07-06 15:35:17 -05002715 int i;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002716
Thomas Falcon032c5e82015-12-21 11:26:06 -06002717 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
2718
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04002719 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002720 if (!allqueues)
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04002721 return -1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002722
2723 for (i = 0; i < total_queues; i++) {
2724 allqueues[i] = init_sub_crq_queue(adapter);
2725 if (!allqueues[i]) {
2726 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
2727 break;
2728 }
2729 registered_queues++;
2730 }
2731
2732 /* Make sure we were able to register the minimum number of queues */
2733 if (registered_queues <
2734 adapter->min_tx_queues + adapter->min_rx_queues) {
2735 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
2736 goto tx_failed;
2737 }
2738
2739 /* Distribute the allocation shortfall across rx and tx queues */
2740 for (i = 0; i < total_queues - registered_queues + more; i++) {
2741 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
2742 switch (i % 3) {
2743 case 0:
2744 if (adapter->req_rx_queues > adapter->min_rx_queues)
2745 adapter->req_rx_queues--;
2746 else
2747 more++;
2748 break;
2749 case 1:
2750 if (adapter->req_tx_queues > adapter->min_tx_queues)
2751 adapter->req_tx_queues--;
2752 else
2753 more++;
2754 break;
2755 }
2756 }
2757
2758 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04002759 sizeof(*adapter->tx_scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002760 if (!adapter->tx_scrq)
2761 goto tx_failed;
2762
2763 for (i = 0; i < adapter->req_tx_queues; i++) {
2764 adapter->tx_scrq[i] = allqueues[i];
2765 adapter->tx_scrq[i]->pool_index = i;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06002766 adapter->num_active_tx_scrqs++;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002767 }
2768
2769 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
Nathan Fontenot1bb3c732017-04-25 15:01:10 -04002770 sizeof(*adapter->rx_scrq), GFP_KERNEL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002771 if (!adapter->rx_scrq)
2772 goto rx_failed;
2773
2774 for (i = 0; i < adapter->req_rx_queues; i++) {
2775 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
2776 adapter->rx_scrq[i]->scrq_num = i;
Nathan Fontenot82e3be32018-02-21 21:33:56 -06002777 adapter->num_active_rx_scrqs++;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002778 }
2779
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04002780 kfree(allqueues);
2781 return 0;
2782
2783rx_failed:
2784 kfree(adapter->tx_scrq);
2785 adapter->tx_scrq = NULL;
2786tx_failed:
2787 for (i = 0; i < registered_queues; i++)
Nathan Fontenotd7c0ef32018-02-19 13:30:31 -06002788 release_sub_crq_queue(adapter, allqueues[i], 1);
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04002789 kfree(allqueues);
2790 return -1;
2791}
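/* Worked example of the shortfall handling above (numbers
 * illustrative): with req_tx_queues = req_rx_queues = 4, suppose only
 * 6 of the 8 sub-CRQs register. The distribution loop then runs for
 * i = 0 and i = 1:
 *
 *	i = 0: req_rx_queues 4 -> 3
 *	i = 1: req_tx_queues 4 -> 3
 *
 * If one side is already at its minimum, 'more' is incremented, which
 * extends the loop so the other side absorbs the remaining reduction.
 */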
2792
2793static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
2794{
2795 struct device *dev = &adapter->vdev->dev;
2796 union ibmvnic_crq crq;
John Allenc26eba02017-10-26 16:23:25 -05002797 int max_entries;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04002798
2799 if (!retry) {
2800 /* Sub-CRQ entries are 32 byte long */
2801 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
2802
2803 if (adapter->min_tx_entries_per_subcrq > entries_page ||
2804 adapter->min_rx_add_entries_per_subcrq > entries_page) {
2805 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
2806 return;
2807 }
2808
John Allenc26eba02017-10-26 16:23:25 -05002809 if (adapter->desired.mtu)
2810 adapter->req_mtu = adapter->desired.mtu;
2811 else
2812 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04002813
John Allenc26eba02017-10-26 16:23:25 -05002814 if (!adapter->desired.tx_entries)
2815 adapter->desired.tx_entries =
2816 adapter->max_tx_entries_per_subcrq;
2817 if (!adapter->desired.rx_entries)
2818 adapter->desired.rx_entries =
2819 adapter->max_rx_add_entries_per_subcrq;
2820
2821 max_entries = IBMVNIC_MAX_LTB_SIZE /
2822 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
2823
2824 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
2825 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
2826 adapter->desired.tx_entries = max_entries;
2827 }
2828
2829 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
2830 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
2831 adapter->desired.rx_entries = max_entries;
2832 }
2833
2834 if (adapter->desired.tx_entries)
2835 adapter->req_tx_entries_per_subcrq =
2836 adapter->desired.tx_entries;
2837 else
2838 adapter->req_tx_entries_per_subcrq =
2839 adapter->max_tx_entries_per_subcrq;
2840
2841 if (adapter->desired.rx_entries)
2842 adapter->req_rx_add_entries_per_subcrq =
2843 adapter->desired.rx_entries;
2844 else
2845 adapter->req_rx_add_entries_per_subcrq =
2846 adapter->max_rx_add_entries_per_subcrq;
2847
2848 if (adapter->desired.tx_queues)
2849 adapter->req_tx_queues =
2850 adapter->desired.tx_queues;
2851 else
2852 adapter->req_tx_queues =
2853 adapter->opt_tx_comp_sub_queues;
2854
2855 if (adapter->desired.rx_queues)
2856 adapter->req_rx_queues =
2857 adapter->desired.rx_queues;
2858 else
2859 adapter->req_rx_queues =
2860 adapter->opt_rx_comp_queues;
2861
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04002862 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04002863 }
2864
Thomas Falcon032c5e82015-12-21 11:26:06 -06002865 memset(&crq, 0, sizeof(crq));
2866 crq.request_capability.first = IBMVNIC_CRQ_CMD;
2867 crq.request_capability.cmd = REQUEST_CAPABILITY;
2868
2869 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06002870 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06002871 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002872 ibmvnic_send_crq(adapter, &crq);
2873
2874 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06002875 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06002876 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002877 ibmvnic_send_crq(adapter, &crq);
2878
2879 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
Thomas Falconde89e852016-03-01 10:20:09 -06002880 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
Thomas Falcon901e0402017-02-15 12:17:59 -06002881 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002882 ibmvnic_send_crq(adapter, &crq);
2883
2884 crq.request_capability.capability =
2885 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
2886 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06002887 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06002888 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002889 ibmvnic_send_crq(adapter, &crq);
2890
2891 crq.request_capability.capability =
2892 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
2893 crq.request_capability.number =
Thomas Falconde89e852016-03-01 10:20:09 -06002894 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
Thomas Falcon901e0402017-02-15 12:17:59 -06002895 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002896 ibmvnic_send_crq(adapter, &crq);
2897
2898 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
Thomas Falconde89e852016-03-01 10:20:09 -06002899 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
Thomas Falcon901e0402017-02-15 12:17:59 -06002900 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002901 ibmvnic_send_crq(adapter, &crq);
2902
2903 if (adapter->netdev->flags & IFF_PROMISC) {
2904 if (adapter->promisc_supported) {
2905 crq.request_capability.capability =
2906 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06002907 crq.request_capability.number = cpu_to_be64(1);
Thomas Falcon901e0402017-02-15 12:17:59 -06002908 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002909 ibmvnic_send_crq(adapter, &crq);
2910 }
2911 } else {
2912 crq.request_capability.capability =
2913 cpu_to_be16(PROMISC_REQUESTED);
Thomas Falconde89e852016-03-01 10:20:09 -06002914 crq.request_capability.number = cpu_to_be64(0);
Thomas Falcon901e0402017-02-15 12:17:59 -06002915 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002916 ibmvnic_send_crq(adapter, &crq);
2917 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06002918}
2919
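/* Return 1 if the entry at the sub-CRQ cursor has been posted by the
 * server (response bit set in its first byte), 0 otherwise.
 */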
2920static int pending_scrq(struct ibmvnic_adapter *adapter,
2921 struct ibmvnic_sub_crq_queue *scrq)
2922{
2923 union sub_crq *entry = &scrq->msgs[scrq->cur];
2924
Thomas Falcon1cf9cc72017-06-14 23:50:08 -05002925 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002926 return 1;
2927 else
2928 return 0;
2929}
2930
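/* Consume the next valid entry from a sub-CRQ ring under the queue
 * lock, advancing the cursor with wrap-around. Returns NULL when no
 * completed entry is pending.
 */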
2931static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
2932 struct ibmvnic_sub_crq_queue *scrq)
2933{
2934 union sub_crq *entry;
2935 unsigned long flags;
2936
2937 spin_lock_irqsave(&scrq->lock, flags);
2938 entry = &scrq->msgs[scrq->cur];
2939 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
2940 if (++scrq->cur == scrq->size)
2941 scrq->cur = 0;
2942 } else {
2943 entry = NULL;
2944 }
2945 spin_unlock_irqrestore(&scrq->lock, flags);
2946
2947 return entry;
2948}
2949
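/* Main-CRQ counterpart of ibmvnic_next_scrq(); the caller (the CRQ
 * tasklet) already holds queue->lock, so none is taken here.
 */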
2950static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
2951{
2952 struct ibmvnic_crq_queue *queue = &adapter->crq;
2953 union ibmvnic_crq *crq;
2954
2955 crq = &queue->msgs[queue->cur];
2956 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
2957 if (++queue->cur == queue->size)
2958 queue->cur = 0;
2959 } else {
2960 crq = NULL;
2961 }
2962
2963 return crq;
2964}
2965
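/* Hand one four-longword sub-CRQ descriptor to the hypervisor via the
 * H_SEND_SUB_CRQ hcall, addressed by the remote queue handle.
 */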
2966static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
2967 union sub_crq *sub_crq)
2968{
2969 unsigned int ua = adapter->vdev->unit_address;
2970 struct device *dev = &adapter->vdev->dev;
2971 u64 *u64_crq = (u64 *)sub_crq;
2972 int rc;
2973
2974 netdev_dbg(adapter->netdev,
2975 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
2976 (unsigned long int)cpu_to_be64(remote_handle),
2977 (unsigned long int)cpu_to_be64(u64_crq[0]),
2978 (unsigned long int)cpu_to_be64(u64_crq[1]),
2979 (unsigned long int)cpu_to_be64(u64_crq[2]),
2980 (unsigned long int)cpu_to_be64(u64_crq[3]));
2981
2982 /* Make sure the hypervisor sees the complete request */
2983 mb();
2984
2985 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
2986 cpu_to_be64(remote_handle),
2987 cpu_to_be64(u64_crq[0]),
2988 cpu_to_be64(u64_crq[1]),
2989 cpu_to_be64(u64_crq[2]),
2990 cpu_to_be64(u64_crq[3]));
2991
2992 if (rc) {
2993 if (rc == H_CLOSED)
2994 dev_warn(dev, "CRQ Queue closed\n");
2995 dev_err(dev, "Send error (rc=%d)\n", rc);
2996 }
2997
2998 return rc;
2999}
3000
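/* Submit num_entries descriptors with a single H_SEND_SUB_CRQ_INDIRECT
 * hcall; ioba is the DMA address of the descriptor array. Batching
 * this way avoids one hcall per descriptor on the transmit path.
 */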
Thomas Falconad7775d2016-04-01 17:20:34 -05003001static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3002 u64 remote_handle, u64 ioba, u64 num_entries)
3003{
3004 unsigned int ua = adapter->vdev->unit_address;
3005 struct device *dev = &adapter->vdev->dev;
3006 int rc;
3007
3008 /* Make sure the hypervisor sees the complete request */
3009 mb();
3010 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3011 cpu_to_be64(remote_handle),
3012 ioba, num_entries);
3013
3014 if (rc) {
3015 if (rc == H_CLOSED)
3016 dev_warn(dev, "CRQ Queue closed\n");
3017 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
3018 }
3019
3020 return rc;
3021}
3022
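/* Post a two-longword command on the main CRQ via H_SEND_CRQ. H_CLOSED
 * means the partner has closed its end of the queue; if a reset is
 * already in progress, escalate to a fatal reset so the queue is
 * reinitialized.
 */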
Thomas Falcon032c5e82015-12-21 11:26:06 -06003023static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3024 union ibmvnic_crq *crq)
3025{
3026 unsigned int ua = adapter->vdev->unit_address;
3027 struct device *dev = &adapter->vdev->dev;
3028 u64 *u64_crq = (u64 *)crq;
3029 int rc;
3030
3031 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3032 (unsigned long int)cpu_to_be64(u64_crq[0]),
3033 (unsigned long int)cpu_to_be64(u64_crq[1]));
3034
3035 /* Make sure the hypervisor sees the complete request */
3036 mb();
3037
3038 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3039 cpu_to_be64(u64_crq[0]),
3040 cpu_to_be64(u64_crq[1]));
3041
3042 if (rc) {
Nathan Fontenotec95dff2018-02-07 13:00:24 -06003043 if (rc == H_CLOSED) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003044 dev_warn(dev, "CRQ Queue closed\n");
Nathan Fontenotec95dff2018-02-07 13:00:24 -06003045 if (adapter->resetting)
3046 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3047 }
3048
Thomas Falcon032c5e82015-12-21 11:26:06 -06003049 dev_warn(dev, "Send error (rc=%d)\n", rc);
3050 }
3051
3052 return rc;
3053}
3054
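/* Begin the CRQ handshake with the partner by sending the INIT command. */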
3055static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3056{
3057 union ibmvnic_crq crq;
3058
3059 memset(&crq, 0, sizeof(crq));
3060 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3061 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3062 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3063
3064 return ibmvnic_send_crq(adapter, &crq);
3065}
3066
Thomas Falcon032c5e82015-12-21 11:26:06 -06003067static int send_version_xchg(struct ibmvnic_adapter *adapter)
3068{
3069 union ibmvnic_crq crq;
3070
3071 memset(&crq, 0, sizeof(crq));
3072 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3073 crq.version_exchange.cmd = VERSION_EXCHANGE;
3074 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3075
3076 return ibmvnic_send_crq(adapter, &crq);
3077}
3078
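/* Client data travels in the login buffer as a packed list of
 * TLV-style entries: a one-byte type, a big-endian length, then the
 * NUL-terminated string itself.
 */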
Nathan Fontenot37798d02017-11-08 11:23:56 -06003079struct vnic_login_client_data {
3080 u8 type;
3081 __be16 len;
3082 char name;
3083} __packed;
3084
3085static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3086{
3087 int len;
3088
3089 /* Calculate the amount of buffer space needed for the
 3090 * vnic client data in the login buffer. There are four entries:
 3091 * OS name, LPAR name, device name, and a terminating null entry.
3092 */
3093 len = 4 * sizeof(struct vnic_login_client_data);
 3094 len += 6; /* "Linux" plus NUL terminator */
3095 len += strlen(utsname()->nodename) + 1;
3096 len += strlen(adapter->netdev->name) + 1;
3097
3098 return len;
3099}
3100
3101static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3102 struct vnic_login_client_data *vlcd)
3103{
3104 const char *os_name = "Linux";
3105 int len;
3106
3107 /* Type 1 - LPAR OS */
3108 vlcd->type = 1;
3109 len = strlen(os_name) + 1;
3110 vlcd->len = cpu_to_be16(len);
3111 strncpy(&vlcd->name, os_name, len);
3112 vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);
3113
3114 /* Type 2 - LPAR name */
3115 vlcd->type = 2;
3116 len = strlen(utsname()->nodename) + 1;
3117 vlcd->len = cpu_to_be16(len);
3118 strncpy(&vlcd->name, utsname()->nodename, len);
3119 vlcd = (struct vnic_login_client_data *)((char *)&vlcd->name + len);
3120
3121 /* Type 3 - device name */
3122 vlcd->type = 3;
3123 len = strlen(adapter->netdev->name) + 1;
3124 vlcd->len = cpu_to_be16(len);
3125 strncpy(&vlcd->name, adapter->netdev->name, len);
3126}
3127
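/* Build and send the LOGIN request. The request buffer carries the
 * sub-CRQ handles of every TX/RX queue plus the client data described
 * above; the response buffer mapped here is parsed later by
 * handle_login_rsp().
 */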
Thomas Falcon20a8ab72018-02-26 18:10:59 -06003128static int send_login(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003129{
3130 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3131 struct ibmvnic_login_buffer *login_buffer;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003132 struct device *dev = &adapter->vdev->dev;
3133 dma_addr_t rsp_buffer_token;
3134 dma_addr_t buffer_token;
3135 size_t rsp_buffer_size;
3136 union ibmvnic_crq crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003137 size_t buffer_size;
3138 __be64 *tx_list_p;
3139 __be64 *rx_list_p;
Nathan Fontenot37798d02017-11-08 11:23:56 -06003140 int client_data_len;
3141 struct vnic_login_client_data *vlcd;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003142 int i;
3143
Thomas Falcon20a8ab72018-02-26 18:10:59 -06003144 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3145 netdev_err(adapter->netdev,
3146 "RX or TX queues are not allocated, device login failed\n");
3147 return -1;
3148 }
3149
Thomas Falcon34f0f4e2018-02-13 18:23:40 -06003150 release_login_rsp_buffer(adapter);
Nathan Fontenot37798d02017-11-08 11:23:56 -06003151 client_data_len = vnic_client_data_len(adapter);
3152
Thomas Falcon032c5e82015-12-21 11:26:06 -06003153 buffer_size =
3154 sizeof(struct ibmvnic_login_buffer) +
Nathan Fontenot37798d02017-11-08 11:23:56 -06003155 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3156 client_data_len;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003157
Nathan Fontenot37798d02017-11-08 11:23:56 -06003158 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003159 if (!login_buffer)
3160 goto buf_alloc_failed;
3161
3162 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3163 DMA_TO_DEVICE);
3164 if (dma_mapping_error(dev, buffer_token)) {
3165 dev_err(dev, "Couldn't map login buffer\n");
3166 goto buf_map_failed;
3167 }
3168
John Allen498cd8e2016-04-06 11:49:55 -05003169 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3170 sizeof(u64) * adapter->req_tx_queues +
3171 sizeof(u64) * adapter->req_rx_queues +
3172 sizeof(u64) * adapter->req_rx_queues +
3173 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003174
3175 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3176 if (!login_rsp_buffer)
3177 goto buf_rsp_alloc_failed;
3178
3179 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3180 rsp_buffer_size, DMA_FROM_DEVICE);
3181 if (dma_mapping_error(dev, rsp_buffer_token)) {
3182 dev_err(dev, "Couldn't map login rsp buffer\n");
3183 goto buf_rsp_map_failed;
3184 }
Nathan Fontenot661a2622017-04-19 13:44:58 -04003185
Thomas Falcon032c5e82015-12-21 11:26:06 -06003186 adapter->login_buf = login_buffer;
3187 adapter->login_buf_token = buffer_token;
3188 adapter->login_buf_sz = buffer_size;
3189 adapter->login_rsp_buf = login_rsp_buffer;
3190 adapter->login_rsp_buf_token = rsp_buffer_token;
3191 adapter->login_rsp_buf_sz = rsp_buffer_size;
3192
3193 login_buffer->len = cpu_to_be32(buffer_size);
3194 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3195 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3196 login_buffer->off_txcomp_subcrqs =
3197 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3198 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3199 login_buffer->off_rxcomp_subcrqs =
3200 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3201 sizeof(u64) * adapter->req_tx_queues);
3202 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3203 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3204
3205 tx_list_p = (__be64 *)((char *)login_buffer +
3206 sizeof(struct ibmvnic_login_buffer));
3207 rx_list_p = (__be64 *)((char *)login_buffer +
3208 sizeof(struct ibmvnic_login_buffer) +
3209 sizeof(u64) * adapter->req_tx_queues);
3210
3211 for (i = 0; i < adapter->req_tx_queues; i++) {
3212 if (adapter->tx_scrq[i]) {
3213 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3214 crq_num);
3215 }
3216 }
3217
3218 for (i = 0; i < adapter->req_rx_queues; i++) {
3219 if (adapter->rx_scrq[i]) {
3220 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3221 crq_num);
3222 }
3223 }
3224
Nathan Fontenot37798d02017-11-08 11:23:56 -06003225 /* Insert vNIC login client data */
3226 vlcd = (struct vnic_login_client_data *)
3227 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3228 login_buffer->client_data_offset =
3229 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3230 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3231
3232 vnic_add_client_data(adapter, vlcd);
3233
Thomas Falcon032c5e82015-12-21 11:26:06 -06003234 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3235 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3236 netdev_dbg(adapter->netdev, "%016lx\n",
3237 ((unsigned long int *)(adapter->login_buf))[i]);
3238 }
3239
3240 memset(&crq, 0, sizeof(crq));
3241 crq.login.first = IBMVNIC_CRQ_CMD;
3242 crq.login.cmd = LOGIN;
3243 crq.login.ioba = cpu_to_be32(buffer_token);
3244 crq.login.len = cpu_to_be32(buffer_size);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003245 ibmvnic_send_crq(adapter, &crq);
3246
Thomas Falcon20a8ab72018-02-26 18:10:59 -06003247 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003248
Thomas Falcon032c5e82015-12-21 11:26:06 -06003249buf_rsp_map_failed:
3250 kfree(login_rsp_buffer);
3251buf_rsp_alloc_failed:
3252 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3253buf_map_failed:
3254 kfree(login_buffer);
3255buf_alloc_failed:
Thomas Falcon20a8ab72018-02-26 18:10:59 -06003256 return -1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003257}
3258
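/* REQUEST_MAP and REQUEST_UNMAP register and release DMA-mapped
 * buffers with the server by map id; QUERY_MAP reports the server's
 * page accounting.
 */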
3259static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3260 u32 len, u8 map_id)
3261{
3262 union ibmvnic_crq crq;
3263
3264 memset(&crq, 0, sizeof(crq));
3265 crq.request_map.first = IBMVNIC_CRQ_CMD;
3266 crq.request_map.cmd = REQUEST_MAP;
3267 crq.request_map.map_id = map_id;
3268 crq.request_map.ioba = cpu_to_be32(addr);
3269 crq.request_map.len = cpu_to_be32(len);
3270 ibmvnic_send_crq(adapter, &crq);
3271}
3272
3273static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3274{
3275 union ibmvnic_crq crq;
3276
3277 memset(&crq, 0, sizeof(crq));
3278 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3279 crq.request_unmap.cmd = REQUEST_UNMAP;
3280 crq.request_unmap.map_id = map_id;
3281 ibmvnic_send_crq(adapter, &crq);
3282}
3283
3284static void send_map_query(struct ibmvnic_adapter *adapter)
3285{
3286 union ibmvnic_crq crq;
3287
3288 memset(&crq, 0, sizeof(crq));
3289 crq.query_map.first = IBMVNIC_CRQ_CMD;
3290 crq.query_map.cmd = QUERY_MAP;
3291 ibmvnic_send_crq(adapter, &crq);
3292}
3293
3294/* Send a series of CRQs requesting various capabilities of the VNIC server */
3295static void send_cap_queries(struct ibmvnic_adapter *adapter)
3296{
3297 union ibmvnic_crq crq;
3298
Thomas Falcon901e0402017-02-15 12:17:59 -06003299 atomic_set(&adapter->running_cap_crqs, 0);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003300 memset(&crq, 0, sizeof(crq));
3301 crq.query_capability.first = IBMVNIC_CRQ_CMD;
3302 crq.query_capability.cmd = QUERY_CAPABILITY;
3303
3304 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003305 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003306 ibmvnic_send_crq(adapter, &crq);
3307
3308 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003309 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003310 ibmvnic_send_crq(adapter, &crq);
3311
3312 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003313 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003314 ibmvnic_send_crq(adapter, &crq);
3315
3316 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003317 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003318 ibmvnic_send_crq(adapter, &crq);
3319
3320 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003321 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003322 ibmvnic_send_crq(adapter, &crq);
3323
3324 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003325 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003326 ibmvnic_send_crq(adapter, &crq);
3327
3328 crq.query_capability.capability =
3329 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003330 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003331 ibmvnic_send_crq(adapter, &crq);
3332
3333 crq.query_capability.capability =
3334 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003335 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003336 ibmvnic_send_crq(adapter, &crq);
3337
3338 crq.query_capability.capability =
3339 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003340 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003341 ibmvnic_send_crq(adapter, &crq);
3342
3343 crq.query_capability.capability =
3344 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003345 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003346 ibmvnic_send_crq(adapter, &crq);
3347
3348 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
Thomas Falcon901e0402017-02-15 12:17:59 -06003349 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003350 ibmvnic_send_crq(adapter, &crq);
3351
3352 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06003353 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003354 ibmvnic_send_crq(adapter, &crq);
3355
3356 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06003357 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003358 ibmvnic_send_crq(adapter, &crq);
3359
3360 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
Thomas Falcon901e0402017-02-15 12:17:59 -06003361 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003362 ibmvnic_send_crq(adapter, &crq);
3363
3364 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
Thomas Falcon901e0402017-02-15 12:17:59 -06003365 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003366 ibmvnic_send_crq(adapter, &crq);
3367
3368 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
Thomas Falcon901e0402017-02-15 12:17:59 -06003369 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003370 ibmvnic_send_crq(adapter, &crq);
3371
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04003372 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3373 atomic_inc(&adapter->running_cap_crqs);
3374 ibmvnic_send_crq(adapter, &crq);
3375
Thomas Falcon032c5e82015-12-21 11:26:06 -06003376 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003377 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003378 ibmvnic_send_crq(adapter, &crq);
3379
3380 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
Thomas Falcon901e0402017-02-15 12:17:59 -06003381 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003382 ibmvnic_send_crq(adapter, &crq);
3383
3384 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003385 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003386 ibmvnic_send_crq(adapter, &crq);
3387
3388 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
Thomas Falcon901e0402017-02-15 12:17:59 -06003389 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003390 ibmvnic_send_crq(adapter, &crq);
3391
3392 crq.query_capability.capability =
3393 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
Thomas Falcon901e0402017-02-15 12:17:59 -06003394 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003395 ibmvnic_send_crq(adapter, &crq);
3396
3397 crq.query_capability.capability =
3398 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003399 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003400 ibmvnic_send_crq(adapter, &crq);
3401
3402 crq.query_capability.capability =
3403 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003404 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003405 ibmvnic_send_crq(adapter, &crq);
3406
3407 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
Thomas Falcon901e0402017-02-15 12:17:59 -06003408 atomic_inc(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003409 ibmvnic_send_crq(adapter, &crq);
3410}
3411
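/* VPD (vital product data) is fetched in two steps: query the size,
 * then fetch the buffer itself. Each handler completes fw_done so the
 * waiting requester can proceed.
 */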
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02003412static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3413 struct ibmvnic_adapter *adapter)
3414{
3415 struct device *dev = &adapter->vdev->dev;
3416
3417 if (crq->get_vpd_size_rsp.rc.code) {
3418 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3419 crq->get_vpd_size_rsp.rc.code);
3420 complete(&adapter->fw_done);
3421 return;
3422 }
3423
3424 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3425 complete(&adapter->fw_done);
3426}
3427
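/* Extract the firmware level from the VPD buffer: locate the ASCII
 * "RM" keyword, read the one-byte length that follows it, and copy
 * that many bytes into adapter->fw_version, defaulting to "N/A".
 */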
3428static void handle_vpd_rsp(union ibmvnic_crq *crq,
3429 struct ibmvnic_adapter *adapter)
3430{
3431 struct device *dev = &adapter->vdev->dev;
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02003432 unsigned char *substr = NULL;
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02003433 u8 fw_level_len = 0;
3434
3435 memset(adapter->fw_version, 0, 32);
3436
3437 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3438 DMA_FROM_DEVICE);
3439
3440 if (crq->get_vpd_rsp.rc.code) {
3441 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3442 crq->get_vpd_rsp.rc.code);
3443 goto complete;
3444 }
3445
3446 /* get the position of the firmware version info
3447 * located after the ASCII 'RM' substring in the buffer
3448 */
3449 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3450 if (!substr) {
Desnes Augusto Nunes do Rosarioa1073112018-02-01 16:04:30 -02003451 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02003452 goto complete;
3453 }
3454
3455 /* get length of firmware level ASCII substring */
3456 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
3457 fw_level_len = *(substr + 2);
3458 } else {
3459 dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
3460 goto complete;
3461 }
3462
3463 /* copy firmware version string from vpd into adapter */
3464 if ((substr + 3 + fw_level_len) <
3465 (adapter->vpd->buff + adapter->vpd->len)) {
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02003466 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02003467 } else {
3468 dev_info(dev, "FW substr extrapolated VPD buff\n");
3469 }
3470
3471complete:
Desnes Augusto Nunes do Rosario21a25452018-02-05 14:33:55 -02003472 if (adapter->fw_version[0] == '\0')
3473 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
Desnes Augusto Nunes do Rosario4e6759b2017-11-13 15:59:19 -02003474 complete(&adapter->fw_done);
3475}
3476
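/* Parse the server's checksum/TSO offload capabilities, enable the
 * matching NETIF_F_* features on the netdev, and send
 * CONTROL_IP_OFFLOAD to commit the selection.
 */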
Thomas Falcon032c5e82015-12-21 11:26:06 -06003477static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3478{
3479 struct device *dev = &adapter->vdev->dev;
3480 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
3481 union ibmvnic_crq crq;
3482 int i;
3483
3484 dma_unmap_single(dev, adapter->ip_offload_tok,
3485 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
3486
3487 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
3488 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
3489 netdev_dbg(adapter->netdev, "%016lx\n",
3490 ((unsigned long int *)(buf))[i]);
3491
3492 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
3493 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
3494 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
3495 buf->tcp_ipv4_chksum);
3496 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
3497 buf->tcp_ipv6_chksum);
3498 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
3499 buf->udp_ipv4_chksum);
3500 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
3501 buf->udp_ipv6_chksum);
3502 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
3503 buf->large_tx_ipv4);
3504 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
3505 buf->large_tx_ipv6);
3506 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
3507 buf->large_rx_ipv4);
3508 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
3509 buf->large_rx_ipv6);
3510 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
3511 buf->max_ipv4_header_size);
3512 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
3513 buf->max_ipv6_header_size);
3514 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
3515 buf->max_tcp_header_size);
3516 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
3517 buf->max_udp_header_size);
3518 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
3519 buf->max_large_tx_size);
3520 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
3521 buf->max_large_rx_size);
3522 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
3523 buf->ipv6_extension_header);
3524 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
3525 buf->tcp_pseudosum_req);
3526 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
3527 buf->num_ipv6_ext_headers);
3528 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
3529 buf->off_ipv6_ext_headers);
3530
3531 adapter->ip_offload_ctrl_tok =
3532 dma_map_single(dev, &adapter->ip_offload_ctrl,
3533 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
3534
3535 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
3536 dev_err(dev, "Couldn't map ip offload control buffer\n");
3537 return;
3538 }
3539
Thomas Falconf6897942018-01-18 19:05:01 -06003540 adapter->ip_offload_ctrl.len =
3541 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
Thomas Falcon032c5e82015-12-21 11:26:06 -06003542 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
Thomas Falconf6897942018-01-18 19:05:01 -06003543 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3544 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003545 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3546 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3547 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
3548 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
Thomas Falconfdb06102017-10-17 12:36:55 -05003549 adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
3550 adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003551
Thomas Falconfdb06102017-10-17 12:36:55 -05003552 /* large_rx disabled for now, additional features needed */
Thomas Falcon032c5e82015-12-21 11:26:06 -06003553 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3554 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3555
Thomas Falcon15482052017-10-17 12:36:54 -05003556 adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003557
3558 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3559 adapter->netdev->features |= NETIF_F_IP_CSUM;
3560
3561 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3562 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
3563
Thomas Falcon9be02cd2016-04-01 17:20:35 -05003564 if ((adapter->netdev->features &
3565 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3566 adapter->netdev->features |= NETIF_F_RXCSUM;
3567
Thomas Falconfdb06102017-10-17 12:36:55 -05003568 if (buf->large_tx_ipv4)
3569 adapter->netdev->features |= NETIF_F_TSO;
3570 if (buf->large_tx_ipv6)
3571 adapter->netdev->features |= NETIF_F_TSO6;
3572
Thomas Falconaa0bf852017-10-17 12:36:56 -05003573 adapter->netdev->hw_features |= adapter->netdev->features;
3574
Thomas Falcon032c5e82015-12-21 11:26:06 -06003575 memset(&crq, 0, sizeof(crq));
3576 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
3577 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
3578 crq.control_ip_offload.len =
3579 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3580 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
3581 ibmvnic_send_crq(adapter, &crq);
3582}
3583
3584static void handle_error_info_rsp(union ibmvnic_crq *crq,
3585 struct ibmvnic_adapter *adapter)
3586{
3587 struct device *dev = &adapter->vdev->dev;
Wei Yongjun96183182016-06-27 20:48:53 +08003588 struct ibmvnic_error_buff *error_buff, *tmp;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003589 unsigned long flags;
3590 bool found = false;
3591 int i;
3592
3593 if (!crq->request_error_rsp.rc.code) {
3594 dev_info(dev, "Request Error Rsp returned with rc=%x\n",
3595 crq->request_error_rsp.rc.code);
3596 return;
3597 }
3598
3599 spin_lock_irqsave(&adapter->error_list_lock, flags);
Wei Yongjun96183182016-06-27 20:48:53 +08003600 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003601 if (error_buff->error_id == crq->request_error_rsp.error_id) {
3602 found = true;
3603 list_del(&error_buff->list);
3604 break;
3605 }
3606 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3607
3608 if (!found) {
3609 dev_err(dev, "Couldn't find error id %x\n",
Thomas Falcon75224c92017-02-15 10:33:33 -06003610 be32_to_cpu(crq->request_error_rsp.error_id));
Thomas Falcon032c5e82015-12-21 11:26:06 -06003611 return;
3612 }
3613
3614 dev_err(dev, "Detailed info for error id %x:",
Thomas Falcon75224c92017-02-15 10:33:33 -06003615 be32_to_cpu(crq->request_error_rsp.error_id));
Thomas Falcon032c5e82015-12-21 11:26:06 -06003616
3617 for (i = 0; i < error_buff->len; i++) {
3618 pr_cont("%02x", (int)error_buff->buff[i]);
3619 if (i % 8 == 7)
3620 pr_cont(" ");
3621 }
3622 pr_cont("\n");
3623
3624 dma_unmap_single(dev, error_buff->dma, error_buff->len,
3625 DMA_FROM_DEVICE);
3626 kfree(error_buff->buff);
3627 kfree(error_buff);
3628}
3629
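/* Allocate and DMA-map a buffer for detailed error data, queue it on
 * adapter->errors, and ask the server to fill it via
 * REQUEST_ERROR_INFO, waiting up to 30 seconds before unwinding.
 */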
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04003630static void request_error_information(struct ibmvnic_adapter *adapter,
3631 union ibmvnic_crq *err_crq)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003632{
Thomas Falcon032c5e82015-12-21 11:26:06 -06003633 struct device *dev = &adapter->vdev->dev;
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04003634 struct net_device *netdev = adapter->netdev;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003635 struct ibmvnic_error_buff *error_buff;
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04003636 unsigned long timeout = msecs_to_jiffies(30000);
3637 union ibmvnic_crq crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003638 unsigned long flags;
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04003639 int rc, detail_len;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003640
3641 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
3642 if (!error_buff)
3643 return;
3644
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04003645 detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003646 error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
3647 if (!error_buff->buff) {
3648 kfree(error_buff);
3649 return;
3650 }
3651
3652 error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
3653 DMA_FROM_DEVICE);
3654 if (dma_mapping_error(dev, error_buff->dma)) {
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04003655 netdev_err(netdev, "Couldn't map error buffer\n");
Thomas Falcon032c5e82015-12-21 11:26:06 -06003656 kfree(error_buff->buff);
3657 kfree(error_buff);
3658 return;
3659 }
3660
Thomas Falcon032c5e82015-12-21 11:26:06 -06003661 error_buff->len = detail_len;
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04003662 error_buff->error_id = err_crq->error_indication.error_id;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003663
3664 spin_lock_irqsave(&adapter->error_list_lock, flags);
3665 list_add_tail(&error_buff->list, &adapter->errors);
3666 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3667
Nathan Fontenot2f9de9b2017-04-21 15:38:52 -04003668 memset(&crq, 0, sizeof(crq));
3669 crq.request_error_info.first = IBMVNIC_CRQ_CMD;
3670 crq.request_error_info.cmd = REQUEST_ERROR_INFO;
3671 crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
3672 crq.request_error_info.len = cpu_to_be32(detail_len);
3673 crq.request_error_info.error_id = err_crq->error_indication.error_id;
3674
3675 rc = ibmvnic_send_crq(adapter, &crq);
3676 if (rc) {
3677 netdev_err(netdev, "failed to request error information\n");
3678 goto err_info_fail;
3679 }
3680
3681 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3682 netdev_err(netdev, "timeout waiting for error information\n");
3683 goto err_info_fail;
3684 }
3685
3686 return;
3687
3688err_info_fail:
3689 spin_lock_irqsave(&adapter->error_list_lock, flags);
3690 list_del(&error_buff->list);
3691 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
3692
3693 kfree(error_buff->buff);
3694 kfree(error_buff);
3695}
3696
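/* Log an ERROR_INDICATION from firmware, request the detailed error
 * data when an error id is supplied, then schedule a fatal or
 * non-fatal reset according to the flags.
 */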
3697static void handle_error_indication(union ibmvnic_crq *crq,
3698 struct ibmvnic_adapter *adapter)
3699{
3700 struct device *dev = &adapter->vdev->dev;
3701
3702 dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
3703 crq->error_indication.flags
3704 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
3705 be32_to_cpu(crq->error_indication.error_id),
3706 be16_to_cpu(crq->error_indication.error_cause));
3707
3708 if (be32_to_cpu(crq->error_indication.error_id))
3709 request_error_information(adapter, crq);
Nathan Fontenoted651a12017-05-03 14:04:38 -04003710
3711 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
3712 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
John Allen8cb31cf2017-05-26 10:30:37 -04003713 else
3714 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003715}
3716
Thomas Falconf8136142018-01-29 13:45:05 -06003717static int handle_change_mac_rsp(union ibmvnic_crq *crq,
3718 struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -06003719{
3720 struct net_device *netdev = adapter->netdev;
3721 struct device *dev = &adapter->vdev->dev;
3722 long rc;
3723
3724 rc = crq->change_mac_addr_rsp.rc.code;
3725 if (rc) {
3726 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
Thomas Falconf8136142018-01-29 13:45:05 -06003727 goto out;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003728 }
3729 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
3730 ETH_ALEN);
Thomas Falconf8136142018-01-29 13:45:05 -06003731out:
3732 complete(&adapter->fw_done);
3733 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003734}
3735
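/* Handle one REQUEST_CAPABILITY response. On PARTIALSUCCESS adopt the
 * server's counter-proposal (or the fallback MTU) and resend the whole
 * request set; once the final response arrives, move on to querying
 * IP offload support.
 */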
3736static void handle_request_cap_rsp(union ibmvnic_crq *crq,
3737 struct ibmvnic_adapter *adapter)
3738{
3739 struct device *dev = &adapter->vdev->dev;
3740 u64 *req_value;
3741 char *name;
3742
Thomas Falcon901e0402017-02-15 12:17:59 -06003743 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003744 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
3745 case REQ_TX_QUEUES:
3746 req_value = &adapter->req_tx_queues;
3747 name = "tx";
3748 break;
3749 case REQ_RX_QUEUES:
3750 req_value = &adapter->req_rx_queues;
3751 name = "rx";
3752 break;
3753 case REQ_RX_ADD_QUEUES:
3754 req_value = &adapter->req_rx_add_queues;
3755 name = "rx_add";
3756 break;
3757 case REQ_TX_ENTRIES_PER_SUBCRQ:
3758 req_value = &adapter->req_tx_entries_per_subcrq;
3759 name = "tx_entries_per_subcrq";
3760 break;
3761 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
3762 req_value = &adapter->req_rx_add_entries_per_subcrq;
3763 name = "rx_add_entries_per_subcrq";
3764 break;
3765 case REQ_MTU:
3766 req_value = &adapter->req_mtu;
3767 name = "mtu";
3768 break;
3769 case PROMISC_REQUESTED:
3770 req_value = &adapter->promisc;
3771 name = "promisc";
3772 break;
3773 default:
3774 dev_err(dev, "Got invalid cap request rsp %d\n",
3775 crq->request_capability.capability);
3776 return;
3777 }
3778
3779 switch (crq->request_capability_rsp.rc.code) {
3780 case SUCCESS:
3781 break;
3782 case PARTIALSUCCESS:
3783 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
3784 *req_value,
Thomas Falcon28f4d162017-02-15 10:32:11 -06003785 (long int)be64_to_cpu(crq->request_capability_rsp.
Thomas Falcon032c5e82015-12-21 11:26:06 -06003786 number), name);
John Allene7913802018-01-18 16:27:12 -06003787
3788 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
3789 REQ_MTU) {
3790 pr_err("mtu of %llu is not supported. Reverting.\n",
3791 *req_value);
3792 *req_value = adapter->fallback.mtu;
3793 } else {
3794 *req_value =
3795 be64_to_cpu(crq->request_capability_rsp.number);
3796 }
3797
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04003798 ibmvnic_send_req_caps(adapter, 1);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003799 return;
3800 default:
3801 dev_err(dev, "Error %d in request cap rsp\n",
3802 crq->request_capability_rsp.rc.code);
3803 return;
3804 }
3805
3806 /* Done receiving requested capabilities, query IP offload support */
Thomas Falcon901e0402017-02-15 12:17:59 -06003807 if (atomic_read(&adapter->running_cap_crqs) == 0) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003808 union ibmvnic_crq newcrq;
3809 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
3810 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
3811 &adapter->ip_offload_buf;
3812
Thomas Falcon249168a2017-02-15 12:18:00 -06003813 adapter->wait_capability = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003814 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
3815 buf_sz,
3816 DMA_FROM_DEVICE);
3817
3818 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
3819 if (!firmware_has_feature(FW_FEATURE_CMO))
3820 dev_err(dev, "Couldn't map offload buffer\n");
3821 return;
3822 }
3823
3824 memset(&newcrq, 0, sizeof(newcrq));
3825 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
3826 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
3827 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
3828 newcrq.query_ip_offload.ioba =
3829 cpu_to_be32(adapter->ip_offload_tok);
3830
3831 ibmvnic_send_crq(adapter, &newcrq);
3832 }
3833}
3834
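/* Unmap the login buffers and validate the server's reply. A non-zero
 * return code means the requested queues could not be granted, so a
 * renegotiation is flagged; inconsistent queue counts between the
 * request and response are fatal.
 */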
3835static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
3836 struct ibmvnic_adapter *adapter)
3837{
3838 struct device *dev = &adapter->vdev->dev;
John Allenc26eba02017-10-26 16:23:25 -05003839 struct net_device *netdev = adapter->netdev;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003840 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
3841 struct ibmvnic_login_buffer *login = adapter->login_buf;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003842 int i;
3843
3844 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
3845 DMA_BIDIRECTIONAL);
3846 dma_unmap_single(dev, adapter->login_rsp_buf_token,
3847 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
3848
John Allen498cd8e2016-04-06 11:49:55 -05003849 /* If the number of queues requested can't be allocated by the
3850 * server, the login response will return with code 1. We will need
3851 * to resend the login buffer with fewer queues requested.
3852 */
3853 if (login_rsp_crq->generic.rc.code) {
3854 adapter->renegotiate = true;
3855 complete(&adapter->init_done);
3856 return 0;
3857 }
3858
John Allenc26eba02017-10-26 16:23:25 -05003859 netdev->mtu = adapter->req_mtu - ETH_HLEN;
3860
Thomas Falcon032c5e82015-12-21 11:26:06 -06003861 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
3862 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
3863 netdev_dbg(adapter->netdev, "%016lx\n",
3864 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
3865 }
3866
3867 /* Sanity checks */
3868 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
3869 (be32_to_cpu(login->num_rxcomp_subcrqs) *
3870 adapter->req_rx_add_queues !=
3871 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
3872 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
3873 ibmvnic_remove(adapter->vdev);
3874 return -EIO;
3875 }
Thomas Falcona2c0f032018-02-21 18:18:30 -06003876 release_login_buffer(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003877 complete(&adapter->init_done);
3878
Thomas Falcon032c5e82015-12-21 11:26:06 -06003879 return 0;
3880}
3881
Thomas Falcon032c5e82015-12-21 11:26:06 -06003882static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
3883 struct ibmvnic_adapter *adapter)
3884{
3885 struct device *dev = &adapter->vdev->dev;
3886 long rc;
3887
3888 rc = crq->request_unmap_rsp.rc.code;
3889 if (rc)
3890 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
3891}
3892
3893static void handle_query_map_rsp(union ibmvnic_crq *crq,
3894 struct ibmvnic_adapter *adapter)
3895{
3896 struct net_device *netdev = adapter->netdev;
3897 struct device *dev = &adapter->vdev->dev;
3898 long rc;
3899
3900 rc = crq->query_map_rsp.rc.code;
3901 if (rc) {
3902 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
3903 return;
3904 }
3905 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
3906 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
3907 crq->query_map_rsp.free_pages);
3908}
3909
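/* Record a QUERY_CAPABILITY response in the matching adapter field;
 * when the last outstanding query completes, send the capability
 * requests derived from these min/max values.
 */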
3910static void handle_query_cap_rsp(union ibmvnic_crq *crq,
3911 struct ibmvnic_adapter *adapter)
3912{
3913 struct net_device *netdev = adapter->netdev;
3914 struct device *dev = &adapter->vdev->dev;
3915 long rc;
3916
Thomas Falcon901e0402017-02-15 12:17:59 -06003917 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003918 netdev_dbg(netdev, "Outstanding queries: %d\n",
Thomas Falcon901e0402017-02-15 12:17:59 -06003919 atomic_read(&adapter->running_cap_crqs));
Thomas Falcon032c5e82015-12-21 11:26:06 -06003920 rc = crq->query_capability.rc.code;
3921 if (rc) {
3922 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
3923 goto out;
3924 }
3925
3926 switch (be16_to_cpu(crq->query_capability.capability)) {
3927 case MIN_TX_QUEUES:
3928 adapter->min_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06003929 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003930 netdev_dbg(netdev, "min_tx_queues = %lld\n",
3931 adapter->min_tx_queues);
3932 break;
3933 case MIN_RX_QUEUES:
3934 adapter->min_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06003935 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003936 netdev_dbg(netdev, "min_rx_queues = %lld\n",
3937 adapter->min_rx_queues);
3938 break;
3939 case MIN_RX_ADD_QUEUES:
3940 adapter->min_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06003941 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003942 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
3943 adapter->min_rx_add_queues);
3944 break;
3945 case MAX_TX_QUEUES:
3946 adapter->max_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06003947 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003948 netdev_dbg(netdev, "max_tx_queues = %lld\n",
3949 adapter->max_tx_queues);
3950 break;
3951 case MAX_RX_QUEUES:
3952 adapter->max_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06003953 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003954 netdev_dbg(netdev, "max_rx_queues = %lld\n",
3955 adapter->max_rx_queues);
3956 break;
3957 case MAX_RX_ADD_QUEUES:
3958 adapter->max_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06003959 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003960 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
3961 adapter->max_rx_add_queues);
3962 break;
3963 case MIN_TX_ENTRIES_PER_SUBCRQ:
3964 adapter->min_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06003965 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003966 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
3967 adapter->min_tx_entries_per_subcrq);
3968 break;
3969 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
3970 adapter->min_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06003971 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003972 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
3973 adapter->min_rx_add_entries_per_subcrq);
3974 break;
3975 case MAX_TX_ENTRIES_PER_SUBCRQ:
3976 adapter->max_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06003977 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003978 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
3979 adapter->max_tx_entries_per_subcrq);
3980 break;
3981 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
3982 adapter->max_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06003983 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003984 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
3985 adapter->max_rx_add_entries_per_subcrq);
3986 break;
3987 case TCP_IP_OFFLOAD:
3988 adapter->tcp_ip_offload =
Thomas Falconde89e852016-03-01 10:20:09 -06003989 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003990 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
3991 adapter->tcp_ip_offload);
3992 break;
3993 case PROMISC_SUPPORTED:
3994 adapter->promisc_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06003995 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003996 netdev_dbg(netdev, "promisc_supported = %lld\n",
3997 adapter->promisc_supported);
3998 break;
3999 case MIN_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06004000 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06004001 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004002 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4003 break;
4004 case MAX_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06004005 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falconf39f0d12017-02-14 10:22:59 -06004006 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004007 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4008 break;
4009 case MAX_MULTICAST_FILTERS:
4010 adapter->max_multicast_filters =
Thomas Falconde89e852016-03-01 10:20:09 -06004011 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004012 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4013 adapter->max_multicast_filters);
4014 break;
4015 case VLAN_HEADER_INSERTION:
4016 adapter->vlan_header_insertion =
Thomas Falconde89e852016-03-01 10:20:09 -06004017 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004018 if (adapter->vlan_header_insertion)
4019 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4020 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4021 adapter->vlan_header_insertion);
4022 break;
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04004023 case RX_VLAN_HEADER_INSERTION:
4024 adapter->rx_vlan_header_insertion =
4025 be64_to_cpu(crq->query_capability.number);
4026 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4027 adapter->rx_vlan_header_insertion);
4028 break;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004029 case MAX_TX_SG_ENTRIES:
4030 adapter->max_tx_sg_entries =
Thomas Falconde89e852016-03-01 10:20:09 -06004031 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004032 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4033 adapter->max_tx_sg_entries);
4034 break;
4035 case RX_SG_SUPPORTED:
4036 adapter->rx_sg_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06004037 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004038 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4039 adapter->rx_sg_supported);
4040 break;
4041 case OPT_TX_COMP_SUB_QUEUES:
4042 adapter->opt_tx_comp_sub_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004043 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004044 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4045 adapter->opt_tx_comp_sub_queues);
4046 break;
4047 case OPT_RX_COMP_QUEUES:
4048 adapter->opt_rx_comp_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06004049 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004050 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4051 adapter->opt_rx_comp_queues);
4052 break;
4053 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4054 adapter->opt_rx_bufadd_q_per_rx_comp_q =
Thomas Falconde89e852016-03-01 10:20:09 -06004055 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004056 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4057 adapter->opt_rx_bufadd_q_per_rx_comp_q);
4058 break;
4059 case OPT_TX_ENTRIES_PER_SUBCRQ:
4060 adapter->opt_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004061 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004062 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4063 adapter->opt_tx_entries_per_subcrq);
4064 break;
4065 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4066 adapter->opt_rxba_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06004067 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004068 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4069 adapter->opt_rxba_entries_per_subcrq);
4070 break;
4071 case TX_RX_DESC_REQ:
4072 adapter->tx_rx_desc_req = crq->query_capability.number;
4073 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4074 adapter->tx_rx_desc_req);
4075 break;
4076
4077 default:
4078 netdev_err(netdev, "Got invalid cap rsp %d\n",
4079 crq->query_capability.capability);
4080 }
4081
4082out:
Thomas Falcon249168a2017-02-15 12:18:00 -06004083 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4084 adapter->wait_capability = false;
Nathan Fontenotd346b9b2017-04-25 15:01:04 -04004085 ibmvnic_send_req_caps(adapter, 0);
Thomas Falcon249168a2017-02-15 12:18:00 -06004086 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06004087}
4088
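/* Top-level CRQ dispatcher: transport events (partner init, migration,
 * failover) are handled inline, and command responses are fanned out
 * to the handle_*_rsp() helpers above.
 */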
Thomas Falcon032c5e82015-12-21 11:26:06 -06004089static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4090 struct ibmvnic_adapter *adapter)
4091{
4092 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4093 struct net_device *netdev = adapter->netdev;
4094 struct device *dev = &adapter->vdev->dev;
Murilo Fossa Vicentini993a82b2017-04-19 13:44:35 -04004095 u64 *u64_crq = (u64 *)crq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06004096 long rc;
4097
4098 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
Murilo Fossa Vicentini993a82b2017-04-19 13:44:35 -04004099 (unsigned long int)cpu_to_be64(u64_crq[0]),
4100 (unsigned long int)cpu_to_be64(u64_crq[1]));
Thomas Falcon032c5e82015-12-21 11:26:06 -06004101 switch (gen_crq->first) {
4102 case IBMVNIC_CRQ_INIT_RSP:
4103 switch (gen_crq->cmd) {
4104 case IBMVNIC_CRQ_INIT:
4105 dev_info(dev, "Partner initialized\n");
John Allen017892c12017-05-26 10:30:19 -04004106 adapter->from_passive_init = true;
4107 complete(&adapter->init_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004108 break;
4109 case IBMVNIC_CRQ_INIT_COMPLETE:
4110 dev_info(dev, "Partner initialization complete\n");
4111 send_version_xchg(adapter);
4112 break;
4113 default:
4114 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
4115 }
4116 return;
4117 case IBMVNIC_CRQ_XPORT_EVENT:
Nathan Fontenoted651a12017-05-03 14:04:38 -04004118 netif_carrier_off(netdev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004119 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
Nathan Fontenoted651a12017-05-03 14:04:38 -04004120 dev_info(dev, "Migrated, re-enabling adapter\n");
4121 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
Thomas Falcondfad09a2016-08-18 11:37:51 -05004122 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4123 dev_info(dev, "Backing device failover detected\n");
Nathan Fontenoted651a12017-05-03 14:04:38 -04004124 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004125 } else {
4126 /* The adapter lost the connection */
4127 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
4128 gen_crq->cmd);
Nathan Fontenoted651a12017-05-03 14:04:38 -04004129 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
Thomas Falcon032c5e82015-12-21 11:26:06 -06004130 }
4131 return;
4132 case IBMVNIC_CRQ_CMD_RSP:
4133 break;
4134 default:
4135 dev_err(dev, "Got an invalid msg type 0x%02x\n",
4136 gen_crq->first);
4137 return;
4138 }
4139
4140 switch (gen_crq->cmd) {
4141 case VERSION_EXCHANGE_RSP:
4142 rc = crq->version_exchange_rsp.rc.code;
4143 if (rc) {
4144 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4145 break;
4146 }
4147 dev_info(dev, "Partner protocol version is %d\n",
4148 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		adapter->fw_done_rc = crq->request_map_rsp.rc.code;
		complete(&adapter->fw_done);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	case GET_VPD_SIZE_RSP:
		handle_vpd_size_rsp(crq, adapter);
		break;
	case GET_VPD_RSP:
		handle_vpd_rsp(crq, adapter);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

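/* Top half of CRQ interrupt handling: acknowledge the interrupt by
 * scheduling the tasklet and return immediately, so that all message
 * processing runs in softirq context rather than with interrupts
 * disabled. The flow, roughly:
 *
 *	device irq -> ibmvnic_interrupt() -> tasklet_schedule()
 *		   -> ibmvnic_tasklet() -> ibmvnic_next_crq()
 *		   -> ibmvnic_handle_crq()
 */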
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

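/* Bottom half of CRQ processing: repeatedly pull valid descriptors off
 * the CRQ and dispatch them through ibmvnic_handle_crq(). While
 * capability responses are still outstanding (wait_capability), the
 * loop stays in the tasklet so every response is consumed before it
 * exits.
 */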
static void ibmvnic_tasklet(unsigned long data)
{
	struct ibmvnic_adapter *adapter = (struct ibmvnic_adapter *)data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capability responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capability CRQs were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

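/* Ask the hypervisor to re-enable the CRQ after a transport event.
 * H_ENABLE_CRQ can legitimately report busy while the request is being
 * worked on, so the hcall is simply retried until it returns something
 * other than a busy indication.
 */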
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

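/* Reset the CRQ by closing and re-registering it with the hypervisor:
 * H_FREE_CRQ to drop the old registration, a memset to discard stale
 * descriptors, then H_REG_CRQ with the same DMA token. H_CLOSED from
 * H_REG_CRQ is not fatal here; it only means the partner side has not
 * come up yet.
 */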
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

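/* Undo init_crq_queue() in reverse order: the irq first so nothing new
 * can schedule the tasklet, then the tasklet itself, the hypervisor
 * registration, the DMA mapping, and finally the queue page. crq->msgs
 * doubles as the "queue is initialized" flag, so it is cleared last.
 */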
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
}

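/* Allocate and register the CRQ: a single zeroed page of descriptors is
 * DMA mapped and handed to the hypervisor via H_REG_CRQ. H_RESOURCE
 * usually means a previous owner (e.g. the kernel we kexec'd from)
 * still holds the queue, so one close/reopen cycle is attempted before
 * giving up. H_CLOSED is tolerated (the partner just is not ready yet)
 * and setup continues; any other failure unwinds through the error
 * labels at the bottom.
 */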
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

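/* One full init handshake with the VNIC server: bring the CRQ up (or
 * just reset/re-enable it when called from the reset path), send the
 * init request, and sleep on init_done, which the CRQ tasklet completes
 * once the handshake responses have arrived. On a reset, the saved
 * queue counts decide whether the sub-CRQs can be reused as-is or must
 * be released and reallocated.
 */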
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	u64 old_num_rx_queues, old_num_tx_queues;
	int rc;

	if (adapter->resetting && !adapter->wait_for_reset) {
		rc = ibmvnic_reset_crq(adapter);
		if (!rc)
			rc = vio_enable_interrupts(adapter->vdev);
	} else {
		rc = init_crq_queue(adapter);
	}

	if (rc) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	adapter->from_passive_init = false;

	old_num_rx_queues = adapter->req_rx_queues;
	old_num_tx_queues = adapter->req_tx_queues;

	init_completion(&adapter->init_done);
	adapter->init_done_rc = 0;
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->init_done_rc) {
		release_crq_queue(adapter);
		return adapter->init_done_rc;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	if (adapter->resetting && !adapter->wait_for_reset) {
		if (adapter->req_rx_queues != old_num_rx_queues ||
		    adapter->req_tx_queues != old_num_tx_queues) {
			release_sub_crqs(adapter, 0);
			rc = init_sub_crqs(adapter);
		} else {
			rc = reset_sub_crq_queues(adapter);
		}
	} else {
		rc = init_sub_crqs(adapter);
	}

	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(dev, "Failed to initialize sub crq irqs\n");
		release_crq_queue(adapter);
		return rc;
	}

	rc = init_stats_buffers(adapter);
	if (rc)
		return rc;

	rc = init_stats_token(adapter);
	if (rc)
		return rc;

	return rc;
}

static struct device_attribute dev_attr_failover;

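/* Probe: fetch the MAC address the hypervisor exported for this device,
 * allocate the net_device, run the init handshake (retrying while the
 * handshake reports EAGAIN), and only then register with the networking
 * core. The carrier is forced off up front; link state is reported
 * later through CRQ indications.
 */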
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return -EINVAL;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->errors);
	spin_lock_init(&adapter->error_list_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	mutex_init(&adapter->reset_lock);
	mutex_init(&adapter->rwi_lock);
	adapter->resetting = false;

	adapter->mac_change_pending = false;

	do {
		rc = ibmvnic_init(adapter);
		if (rc && rc != EAGAIN)
			goto ibmvnic_init_fail;
	} while (rc == EAGAIN);

	netdev->mtu = adapter->req_mtu - ETH_HLEN;
	netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
	netdev->max_mtu = adapter->max_mtu - ETH_HLEN;

	rc = device_create_file(&dev->dev, &dev_attr_failover);
	if (rc)
		goto ibmvnic_init_fail;

	netif_carrier_off(netdev);
	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto ibmvnic_register_fail;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;

	adapter->wait_for_reset = false;

	return 0;

ibmvnic_register_fail:
	device_remove_file(&dev->dev, &dev_attr_failover);

ibmvnic_init_fail:
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);
	free_netdev(netdev);

	return rc;
}

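/* Tear down in roughly the reverse order of ibmvnic_probe(), holding
 * reset_lock so a concurrent reset cannot touch resources while they
 * are being freed.
 */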
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	unregister_netdev(netdev);
	mutex_lock(&adapter->reset_lock);

	release_resources(adapter);
	release_sub_crqs(adapter, 1);
	release_crq_queue(adapter);

	release_stats_token(adapter);
	release_stats_buffers(adapter);

	adapter->state = VNIC_REMOVED;

	mutex_unlock(&adapter->reset_lock);
	device_remove_file(&dev->dev, &dev_attr_failover);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

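/* sysfs knob that lets an administrator force a failover to the backup
 * backing device: obtain the session token with H_GET_SESSION_TOKEN and
 * hand it back via H_SESSION_ERR_DETECTED. Illustrative usage from user
 * space (the unit address in the path is an assumed example; the real
 * one depends on the device):
 *
 *	echo 1 > /sys/devices/vio/30000003/failover
 */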
static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	__be64 session_token;
	long rc;

	if (!sysfs_streq(buf, "1"))
		return -EINVAL;

	rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
			 H_GET_SESSION_TOKEN, 0, 0, 0);
	if (rc) {
		netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	session_token = (__be64)retbuf[0];
	netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
		   be64_to_cpu(session_token));
	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_SESSION_ERR_DETECTED, session_token, 0, 0);
	if (rc) {
		netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
			   rc);
		return -EINVAL;
	}

	return count;
}

static DEVICE_ATTR_WO(failover);

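/* Estimate the IO entitlement (IOMMU-mappable memory) this device
 * wants: one page for the CRQ, the statistics buffer, four pages per
 * sub-CRQ, plus every long-term-mapped rx buffer. As a rough worked
 * example (numbers assumed, not taken from the driver): 4 tx + 4 rx
 * queues and 4 rx pools of 1024 buffers of 2 KiB each come to
 * 33 pages + 4 * 1024 * 2 KiB, i.e. a little over 8 MiB.
 */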
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

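/* Power-management resume hook: if the adapter was open when the
 * partition was suspended, kick the CRQ tasklet once so any messages
 * that arrived in the meantime are drained.
 */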
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (adapter->state != VNIC_OPEN)
		return 0;

	tasklet_schedule(&adapter->tasklet);

	return 0;
}

static const struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);