blob: 0f705e68755fbc917a44d8ff253044ecf0690142 [file] [log] [blame]
Thomas Falcon032c5e82015-12-21 11:26:06 -06001/**************************************************************************/
2/* */
3/* IBM System i and System p Virtual NIC Device Driver */
4/* Copyright (C) 2014 IBM Corp. */
5/* Santiago Leon (santi_leon@yahoo.com) */
6/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
7/* John Allen (jallen@linux.vnet.ibm.com) */
8/* */
9/* This program is free software; you can redistribute it and/or modify */
10/* it under the terms of the GNU General Public License as published by */
11/* the Free Software Foundation; either version 2 of the License, or */
12/* (at your option) any later version. */
13/* */
14/* This program is distributed in the hope that it will be useful, */
15/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
16/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
17/* GNU General Public License for more details. */
18/* */
19/* You should have received a copy of the GNU General Public License */
20/* along with this program. */
21/* */
22/* This module contains the implementation of a virtual ethernet device */
23/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
24/* option of the RS/6000 Platform Architecture to interface with virtual */
25/* ethernet NICs that are presented to the partition by the hypervisor. */
26/* */
27/* Messages are passed between the VNIC driver and the VNIC server using */
28/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
29/* issue and receive commands that initiate communication with the server */
30/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
31/* are used by the driver to notify the server that a packet is */
32/* ready for transmission or that a buffer has been added to receive a */
33/* packet. Subsequently, sCRQs are used by the server to notify the */
34/* driver that a packet transmission has been completed or that a packet */
35/* has been received and placed in a waiting buffer. */
36/* */
37/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
38/* which skbs are DMA mapped and immediately unmapped when the transmit */
39/* or receive has been completed, the VNIC driver is required to use */
40/* "long term mapping". This entails that large, continuous DMA mapped */
41/* buffers are allocated on driver initialization and these buffers are */
42/* then continuously reused to pass skbs to and from the VNIC server. */
43/* */
44/**************************************************************************/
45
46#include <linux/module.h>
47#include <linux/moduleparam.h>
48#include <linux/types.h>
49#include <linux/errno.h>
50#include <linux/completion.h>
51#include <linux/ioport.h>
52#include <linux/dma-mapping.h>
53#include <linux/kernel.h>
54#include <linux/netdevice.h>
55#include <linux/etherdevice.h>
56#include <linux/skbuff.h>
57#include <linux/init.h>
58#include <linux/delay.h>
59#include <linux/mm.h>
60#include <linux/ethtool.h>
61#include <linux/proc_fs.h>
62#include <linux/in.h>
63#include <linux/ip.h>
Thomas Falconad7775d2016-04-01 17:20:34 -050064#include <linux/ipv6.h>
Thomas Falcon032c5e82015-12-21 11:26:06 -060065#include <linux/irq.h>
66#include <linux/kthread.h>
67#include <linux/seq_file.h>
Thomas Falcon032c5e82015-12-21 11:26:06 -060068#include <linux/interrupt.h>
69#include <net/net_namespace.h>
70#include <asm/hvcall.h>
71#include <linux/atomic.h>
72#include <asm/vio.h>
73#include <asm/iommu.h>
74#include <linux/uaccess.h>
75#include <asm/firmware.h>
Thomas Falcon65dc6892016-07-06 15:35:18 -050076#include <linux/workqueue.h>
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -040077#include <linux/if_vlan.h>
Thomas Falcon032c5e82015-12-21 11:26:06 -060078
79#include "ibmvnic.h"
80
81static const char ibmvnic_driver_name[] = "ibmvnic";
82static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
83
84MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
85MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
86MODULE_LICENSE("GPL");
87MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
88
89static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
90static int ibmvnic_remove(struct vio_dev *);
91static void release_sub_crqs(struct ibmvnic_adapter *);
92static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
93static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
94static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
95static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
96static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
97 union sub_crq *sub_crq);
Thomas Falconad7775d2016-04-01 17:20:34 -050098static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
Thomas Falcon032c5e82015-12-21 11:26:06 -060099static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
100static int enable_scrq_irq(struct ibmvnic_adapter *,
101 struct ibmvnic_sub_crq_queue *);
102static int disable_scrq_irq(struct ibmvnic_adapter *,
103 struct ibmvnic_sub_crq_queue *);
104static int pending_scrq(struct ibmvnic_adapter *,
105 struct ibmvnic_sub_crq_queue *);
106static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
107 struct ibmvnic_sub_crq_queue *);
108static int ibmvnic_poll(struct napi_struct *napi, int data);
109static void send_map_query(struct ibmvnic_adapter *adapter);
110static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
111static void send_request_unmap(struct ibmvnic_adapter *, u8);
John Allenbd0b6722017-03-17 17:13:40 -0500112static void send_login(struct ibmvnic_adapter *adapter);
113static void send_cap_queries(struct ibmvnic_adapter *adapter);
114static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
John Allenea5509f2017-03-17 17:13:43 -0500115static int ibmvnic_init(struct ibmvnic_adapter *);
Nathan Fontenotf9928872017-03-30 02:48:54 -0400116static void release_crq_queue(struct ibmvnic_adapter *);
Thomas Falcon032c5e82015-12-21 11:26:06 -0600117
118struct ibmvnic_stat {
119 char name[ETH_GSTRING_LEN];
120 int offset;
121};
122
123#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
124 offsetof(struct ibmvnic_statistics, stat))
125#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
126
127static const struct ibmvnic_stat ibmvnic_stats[] = {
128 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
129 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
130 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
131 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
132 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
133 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
134 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
135 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
136 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
137 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
138 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
139 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
140 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
141 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
142 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
143 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
144 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
145 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
146 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
147 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
148 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
149 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
150};
151
152static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
153 unsigned long length, unsigned long *number,
154 unsigned long *irq)
155{
156 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
157 long rc;
158
159 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
160 *number = retbuf[0];
161 *irq = retbuf[1];
162
163 return rc;
164}
165
Thomas Falcon032c5e82015-12-21 11:26:06 -0600166static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
167 struct ibmvnic_long_term_buff *ltb, int size)
168{
169 struct device *dev = &adapter->vdev->dev;
170
171 ltb->size = size;
172 ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
173 GFP_KERNEL);
174
175 if (!ltb->buff) {
176 dev_err(dev, "Couldn't alloc long term buffer\n");
177 return -ENOMEM;
178 }
179 ltb->map_id = adapter->map_id;
180 adapter->map_id++;
Nathan Fontenotdb5d0b52017-02-10 13:45:05 -0500181
182 init_completion(&adapter->fw_done);
Thomas Falcon032c5e82015-12-21 11:26:06 -0600183 send_request_map(adapter, ltb->addr,
184 ltb->size, ltb->map_id);
Thomas Falcon032c5e82015-12-21 11:26:06 -0600185 wait_for_completion(&adapter->fw_done);
186 return 0;
187}
188
189static void free_long_term_buff(struct ibmvnic_adapter *adapter,
190 struct ibmvnic_long_term_buff *ltb)
191{
192 struct device *dev = &adapter->vdev->dev;
193
Nathan Fontenotc657e322017-03-30 02:49:06 -0400194 if (!ltb->buff)
195 return;
196
Nathan Fontenoted651a12017-05-03 14:04:38 -0400197 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
198 adapter->reset_reason != VNIC_RESET_MOBILITY)
Thomas Falcondfad09a2016-08-18 11:37:51 -0500199 send_request_unmap(adapter, ltb->map_id);
Brian King59af56c2017-04-19 13:44:41 -0400200 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
Thomas Falcon032c5e82015-12-21 11:26:06 -0600201}
202
Thomas Falcon032c5e82015-12-21 11:26:06 -0600203static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
204 struct ibmvnic_rx_pool *pool)
205{
206 int count = pool->size - atomic_read(&pool->available);
207 struct device *dev = &adapter->vdev->dev;
208 int buffers_added = 0;
209 unsigned long lpar_rc;
210 union sub_crq sub_crq;
211 struct sk_buff *skb;
212 unsigned int offset;
213 dma_addr_t dma_addr;
214 unsigned char *dst;
215 u64 *handle_array;
216 int shift = 0;
217 int index;
218 int i;
219
220 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
221 be32_to_cpu(adapter->login_rsp_buf->
222 off_rxadd_subcrqs));
223
224 for (i = 0; i < count; ++i) {
225 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
226 if (!skb) {
227 dev_err(dev, "Couldn't replenish rx buff\n");
228 adapter->replenish_no_mem++;
229 break;
230 }
231
232 index = pool->free_map[pool->next_free];
233
234 if (pool->rx_buff[index].skb)
235 dev_err(dev, "Inconsistent free_map!\n");
236
237 /* Copy the skb to the long term mapped DMA buffer */
238 offset = index * pool->buff_size;
239 dst = pool->long_term_buff.buff + offset;
240 memset(dst, 0, pool->buff_size);
241 dma_addr = pool->long_term_buff.addr + offset;
242 pool->rx_buff[index].data = dst;
243
244 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
245 pool->rx_buff[index].dma = dma_addr;
246 pool->rx_buff[index].skb = skb;
247 pool->rx_buff[index].pool_index = pool->index;
248 pool->rx_buff[index].size = pool->buff_size;
249
250 memset(&sub_crq, 0, sizeof(sub_crq));
251 sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
252 sub_crq.rx_add.correlator =
253 cpu_to_be64((u64)&pool->rx_buff[index]);
254 sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
255 sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
256
257 /* The length field of the sCRQ is defined to be 24 bits so the
258 * buffer size needs to be left shifted by a byte before it is
259 * converted to big endian to prevent the last byte from being
260 * truncated.
261 */
262#ifdef __LITTLE_ENDIAN__
263 shift = 8;
264#endif
265 sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
266
267 lpar_rc = send_subcrq(adapter, handle_array[pool->index],
268 &sub_crq);
269 if (lpar_rc != H_SUCCESS)
270 goto failure;
271
272 buffers_added++;
273 adapter->replenish_add_buff_success++;
274 pool->next_free = (pool->next_free + 1) % pool->size;
275 }
276 atomic_add(buffers_added, &pool->available);
277 return;
278
279failure:
280 dev_info(dev, "replenish pools failure\n");
281 pool->free_map[pool->next_free] = index;
282 pool->rx_buff[index].skb = NULL;
283 if (!dma_mapping_error(dev, dma_addr))
284 dma_unmap_single(dev, dma_addr, pool->buff_size,
285 DMA_FROM_DEVICE);
286
287 dev_kfree_skb_any(skb);
288 adapter->replenish_add_buff_failure++;
289 atomic_add(buffers_added, &pool->available);
290}
291
292static void replenish_pools(struct ibmvnic_adapter *adapter)
293{
294 int i;
295
Thomas Falcon032c5e82015-12-21 11:26:06 -0600296 adapter->replenish_task_cycles++;
297 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
298 i++) {
299 if (adapter->rx_pool[i].active)
300 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
301 }
302}
303
Nathan Fontenot7bbc27a2017-03-30 02:49:23 -0400304static void release_stats_token(struct ibmvnic_adapter *adapter)
305{
306 struct device *dev = &adapter->vdev->dev;
307
308 if (!adapter->stats_token)
309 return;
310
311 dma_unmap_single(dev, adapter->stats_token,
312 sizeof(struct ibmvnic_statistics),
313 DMA_FROM_DEVICE);
314 adapter->stats_token = 0;
315}
316
317static int init_stats_token(struct ibmvnic_adapter *adapter)
318{
319 struct device *dev = &adapter->vdev->dev;
320 dma_addr_t stok;
321
322 stok = dma_map_single(dev, &adapter->stats,
323 sizeof(struct ibmvnic_statistics),
324 DMA_FROM_DEVICE);
325 if (dma_mapping_error(dev, stok)) {
326 dev_err(dev, "Couldn't map stats buffer\n");
327 return -1;
328 }
329
330 adapter->stats_token = stok;
331 return 0;
332}
333
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400334static void release_rx_pools(struct ibmvnic_adapter *adapter)
Thomas Falcon032c5e82015-12-21 11:26:06 -0600335{
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400336 struct ibmvnic_rx_pool *rx_pool;
337 int rx_scrqs;
338 int i, j;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600339
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400340 if (!adapter->rx_pool)
Thomas Falcon032c5e82015-12-21 11:26:06 -0600341 return;
342
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400343 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
344 for (i = 0; i < rx_scrqs; i++) {
345 rx_pool = &adapter->rx_pool[i];
346
347 kfree(rx_pool->free_map);
348 free_long_term_buff(adapter, &rx_pool->long_term_buff);
349
350 if (!rx_pool->rx_buff)
Nathan Fontenote0ebe9422017-05-03 14:04:50 -0400351 continue;
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400352
353 for (j = 0; j < rx_pool->size; j++) {
354 if (rx_pool->rx_buff[j].skb) {
355 dev_kfree_skb_any(rx_pool->rx_buff[i].skb);
356 rx_pool->rx_buff[i].skb = NULL;
357 }
Thomas Falcon032c5e82015-12-21 11:26:06 -0600358 }
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400359
360 kfree(rx_pool->rx_buff);
Thomas Falcon032c5e82015-12-21 11:26:06 -0600361 }
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400362
363 kfree(adapter->rx_pool);
364 adapter->rx_pool = NULL;
365}
366
367static int init_rx_pools(struct net_device *netdev)
368{
369 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
370 struct device *dev = &adapter->vdev->dev;
371 struct ibmvnic_rx_pool *rx_pool;
372 int rxadd_subcrqs;
373 u64 *size_array;
374 int i, j;
375
376 rxadd_subcrqs =
377 be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
378 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
379 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
380
381 adapter->rx_pool = kcalloc(rxadd_subcrqs,
382 sizeof(struct ibmvnic_rx_pool),
383 GFP_KERNEL);
384 if (!adapter->rx_pool) {
385 dev_err(dev, "Failed to allocate rx pools\n");
386 return -1;
387 }
388
389 for (i = 0; i < rxadd_subcrqs; i++) {
390 rx_pool = &adapter->rx_pool[i];
391
392 netdev_dbg(adapter->netdev,
393 "Initializing rx_pool %d, %lld buffs, %lld bytes each\n",
394 i, adapter->req_rx_add_entries_per_subcrq,
395 be64_to_cpu(size_array[i]));
396
397 rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
398 rx_pool->index = i;
399 rx_pool->buff_size = be64_to_cpu(size_array[i]);
400 rx_pool->active = 1;
401
402 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
403 GFP_KERNEL);
404 if (!rx_pool->free_map) {
405 release_rx_pools(adapter);
406 return -1;
407 }
408
409 rx_pool->rx_buff = kcalloc(rx_pool->size,
410 sizeof(struct ibmvnic_rx_buff),
411 GFP_KERNEL);
412 if (!rx_pool->rx_buff) {
413 dev_err(dev, "Couldn't alloc rx buffers\n");
414 release_rx_pools(adapter);
415 return -1;
416 }
417
418 if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
419 rx_pool->size * rx_pool->buff_size)) {
420 release_rx_pools(adapter);
421 return -1;
422 }
423
424 for (j = 0; j < rx_pool->size; ++j)
425 rx_pool->free_map[j] = j;
426
427 atomic_set(&rx_pool->available, 0);
428 rx_pool->next_alloc = 0;
429 rx_pool->next_free = 0;
430 }
431
432 return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600433}
434
Nathan Fontenotc657e322017-03-30 02:49:06 -0400435static void release_tx_pools(struct ibmvnic_adapter *adapter)
436{
437 struct ibmvnic_tx_pool *tx_pool;
438 int i, tx_scrqs;
439
440 if (!adapter->tx_pool)
441 return;
442
443 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
444 for (i = 0; i < tx_scrqs; i++) {
445 tx_pool = &adapter->tx_pool[i];
446 kfree(tx_pool->tx_buff);
447 free_long_term_buff(adapter, &tx_pool->long_term_buff);
448 kfree(tx_pool->free_map);
449 }
450
451 kfree(adapter->tx_pool);
452 adapter->tx_pool = NULL;
453}
454
455static int init_tx_pools(struct net_device *netdev)
456{
457 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
458 struct device *dev = &adapter->vdev->dev;
459 struct ibmvnic_tx_pool *tx_pool;
460 int tx_subcrqs;
461 int i, j;
462
463 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
464 adapter->tx_pool = kcalloc(tx_subcrqs,
465 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
466 if (!adapter->tx_pool)
467 return -1;
468
469 for (i = 0; i < tx_subcrqs; i++) {
470 tx_pool = &adapter->tx_pool[i];
471 tx_pool->tx_buff = kcalloc(adapter->req_tx_entries_per_subcrq,
472 sizeof(struct ibmvnic_tx_buff),
473 GFP_KERNEL);
474 if (!tx_pool->tx_buff) {
475 dev_err(dev, "tx pool buffer allocation failed\n");
476 release_tx_pools(adapter);
477 return -1;
478 }
479
480 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
481 adapter->req_tx_entries_per_subcrq *
482 adapter->req_mtu)) {
483 release_tx_pools(adapter);
484 return -1;
485 }
486
487 tx_pool->free_map = kcalloc(adapter->req_tx_entries_per_subcrq,
488 sizeof(int), GFP_KERNEL);
489 if (!tx_pool->free_map) {
490 release_tx_pools(adapter);
491 return -1;
492 }
493
494 for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
495 tx_pool->free_map[j] = j;
496
497 tx_pool->consumer_index = 0;
498 tx_pool->producer_index = 0;
499 }
500
501 return 0;
502}
503
Nathan Fontenot661a2622017-04-19 13:44:58 -0400504static void release_error_buffers(struct ibmvnic_adapter *adapter)
505{
506 struct device *dev = &adapter->vdev->dev;
507 struct ibmvnic_error_buff *error_buff, *tmp;
508 unsigned long flags;
509
510 spin_lock_irqsave(&adapter->error_list_lock, flags);
511 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list) {
512 list_del(&error_buff->list);
513 dma_unmap_single(dev, error_buff->dma, error_buff->len,
514 DMA_FROM_DEVICE);
515 kfree(error_buff->buff);
516 kfree(error_buff);
517 }
518 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
519}
520
John Allend944c3d62017-05-26 10:30:13 -0400521static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
522{
523 int i;
524
525 if (adapter->napi_enabled)
526 return;
527
528 for (i = 0; i < adapter->req_rx_queues; i++)
529 napi_enable(&adapter->napi[i]);
530
531 adapter->napi_enabled = true;
532}
533
534static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
535{
536 int i;
537
538 if (!adapter->napi_enabled)
539 return;
540
541 for (i = 0; i < adapter->req_rx_queues; i++)
542 napi_disable(&adapter->napi[i]);
543
544 adapter->napi_enabled = false;
545}
546
John Allena57a5d22017-03-17 17:13:41 -0500547static int ibmvnic_login(struct net_device *netdev)
Thomas Falcon032c5e82015-12-21 11:26:06 -0600548{
549 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
John Allenbd0b6722017-03-17 17:13:40 -0500550 unsigned long timeout = msecs_to_jiffies(30000);
Thomas Falcon032c5e82015-12-21 11:26:06 -0600551 struct device *dev = &adapter->vdev->dev;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600552
John Allenbd0b6722017-03-17 17:13:40 -0500553 do {
554 if (adapter->renegotiate) {
555 adapter->renegotiate = false;
Nathan Fontenotb5108882017-03-30 02:49:18 -0400556 release_sub_crqs(adapter);
John Allenbd0b6722017-03-17 17:13:40 -0500557
558 reinit_completion(&adapter->init_done);
559 send_cap_queries(adapter);
560 if (!wait_for_completion_timeout(&adapter->init_done,
561 timeout)) {
562 dev_err(dev, "Capabilities query timeout\n");
563 return -1;
564 }
565 }
566
567 reinit_completion(&adapter->init_done);
568 send_login(adapter);
569 if (!wait_for_completion_timeout(&adapter->init_done,
570 timeout)) {
571 dev_err(dev, "Login timeout\n");
572 return -1;
573 }
574 } while (adapter->renegotiate);
575
John Allena57a5d22017-03-17 17:13:41 -0500576 return 0;
577}
578
Nathan Fontenot1b8955e2017-03-30 02:49:29 -0400579static void release_resources(struct ibmvnic_adapter *adapter)
580{
Nathan Fontenotc7bac002017-05-03 14:04:44 -0400581 int i;
582
Nathan Fontenot1b8955e2017-03-30 02:49:29 -0400583 release_tx_pools(adapter);
584 release_rx_pools(adapter);
585
Nathan Fontenot1b8955e2017-03-30 02:49:29 -0400586 release_stats_token(adapter);
Nathan Fontenot661a2622017-04-19 13:44:58 -0400587 release_error_buffers(adapter);
Nathan Fontenotc7bac002017-05-03 14:04:44 -0400588
589 if (adapter->napi) {
590 for (i = 0; i < adapter->req_rx_queues; i++) {
591 if (&adapter->napi[i])
592 netif_napi_del(&adapter->napi[i]);
593 }
594 }
Nathan Fontenot1b8955e2017-03-30 02:49:29 -0400595}
596
Nathan Fontenot53da09e2017-04-21 15:39:04 -0400597static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
598{
599 struct net_device *netdev = adapter->netdev;
600 unsigned long timeout = msecs_to_jiffies(30000);
601 union ibmvnic_crq crq;
602 bool resend;
603 int rc;
604
Nathan Fontenot53da09e2017-04-21 15:39:04 -0400605 netdev_err(netdev, "setting link state %d\n", link_state);
606 memset(&crq, 0, sizeof(crq));
607 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
608 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
609 crq.logical_link_state.link_state = link_state;
610
611 do {
612 resend = false;
613
614 reinit_completion(&adapter->init_done);
615 rc = ibmvnic_send_crq(adapter, &crq);
616 if (rc) {
617 netdev_err(netdev, "Failed to set link state\n");
618 return rc;
619 }
620
621 if (!wait_for_completion_timeout(&adapter->init_done,
622 timeout)) {
623 netdev_err(netdev, "timeout setting link state\n");
624 return -1;
625 }
626
627 if (adapter->init_done_rc == 1) {
628 /* Partuial success, delay and re-send */
629 mdelay(1000);
630 resend = true;
631 }
632 } while (resend);
633
634 return 0;
635}
636
Thomas Falcon7f3c6e62017-04-21 15:38:40 -0400637static int set_real_num_queues(struct net_device *netdev)
638{
639 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
640 int rc;
641
642 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
643 if (rc) {
644 netdev_err(netdev, "failed to set the number of tx queues\n");
645 return rc;
646 }
647
648 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
649 if (rc)
650 netdev_err(netdev, "failed to set the number of rx queues\n");
651
652 return rc;
653}
654
Nathan Fontenotbfc32f22017-05-03 14:04:26 -0400655static int init_resources(struct ibmvnic_adapter *adapter)
John Allena57a5d22017-03-17 17:13:41 -0500656{
Nathan Fontenotbfc32f22017-05-03 14:04:26 -0400657 struct net_device *netdev = adapter->netdev;
658 int i, rc;
John Allena57a5d22017-03-17 17:13:41 -0500659
Thomas Falcon7f3c6e62017-04-21 15:38:40 -0400660 rc = set_real_num_queues(netdev);
661 if (rc)
662 return rc;
John Allenbd0b6722017-03-17 17:13:40 -0500663
664 rc = init_sub_crq_irqs(adapter);
665 if (rc) {
Nathan Fontenotbfc32f22017-05-03 14:04:26 -0400666 netdev_err(netdev, "failed to initialize sub crq irqs\n");
John Allenbd0b6722017-03-17 17:13:40 -0500667 return -1;
668 }
669
Nathan Fontenot5d5e84e2017-04-21 15:38:58 -0400670 rc = init_stats_token(adapter);
671 if (rc)
672 return rc;
673
Thomas Falcon032c5e82015-12-21 11:26:06 -0600674 adapter->map_id = 1;
675 adapter->napi = kcalloc(adapter->req_rx_queues,
676 sizeof(struct napi_struct), GFP_KERNEL);
677 if (!adapter->napi)
Nathan Fontenotbfc32f22017-05-03 14:04:26 -0400678 return -ENOMEM;
679
Thomas Falcon032c5e82015-12-21 11:26:06 -0600680 for (i = 0; i < adapter->req_rx_queues; i++) {
681 netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
682 NAPI_POLL_WEIGHT);
Thomas Falcon032c5e82015-12-21 11:26:06 -0600683 }
Thomas Falcon032c5e82015-12-21 11:26:06 -0600684
Thomas Falcon032c5e82015-12-21 11:26:06 -0600685 send_map_query(adapter);
Nathan Fontenot0ffe2cb2017-03-30 02:49:12 -0400686
687 rc = init_rx_pools(netdev);
688 if (rc)
Nathan Fontenotbfc32f22017-05-03 14:04:26 -0400689 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600690
Nathan Fontenotc657e322017-03-30 02:49:06 -0400691 rc = init_tx_pools(netdev);
Nathan Fontenotbfc32f22017-05-03 14:04:26 -0400692 return rc;
693}
694
Nathan Fontenoted651a12017-05-03 14:04:38 -0400695static int __ibmvnic_open(struct net_device *netdev)
Nathan Fontenotbfc32f22017-05-03 14:04:26 -0400696{
697 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Nathan Fontenoted651a12017-05-03 14:04:38 -0400698 enum vnic_state prev_state = adapter->state;
Nathan Fontenotbfc32f22017-05-03 14:04:26 -0400699 int i, rc;
700
Nathan Fontenot90c80142017-05-03 14:04:32 -0400701 adapter->state = VNIC_OPENING;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600702 replenish_pools(adapter);
John Allend944c3d62017-05-26 10:30:13 -0400703 ibmvnic_napi_enable(adapter);
Nathan Fontenotbfc32f22017-05-03 14:04:26 -0400704
Thomas Falcon032c5e82015-12-21 11:26:06 -0600705 /* We're ready to receive frames, enable the sub-crq interrupts and
706 * set the logical link state to up
707 */
Nathan Fontenoted651a12017-05-03 14:04:38 -0400708 for (i = 0; i < adapter->req_rx_queues; i++) {
709 if (prev_state == VNIC_CLOSED)
710 enable_irq(adapter->rx_scrq[i]->irq);
711 else
712 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
713 }
Thomas Falcon032c5e82015-12-21 11:26:06 -0600714
Nathan Fontenoted651a12017-05-03 14:04:38 -0400715 for (i = 0; i < adapter->req_tx_queues; i++) {
716 if (prev_state == VNIC_CLOSED)
717 enable_irq(adapter->tx_scrq[i]->irq);
718 else
719 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
720 }
Thomas Falcon032c5e82015-12-21 11:26:06 -0600721
Nathan Fontenot53da09e2017-04-21 15:39:04 -0400722 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
Nathan Fontenotbfc32f22017-05-03 14:04:26 -0400723 if (rc) {
724 for (i = 0; i < adapter->req_rx_queues; i++)
725 napi_disable(&adapter->napi[i]);
726 release_resources(adapter);
Nathan Fontenoted651a12017-05-03 14:04:38 -0400727 return rc;
Nathan Fontenotbfc32f22017-05-03 14:04:26 -0400728 }
Thomas Falcon032c5e82015-12-21 11:26:06 -0600729
Nathan Fontenoted651a12017-05-03 14:04:38 -0400730 netif_tx_start_all_queues(netdev);
731
732 if (prev_state == VNIC_CLOSED) {
733 for (i = 0; i < adapter->req_rx_queues; i++)
734 napi_schedule(&adapter->napi[i]);
735 }
736
737 adapter->state = VNIC_OPEN;
738 return rc;
739}
740
741static int ibmvnic_open(struct net_device *netdev)
742{
743 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
744 int rc;
745
746 mutex_lock(&adapter->reset_lock);
747
748 if (adapter->state != VNIC_CLOSED) {
749 rc = ibmvnic_login(netdev);
750 if (rc) {
751 mutex_unlock(&adapter->reset_lock);
752 return rc;
753 }
754
755 rc = init_resources(adapter);
756 if (rc) {
757 netdev_err(netdev, "failed to initialize resources\n");
758 release_resources(adapter);
759 mutex_unlock(&adapter->reset_lock);
760 return rc;
761 }
762 }
763
764 rc = __ibmvnic_open(netdev);
765 mutex_unlock(&adapter->reset_lock);
766
Nathan Fontenotbfc32f22017-05-03 14:04:26 -0400767 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600768}
769
Nathan Fontenotb41b83e2017-05-03 14:04:56 -0400770static void clean_tx_pools(struct ibmvnic_adapter *adapter)
771{
772 struct ibmvnic_tx_pool *tx_pool;
773 u64 tx_entries;
774 int tx_scrqs;
775 int i, j;
776
777 if (!adapter->tx_pool)
778 return;
779
780 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
781 tx_entries = adapter->req_tx_entries_per_subcrq;
782
783 /* Free any remaining skbs in the tx buffer pools */
784 for (i = 0; i < tx_scrqs; i++) {
785 tx_pool = &adapter->tx_pool[i];
786 if (!tx_pool)
787 continue;
788
789 for (j = 0; j < tx_entries; j++) {
790 if (tx_pool->tx_buff[j].skb) {
791 dev_kfree_skb_any(tx_pool->tx_buff[j].skb);
792 tx_pool->tx_buff[j].skb = NULL;
793 }
794 }
795 }
796}
797
Nathan Fontenoted651a12017-05-03 14:04:38 -0400798static int __ibmvnic_close(struct net_device *netdev)
John Allenea5509f2017-03-17 17:13:43 -0500799{
800 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
Nathan Fontenot53da09e2017-04-21 15:39:04 -0400801 int rc = 0;
John Allenea5509f2017-03-17 17:13:43 -0500802 int i;
803
Nathan Fontenot90c80142017-05-03 14:04:32 -0400804 adapter->state = VNIC_CLOSING;
Nathan Fontenoted651a12017-05-03 14:04:38 -0400805 netif_tx_stop_all_queues(netdev);
John Allend944c3d62017-05-26 10:30:13 -0400806 ibmvnic_napi_disable(adapter);
Nathan Fontenot46293b92017-05-03 14:05:02 -0400807
808 if (adapter->tx_scrq) {
809 for (i = 0; i < adapter->req_tx_queues; i++)
810 if (adapter->tx_scrq[i]->irq)
811 disable_irq(adapter->tx_scrq[i]->irq);
812 }
813
Nathan Fontenot53da09e2017-04-21 15:39:04 -0400814 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
Nathan Fontenot46293b92017-05-03 14:05:02 -0400815 if (rc)
816 return rc;
817
818 if (adapter->rx_scrq) {
819 for (i = 0; i < adapter->req_rx_queues; i++) {
820 int retries = 10;
821
822 while (pending_scrq(adapter, adapter->rx_scrq[i])) {
823 retries--;
824 mdelay(100);
825
826 if (retries == 0)
827 break;
828 }
829
830 if (adapter->rx_scrq[i]->irq)
831 disable_irq(adapter->rx_scrq[i]->irq);
832 }
833 }
Thomas Falcon032c5e82015-12-21 11:26:06 -0600834
Thomas Falcon10f76212017-05-26 10:30:31 -0400835 clean_tx_pools(adapter);
Nathan Fontenot90c80142017-05-03 14:04:32 -0400836 adapter->state = VNIC_CLOSED;
Nathan Fontenot53da09e2017-04-21 15:39:04 -0400837 return rc;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600838}
839
Nathan Fontenoted651a12017-05-03 14:04:38 -0400840static int ibmvnic_close(struct net_device *netdev)
841{
842 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
843 int rc;
844
845 mutex_lock(&adapter->reset_lock);
846 rc = __ibmvnic_close(netdev);
847 mutex_unlock(&adapter->reset_lock);
848
849 return rc;
850}
851
Thomas Falconad7775d2016-04-01 17:20:34 -0500852/**
853 * build_hdr_data - creates L2/L3/L4 header data buffer
854 * @hdr_field - bitfield determining needed headers
855 * @skb - socket buffer
856 * @hdr_len - array of header lengths
857 * @tot_len - total length of data
858 *
859 * Reads hdr_field to determine which headers are needed by firmware.
860 * Builds a buffer containing these headers. Saves individual header
861 * lengths and total buffer length to be used to build descriptors.
862 */
863static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
864 int *hdr_len, u8 *hdr_data)
865{
866 int len = 0;
867 u8 *hdr;
868
869 hdr_len[0] = sizeof(struct ethhdr);
870
871 if (skb->protocol == htons(ETH_P_IP)) {
872 hdr_len[1] = ip_hdr(skb)->ihl * 4;
873 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
874 hdr_len[2] = tcp_hdrlen(skb);
875 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
876 hdr_len[2] = sizeof(struct udphdr);
877 } else if (skb->protocol == htons(ETH_P_IPV6)) {
878 hdr_len[1] = sizeof(struct ipv6hdr);
879 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
880 hdr_len[2] = tcp_hdrlen(skb);
881 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
882 hdr_len[2] = sizeof(struct udphdr);
883 }
884
885 memset(hdr_data, 0, 120);
886 if ((hdr_field >> 6) & 1) {
887 hdr = skb_mac_header(skb);
888 memcpy(hdr_data, hdr, hdr_len[0]);
889 len += hdr_len[0];
890 }
891
892 if ((hdr_field >> 5) & 1) {
893 hdr = skb_network_header(skb);
894 memcpy(hdr_data + len, hdr, hdr_len[1]);
895 len += hdr_len[1];
896 }
897
898 if ((hdr_field >> 4) & 1) {
899 hdr = skb_transport_header(skb);
900 memcpy(hdr_data + len, hdr, hdr_len[2]);
901 len += hdr_len[2];
902 }
903 return len;
904}
905
906/**
907 * create_hdr_descs - create header and header extension descriptors
908 * @hdr_field - bitfield determining needed headers
909 * @data - buffer containing header data
910 * @len - length of data buffer
911 * @hdr_len - array of individual header lengths
912 * @scrq_arr - descriptor array
913 *
914 * Creates header and, if needed, header extension descriptors and
915 * places them in a descriptor array, scrq_arr
916 */
917
918static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
919 union sub_crq *scrq_arr)
920{
921 union sub_crq hdr_desc;
922 int tmp_len = len;
923 u8 *data, *cur;
924 int tmp;
925
926 while (tmp_len > 0) {
927 cur = hdr_data + len - tmp_len;
928
929 memset(&hdr_desc, 0, sizeof(hdr_desc));
930 if (cur != hdr_data) {
931 data = hdr_desc.hdr_ext.data;
932 tmp = tmp_len > 29 ? 29 : tmp_len;
933 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
934 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
935 hdr_desc.hdr_ext.len = tmp;
936 } else {
937 data = hdr_desc.hdr.data;
938 tmp = tmp_len > 24 ? 24 : tmp_len;
939 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
940 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
941 hdr_desc.hdr.len = tmp;
942 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
943 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
944 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
945 hdr_desc.hdr.flag = hdr_field << 1;
946 }
947 memcpy(data, cur, tmp);
948 tmp_len -= tmp;
949 *scrq_arr = hdr_desc;
950 scrq_arr++;
951 }
952}
953
954/**
955 * build_hdr_descs_arr - build a header descriptor array
956 * @skb - socket buffer
957 * @num_entries - number of descriptors to be sent
958 * @subcrq - first TX descriptor
959 * @hdr_field - bit field determining which headers will be sent
960 *
961 * This function will build a TX descriptor array with applicable
962 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
963 */
964
965static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
966 int *num_entries, u8 hdr_field)
967{
968 int hdr_len[3] = {0, 0, 0};
969 int tot_len, len;
970 u8 *hdr_data = txbuff->hdr_data;
971
972 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
973 txbuff->hdr_data);
974 len = tot_len;
975 len -= 24;
976 if (len > 0)
977 num_entries += len % 29 ? len / 29 + 1 : len / 29;
978 create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
979 txbuff->indir_arr + 1);
980}
981
Thomas Falcon032c5e82015-12-21 11:26:06 -0600982static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
983{
984 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
985 int queue_num = skb_get_queue_mapping(skb);
Thomas Falconad7775d2016-04-01 17:20:34 -0500986 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600987 struct device *dev = &adapter->vdev->dev;
988 struct ibmvnic_tx_buff *tx_buff = NULL;
Thomas Falcon142c0ac2017-03-05 12:18:41 -0600989 struct ibmvnic_sub_crq_queue *tx_scrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600990 struct ibmvnic_tx_pool *tx_pool;
991 unsigned int tx_send_failed = 0;
992 unsigned int tx_map_failed = 0;
993 unsigned int tx_dropped = 0;
994 unsigned int tx_packets = 0;
995 unsigned int tx_bytes = 0;
996 dma_addr_t data_dma_addr;
997 struct netdev_queue *txq;
Thomas Falcon032c5e82015-12-21 11:26:06 -0600998 unsigned long lpar_rc;
999 union sub_crq tx_crq;
1000 unsigned int offset;
Thomas Falconad7775d2016-04-01 17:20:34 -05001001 int num_entries = 1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001002 unsigned char *dst;
1003 u64 *handle_array;
1004 int index = 0;
1005 int ret = 0;
1006
Nathan Fontenoted651a12017-05-03 14:04:38 -04001007 if (adapter->resetting) {
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001008 if (!netif_subqueue_stopped(netdev, skb))
1009 netif_stop_subqueue(netdev, queue_num);
1010 dev_kfree_skb_any(skb);
1011
Thomas Falcon032c5e82015-12-21 11:26:06 -06001012 tx_send_failed++;
1013 tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001014 ret = NETDEV_TX_OK;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001015 goto out;
1016 }
1017
Nathan Fontenot161b8a82017-05-03 14:05:08 -04001018 tx_pool = &adapter->tx_pool[queue_num];
1019 tx_scrq = adapter->tx_scrq[queue_num];
1020 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
1021 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
1022 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
1023
Thomas Falcon032c5e82015-12-21 11:26:06 -06001024 index = tx_pool->free_map[tx_pool->consumer_index];
1025 offset = index * adapter->req_mtu;
1026 dst = tx_pool->long_term_buff.buff + offset;
1027 memset(dst, 0, adapter->req_mtu);
1028 skb_copy_from_linear_data(skb, dst, skb->len);
1029 data_dma_addr = tx_pool->long_term_buff.addr + offset;
1030
1031 tx_pool->consumer_index =
1032 (tx_pool->consumer_index + 1) %
Thomas Falcon068d9f92017-03-05 12:18:42 -06001033 adapter->req_tx_entries_per_subcrq;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001034
1035 tx_buff = &tx_pool->tx_buff[index];
1036 tx_buff->skb = skb;
1037 tx_buff->data_dma[0] = data_dma_addr;
1038 tx_buff->data_len[0] = skb->len;
1039 tx_buff->index = index;
1040 tx_buff->pool_index = queue_num;
1041 tx_buff->last_frag = true;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001042
1043 memset(&tx_crq, 0, sizeof(tx_crq));
1044 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1045 tx_crq.v1.type = IBMVNIC_TX_DESC;
1046 tx_crq.v1.n_crq_elem = 1;
1047 tx_crq.v1.n_sge = 1;
1048 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
1049 tx_crq.v1.correlator = cpu_to_be32(index);
1050 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
1051 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1052 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1053
1054 if (adapter->vlan_header_insertion) {
1055 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1056 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1057 }
1058
1059 if (skb->protocol == htons(ETH_P_IP)) {
1060 if (ip_hdr(skb)->version == 4)
1061 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1062 else if (ip_hdr(skb)->version == 6)
1063 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1064
1065 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1066 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1067 else if (ip_hdr(skb)->protocol != IPPROTO_TCP)
1068 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1069 }
1070
Thomas Falconad7775d2016-04-01 17:20:34 -05001071 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06001072 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
Thomas Falconad7775d2016-04-01 17:20:34 -05001073 hdrs += 2;
1074 }
1075 /* determine if l2/3/4 headers are sent to firmware */
1076 if ((*hdrs >> 7) & 1 &&
1077 (skb->protocol == htons(ETH_P_IP) ||
1078 skb->protocol == htons(ETH_P_IPV6))) {
1079 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1080 tx_crq.v1.n_crq_elem = num_entries;
1081 tx_buff->indir_arr[0] = tx_crq;
1082 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1083 sizeof(tx_buff->indir_arr),
1084 DMA_TO_DEVICE);
1085 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001086 dev_kfree_skb_any(skb);
1087 tx_buff->skb = NULL;
Thomas Falconad7775d2016-04-01 17:20:34 -05001088 if (!firmware_has_feature(FW_FEATURE_CMO))
1089 dev_err(dev, "tx: unable to map descriptor array\n");
1090 tx_map_failed++;
1091 tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001092 ret = NETDEV_TX_OK;
Thomas Falconad7775d2016-04-01 17:20:34 -05001093 goto out;
1094 }
John Allen498cd8e2016-04-06 11:49:55 -05001095 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
Thomas Falconad7775d2016-04-01 17:20:34 -05001096 (u64)tx_buff->indir_dma,
1097 (u64)num_entries);
1098 } else {
John Allen498cd8e2016-04-06 11:49:55 -05001099 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
1100 &tx_crq);
Thomas Falconad7775d2016-04-01 17:20:34 -05001101 }
Thomas Falcon032c5e82015-12-21 11:26:06 -06001102 if (lpar_rc != H_SUCCESS) {
1103 dev_err(dev, "tx failed with code %ld\n", lpar_rc);
1104
1105 if (tx_pool->consumer_index == 0)
1106 tx_pool->consumer_index =
Thomas Falcon068d9f92017-03-05 12:18:42 -06001107 adapter->req_tx_entries_per_subcrq - 1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001108 else
1109 tx_pool->consumer_index--;
1110
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001111 dev_kfree_skb_any(skb);
1112 tx_buff->skb = NULL;
1113
1114 if (lpar_rc == H_CLOSED)
1115 netif_stop_subqueue(netdev, queue_num);
1116
Thomas Falcon032c5e82015-12-21 11:26:06 -06001117 tx_send_failed++;
1118 tx_dropped++;
Thomas Falcon7f5b0302017-04-21 15:39:16 -04001119 ret = NETDEV_TX_OK;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001120 goto out;
1121 }
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001122
Brian King58c8c0c2017-04-19 13:44:47 -04001123 if (atomic_inc_return(&tx_scrq->used)
1124 >= adapter->req_tx_entries_per_subcrq) {
Thomas Falcon142c0ac2017-03-05 12:18:41 -06001125 netdev_info(netdev, "Stopping queue %d\n", queue_num);
1126 netif_stop_subqueue(netdev, queue_num);
1127 }
1128
Thomas Falcon032c5e82015-12-21 11:26:06 -06001129 tx_packets++;
1130 tx_bytes += skb->len;
1131 txq->trans_start = jiffies;
1132 ret = NETDEV_TX_OK;
1133
1134out:
1135 netdev->stats.tx_dropped += tx_dropped;
1136 netdev->stats.tx_bytes += tx_bytes;
1137 netdev->stats.tx_packets += tx_packets;
1138 adapter->tx_send_failed += tx_send_failed;
1139 adapter->tx_map_failed += tx_map_failed;
1140
1141 return ret;
1142}
1143
1144static void ibmvnic_set_multi(struct net_device *netdev)
1145{
1146 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1147 struct netdev_hw_addr *ha;
1148 union ibmvnic_crq crq;
1149
1150 memset(&crq, 0, sizeof(crq));
1151 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1152 crq.request_capability.cmd = REQUEST_CAPABILITY;
1153
1154 if (netdev->flags & IFF_PROMISC) {
1155 if (!adapter->promisc_supported)
1156 return;
1157 } else {
1158 if (netdev->flags & IFF_ALLMULTI) {
1159 /* Accept all multicast */
1160 memset(&crq, 0, sizeof(crq));
1161 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1162 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1163 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1164 ibmvnic_send_crq(adapter, &crq);
1165 } else if (netdev_mc_empty(netdev)) {
1166 /* Reject all multicast */
1167 memset(&crq, 0, sizeof(crq));
1168 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1169 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1170 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1171 ibmvnic_send_crq(adapter, &crq);
1172 } else {
1173 /* Accept one or more multicast(s) */
1174 netdev_for_each_mc_addr(ha, netdev) {
1175 memset(&crq, 0, sizeof(crq));
1176 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1177 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1178 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1179 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1180 ha->addr);
1181 ibmvnic_send_crq(adapter, &crq);
1182 }
1183 }
1184 }
1185}
1186
1187static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1188{
1189 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1190 struct sockaddr *addr = p;
1191 union ibmvnic_crq crq;
1192
1193 if (!is_valid_ether_addr(addr->sa_data))
1194 return -EADDRNOTAVAIL;
1195
1196 memset(&crq, 0, sizeof(crq));
1197 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1198 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1199 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
1200 ibmvnic_send_crq(adapter, &crq);
1201 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
1202 return 0;
1203}
1204
Nathan Fontenoted651a12017-05-03 14:04:38 -04001205/**
1206 * do_reset returns zero if we are able to keep processing reset events, or
1207 * non-zero if we hit a fatal error and must halt.
1208 */
1209static int do_reset(struct ibmvnic_adapter *adapter,
1210 struct ibmvnic_rwi *rwi, u32 reset_state)
1211{
1212 struct net_device *netdev = adapter->netdev;
1213 int i, rc;
1214
1215 netif_carrier_off(netdev);
1216 adapter->reset_reason = rwi->reset_reason;
1217
1218 if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
1219 rc = ibmvnic_reenable_crq_queue(adapter);
1220 if (rc)
1221 return 0;
1222 }
1223
1224 rc = __ibmvnic_close(netdev);
1225 if (rc)
1226 return rc;
1227
1228 /* remove the closed state so when we call open it appears
1229 * we are coming from the probed state.
1230 */
1231 adapter->state = VNIC_PROBED;
1232
1233 release_resources(adapter);
1234 release_sub_crqs(adapter);
1235 release_crq_queue(adapter);
1236
1237 rc = ibmvnic_init(adapter);
1238 if (rc)
1239 return 0;
1240
1241 /* If the adapter was in PROBE state prior to the reset, exit here. */
1242 if (reset_state == VNIC_PROBED)
1243 return 0;
1244
1245 rc = ibmvnic_login(netdev);
1246 if (rc) {
1247 adapter->state = VNIC_PROBED;
1248 return 0;
1249 }
1250
1251 rtnl_lock();
1252 rc = init_resources(adapter);
1253 rtnl_unlock();
1254 if (rc)
1255 return rc;
1256
1257 if (reset_state == VNIC_CLOSED)
1258 return 0;
1259
1260 rc = __ibmvnic_open(netdev);
1261 if (rc) {
1262 if (list_empty(&adapter->rwi_list))
1263 adapter->state = VNIC_CLOSED;
1264 else
1265 adapter->state = reset_state;
1266
1267 return 0;
1268 }
1269
1270 netif_carrier_on(netdev);
1271
1272 /* kick napi */
1273 for (i = 0; i < adapter->req_rx_queues; i++)
1274 napi_schedule(&adapter->napi[i]);
1275
John Allen2ce9e4e2017-05-26 10:30:25 -04001276 netdev_notify_peers(netdev);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001277 return 0;
1278}
1279
1280static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
1281{
1282 struct ibmvnic_rwi *rwi;
1283
1284 mutex_lock(&adapter->rwi_lock);
1285
1286 if (!list_empty(&adapter->rwi_list)) {
1287 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
1288 list);
1289 list_del(&rwi->list);
1290 } else {
1291 rwi = NULL;
1292 }
1293
1294 mutex_unlock(&adapter->rwi_lock);
1295 return rwi;
1296}
1297
1298static void free_all_rwi(struct ibmvnic_adapter *adapter)
1299{
1300 struct ibmvnic_rwi *rwi;
1301
1302 rwi = get_next_rwi(adapter);
1303 while (rwi) {
1304 kfree(rwi);
1305 rwi = get_next_rwi(adapter);
1306 }
1307}
1308
1309static void __ibmvnic_reset(struct work_struct *work)
1310{
1311 struct ibmvnic_rwi *rwi;
1312 struct ibmvnic_adapter *adapter;
1313 struct net_device *netdev;
1314 u32 reset_state;
1315 int rc;
1316
1317 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
1318 netdev = adapter->netdev;
1319
1320 mutex_lock(&adapter->reset_lock);
1321 adapter->resetting = true;
1322 reset_state = adapter->state;
1323
1324 rwi = get_next_rwi(adapter);
1325 while (rwi) {
1326 rc = do_reset(adapter, rwi, reset_state);
1327 kfree(rwi);
1328 if (rc)
1329 break;
1330
1331 rwi = get_next_rwi(adapter);
1332 }
1333
1334 if (rc) {
1335 free_all_rwi(adapter);
Wei Yongjun6d0af072017-05-18 15:24:52 +00001336 mutex_unlock(&adapter->reset_lock);
Nathan Fontenoted651a12017-05-03 14:04:38 -04001337 return;
1338 }
1339
1340 adapter->resetting = false;
1341 mutex_unlock(&adapter->reset_lock);
1342}
1343
1344static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
1345 enum ibmvnic_reset_reason reason)
1346{
1347 struct ibmvnic_rwi *rwi, *tmp;
1348 struct net_device *netdev = adapter->netdev;
1349 struct list_head *entry;
1350
1351 if (adapter->state == VNIC_REMOVING ||
1352 adapter->state == VNIC_REMOVED) {
1353 netdev_dbg(netdev, "Adapter removing, skipping reset\n");
1354 return;
1355 }
1356
1357 mutex_lock(&adapter->rwi_lock);
1358
1359 list_for_each(entry, &adapter->rwi_list) {
1360 tmp = list_entry(entry, struct ibmvnic_rwi, list);
1361 if (tmp->reset_reason == reason) {
1362 netdev_err(netdev, "Matching reset found, skipping\n");
1363 mutex_unlock(&adapter->rwi_lock);
1364 return;
1365 }
1366 }
1367
1368 rwi = kzalloc(sizeof(*rwi), GFP_KERNEL);
1369 if (!rwi) {
1370 mutex_unlock(&adapter->rwi_lock);
1371 ibmvnic_close(netdev);
1372 return;
1373 }
1374
1375 rwi->reset_reason = reason;
1376 list_add_tail(&rwi->list, &adapter->rwi_list);
1377 mutex_unlock(&adapter->rwi_lock);
1378 schedule_work(&adapter->ibmvnic_reset);
1379}
1380
Thomas Falcon032c5e82015-12-21 11:26:06 -06001381static void ibmvnic_tx_timeout(struct net_device *dev)
1382{
1383 struct ibmvnic_adapter *adapter = netdev_priv(dev);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001384
Nathan Fontenoted651a12017-05-03 14:04:38 -04001385 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001386}
1387
1388static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
1389 struct ibmvnic_rx_buff *rx_buff)
1390{
1391 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
1392
1393 rx_buff->skb = NULL;
1394
1395 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
1396 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
1397
1398 atomic_dec(&pool->available);
1399}
1400
1401static int ibmvnic_poll(struct napi_struct *napi, int budget)
1402{
1403 struct net_device *netdev = napi->dev;
1404 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1405 int scrq_num = (int)(napi - adapter->napi);
1406 int frames_processed = 0;
1407restart_poll:
1408 while (frames_processed < budget) {
1409 struct sk_buff *skb;
1410 struct ibmvnic_rx_buff *rx_buff;
1411 union sub_crq *next;
1412 u32 length;
1413 u16 offset;
1414 u8 flags = 0;
1415
1416 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
1417 break;
1418 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
1419 rx_buff =
1420 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
1421 rx_comp.correlator);
1422 /* do error checking */
1423 if (next->rx_comp.rc) {
1424 netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
1425 /* free the entry */
1426 next->rx_comp.first = 0;
1427 remove_buff_from_pool(adapter, rx_buff);
Nathan Fontenotca05e312017-05-03 14:05:14 -04001428 continue;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001429 }
1430
1431 length = be32_to_cpu(next->rx_comp.len);
1432 offset = be16_to_cpu(next->rx_comp.off_frame_data);
1433 flags = next->rx_comp.flags;
1434 skb = rx_buff->skb;
1435 skb_copy_to_linear_data(skb, rx_buff->data + offset,
1436 length);
Murilo Fossa Vicentini6052d5e2017-04-21 15:38:46 -04001437
1438 /* VLAN Header has been stripped by the system firmware and
1439 * needs to be inserted by the driver
1440 */
1441 if (adapter->rx_vlan_header_insertion &&
1442 (flags & IBMVNIC_VLAN_STRIPPED))
1443 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1444 ntohs(next->rx_comp.vlan_tci));
1445
Thomas Falcon032c5e82015-12-21 11:26:06 -06001446 /* free the entry */
1447 next->rx_comp.first = 0;
1448 remove_buff_from_pool(adapter, rx_buff);
1449
1450 skb_put(skb, length);
1451 skb->protocol = eth_type_trans(skb, netdev);
Thomas Falcon94ca3052017-05-03 14:05:20 -04001452 skb_record_rx_queue(skb, scrq_num);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001453
1454 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
1455 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
1456 skb->ip_summed = CHECKSUM_UNNECESSARY;
1457 }
1458
1459 length = skb->len;
1460 napi_gro_receive(napi, skb); /* send it up */
1461 netdev->stats.rx_packets++;
1462 netdev->stats.rx_bytes += length;
1463 frames_processed++;
1464 }
John Allen498cd8e2016-04-06 11:49:55 -05001465 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001466
1467 if (frames_processed < budget) {
1468 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
Eric Dumazet6ad20162017-01-30 08:22:01 -08001469 napi_complete_done(napi, frames_processed);
Thomas Falcon032c5e82015-12-21 11:26:06 -06001470 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
1471 napi_reschedule(napi)) {
1472 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1473 goto restart_poll;
1474 }
1475 }
1476 return frames_processed;
1477}
1478
1479#ifdef CONFIG_NET_POLL_CONTROLLER
1480static void ibmvnic_netpoll_controller(struct net_device *dev)
1481{
1482 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1483 int i;
1484
1485 replenish_pools(netdev_priv(dev));
1486 for (i = 0; i < adapter->req_rx_queues; i++)
1487 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
1488 adapter->rx_scrq[i]);
1489}
1490#endif
1491
1492static const struct net_device_ops ibmvnic_netdev_ops = {
1493 .ndo_open = ibmvnic_open,
1494 .ndo_stop = ibmvnic_close,
1495 .ndo_start_xmit = ibmvnic_xmit,
1496 .ndo_set_rx_mode = ibmvnic_set_multi,
1497 .ndo_set_mac_address = ibmvnic_set_mac,
1498 .ndo_validate_addr = eth_validate_addr,
Thomas Falcon032c5e82015-12-21 11:26:06 -06001499 .ndo_tx_timeout = ibmvnic_tx_timeout,
1500#ifdef CONFIG_NET_POLL_CONTROLLER
1501 .ndo_poll_controller = ibmvnic_netpoll_controller,
1502#endif
1503};
1504
1505/* ethtool functions */
1506
Philippe Reynes8a433792017-01-07 22:37:29 +01001507static int ibmvnic_get_link_ksettings(struct net_device *netdev,
1508 struct ethtool_link_ksettings *cmd)
Thomas Falcon032c5e82015-12-21 11:26:06 -06001509{
Philippe Reynes8a433792017-01-07 22:37:29 +01001510 u32 supported, advertising;
1511
1512 supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
Thomas Falcon032c5e82015-12-21 11:26:06 -06001513 SUPPORTED_FIBRE);
Philippe Reynes8a433792017-01-07 22:37:29 +01001514 advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
Thomas Falcon032c5e82015-12-21 11:26:06 -06001515 ADVERTISED_FIBRE);
Philippe Reynes8a433792017-01-07 22:37:29 +01001516 cmd->base.speed = SPEED_1000;
1517 cmd->base.duplex = DUPLEX_FULL;
1518 cmd->base.port = PORT_FIBRE;
1519 cmd->base.phy_address = 0;
1520 cmd->base.autoneg = AUTONEG_ENABLE;
1521
1522 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
1523 supported);
1524 ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
1525 advertising);
1526
Thomas Falcon032c5e82015-12-21 11:26:06 -06001527 return 0;
1528}
1529
1530static void ibmvnic_get_drvinfo(struct net_device *dev,
1531 struct ethtool_drvinfo *info)
1532{
1533 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
1534 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
1535}
1536
1537static u32 ibmvnic_get_msglevel(struct net_device *netdev)
1538{
1539 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1540
1541 return adapter->msg_enable;
1542}
1543
1544static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
1545{
1546 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1547
1548 adapter->msg_enable = data;
1549}
1550
1551static u32 ibmvnic_get_link(struct net_device *netdev)
1552{
1553 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1554
1555 /* Don't need to send a query because we request a logical link up at
1556 * init and then we wait for link state indications
1557 */
1558 return adapter->logical_link_state;
1559}
1560
1561static void ibmvnic_get_ringparam(struct net_device *netdev,
1562 struct ethtool_ringparam *ring)
1563{
1564 ring->rx_max_pending = 0;
1565 ring->tx_max_pending = 0;
1566 ring->rx_mini_max_pending = 0;
1567 ring->rx_jumbo_max_pending = 0;
1568 ring->rx_pending = 0;
1569 ring->tx_pending = 0;
1570 ring->rx_mini_pending = 0;
1571 ring->rx_jumbo_pending = 0;
1572}
1573
1574static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1575{
1576 int i;
1577
1578 if (stringset != ETH_SS_STATS)
1579 return;
1580
1581 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
1582 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
1583}
1584
1585static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
1586{
1587 switch (sset) {
1588 case ETH_SS_STATS:
1589 return ARRAY_SIZE(ibmvnic_stats);
1590 default:
1591 return -EOPNOTSUPP;
1592 }
1593}
1594
static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_drvinfo = ibmvnic_get_drvinfo,
	.get_msglevel = ibmvnic_get_msglevel,
	.set_msglevel = ibmvnic_set_msglevel,
	.get_link = ibmvnic_get_link,
	.get_ringparam = ibmvnic_get_ringparam,
	.get_strings = ibmvnic_get_strings,
	.get_sset_count = ibmvnic_get_sset_count,
	.get_ethtool_stats = ibmvnic_get_ethtool_stats,
	.get_link_ksettings = ibmvnic_get_link_ksettings,
};

/* Routines for managing CRQs/sCRQs */

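/* Free one sub-CRQ: ask the hypervisor to close it (retrying while it
 * reports busy), then unmap and free the four pages of queue messages.
 */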
static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc) {
		netdev_err(adapter->netdev,
			   "Failed to release sub-CRQ %16lx, rc = %ld\n",
			   scrq->crq_num, rc);
	}

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

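/* Allocate and register one sub-CRQ: grab four zeroed pages for the message
 * queue, DMA-map them, and register the queue with the hypervisor via
 * h_reg_sub_crq.  H_RESOURCE prompts a reset of the main CRQ; H_CLOSED just
 * means the partner adapter is not ready yet.
 */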
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
	if (!scrq)
		return NULL;

	scrq->msgs =
		(union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

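/* Tear down every tx and rx sub-CRQ: release any IRQ still attached to a
 * queue, then close and free the queue itself, and finally the queue arrays.
 */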
static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++) {
			if (!adapter->tx_scrq[i])
				continue;

			if (adapter->tx_scrq[i]->irq) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				adapter->tx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
		}

		kfree(adapter->tx_scrq);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++) {
			if (!adapter->rx_scrq[i])
				continue;

			if (adapter->rx_scrq[i]->irq) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				adapter->rx_scrq[i]->irq = 0;
			}

			release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
		}

		kfree(adapter->rx_scrq);
		adapter->rx_scrq = NULL;
	}
}

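/* Sub-CRQ interrupts are masked and unmasked through the H_VIOCTL hypervisor
 * call rather than an MMIO register; the two helpers below wrap the disable
 * and enable sub-functions.
 */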
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

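/* Drain tx completions from a sub-CRQ: for each completed descriptor, clear
 * the DMA handles recorded in the tx buffer, unmap the indirect descriptor
 * array if one was used, free the skb on its last fragment, and return the
 * buffer index to the pool's free map.  Once enough entries have completed,
 * a stopped tx queue is woken.  The queue's interrupt is re-enabled before a
 * final pending check to close the race with new completions.
 */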
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag) {
				dev_kfree_skb_any(txbuff->skb);
				txbuff->skb = NULL;
			}

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
							producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->req_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq */
		next->tx_comp.first = 0;

		if (atomic_sub_return(next->tx_comp.num_comps, &scrq->used) <=
		    (adapter->req_tx_entries_per_subcrq / 2) &&
		    __netif_subqueue_stopped(adapter->netdev,
					     scrq->pool_index)) {
			netif_wake_subqueue(adapter->netdev, scrq->pool_index);
			netdev_info(adapter->netdev, "Started queue %d\n",
				    scrq->pool_index);
		}
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

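/* A tx sub-CRQ interrupt is handled inline: completions are reaped with the
 * interrupt masked.  An rx interrupt only masks the queue and hands the real
 * work to the NAPI poll routine.
 */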
static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

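/* Map each sub-CRQ's hardware interrupt into the Linux IRQ space and attach
 * the tx or rx handler.  On failure, every IRQ requested so far is freed and
 * its mapping disposed before all sub-CRQs are released.
 */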
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs(adapter);
	return rc;
}

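/* Allocate tx and rx sub-CRQs.  If fewer queues come up than requested but
 * at least the negotiated minimum, the shortfall is spread across the tx
 * and rx requests; otherwise initialization fails.  Surviving queues are
 * then split between the tx and rx arrays.
 */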
static int init_sub_crqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	int total_queues;
	int more = 0;
	int i;

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
	if (!allqueues)
		return -1;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the deficit from queues that failed to allocate */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_KERNEL);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_KERNEL);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	kfree(allqueues);
	return 0;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
	return -1;
}

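/* Send REQUEST_CAPABILITY CRQs for every negotiated value.  On the first
 * pass the requests are derived from the capabilities the server reported;
 * on a retry (after a PARTIALSUCCESS response) the adjusted values already
 * stored in the adapter are sent as-is.  running_cap_crqs counts the
 * responses still outstanding.
 */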
static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	if (!retry) {
		/* Sub-CRQ entries are 32 bytes long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			return;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			atomic_inc(&adapter->running_cap_crqs);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		atomic_inc(&adapter->running_cap_crqs);
		ibmvnic_send_crq(adapter, &crq);
	}
}

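/* An entry is "pending" once the server has flipped the CMD_RSP bit in its
 * first byte.  During VNIC_CLOSING the queue is also treated as pending so
 * the completion path keeps draining instead of waiting on the interrupt.
 */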
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP ||
	    adapter->state == VNIC_CLOSING)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}

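/* The three senders below wrap the H_SEND_SUB_CRQ, H_SEND_SUB_CRQ_INDIRECT
 * and H_SEND_CRQ hypervisor calls.  Each issues a memory barrier first so
 * the hypervisor observes a fully written descriptor, and each reports
 * H_CLOSED separately since that simply means the partner is not there yet.
 */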
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

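/* Build and send the LOGIN request.  The login buffer carries the number of
 * tx/rx sub-CRQs being logged in plus a list of their CRQ numbers, and names
 * a second DMA-mapped buffer into which the server writes its response.
 * Both buffers stay mapped until the LOGIN response is handled.
 */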
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}

	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);
	ibmvnic_send_crq(adapter, &crq);

	return;

buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}

static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}

static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_crqs, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_crqs);
	ibmvnic_send_crq(adapter, &crq);
}

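/* Process the server's checksum/segmentation offload report: log every
 * capability, build the CONTROL_IP_OFFLOAD request enabling the checksum
 * offloads the server supports (large send/receive stay off for now), set
 * the matching netdev feature flags, and send the control buffer back.
 */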
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	    (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}

static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	if (crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			be32_to_cpu(crq->request_error_rsp.error_id));
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		be32_to_cpu(crq->request_error_rsp.error_id));

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}

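/* Firmware flagged an error with attached detail: allocate and DMA-map a
 * buffer big enough for the advertised detail size, queue it on the error
 * list, and send a REQUEST_ERROR_INFO CRQ so the server fills it in.  The
 * buffer is unwound if the send fails or no response arrives in 30 seconds.
 */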
static void request_error_information(struct ibmvnic_adapter *adapter,
				      union ibmvnic_crq *err_crq)
{
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	struct ibmvnic_error_buff *error_buff;
	unsigned long timeout = msecs_to_jiffies(30000);
	union ibmvnic_crq crq;
	unsigned long flags;
	int rc, detail_len;

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	detail_len = be32_to_cpu(err_crq->error_indication.detail_error_sz);
	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		netdev_err(netdev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = err_crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&crq, 0, sizeof(crq));
	crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	crq.request_error_info.len = cpu_to_be32(detail_len);
	crq.request_error_info.error_id = err_crq->error_indication.error_id;

	rc = ibmvnic_send_crq(adapter, &crq);
	if (rc) {
		netdev_err(netdev, "failed to request error information\n");
		goto err_info_fail;
	}

	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		netdev_err(netdev, "timeout waiting for error information\n");
		goto err_info_fail;
	}

	return;

err_info_fail:
	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_del(&error_buff->list);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	kfree(error_buff->buff);
	kfree(error_buff);
}

static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.flags
			& IBMVNIC_FATAL_ERROR ? "FATAL " : "",
		be32_to_cpu(crq->error_indication.error_id),
		be16_to_cpu(crq->error_indication.error_cause));

	if (be32_to_cpu(crq->error_indication.error_id))
		request_error_information(adapter, crq);

	if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
		ibmvnic_reset(adapter, VNIC_RESET_FATAL);
}

static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}

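/* Handle the server's answer to one REQUEST_CAPABILITY CRQ.  PARTIALSUCCESS
 * means the server granted less than we asked for: adopt the value it
 * returned, release the sub-CRQs and resend the whole request series.  When
 * the last outstanding response arrives, move on to querying IP offload.
 */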
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	atomic_dec(&adapter->running_cap_crqs);
	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be64_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs(adapter);
		*req_value = be64_to_cpu(crq->request_capability_rsp.number);
		ibmvnic_send_req_caps(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->wait_capability = false;
		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}

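/* Handle the LOGIN response.  A nonzero return code means the server could
 * not honor the requested queue counts, so flag a renegotiation; otherwise
 * sanity-check the response against the request and complete init_done.
 */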
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	return 0;
}

static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		adapter->map_id--;
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

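/* Record one QUERY_CAPABILITY answer in the matching adapter field, logging
 * each value as it arrives.  MIN_MTU and MAX_MTU also update the netdev's
 * mtu bounds.
 */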
2975static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2976 struct ibmvnic_adapter *adapter)
2977{
2978 struct net_device *netdev = adapter->netdev;
2979 struct device *dev = &adapter->vdev->dev;
2980 long rc;
2981
Thomas Falcon901e0402017-02-15 12:17:59 -06002982 atomic_dec(&adapter->running_cap_crqs);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002983 netdev_dbg(netdev, "Outstanding queries: %d\n",
Thomas Falcon901e0402017-02-15 12:17:59 -06002984 atomic_read(&adapter->running_cap_crqs));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002985 rc = crq->query_capability.rc.code;
2986 if (rc) {
2987 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
2988 goto out;
2989 }
2990
2991 switch (be16_to_cpu(crq->query_capability.capability)) {
2992 case MIN_TX_QUEUES:
2993 adapter->min_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002994 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002995 netdev_dbg(netdev, "min_tx_queues = %lld\n",
2996 adapter->min_tx_queues);
2997 break;
2998 case MIN_RX_QUEUES:
2999 adapter->min_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06003000 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003001 netdev_dbg(netdev, "min_rx_queues = %lld\n",
3002 adapter->min_rx_queues);
3003 break;
3004 case MIN_RX_ADD_QUEUES:
3005 adapter->min_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06003006 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003007 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
3008 adapter->min_rx_add_queues);
3009 break;
3010 case MAX_TX_QUEUES:
3011 adapter->max_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06003012 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003013 netdev_dbg(netdev, "max_tx_queues = %lld\n",
3014 adapter->max_tx_queues);
3015 break;
3016 case MAX_RX_QUEUES:
3017 adapter->max_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06003018 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003019 netdev_dbg(netdev, "max_rx_queues = %lld\n",
3020 adapter->max_rx_queues);
3021 break;
3022 case MAX_RX_ADD_QUEUES:
3023 adapter->max_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06003024 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003025 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
3026 adapter->max_rx_add_queues);
3027 break;
3028 case MIN_TX_ENTRIES_PER_SUBCRQ:
3029 adapter->min_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06003030 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003031 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
3032 adapter->min_tx_entries_per_subcrq);
3033 break;
3034 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
3035 adapter->min_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06003036 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003037 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
3038 adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entries_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case RX_VLAN_HEADER_INSERTION:
		adapter->rx_vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
			   adapter->rx_vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;

	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   be16_to_cpu(crq->query_capability.capability));
	}

out:
	if (atomic_read(&adapter->running_cap_crqs) == 0) {
		adapter->wait_capability = false;
		ibmvnic_send_req_caps(adapter, 0);
	}
}

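/* Dispatch a single CRQ message. The first byte of the descriptor selects
 * the message class (init handshake, transport event, or command response);
 * for command responses the cmd byte then selects the specific handler.
 * Transport events (migration, failover, connection loss) are not handled
 * inline but translated into the corresponding reset requests.
 */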
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			adapter->from_passive_init = true;
			complete(&adapter->init_done);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		netif_carrier_off(netdev);
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Migrated, re-enabling adapter\n");
			ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			ibmvnic_reset(adapter, VNIC_RESET_FATAL);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 be16_to_cpu(crq->version_exchange_rsp.version));
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev,
			   "Got Logical Link State Response, state: %d rc: %d\n",
			   crq->logical_link_state_rsp.link_state,
			   crq->logical_link_state_rsp.rc.code);
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
		complete(&adapter->init_done);
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		complete(&adapter->init_done);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

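/* Hard IRQ handler for the CRQ interrupt. All real work is deferred to
 * ibmvnic_tasklet() below, so the handler itself stays minimal.
 */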
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;

	tasklet_schedule(&adapter->tasklet);
	return IRQ_HANDLED;
}

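/* Tasklet context: drain the CRQ of all valid messages under the queue
 * lock. While capability responses are still expected, stay in the loop so
 * they are processed in this pass rather than waiting for the next
 * interrupt.
 */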
static void ibmvnic_tasklet(unsigned long data)
{
	struct ibmvnic_adapter *adapter = (struct ibmvnic_adapter *)data;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}

		/* remain in tasklet until all
		 * capability responses are received
		 */
		if (!adapter->wait_capability)
			done = true;
	}
	/* if capability CRQs were sent in this tasklet, the following
	 * tasklet must wait until all responses are received
	 */
	if (atomic_read(&adapter->running_cap_crqs) != 0)
		adapter->wait_capability = true;
	spin_unlock_irqrestore(&queue->lock, flags);
}

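/* Re-enable the CRQ after a transport event. H_ENABLE_CRQ may return busy
 * codes while the hypervisor completes the transition, so the hcall is
 * simply retried until it stops reporting busy.
 */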
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

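/* Hard-reset the CRQ: free it at the hypervisor, clear the (still mapped)
 * message page, then register it again with H_REG_CRQ. H_CLOSED here means
 * our side is fine but the partner has not opened its end yet.
 */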
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

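/* Tear down the CRQ: release the IRQ and tasklet, free the queue at the
 * hypervisor, then unmap and free the message page. crq->msgs doubles as
 * the "is initialized" flag, so it is NULLed last.
 */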
static void release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	if (!crq->msgs)
		return;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
}

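/* Allocate and register the CRQ. A single zeroed page is DMA-mapped and
 * handed to the hypervisor via H_REG_CRQ; H_RESOURCE at this point usually
 * means a previous owner (e.g. a kexec'd kernel) still holds the queue, so
 * a reset is attempted. Interrupt delivery is wired up only after the
 * queue itself is registered.
 */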
static int init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	if (crq->msgs)
		return 0;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	tasklet_init(&adapter->tasklet, ibmvnic_tasklet,
		     (unsigned long)adapter);

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	tasklet_kill(&adapter->tasklet);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	crq->msgs = NULL;
	return retrc;
}

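/* One-shot initialization: bring up the CRQ, send the init handshake to
 * the server, and wait (with a 30 second bound) for the exchange to
 * complete. from_passive_init flags the case where the server initiated
 * the handshake first; that pass is reported to the caller as a failure.
 */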
static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long timeout = msecs_to_jiffies(30000);
	int rc;

	rc = init_crq_queue(adapter);
	if (rc) {
		dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
		return rc;
	}

	adapter->from_passive_init = false;

	init_completion(&adapter->init_done);
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Initialization sequence timed out\n");
		return -1;
	}

	if (adapter->from_passive_init) {
		adapter->state = VNIC_OPEN;
		adapter->from_passive_init = false;
		return -1;
	}

	rc = init_sub_crqs(adapter);
	if (rc) {
		dev_err(dev, "Initialization of sub crqs failed\n");
		release_crq_queue(adapter);
	}

	return rc;
}

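/* VIO bus probe: fetch the MAC address from the device tree, allocate the
 * net_device with the maximum number of TX queues, set up the reset
 * machinery, run the CRQ init sequence, and only then register the netdev
 * so the interface appears with a valid MTU.
 */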
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		return -EINVAL;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	adapter->state = VNIC_PROBING;
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->errors);
	spin_lock_init(&adapter->error_list_lock);

	INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
	INIT_LIST_HEAD(&adapter->rwi_list);
	mutex_init(&adapter->reset_lock);
	mutex_init(&adapter->rwi_lock);
	adapter->resetting = false;

	rc = ibmvnic_init(adapter);
	if (rc) {
		free_netdev(netdev);
		return rc;
	}

	netdev->mtu = adapter->req_mtu - ETH_HLEN;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	adapter->state = VNIC_PROBED;
	return 0;
}

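/* VIO bus remove: flip the adapter state so in-flight resets bail out,
 * unregister the netdev, then release all resources under reset_lock to
 * serialize against the reset worker.
 */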
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->state = VNIC_REMOVING;
	unregister_netdev(netdev);
	mutex_lock(&adapter->reset_lock);

	release_resources(adapter);
	release_sub_crqs(adapter);
	release_crq_queue(adapter);

	adapter->state = VNIC_REMOVED;

	mutex_unlock(&adapter->reset_lock);
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

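/* Report the IOMMU entitlement this device wants: one page for the CRQ,
 * an aligned region for the statistics buffer, four pages per sub-CRQ,
 * and every buffer of every RX pool. As a rough illustrative figure only
 * (assuming 4KB pages), 8 sub-CRQs and 4 pools of 512 x 8KB buffers would
 * come to 1 + 32 pages plus 16MB of RX buffer mappings.
 */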
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

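/* PM resume callback: poke each RX sub-CRQ interrupt handler once, in case
 * an interrupt was raised (and lost) while the partition was suspended.
 */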
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}

static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table       = ibmvnic_device_table,
	.probe          = ibmvnic_probe,
	.remove         = ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name           = ibmvnic_driver_name,
	.pm             = &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);