/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*  This program is free software; you can redistribute it and/or modify  */
/*  it under the terms of the GNU General Public License as published by  */
/*  the Free Software Foundation; either version 2 of the License, or     */
/*  (at your option) any later version.                                   */
/*                                                                        */
/*  This program is distributed in the hope that it will be useful,       */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
/*  GNU General Public License for more details.                          */
/*                                                                        */
/*  You should have received a copy of the GNU General Public License     */
/*  along with this program.                                              */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

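/* Device statistics live in a DMA-able buffer filled in by the VNIC server
 * (see ibmvnic_get_ethtool_stats). IBMVNIC_STAT_OFF computes a counter's
 * byte offset within the adapter structure, and IBMVNIC_GET_STAT reads the
 * 64-bit value found at that offset.
 */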
#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

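/* Thin wrapper around the H_REG_SUB_CRQ hypervisor call; on success the
 * hypervisor returns the new sub-CRQ number and its hardware interrupt
 * source through retbuf.
 */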
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/* net_device_ops functions */

static void init_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *rx_pool, int num, int index,
			 int buff_size, int active)
{
	netdev_dbg(adapter->netdev,
		   "Initializing rx_pool %d, %d buffs, %d bytes each\n",
		   index, num, buff_size);
	rx_pool->size = num;
	rx_pool->index = index;
	rx_pool->buff_size = buff_size;
	rx_pool->active = active;
}

static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

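	/* Register the buffer with the VNIC server: send a map request CRQ
	 * and wait for fw_done, which is completed when the map response
	 * arrives.
	 */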
	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
	if (!adapter->failover)
		send_request_unmap(adapter, ltb->map_id);
}

static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	struct device *dev = &adapter->vdev->dev;
	int i;

	pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
	if (!pool->free_map)
		return -ENOMEM;

	pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
				GFP_KERNEL);

	if (!pool->rx_buff) {
		dev_err(dev, "Couldn't alloc rx buffers\n");
		kfree(pool->free_map);
		return -ENOMEM;
	}

	if (alloc_long_term_buff(adapter, &pool->long_term_buff,
				 pool->size * pool->buff_size)) {
		kfree(pool->free_map);
		kfree(pool->rx_buff);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->next_alloc = 0;
	pool->next_free = 0;

	return 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

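	/* The login response buffer carries an array of sub-CRQ handles;
	 * index it by pool to find the handle for this queue's RX-add sCRQ.
	 */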
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Assign the rx buffer its slot in the long term mapped
		 * DMA buffer
		 */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->migrated)
		return;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void free_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (!pool->rx_buff)
		return;

	for (i = 0; i < pool->size; i++) {
		if (pool->rx_buff[i].skb) {
			dev_kfree_skb_any(pool->rx_buff[i].skb);
			pool->rx_buff[i].skb = NULL;
		}
	}
	kfree(pool->rx_buff);
	pool->rx_buff = NULL;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	union ibmvnic_crq crq;
	int rxadd_subcrqs;
	u64 *size_array;
	int tx_subcrqs;
	int i, j;

	rxadd_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	tx_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			     be32_to_cpu(adapter->login_rsp_buf->
					 off_rxadd_buff_size));
	adapter->map_id = 1;
	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		goto alloc_napi_failed;
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&adapter->napi[i]);
	}
	adapter->rx_pool =
	    kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);

	if (!adapter->rx_pool)
		goto rx_pool_arr_alloc_failed;
	send_map_query(adapter);
	for (i = 0; i < rxadd_subcrqs; i++) {
		init_rx_pool(adapter, &adapter->rx_pool[i],
			     IBMVNIC_BUFFS_PER_POOL, i,
			     be64_to_cpu(size_array[i]), 1);
		if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
			dev_err(dev, "Couldn't alloc rx pool\n");
			goto rx_pool_alloc_failed;
		}
	}
	adapter->tx_pool =
	    kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);

	if (!adapter->tx_pool)
		goto tx_pool_arr_alloc_failed;
	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
		if (!tx_pool->tx_buff)
			goto tx_pool_alloc_failed;

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->max_tx_entries_per_subcrq *
					 adapter->req_mtu))
			goto tx_ltb_alloc_failed;

		tx_pool->free_map =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map)
			goto tx_fm_alloc_failed;

		for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}
	adapter->bounce_buffer_size =
	    (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
	adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
					 GFP_KERNEL);
	if (!adapter->bounce_buffer)
		goto bounce_alloc_failed;

	adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
						    adapter->bounce_buffer_size,
						    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		dev_err(dev, "Couldn't map tx bounce buffer\n");
		goto bounce_map_failed;
	}
	replenish_pools(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++)
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);

	for (i = 0; i < adapter->req_tx_queues; i++)
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
	ibmvnic_send_crq(adapter, &crq);

	netif_tx_start_all_queues(netdev);

	return 0;

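	/* Error unwind: each label below releases what was allocated before
	 * the failure, walking back through the setup steps above.
	 */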
bounce_map_failed:
	kfree(adapter->bounce_buffer);
bounce_alloc_failed:
	i = tx_subcrqs - 1;
	kfree(adapter->tx_pool[i].free_map);
tx_fm_alloc_failed:
	free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
tx_ltb_alloc_failed:
	kfree(adapter->tx_pool[i].tx_buff);
tx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		kfree(adapter->tx_pool[j].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[j].long_term_buff);
		kfree(adapter->tx_pool[j].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
tx_pool_arr_alloc_failed:
	i = rxadd_subcrqs;
rx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		free_rx_pool(adapter, &adapter->rx_pool[j]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[j].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
rx_pool_arr_alloc_failed:
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);
alloc_napi_failed:
	return -ENOMEM;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int i;

	adapter->closing = true;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	if (!adapter->failover)
		netif_tx_stop_all_queues(netdev);

	if (adapter->bounce_buffer) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->bounce_buffer_size,
					 DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
	ibmvnic_send_crq(adapter, &crq);

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	     i++) {
		kfree(adapter->tx_pool[i].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[i].long_term_buff);
		kfree(adapter->tx_pool[i].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		free_rx_pool(adapter, &adapter->rx_pool[i]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[i].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;

	adapter->closing = false;

	return 0;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths
 * @hdr_data - buffer to write the headers into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths in hdr_len and returns the total length of the header data,
 * to be used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */

static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			     union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer containing the socket buffer and descriptor array
 * @num_entries - number of descriptors to be sent
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */

static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len;
	len -= 24;
	if (len > 0)
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}


static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	bool used_bounce = false;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	tx_pool = &adapter->tx_pool[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_txsubm_subcrqs));
	if (adapter->migrated) {
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}

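	/* Copy the frame into the next free slot of this queue's long-term
	 * mapped TX buffer; the server reads it through the dma_reg/ioba
	 * fields of the descriptor built below.
	 */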
	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
	    adapter->max_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;
	tx_buff->used_bounce = used_bounce;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_BUSY;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->max_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}
	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}

static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu)
		return -EINVAL;

	netdev->mtu = new_mtu;
	return 0;
}

static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int rc;

	/* Adapter timed out, resetting it */
	release_sub_crqs(adapter);
	rc = ibmvnic_reset_crq(adapter);
	if (rc)
		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
	else
		ibmvnic_send_crq_init(adapter);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}

static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;
restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			break;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);
		skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		length = skb->len;
		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}
	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

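	/* Budget not exhausted: re-enable this queue's interrupt and finish
	 * the NAPI poll. If new completions raced in before the interrupt
	 * was enabled, back out, disable it again and keep polling.
	 */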
	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete(napi);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open = ibmvnic_open,
	.ndo_stop = ibmvnic_close,
	.ndo_start_xmit = ibmvnic_xmit,
	.ndo_set_rx_mode = ibmvnic_set_multi,
	.ndo_set_mac_address = ibmvnic_set_mac,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = ibmvnic_change_mtu,
	.ndo_tx_timeout = ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = ibmvnic_netpoll_controller,
#endif
};

/* ethtool functions */

static int ibmvnic_get_settings(struct net_device *netdev,
				struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			  SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
			    ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Wait for data to be written */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_settings = ibmvnic_get_settings,
	.get_drvinfo = ibmvnic_get_drvinfo,
	.get_msglevel = ibmvnic_get_msglevel,
	.set_msglevel = ibmvnic_set_msglevel,
	.get_link = ibmvnic_get_link,
	.get_ringparam = ibmvnic_get_ringparam,
	.get_strings = ibmvnic_get_strings,
	.get_sset_count = ibmvnic_get_sset_count,
	.get_ethtool_stats = ibmvnic_get_ethtool_stats,
};

/* Routines for managing CRQs/sCRQs */

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
	if (!scrq)
		return NULL;

	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}
	memset(scrq->msgs, 0, 4 * PAGE_SIZE);

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	scrq->cur = 0;
	scrq->rx_skb_top = NULL;
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
			}
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i]) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
			}
		adapter->rx_scrq = NULL;
	}

	adapter->requested_caps = 0;
}

static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i])
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i])
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
		adapter->rx_scrq = NULL;
	}

	adapter->requested_caps = 0;
}

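/* Sub-CRQ interrupts are enabled and disabled through the H_VIOCTL hcall
 * using the hardware interrupt source (hw_irq), not the Linux irq number.
 */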
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
				txbuff->used_bounce = false;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag)
				dev_kfree_skb_any(txbuff->skb);

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						     producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->max_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq */
		next->tx_comp.first = 0;
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

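/* RX completion interrupt: mask further interrupts for this queue and
 * hand frame processing to the queue's NAPI context.
 */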
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs_no_irqs(adapter);
	return rc;
}

static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	union ibmvnic_crq crq;
	int total_queues;
	int more = 0;
	int i;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			goto allqueues_failed;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->max_mtu;
	}

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
	if (!allqueues)
		goto allqueues_failed;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

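	/* Report the settled values back to the VNIC server; each
	 * REQUEST_CAPABILITY CRQ carries a single capability/value pair.
	 */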
	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		ibmvnic_send_crq(adapter, &crq);
	}

	kfree(allqueues);

	return;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
allqueues_failed:
	ibmvnic_remove(adapter->vdev);
}

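/* An entry is pending when its valid bit (IBMVNIC_CRQ_CMD_RSP) is set;
 * this also returns true unconditionally while the adapter is closing.
 */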
1637static int pending_scrq(struct ibmvnic_adapter *adapter,
1638 struct ibmvnic_sub_crq_queue *scrq)
1639{
1640 union sub_crq *entry = &scrq->msgs[scrq->cur];
1641
1642 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
1643 return 1;
1644 else
1645 return 0;
1646}
1647
1648static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
1649 struct ibmvnic_sub_crq_queue *scrq)
1650{
1651 union sub_crq *entry;
1652 unsigned long flags;
1653
1654 spin_lock_irqsave(&scrq->lock, flags);
1655 entry = &scrq->msgs[scrq->cur];
1656 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1657 if (++scrq->cur == scrq->size)
1658 scrq->cur = 0;
1659 } else {
1660 entry = NULL;
1661 }
1662 spin_unlock_irqrestore(&scrq->lock, flags);
1663
1664 return entry;
1665}
1666
1667static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
1668{
1669 struct ibmvnic_crq_queue *queue = &adapter->crq;
1670 union ibmvnic_crq *crq;
1671
1672 crq = &queue->msgs[queue->cur];
1673 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1674 if (++queue->cur == queue->size)
1675 queue->cur = 0;
1676 } else {
1677 crq = NULL;
1678 }
1679
1680 return crq;
1681}
1682
1683static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
1684 union sub_crq *sub_crq)
1685{
1686 unsigned int ua = adapter->vdev->unit_address;
1687 struct device *dev = &adapter->vdev->dev;
1688 u64 *u64_crq = (u64 *)sub_crq;
1689 int rc;
1690
1691 netdev_dbg(adapter->netdev,
1692 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
1693 (unsigned long int)cpu_to_be64(remote_handle),
1694 (unsigned long int)cpu_to_be64(u64_crq[0]),
1695 (unsigned long int)cpu_to_be64(u64_crq[1]),
1696 (unsigned long int)cpu_to_be64(u64_crq[2]),
1697 (unsigned long int)cpu_to_be64(u64_crq[3]));
1698
1699 /* Make sure the hypervisor sees the complete request */
1700 mb();
1701
1702 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
1703 cpu_to_be64(remote_handle),
1704 cpu_to_be64(u64_crq[0]),
1705 cpu_to_be64(u64_crq[1]),
1706 cpu_to_be64(u64_crq[2]),
1707 cpu_to_be64(u64_crq[3]));
1708
1709 if (rc) {
1710 if (rc == H_CLOSED)
1711 dev_warn(dev, "CRQ Queue closed\n");
1712 dev_err(dev, "Send error (rc=%d)\n", rc);
1713 }
1714
1715 return rc;
1716}
1717
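/* Hand the hypervisor num_entries pre-built sub-CRQ descriptors, located in
 * the DMA region at ioba, in a single H_SEND_SUB_CRQ_INDIRECT h-call rather
 * than one H_SEND_SUB_CRQ call per descriptor.
 */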
Thomas Falconad7775d2016-04-01 17:20:34 -05001718static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
1719 u64 remote_handle, u64 ioba, u64 num_entries)
1720{
1721 unsigned int ua = adapter->vdev->unit_address;
1722 struct device *dev = &adapter->vdev->dev;
1723 int rc;
1724
1725 /* Make sure the hypervisor sees the complete request */
1726 mb();
1727 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
1728 cpu_to_be64(remote_handle),
1729 ioba, num_entries);
1730
1731 if (rc) {
1732 if (rc == H_CLOSED)
1733 dev_warn(dev, "CRQ Queue closed\n");
1734 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
1735 }
1736
1737 return rc;
1738}
1739
Thomas Falcon032c5e82015-12-21 11:26:06 -06001740static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
1741 union ibmvnic_crq *crq)
1742{
1743 unsigned int ua = adapter->vdev->unit_address;
1744 struct device *dev = &adapter->vdev->dev;
1745 u64 *u64_crq = (u64 *)crq;
1746 int rc;
1747
1748 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
1749 (unsigned long int)cpu_to_be64(u64_crq[0]),
1750 (unsigned long int)cpu_to_be64(u64_crq[1]));
1751
1752 /* Make sure the hypervisor sees the complete request */
1753 mb();
1754
1755 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
1756 cpu_to_be64(u64_crq[0]),
1757 cpu_to_be64(u64_crq[1]));
1758
1759 if (rc) {
1760 if (rc == H_CLOSED)
1761 dev_warn(dev, "CRQ Queue closed\n");
1762 dev_warn(dev, "Send error (rc=%d)\n", rc);
1763 }
1764
1765 return rc;
1766}
1767
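/* CRQ initialization handshake: one side sends IBMVNIC_CRQ_INIT, the other
 * answers with IBMVNIC_CRQ_INIT_COMPLETE, after which the driver starts the
 * version exchange (see ibmvnic_handle_crq()).
 */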
1768static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
1769{
1770 union ibmvnic_crq crq;
1771
1772 memset(&crq, 0, sizeof(crq));
1773 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1774 crq.generic.cmd = IBMVNIC_CRQ_INIT;
1775 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
1776
1777 return ibmvnic_send_crq(adapter, &crq);
1778}
1779
1780static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
1781{
1782 union ibmvnic_crq crq;
1783
1784 memset(&crq, 0, sizeof(crq));
1785 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1786 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
1787 netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
1788
1789 return ibmvnic_send_crq(adapter, &crq);
1790}
1791
1792static int send_version_xchg(struct ibmvnic_adapter *adapter)
1793{
1794 union ibmvnic_crq crq;
1795
1796 memset(&crq, 0, sizeof(crq));
1797 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
1798 crq.version_exchange.cmd = VERSION_EXCHANGE;
1799 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
1800
1801 return ibmvnic_send_crq(adapter, &crq);
1802}
1803
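/* Layout of the login buffer built below (offsets in bytes):
 *
 *   0			struct ibmvnic_login_buffer header
 *   off_txcomp_subcrqs	req_tx_queues u64 tx completion sub-CRQ numbers
 *   off_rxcomp_subcrqs	req_rx_queues u64 rx completion sub-CRQ numbers
 *
 * The response buffer is mapped DMA_FROM_DEVICE and described to the server
 * in the same request; handle_login_rsp() unpacks it when it comes back.
 */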
1804static void send_login(struct ibmvnic_adapter *adapter)
1805{
1806 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
1807 struct ibmvnic_login_buffer *login_buffer;
1808 struct ibmvnic_inflight_cmd *inflight_cmd;
1809 struct device *dev = &adapter->vdev->dev;
1810 dma_addr_t rsp_buffer_token;
1811 dma_addr_t buffer_token;
1812 size_t rsp_buffer_size;
1813 union ibmvnic_crq crq;
1814 unsigned long flags;
1815 size_t buffer_size;
1816 __be64 *tx_list_p;
1817 __be64 *rx_list_p;
1818 int i;
1819
1820 buffer_size =
1821 sizeof(struct ibmvnic_login_buffer) +
1822 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);
1823
1824 login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
1825 if (!login_buffer)
1826 goto buf_alloc_failed;
1827
1828 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
1829 DMA_TO_DEVICE);
1830 if (dma_mapping_error(dev, buffer_token)) {
1831 dev_err(dev, "Couldn't map login buffer\n");
1832 goto buf_map_failed;
1833 }
1834
John Allen498cd8e2016-04-06 11:49:55 -05001835 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
1836 sizeof(u64) * adapter->req_tx_queues +
1837 sizeof(u64) * adapter->req_rx_queues +
1838 sizeof(u64) * adapter->req_rx_queues +
1839 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
Thomas Falcon032c5e82015-12-21 11:26:06 -06001840
1841 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
1842 if (!login_rsp_buffer)
1843 goto buf_rsp_alloc_failed;
1844
1845 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
1846 rsp_buffer_size, DMA_FROM_DEVICE);
1847 if (dma_mapping_error(dev, rsp_buffer_token)) {
1848 dev_err(dev, "Couldn't map login rsp buffer\n");
1849 goto buf_rsp_map_failed;
1850 }
1851 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
1852 if (!inflight_cmd) {
1853 dev_err(dev, "Couldn't allocate inflight_cmd\n");
1854 goto inflight_alloc_failed;
1855 }
1856 adapter->login_buf = login_buffer;
1857 adapter->login_buf_token = buffer_token;
1858 adapter->login_buf_sz = buffer_size;
1859 adapter->login_rsp_buf = login_rsp_buffer;
1860 adapter->login_rsp_buf_token = rsp_buffer_token;
1861 adapter->login_rsp_buf_sz = rsp_buffer_size;
1862
1863 login_buffer->len = cpu_to_be32(buffer_size);
1864 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
1865 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
1866 login_buffer->off_txcomp_subcrqs =
1867 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
1868 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
1869 login_buffer->off_rxcomp_subcrqs =
1870 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
1871 sizeof(u64) * adapter->req_tx_queues);
1872 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
1873 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
1874
1875 tx_list_p = (__be64 *)((char *)login_buffer +
1876 sizeof(struct ibmvnic_login_buffer));
1877 rx_list_p = (__be64 *)((char *)login_buffer +
1878 sizeof(struct ibmvnic_login_buffer) +
1879 sizeof(u64) * adapter->req_tx_queues);
1880
1881 for (i = 0; i < adapter->req_tx_queues; i++) {
1882 if (adapter->tx_scrq[i]) {
1883 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
1884 crq_num);
1885 }
1886 }
1887
1888 for (i = 0; i < adapter->req_rx_queues; i++) {
1889 if (adapter->rx_scrq[i]) {
1890 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
1891 crq_num);
1892 }
1893 }
1894
1895 netdev_dbg(adapter->netdev, "Login Buffer:\n");
1896 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
1897 netdev_dbg(adapter->netdev, "%016lx\n",
1898 ((unsigned long int *)(adapter->login_buf))[i]);
1899 }
1900
1901 memset(&crq, 0, sizeof(crq));
1902 crq.login.first = IBMVNIC_CRQ_CMD;
1903 crq.login.cmd = LOGIN;
1904 crq.login.ioba = cpu_to_be32(buffer_token);
1905 crq.login.len = cpu_to_be32(buffer_size);
1906
1907 memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
1908
1909 spin_lock_irqsave(&adapter->inflight_lock, flags);
1910 list_add_tail(&inflight_cmd->list, &adapter->inflight);
1911 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
1912
1913 ibmvnic_send_crq(adapter, &crq);
1914
1915 return;
1916
1917inflight_alloc_failed:
1918 dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
1919 DMA_FROM_DEVICE);
1920buf_rsp_map_failed:
1921 kfree(login_rsp_buffer);
1922buf_rsp_alloc_failed:
1923 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
1924buf_map_failed:
1925 kfree(login_buffer);
1926buf_alloc_failed:
1927 return;
1928}
1929
1930static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
1931 u32 len, u8 map_id)
1932{
1933 union ibmvnic_crq crq;
1934
1935 memset(&crq, 0, sizeof(crq));
1936 crq.request_map.first = IBMVNIC_CRQ_CMD;
1937 crq.request_map.cmd = REQUEST_MAP;
1938 crq.request_map.map_id = map_id;
1939 crq.request_map.ioba = cpu_to_be32(addr);
1940 crq.request_map.len = cpu_to_be32(len);
1941 ibmvnic_send_crq(adapter, &crq);
1942}
1943
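/*
 * Illustrative sketch (editor's addition, not part of the original driver):
 * how a caller might register a long term buffer with the server. The ltb
 * field names are assumptions following struct ibmvnic_long_term_buff;
 * handle_request_map_rsp() below completes adapter->fw_done.
 */
static void __maybe_unused example_register_ltb(struct ibmvnic_adapter *adapter,
						struct ibmvnic_long_term_buff *ltb)
{
	ltb->map_id = adapter->map_id++;
	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
}
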
1944static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
1945{
1946 union ibmvnic_crq crq;
1947
1948 memset(&crq, 0, sizeof(crq));
1949 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
1950 crq.request_unmap.cmd = REQUEST_UNMAP;
1951 crq.request_unmap.map_id = map_id;
1952 ibmvnic_send_crq(adapter, &crq);
1953}
1954
1955static void send_map_query(struct ibmvnic_adapter *adapter)
1956{
1957 union ibmvnic_crq crq;
1958
1959 memset(&crq, 0, sizeof(crq));
1960 crq.query_map.first = IBMVNIC_CRQ_CMD;
1961 crq.query_map.cmd = QUERY_MAP;
1962 ibmvnic_send_crq(adapter, &crq);
1963}
1964
1965/* Send a series of CRQs requesting various capabilities of the VNIC server */
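/* Each query bumps running_cap_queries; handle_query_cap_rsp() decrements
 * it and calls init_sub_crqs() once the last response has arrived.
 */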
1966static void send_cap_queries(struct ibmvnic_adapter *adapter)
1967{
1968 union ibmvnic_crq crq;
1969
1970 atomic_set(&adapter->running_cap_queries, 0);
1971 memset(&crq, 0, sizeof(crq));
1972 crq.query_capability.first = IBMVNIC_CRQ_CMD;
1973 crq.query_capability.cmd = QUERY_CAPABILITY;
1974
1975 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
1976 atomic_inc(&adapter->running_cap_queries);
1977 ibmvnic_send_crq(adapter, &crq);
1978
1979 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
1980 atomic_inc(&adapter->running_cap_queries);
1981 ibmvnic_send_crq(adapter, &crq);
1982
1983 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
1984 atomic_inc(&adapter->running_cap_queries);
1985 ibmvnic_send_crq(adapter, &crq);
1986
1987 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
1988 atomic_inc(&adapter->running_cap_queries);
1989 ibmvnic_send_crq(adapter, &crq);
1990
1991 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
1992 atomic_inc(&adapter->running_cap_queries);
1993 ibmvnic_send_crq(adapter, &crq);
1994
1995 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
1996 atomic_inc(&adapter->running_cap_queries);
1997 ibmvnic_send_crq(adapter, &crq);
1998
1999 crq.query_capability.capability =
2000 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
2001 atomic_inc(&adapter->running_cap_queries);
2002 ibmvnic_send_crq(adapter, &crq);
2003
2004 crq.query_capability.capability =
2005 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
2006 atomic_inc(&adapter->running_cap_queries);
2007 ibmvnic_send_crq(adapter, &crq);
2008
2009 crq.query_capability.capability =
2010 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
2011 atomic_inc(&adapter->running_cap_queries);
2012 ibmvnic_send_crq(adapter, &crq);
2013
2014 crq.query_capability.capability =
2015 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
2016 atomic_inc(&adapter->running_cap_queries);
2017 ibmvnic_send_crq(adapter, &crq);
2018
2019 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
2020 atomic_inc(&adapter->running_cap_queries);
2021 ibmvnic_send_crq(adapter, &crq);
2022
2023 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
2024 atomic_inc(&adapter->running_cap_queries);
2025 ibmvnic_send_crq(adapter, &crq);
2026
2027 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
2028 atomic_inc(&adapter->running_cap_queries);
2029 ibmvnic_send_crq(adapter, &crq);
2030
2031 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
2032 atomic_inc(&adapter->running_cap_queries);
2033 ibmvnic_send_crq(adapter, &crq);
2034
2035 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
2036 atomic_inc(&adapter->running_cap_queries);
2037 ibmvnic_send_crq(adapter, &crq);
2038
2039 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
2040 atomic_inc(&adapter->running_cap_queries);
2041 ibmvnic_send_crq(adapter, &crq);
2042
2043 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
2044 atomic_inc(&adapter->running_cap_queries);
2045 ibmvnic_send_crq(adapter, &crq);
2046
2047 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
2048 atomic_inc(&adapter->running_cap_queries);
2049 ibmvnic_send_crq(adapter, &crq);
2050
2051 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
2052 atomic_inc(&adapter->running_cap_queries);
2053 ibmvnic_send_crq(adapter, &crq);
2054
2055 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
2056 atomic_inc(&adapter->running_cap_queries);
2057 ibmvnic_send_crq(adapter, &crq);
2058
2059 crq.query_capability.capability =
2060 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
2061 atomic_inc(&adapter->running_cap_queries);
2062 ibmvnic_send_crq(adapter, &crq);
2063
2064 crq.query_capability.capability =
2065 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
2066 atomic_inc(&adapter->running_cap_queries);
2067 ibmvnic_send_crq(adapter, &crq);
2068
2069 crq.query_capability.capability =
2070 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
2071 atomic_inc(&adapter->running_cap_queries);
2072 ibmvnic_send_crq(adapter, &crq);
2073
2074 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
2075 atomic_inc(&adapter->running_cap_queries);
2076 ibmvnic_send_crq(adapter, &crq);
2077}
2078
2079static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
2080{
2081 struct device *dev = &adapter->vdev->dev;
2082 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
2083 union ibmvnic_crq crq;
2084 int i;
2085
2086 dma_unmap_single(dev, adapter->ip_offload_tok,
2087 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
2088
2089 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
2090 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
2091 netdev_dbg(adapter->netdev, "%016lx\n",
2092 ((unsigned long int *)(buf))[i]);
2093
2094 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
2095 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
2096 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
2097 buf->tcp_ipv4_chksum);
2098 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
2099 buf->tcp_ipv6_chksum);
2100 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
2101 buf->udp_ipv4_chksum);
2102 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
2103 buf->udp_ipv6_chksum);
2104 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
2105 buf->large_tx_ipv4);
2106 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
2107 buf->large_tx_ipv6);
2108 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
2109 buf->large_rx_ipv4);
2110 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
2111 buf->large_rx_ipv6);
2112 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
2113 buf->max_ipv4_header_size);
2114 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
2115 buf->max_ipv6_header_size);
2116 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
2117 buf->max_tcp_header_size);
2118 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
2119 buf->max_udp_header_size);
2120 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
2121 buf->max_large_tx_size);
2122 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
2123 buf->max_large_rx_size);
2124 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
2125 buf->ipv6_extension_header);
2126 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
2127 buf->tcp_pseudosum_req);
2128 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
2129 buf->num_ipv6_ext_headers);
2130 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
2131 buf->off_ipv6_ext_headers);
2132
2133 adapter->ip_offload_ctrl_tok =
2134 dma_map_single(dev, &adapter->ip_offload_ctrl,
2135 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
2136
2137 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
2138 dev_err(dev, "Couldn't map ip offload control buffer\n");
2139 return;
2140 }
2141
2142 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
2143 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
2144 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
2145 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
2146 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
2147
2148 /* large_tx/rx disabled for now, additional features needed */
2149 adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
2150 adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
2151 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
2152 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
2153
2154 adapter->netdev->features = NETIF_F_GSO;
2155
2156 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
2157 adapter->netdev->features |= NETIF_F_IP_CSUM;
2158
2159 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
2160 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
2161
Thomas Falcon9be02cd2016-04-01 17:20:35 -05002162 if ((adapter->netdev->features &
2163 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
2164 adapter->netdev->features |= NETIF_F_RXCSUM;
2165
Thomas Falcon032c5e82015-12-21 11:26:06 -06002166 memset(&crq, 0, sizeof(crq));
2167 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
2168 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
2169 crq.control_ip_offload.len =
2170 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
2171 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
2172 ibmvnic_send_crq(adapter, &crq);
2173}
2174
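/* Second half of the error retrieval flow started in
 * handle_error_indication() below: the server has filled the detail buffer
 * posted with REQUEST_ERROR_INFO, so find it on the error list, dump its
 * contents and release the DMA mapping.
 */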
2175static void handle_error_info_rsp(union ibmvnic_crq *crq,
2176 struct ibmvnic_adapter *adapter)
2177{
2178 struct device *dev = &adapter->vdev->dev;
Wei Yongjun96183182016-06-27 20:48:53 +08002179 struct ibmvnic_error_buff *error_buff, *tmp;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002180 unsigned long flags;
2181 bool found = false;
2182 int i;
2183
 2184	if (crq->request_error_rsp.rc.code) {
 2185		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
 2186			 crq->request_error_rsp.rc.code);
 2187		return;
 2188	}
2189
2190 spin_lock_irqsave(&adapter->error_list_lock, flags);
Wei Yongjun96183182016-06-27 20:48:53 +08002191 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
Thomas Falcon032c5e82015-12-21 11:26:06 -06002192 if (error_buff->error_id == crq->request_error_rsp.error_id) {
2193 found = true;
2194 list_del(&error_buff->list);
2195 break;
2196 }
2197 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2198
2199 if (!found) {
2200 dev_err(dev, "Couldn't find error id %x\n",
Thomas Falconb5a1aa82017-05-23 21:53:26 -04002201 be32_to_cpu(crq->request_error_rsp.error_id));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002202 return;
2203 }
2204
2205 dev_err(dev, "Detailed info for error id %x:",
Thomas Falconb5a1aa82017-05-23 21:53:26 -04002206 be32_to_cpu(crq->request_error_rsp.error_id));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002207
2208 for (i = 0; i < error_buff->len; i++) {
2209 pr_cont("%02x", (int)error_buff->buff[i]);
2210 if (i % 8 == 7)
2211 pr_cont(" ");
2212 }
2213 pr_cont("\n");
2214
2215 dma_unmap_single(dev, error_buff->dma, error_buff->len,
2216 DMA_FROM_DEVICE);
2217 kfree(error_buff->buff);
2218 kfree(error_buff);
2219}
2220
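/* The server has reported how large a firmware dump will be; allocate and
 * map a buffer of that size, then request the dump itself. Every failure
 * path completes fw_done so the waiting caller is not left blocked.
 */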
2221static void handle_dump_size_rsp(union ibmvnic_crq *crq,
2222 struct ibmvnic_adapter *adapter)
2223{
2224 int len = be32_to_cpu(crq->request_dump_size_rsp.len);
2225 struct ibmvnic_inflight_cmd *inflight_cmd;
2226 struct device *dev = &adapter->vdev->dev;
2227 union ibmvnic_crq newcrq;
2228 unsigned long flags;
2229
2230 /* allocate and map buffer */
2231 adapter->dump_data = kmalloc(len, GFP_KERNEL);
2232 if (!adapter->dump_data) {
2233 complete(&adapter->fw_done);
2234 return;
2235 }
2236
2237 adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
2238 DMA_FROM_DEVICE);
2239
2240 if (dma_mapping_error(dev, adapter->dump_data_token)) {
2241 if (!firmware_has_feature(FW_FEATURE_CMO))
2242 dev_err(dev, "Couldn't map dump data\n");
2243 kfree(adapter->dump_data);
2244 complete(&adapter->fw_done);
2245 return;
2246 }
2247
2248 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2249 if (!inflight_cmd) {
2250 dma_unmap_single(dev, adapter->dump_data_token, len,
2251 DMA_FROM_DEVICE);
2252 kfree(adapter->dump_data);
2253 complete(&adapter->fw_done);
2254 return;
2255 }
2256
2257 memset(&newcrq, 0, sizeof(newcrq));
2258 newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
2259 newcrq.request_dump.cmd = REQUEST_DUMP;
2260 newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
2261 newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);
2262
2263 memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));
2264
2265 spin_lock_irqsave(&adapter->inflight_lock, flags);
2266 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2267 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2268
2269 ibmvnic_send_crq(adapter, &newcrq);
2270}
2271
2272static void handle_error_indication(union ibmvnic_crq *crq,
2273 struct ibmvnic_adapter *adapter)
2274{
2275 int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
2276 struct ibmvnic_inflight_cmd *inflight_cmd;
2277 struct device *dev = &adapter->vdev->dev;
2278 struct ibmvnic_error_buff *error_buff;
2279 union ibmvnic_crq new_crq;
2280 unsigned long flags;
2281
2282 dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2283 crq->error_indication.
2284 flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
Thomas Falconb5a1aa82017-05-23 21:53:26 -04002285 be32_to_cpu(crq->error_indication.error_id),
2286 be16_to_cpu(crq->error_indication.error_cause));
Thomas Falcon032c5e82015-12-21 11:26:06 -06002287
2288 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
2289 if (!error_buff)
2290 return;
2291
2292 error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
2293 if (!error_buff->buff) {
2294 kfree(error_buff);
2295 return;
2296 }
2297
2298 error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
2299 DMA_FROM_DEVICE);
2300 if (dma_mapping_error(dev, error_buff->dma)) {
2301 if (!firmware_has_feature(FW_FEATURE_CMO))
2302 dev_err(dev, "Couldn't map error buffer\n");
2303 kfree(error_buff->buff);
2304 kfree(error_buff);
2305 return;
2306 }
2307
2308 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2309 if (!inflight_cmd) {
2310 dma_unmap_single(dev, error_buff->dma, detail_len,
2311 DMA_FROM_DEVICE);
2312 kfree(error_buff->buff);
2313 kfree(error_buff);
2314 return;
2315 }
2316
2317 error_buff->len = detail_len;
2318 error_buff->error_id = crq->error_indication.error_id;
2319
2320 spin_lock_irqsave(&adapter->error_list_lock, flags);
2321 list_add_tail(&error_buff->list, &adapter->errors);
2322 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2323
2324 memset(&new_crq, 0, sizeof(new_crq));
2325 new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
2326 new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
2327 new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
2328 new_crq.request_error_info.len = cpu_to_be32(detail_len);
2329 new_crq.request_error_info.error_id = crq->error_indication.error_id;
2330
 2331	memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));
2332
2333 spin_lock_irqsave(&adapter->inflight_lock, flags);
2334 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2335 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2336
2337 ibmvnic_send_crq(adapter, &new_crq);
2338}
2339
2340static void handle_change_mac_rsp(union ibmvnic_crq *crq,
2341 struct ibmvnic_adapter *adapter)
2342{
2343 struct net_device *netdev = adapter->netdev;
2344 struct device *dev = &adapter->vdev->dev;
2345 long rc;
2346
2347 rc = crq->change_mac_addr_rsp.rc.code;
2348 if (rc) {
2349 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
2350 return;
2351 }
2352 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
2353 ETH_ALEN);
2354}
2355
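/* Capability negotiation: on PARTIALSUCCESS the server suggests a value it
 * can support, so release the sub-CRQs, adopt the suggested value and retry.
 * Once all seven requested capabilities are acknowledged, map the buffer
 * for the IP offload query.
 */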
2356static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2357 struct ibmvnic_adapter *adapter)
2358{
2359 struct device *dev = &adapter->vdev->dev;
2360 u64 *req_value;
2361 char *name;
2362
2363 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
2364 case REQ_TX_QUEUES:
2365 req_value = &adapter->req_tx_queues;
2366 name = "tx";
2367 break;
2368 case REQ_RX_QUEUES:
2369 req_value = &adapter->req_rx_queues;
2370 name = "rx";
2371 break;
2372 case REQ_RX_ADD_QUEUES:
2373 req_value = &adapter->req_rx_add_queues;
2374 name = "rx_add";
2375 break;
2376 case REQ_TX_ENTRIES_PER_SUBCRQ:
2377 req_value = &adapter->req_tx_entries_per_subcrq;
2378 name = "tx_entries_per_subcrq";
2379 break;
2380 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
2381 req_value = &adapter->req_rx_add_entries_per_subcrq;
2382 name = "rx_add_entries_per_subcrq";
2383 break;
2384 case REQ_MTU:
2385 req_value = &adapter->req_mtu;
2386 name = "mtu";
2387 break;
2388 case PROMISC_REQUESTED:
2389 req_value = &adapter->promisc;
2390 name = "promisc";
2391 break;
2392 default:
2393 dev_err(dev, "Got invalid cap request rsp %d\n",
2394 crq->request_capability.capability);
2395 return;
2396 }
2397
2398 switch (crq->request_capability_rsp.rc.code) {
2399 case SUCCESS:
2400 break;
2401 case PARTIALSUCCESS:
2402 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
2403 *req_value,
Thomas Falcon65e72722017-05-23 21:53:27 -04002404 (long int)be64_to_cpu(crq->request_capability_rsp.
Thomas Falcon032c5e82015-12-21 11:26:06 -06002405 number), name);
Thomas Falconea22d512016-07-06 15:35:17 -05002406 release_sub_crqs_no_irqs(adapter);
Thomas Falcon65e72722017-05-23 21:53:27 -04002407 *req_value = be64_to_cpu(crq->request_capability_rsp.number);
Thomas Falconea22d512016-07-06 15:35:17 -05002408 init_sub_crqs(adapter, 1);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002409 return;
2410 default:
2411 dev_err(dev, "Error %d in request cap rsp\n",
2412 crq->request_capability_rsp.rc.code);
2413 return;
2414 }
2415
2416 /* Done receiving requested capabilities, query IP offload support */
2417 if (++adapter->requested_caps == 7) {
2418 union ibmvnic_crq newcrq;
2419 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
2420 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
2421 &adapter->ip_offload_buf;
2422
2423 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
2424 buf_sz,
2425 DMA_FROM_DEVICE);
2426
2427 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
2428 if (!firmware_has_feature(FW_FEATURE_CMO))
2429 dev_err(dev, "Couldn't map offload buffer\n");
2430 return;
2431 }
2432
2433 memset(&newcrq, 0, sizeof(newcrq));
2434 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
2435 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
2436 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
2437 newcrq.query_ip_offload.ioba =
2438 cpu_to_be32(adapter->ip_offload_tok);
2439
2440 ibmvnic_send_crq(adapter, &newcrq);
2441 }
2442}
2443
2444static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
2445 struct ibmvnic_adapter *adapter)
2446{
2447 struct device *dev = &adapter->vdev->dev;
2448 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
2449 struct ibmvnic_login_buffer *login = adapter->login_buf;
2450 union ibmvnic_crq crq;
2451 int i;
2452
2453 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
2454 DMA_BIDIRECTIONAL);
2455 dma_unmap_single(dev, adapter->login_rsp_buf_token,
2456 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
2457
John Allen498cd8e2016-04-06 11:49:55 -05002458 /* If the number of queues requested can't be allocated by the
2459 * server, the login response will return with code 1. We will need
2460 * to resend the login buffer with fewer queues requested.
2461 */
2462 if (login_rsp_crq->generic.rc.code) {
2463 adapter->renegotiate = true;
2464 complete(&adapter->init_done);
2465 return 0;
2466 }
2467
Thomas Falcon032c5e82015-12-21 11:26:06 -06002468 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
2469 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
2470 netdev_dbg(adapter->netdev, "%016lx\n",
2471 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
2472 }
2473
2474 /* Sanity checks */
2475 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
2476 (be32_to_cpu(login->num_rxcomp_subcrqs) *
2477 adapter->req_rx_add_queues !=
2478 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
2479 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
2480 ibmvnic_remove(adapter->vdev);
2481 return -EIO;
2482 }
2483 complete(&adapter->init_done);
2484
2485 memset(&crq, 0, sizeof(crq));
2486 crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
2487 crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
2488 ibmvnic_send_crq(adapter, &crq);
2489
2490 return 0;
2491}
2492
2493static void handle_request_map_rsp(union ibmvnic_crq *crq,
2494 struct ibmvnic_adapter *adapter)
2495{
2496 struct device *dev = &adapter->vdev->dev;
2497 u8 map_id = crq->request_map_rsp.map_id;
2498 int tx_subcrqs;
2499 int rx_subcrqs;
2500 long rc;
2501 int i;
2502
2503 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
2504 rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
2505
2506 rc = crq->request_map_rsp.rc.code;
2507 if (rc) {
2508 dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
2509 adapter->map_id--;
2510 /* need to find and zero tx/rx_pool map_id */
2511 for (i = 0; i < tx_subcrqs; i++) {
2512 if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
2513 adapter->tx_pool[i].long_term_buff.map_id = 0;
2514 }
2515 for (i = 0; i < rx_subcrqs; i++) {
2516 if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
2517 adapter->rx_pool[i].long_term_buff.map_id = 0;
2518 }
2519 }
2520 complete(&adapter->fw_done);
2521}
2522
2523static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
2524 struct ibmvnic_adapter *adapter)
2525{
2526 struct device *dev = &adapter->vdev->dev;
2527 long rc;
2528
2529 rc = crq->request_unmap_rsp.rc.code;
2530 if (rc)
2531 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
2532}
2533
2534static void handle_query_map_rsp(union ibmvnic_crq *crq,
2535 struct ibmvnic_adapter *adapter)
2536{
2537 struct net_device *netdev = adapter->netdev;
2538 struct device *dev = &adapter->vdev->dev;
2539 long rc;
2540
2541 rc = crq->query_map_rsp.rc.code;
2542 if (rc) {
2543 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
2544 return;
2545 }
2546 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
2547 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
2548 crq->query_map_rsp.free_pages);
2549}
2550
2551static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2552 struct ibmvnic_adapter *adapter)
2553{
2554 struct net_device *netdev = adapter->netdev;
2555 struct device *dev = &adapter->vdev->dev;
2556 long rc;
2557
2558 atomic_dec(&adapter->running_cap_queries);
2559 netdev_dbg(netdev, "Outstanding queries: %d\n",
2560 atomic_read(&adapter->running_cap_queries));
2561 rc = crq->query_capability.rc.code;
2562 if (rc) {
2563 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
2564 goto out;
2565 }
2566
2567 switch (be16_to_cpu(crq->query_capability.capability)) {
2568 case MIN_TX_QUEUES:
2569 adapter->min_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002570 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002571 netdev_dbg(netdev, "min_tx_queues = %lld\n",
2572 adapter->min_tx_queues);
2573 break;
2574 case MIN_RX_QUEUES:
2575 adapter->min_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002576 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002577 netdev_dbg(netdev, "min_rx_queues = %lld\n",
2578 adapter->min_rx_queues);
2579 break;
2580 case MIN_RX_ADD_QUEUES:
2581 adapter->min_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002582 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002583 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
2584 adapter->min_rx_add_queues);
2585 break;
2586 case MAX_TX_QUEUES:
2587 adapter->max_tx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002588 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002589 netdev_dbg(netdev, "max_tx_queues = %lld\n",
2590 adapter->max_tx_queues);
2591 break;
2592 case MAX_RX_QUEUES:
2593 adapter->max_rx_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002594 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002595 netdev_dbg(netdev, "max_rx_queues = %lld\n",
2596 adapter->max_rx_queues);
2597 break;
2598 case MAX_RX_ADD_QUEUES:
2599 adapter->max_rx_add_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002600 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002601 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
2602 adapter->max_rx_add_queues);
2603 break;
2604 case MIN_TX_ENTRIES_PER_SUBCRQ:
2605 adapter->min_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002606 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002607 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
2608 adapter->min_tx_entries_per_subcrq);
2609 break;
2610 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
2611 adapter->min_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002612 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002613 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
2614 adapter->min_rx_add_entries_per_subcrq);
2615 break;
2616 case MAX_TX_ENTRIES_PER_SUBCRQ:
2617 adapter->max_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002618 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002619 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
2620 adapter->max_tx_entries_per_subcrq);
2621 break;
2622 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
2623 adapter->max_rx_add_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002624 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002625 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
2626 adapter->max_rx_add_entries_per_subcrq);
2627 break;
2628 case TCP_IP_OFFLOAD:
2629 adapter->tcp_ip_offload =
Thomas Falconde89e852016-03-01 10:20:09 -06002630 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002631 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
2632 adapter->tcp_ip_offload);
2633 break;
2634 case PROMISC_SUPPORTED:
2635 adapter->promisc_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06002636 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002637 netdev_dbg(netdev, "promisc_supported = %lld\n",
2638 adapter->promisc_supported);
2639 break;
2640 case MIN_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06002641 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002642 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2643 break;
2644 case MAX_MTU:
Thomas Falconde89e852016-03-01 10:20:09 -06002645 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002646 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2647 break;
2648 case MAX_MULTICAST_FILTERS:
2649 adapter->max_multicast_filters =
Thomas Falconde89e852016-03-01 10:20:09 -06002650 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002651 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
2652 adapter->max_multicast_filters);
2653 break;
2654 case VLAN_HEADER_INSERTION:
2655 adapter->vlan_header_insertion =
Thomas Falconde89e852016-03-01 10:20:09 -06002656 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002657 if (adapter->vlan_header_insertion)
2658 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
2659 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
2660 adapter->vlan_header_insertion);
2661 break;
2662 case MAX_TX_SG_ENTRIES:
2663 adapter->max_tx_sg_entries =
Thomas Falconde89e852016-03-01 10:20:09 -06002664 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002665 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
2666 adapter->max_tx_sg_entries);
2667 break;
2668 case RX_SG_SUPPORTED:
2669 adapter->rx_sg_supported =
Thomas Falconde89e852016-03-01 10:20:09 -06002670 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002671 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
2672 adapter->rx_sg_supported);
2673 break;
2674 case OPT_TX_COMP_SUB_QUEUES:
2675 adapter->opt_tx_comp_sub_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002676 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002677 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
2678 adapter->opt_tx_comp_sub_queues);
2679 break;
2680 case OPT_RX_COMP_QUEUES:
2681 adapter->opt_rx_comp_queues =
Thomas Falconde89e852016-03-01 10:20:09 -06002682 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002683 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
2684 adapter->opt_rx_comp_queues);
2685 break;
2686 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
2687 adapter->opt_rx_bufadd_q_per_rx_comp_q =
Thomas Falconde89e852016-03-01 10:20:09 -06002688 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002689 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
2690 adapter->opt_rx_bufadd_q_per_rx_comp_q);
2691 break;
2692 case OPT_TX_ENTRIES_PER_SUBCRQ:
2693 adapter->opt_tx_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002694 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002695 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
2696 adapter->opt_tx_entries_per_subcrq);
2697 break;
2698 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
2699 adapter->opt_rxba_entries_per_subcrq =
Thomas Falconde89e852016-03-01 10:20:09 -06002700 be64_to_cpu(crq->query_capability.number);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002701 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
2702 adapter->opt_rxba_entries_per_subcrq);
2703 break;
2704 case TX_RX_DESC_REQ:
2705 adapter->tx_rx_desc_req = crq->query_capability.number;
2706 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
2707 adapter->tx_rx_desc_req);
2708 break;
2709
2710 default:
2711 netdev_err(netdev, "Got invalid cap rsp %d\n",
2712 crq->query_capability.capability);
2713 }
2714
2715out:
 2716	/* We're done querying the capabilities, initialize sub-crqs */
 2717	if (atomic_read(&adapter->running_cap_queries) == 0)
 2718		init_sub_crqs(adapter, 0);
2719}
2720
2721static void handle_control_ras_rsp(union ibmvnic_crq *crq,
2722 struct ibmvnic_adapter *adapter)
2723{
2724 u8 correlator = crq->control_ras_rsp.correlator;
2725 struct device *dev = &adapter->vdev->dev;
2726 bool found = false;
2727 int i;
2728
2729 if (crq->control_ras_rsp.rc.code) {
2730 dev_warn(dev, "Control ras failed rc=%d\n",
2731 crq->control_ras_rsp.rc.code);
2732 return;
2733 }
2734
2735 for (i = 0; i < adapter->ras_comp_num; i++) {
2736 if (adapter->ras_comps[i].correlator == correlator) {
2737 found = true;
2738 break;
2739 }
2740 }
2741
2742 if (!found) {
2743 dev_warn(dev, "Correlator not found on control_ras_rsp\n");
2744 return;
2745 }
2746
2747 switch (crq->control_ras_rsp.op) {
2748 case IBMVNIC_TRACE_LEVEL:
2749 adapter->ras_comps[i].trace_level = crq->control_ras.level;
2750 break;
2751 case IBMVNIC_ERROR_LEVEL:
2752 adapter->ras_comps[i].error_check_level =
2753 crq->control_ras.level;
2754 break;
2755 case IBMVNIC_TRACE_PAUSE:
2756 adapter->ras_comp_int[i].paused = 1;
2757 break;
2758 case IBMVNIC_TRACE_RESUME:
2759 adapter->ras_comp_int[i].paused = 0;
2760 break;
2761 case IBMVNIC_TRACE_ON:
2762 adapter->ras_comps[i].trace_on = 1;
2763 break;
2764 case IBMVNIC_TRACE_OFF:
2765 adapter->ras_comps[i].trace_on = 0;
2766 break;
2767 case IBMVNIC_CHG_TRACE_BUFF_SZ:
2768 /* trace_buff_sz is 3 bytes, stuff it into an int */
2769 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
2770 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
2771 crq->control_ras_rsp.trace_buff_sz[0];
2772 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
2773 crq->control_ras_rsp.trace_buff_sz[1];
2774 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
2775 crq->control_ras_rsp.trace_buff_sz[2];
2776 break;
2777 default:
2778 dev_err(dev, "invalid op %d on control_ras_rsp",
2779 crq->control_ras_rsp.op);
2780 }
2781}
2782
Thomas Falcon032c5e82015-12-21 11:26:06 -06002783static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
2784 loff_t *ppos)
2785{
2786 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2787 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2788 struct device *dev = &adapter->vdev->dev;
2789 struct ibmvnic_fw_trace_entry *trace;
2790 int num = ras_comp_int->num;
2791 union ibmvnic_crq crq;
2792 dma_addr_t trace_tok;
2793
2794 if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2795 return 0;
2796
2797 trace =
2798 dma_alloc_coherent(dev,
2799 be32_to_cpu(adapter->ras_comps[num].
2800 trace_buff_size), &trace_tok,
2801 GFP_KERNEL);
2802 if (!trace) {
2803 dev_err(dev, "Couldn't alloc trace buffer\n");
2804 return 0;
2805 }
2806
2807 memset(&crq, 0, sizeof(crq));
2808 crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
2809 crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
2810 crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
2811 crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
2812 crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
Thomas Falcon032c5e82015-12-21 11:26:06 -06002813
2814 init_completion(&adapter->fw_done);
Nathan Fontenotae0b63e2017-05-23 21:53:39 -04002815 ibmvnic_send_crq(adapter, &crq);
Thomas Falcon032c5e82015-12-21 11:26:06 -06002816 wait_for_completion(&adapter->fw_done);
2817
2818 if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2819 len =
2820 be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
2821 *ppos;
2822
 2823	if (copy_to_user(user_buf, &((u8 *)trace)[*ppos], len)) {
		dma_free_coherent(dev,
				  be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
				  trace, trace_tok);
		return -EFAULT;
	}
2824
2825 dma_free_coherent(dev,
2826 be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
2827 trace, trace_tok);
2828 *ppos += len;
2829 return len;
2830}
2831
2832static const struct file_operations trace_ops = {
2833 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00002834 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002835 .read = trace_read,
2836};
2837
2838static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
2839 loff_t *ppos)
2840{
2841 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2842 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2843 int num = ras_comp_int->num;
2844 char buff[5]; /* 1 or 0 plus \n and \0 */
2845 int size;
2846
2847 size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);
2848
2849 if (*ppos >= size)
2850 return 0;
2851
 2852	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;
2853 *ppos += size;
2854 return size;
2855}
2856
2857static ssize_t paused_write(struct file *file, const char __user *user_buf,
2858 size_t len, loff_t *ppos)
2859{
2860 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2861 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2862 int num = ras_comp_int->num;
2863 union ibmvnic_crq crq;
2864 unsigned long val;
2865 char buff[9]; /* decimal max int plus \n and \0 */
2866
	len = min(len, sizeof(buff) - 1);
	if (copy_from_user(buff, user_buf, len))
		return -EFAULT;
	buff[len] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2869
2870 adapter->ras_comp_int[num].paused = val ? 1 : 0;
2871
2872 memset(&crq, 0, sizeof(crq));
2873 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2874 crq.control_ras.cmd = CONTROL_RAS;
2875 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2876 crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
2877 ibmvnic_send_crq(adapter, &crq);
2878
2879 return len;
2880}
2881
2882static const struct file_operations paused_ops = {
2883 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00002884 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002885 .read = paused_read,
2886 .write = paused_write,
2887};
2888
2889static ssize_t tracing_read(struct file *file, char __user *user_buf,
2890 size_t len, loff_t *ppos)
2891{
2892 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2893 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2894 int num = ras_comp_int->num;
2895 char buff[5]; /* 1 or 0 plus \n and \0 */
2896 int size;
2897
2898 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);
2899
2900 if (*ppos >= size)
2901 return 0;
2902
 2903	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;
2904 *ppos += size;
2905 return size;
2906}
2907
2908static ssize_t tracing_write(struct file *file, const char __user *user_buf,
2909 size_t len, loff_t *ppos)
2910{
2911 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2912 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2913 int num = ras_comp_int->num;
2914 union ibmvnic_crq crq;
2915 unsigned long val;
2916 char buff[9]; /* decimal max int plus \n and \0 */
2917
	len = min(len, sizeof(buff) - 1);
	if (copy_from_user(buff, user_buf, len))
		return -EFAULT;
	buff[len] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2920
2921 memset(&crq, 0, sizeof(crq));
2922 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2923 crq.control_ras.cmd = CONTROL_RAS;
2924 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2925 crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
	ibmvnic_send_crq(adapter, &crq);
 2926
2927 return len;
2928}
2929
2930static const struct file_operations tracing_ops = {
2931 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00002932 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002933 .read = tracing_read,
2934 .write = tracing_write,
2935};
2936
2937static ssize_t error_level_read(struct file *file, char __user *user_buf,
2938 size_t len, loff_t *ppos)
2939{
2940 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2941 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2942 int num = ras_comp_int->num;
2943 char buff[5]; /* decimal max char plus \n and \0 */
2944 int size;
2945
2946 size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);
2947
2948 if (*ppos >= size)
2949 return 0;
2950
 2951	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;
2952 *ppos += size;
2953 return size;
2954}
2955
2956static ssize_t error_level_write(struct file *file, const char __user *user_buf,
2957 size_t len, loff_t *ppos)
2958{
2959 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2960 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2961 int num = ras_comp_int->num;
2962 union ibmvnic_crq crq;
2963 unsigned long val;
2964 char buff[9]; /* decimal max int plus \n and \0 */
2965
	len = min(len, sizeof(buff) - 1);
	if (copy_from_user(buff, user_buf, len))
		return -EFAULT;
	buff[len] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2968
2969 if (val > 9)
2970 val = 9;
2971
2972 memset(&crq, 0, sizeof(crq));
2973 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2974 crq.control_ras.cmd = CONTROL_RAS;
2975 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2976 crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
2977 crq.control_ras.level = val;
2978 ibmvnic_send_crq(adapter, &crq);
2979
2980 return len;
2981}
2982
2983static const struct file_operations error_level_ops = {
2984 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00002985 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06002986 .read = error_level_read,
2987 .write = error_level_write,
2988};
2989
2990static ssize_t trace_level_read(struct file *file, char __user *user_buf,
2991 size_t len, loff_t *ppos)
2992{
2993 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2994 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2995 int num = ras_comp_int->num;
2996 char buff[5]; /* decimal max char plus \n and \0 */
2997 int size;
2998
2999 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);
3000 if (*ppos >= size)
3001 return 0;
3002
 3003	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;
3004 *ppos += size;
3005 return size;
3006}
3007
3008static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
3009 size_t len, loff_t *ppos)
3010{
3011 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3012 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3013 union ibmvnic_crq crq;
3014 unsigned long val;
3015 char buff[9]; /* decimal max int plus \n and \0 */
3016
	len = min(len, sizeof(buff) - 1);
	if (copy_from_user(buff, user_buf, len))
		return -EFAULT;
	buff[len] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
3019 if (val > 9)
3020 val = 9;
3021
3022 memset(&crq, 0, sizeof(crq));
3023 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3024 crq.control_ras.cmd = CONTROL_RAS;
3025 crq.control_ras.correlator =
3026 adapter->ras_comps[ras_comp_int->num].correlator;
3027 crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
3028 crq.control_ras.level = val;
3029 ibmvnic_send_crq(adapter, &crq);
3030
3031 return len;
3032}
3033
3034static const struct file_operations trace_level_ops = {
3035 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00003036 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003037 .read = trace_level_read,
3038 .write = trace_level_write,
3039};
3040
3041static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
3042 size_t len, loff_t *ppos)
3043{
3044 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3045 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3046 int num = ras_comp_int->num;
 3047	char buff[12]; /* decimal max u32 plus \n and \0 */
3048 int size;
3049
 3050	size = sprintf(buff, "%u\n",
		       be32_to_cpu(adapter->ras_comps[num].trace_buff_size));
3051 if (*ppos >= size)
3052 return 0;
3053
 3054	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;
3055 *ppos += size;
3056 return size;
3057}
3058
3059static ssize_t trace_buff_size_write(struct file *file,
3060 const char __user *user_buf, size_t len,
3061 loff_t *ppos)
3062{
3063 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3064 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3065 union ibmvnic_crq crq;
3066 unsigned long val;
3067 char buff[9]; /* decimal max int plus \n and \0 */
3068
	len = min(len, sizeof(buff) - 1);
	if (copy_from_user(buff, user_buf, len))
		return -EFAULT;
	buff[len] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
3071
3072 memset(&crq, 0, sizeof(crq));
3073 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3074 crq.control_ras.cmd = CONTROL_RAS;
3075 crq.control_ras.correlator =
3076 adapter->ras_comps[ras_comp_int->num].correlator;
3077 crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
 3078	/* trace_buff_sz is 3 bytes: stuff the low three bytes of the
	 * big-endian unsigned long into it
	 */
3079 crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
3080 crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
3081 crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
3082 ibmvnic_send_crq(adapter, &crq);
3083
3084 return len;
3085}
3086
3087static const struct file_operations trace_size_ops = {
3088 .owner = THIS_MODULE,
Wei Yongjun7a95e942016-08-24 13:50:03 +00003089 .open = simple_open,
Thomas Falcon032c5e82015-12-21 11:26:06 -06003090 .read = trace_buff_size_read,
3091 .write = trace_buff_size_write,
3092};
3093
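/* Build the debugfs tree for the firmware RAS components:
 * ras_comps/<component>/{description, trace_buf_size, trace_level,
 * error_level, tracing, paused, trace}.
 */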
3094static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
3095 struct ibmvnic_adapter *adapter)
3096{
3097 struct device *dev = &adapter->vdev->dev;
3098 struct dentry *dir_ent;
3099 struct dentry *ent;
3100 int i;
3101
3102 debugfs_remove_recursive(adapter->ras_comps_ent);
3103
3104 adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
3105 adapter->debugfs_dir);
3106 if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
3107 dev_info(dev, "debugfs create ras_comps dir failed\n");
3108 return;
3109 }
3110
3111 for (i = 0; i < adapter->ras_comp_num; i++) {
3112 dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
3113 adapter->ras_comps_ent);
3114 if (!dir_ent || IS_ERR(dir_ent)) {
3115 dev_info(dev, "debugfs create %s dir failed\n",
3116 adapter->ras_comps[i].name);
3117 continue;
3118 }
3119
3120 adapter->ras_comp_int[i].adapter = adapter;
3121 adapter->ras_comp_int[i].num = i;
3122 adapter->ras_comp_int[i].desc_blob.data =
3123 &adapter->ras_comps[i].description;
3124 adapter->ras_comp_int[i].desc_blob.size =
3125 sizeof(adapter->ras_comps[i].description);
3126
3127 /* Don't need to remember the dentry's because the debugfs dir
3128 * gets removed recursively
3129 */
3130 ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
3131 &adapter->ras_comp_int[i].desc_blob);
3132 ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
3133 dir_ent, &adapter->ras_comp_int[i],
3134 &trace_size_ops);
3135 ent = debugfs_create_file("trace_level",
3136 S_IRUGO |
3137 (adapter->ras_comps[i].trace_level !=
3138 0xFF ? S_IWUSR : 0),
3139 dir_ent, &adapter->ras_comp_int[i],
3140 &trace_level_ops);
3141 ent = debugfs_create_file("error_level",
3142 S_IRUGO |
3143 (adapter->
3144 ras_comps[i].error_check_level !=
3145 0xFF ? S_IWUSR : 0),
3146 dir_ent, &adapter->ras_comp_int[i],
 3147					  &error_level_ops);
3148 ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
3149 dir_ent, &adapter->ras_comp_int[i],
3150 &tracing_ops);
3151 ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
3152 dir_ent, &adapter->ras_comp_int[i],
3153 &paused_ops);
3154 ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
3155 &adapter->ras_comp_int[i],
3156 &trace_ops);
3157 }
3158}
3159
3160static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
3161 struct ibmvnic_adapter *adapter)
3162{
3163 int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
3164 struct device *dev = &adapter->vdev->dev;
3165 union ibmvnic_crq newcrq;
3166
3167 adapter->ras_comps = dma_alloc_coherent(dev, len,
3168 &adapter->ras_comps_tok,
3169 GFP_KERNEL);
3170 if (!adapter->ras_comps) {
3171 if (!firmware_has_feature(FW_FEATURE_CMO))
3172 dev_err(dev, "Couldn't alloc fw comps buffer\n");
3173 return;
3174 }
3175
3176 adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
3177 sizeof(struct ibmvnic_fw_comp_internal),
3178 GFP_KERNEL);
 3179	if (!adapter->ras_comp_int) {
 3180		dma_free_coherent(dev, len, adapter->ras_comps,
 3181				  adapter->ras_comps_tok);
		return;
	}
3182
3183 memset(&newcrq, 0, sizeof(newcrq));
3184 newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
3185 newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
3186 newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
3187 newcrq.request_ras_comps.len = cpu_to_be32(len);
3188 ibmvnic_send_crq(adapter, &newcrq);
3189}
3190
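/* Drop commands still in flight when the transport resets: undo their DMA
 * mappings and complete fw_done where a caller may be waiting, so nothing
 * blocks forever on a response that will never arrive.
 */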
3191static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
3192{
Wei Yongjun96183182016-06-27 20:48:53 +08003193 struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003194 struct device *dev = &adapter->vdev->dev;
Wei Yongjun96183182016-06-27 20:48:53 +08003195 struct ibmvnic_error_buff *error_buff, *tmp2;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003196 unsigned long flags;
3197 unsigned long flags2;
3198
3199 spin_lock_irqsave(&adapter->inflight_lock, flags);
Wei Yongjun96183182016-06-27 20:48:53 +08003200 list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003201 switch (inflight_cmd->crq.generic.cmd) {
3202 case LOGIN:
3203 dma_unmap_single(dev, adapter->login_buf_token,
3204 adapter->login_buf_sz,
3205 DMA_BIDIRECTIONAL);
3206 dma_unmap_single(dev, adapter->login_rsp_buf_token,
3207 adapter->login_rsp_buf_sz,
3208 DMA_BIDIRECTIONAL);
3209 kfree(adapter->login_rsp_buf);
3210 kfree(adapter->login_buf);
3211 break;
3212 case REQUEST_DUMP:
3213 complete(&adapter->fw_done);
3214 break;
3215 case REQUEST_ERROR_INFO:
3216 spin_lock_irqsave(&adapter->error_list_lock, flags2);
Wei Yongjun96183182016-06-27 20:48:53 +08003217 list_for_each_entry_safe(error_buff, tmp2,
3218 &adapter->errors, list) {
Thomas Falcon032c5e82015-12-21 11:26:06 -06003219 dma_unmap_single(dev, error_buff->dma,
3220 error_buff->len,
3221 DMA_FROM_DEVICE);
3222 kfree(error_buff->buff);
3223 list_del(&error_buff->list);
3224 kfree(error_buff);
3225 }
3226 spin_unlock_irqrestore(&adapter->error_list_lock,
3227 flags2);
3228 break;
3229 }
3230 list_del(&inflight_cmd->list);
3231 kfree(inflight_cmd);
3232 }
3233 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
3234}
3235
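/* Transport event worker: flush in-flight commands and the sub-CRQs; after
 * a partition migration, additionally re-enable the CRQ and restart the
 * init handshake with the server.
 */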
static void ibmvnic_xport_event(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       ibmvnic_xport);
	struct device *dev = &adapter->vdev->dev;
	long rc;

	ibmvnic_free_inflight(adapter);
	release_sub_crqs(adapter);
	if (adapter->migrated) {
		rc = ibmvnic_reenable_crq_queue(adapter);
		if (rc)
			dev_err(dev, "Error after enable rc=%ld\n", rc);
		adapter->migrated = false;
		rc = ibmvnic_send_crq_init(adapter);
		if (rc)
			dev_err(dev, "Error sending init rc=%ld\n", rc);
	}
}

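/* Dispatch one CRQ message: initialization handshakes and transport
 * events are handled inline, command responses are demultiplexed to
 * their handlers by command code.
 */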
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   ((unsigned long int *)crq)[0],
		   ((unsigned long int *)crq)[1]);
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvnic_send_crq_init_complete(adapter);
			if (!rc)
				schedule_work(&adapter->vnic_crq_init);
			else
				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Re-enabling adapter\n");
			adapter->migrated = true;
			schedule_work(&adapter->ibmvnic_xport);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			netif_carrier_off(netdev);
			adapter->failover = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			schedule_work(&adapter->ibmvnic_xport);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		/* Convert from wire (big-endian) order before printing */
		dev_info(dev, "Partner protocol version is %d\n",
			 be16_to_cpu(crq->version_exchange_rsp.version));
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev, "Got Logical Link State Response\n");
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
	case REQUEST_STATISTICS_RSP:
		netdev_dbg(netdev, "Got Statistics Response\n");
		complete(&adapter->stats_done);
		break;
	case REQUEST_DUMP_SIZE_RSP:
		netdev_dbg(netdev, "Got Request Dump Size Response\n");
		handle_dump_size_rsp(crq, adapter);
		break;
	case REQUEST_DUMP_RSP:
		netdev_dbg(netdev, "Got Request Dump Response\n");
		complete(&adapter->fw_done);
		break;
	case QUERY_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Query IP offload Response\n");
		handle_query_ip_offload_rsp(adapter);
		break;
	case MULTICAST_CTRL_RSP:
		netdev_dbg(netdev, "Got multicast control Response\n");
		break;
	case CONTROL_IP_OFFLOAD_RSP:
		netdev_dbg(netdev, "Got Control IP offload Response\n");
		dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
				 sizeof(adapter->ip_offload_ctrl),
				 DMA_TO_DEVICE);
		/* We're done with the queries, perform the login */
		send_login(adapter);
		break;
	case REQUEST_RAS_COMP_NUM_RSP:
		netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
		if (crq->request_ras_comp_num_rsp.rc.code == 10) {
			netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
			break;
		}
		adapter->ras_comp_num =
		    be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
		handle_request_ras_comp_num_rsp(crq, adapter);
		break;
	case REQUEST_RAS_COMPS_RSP:
		netdev_dbg(netdev, "Got Request RAS Comps Response\n");
		handle_request_ras_comps_rsp(crq, adapter);
		break;
	case CONTROL_RAS_RSP:
		netdev_dbg(netdev, "Got Control RAS Response\n");
		handle_control_ras_rsp(crq, adapter);
		break;
	case COLLECT_FW_TRACE_RSP:
		netdev_dbg(netdev, "Got Collect firmware trace Response\n");
		complete(&adapter->fw_done);
		break;
	default:
		netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
			   gen_crq->cmd);
	}
}

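/* Main CRQ interrupt handler. Drains the queue with interrupts
 * disabled, then re-enables them and polls once more to close the race
 * with a message arriving between the last poll and the re-enable.
 */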
static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
{
	struct ibmvnic_adapter *adapter = instance;
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	union ibmvnic_crq *crq;
	unsigned long flags;
	bool done = false;

	spin_lock_irqsave(&queue->lock, flags);
	vio_disable_interrupts(vdev);
	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		}
		vio_enable_interrupts(vdev);
		crq = ibmvnic_next_crq(adapter);
		if (crq) {
			vio_disable_interrupts(vdev);
			ibmvnic_handle_crq(crq, adapter);
			crq->generic.first = 0;
		} else {
			done = true;
		}
	}
	spin_unlock_irqrestore(&queue->lock, flags);
	return IRQ_HANDLED;
}

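/* Re-enable the main CRQ after a partition migration, retrying while
 * the hypervisor reports the operation as busy or in progress.
 */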
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	do {
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

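/* Reset the main CRQ: free it in the hypervisor, zero the still
 * long-term-mapped message page, and register it again.
 */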
static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc;

	/* Close the CRQ */
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);

	return rc;
}

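/* Tear down the main CRQ: release its irq, free the queue in the
 * hypervisor, and unmap and free the message page.
 */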
static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct vio_dev *vdev = adapter->vdev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing CRQ\n");
	free_irq(vdev->irq, adapter);
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
}

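/* Allocate and long-term-map a page of CRQ messages, register it with
 * the hypervisor (resetting the queue if a stale registration is still
 * around, e.g. across kexec), and request the CRQ interrupt.
 */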
static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *crq = &adapter->crq;
	struct device *dev = &adapter->vdev->dev;
	struct vio_dev *vdev = adapter->vdev;
	int rc, retrc = -ENOMEM;

	crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
	/* Should we allocate more than one page? */

	if (!crq->msgs)
		return -ENOMEM;

	crq->size = PAGE_SIZE / sizeof(*crq->msgs);
	crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
					DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, crq->msg_token))
		goto map_failed;

	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvnic_reset_crq(adapter);
	retrc = rc;

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready\n");
	} else if (rc) {
		dev_warn(dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	retrc = 0;

	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
			 adapter);
	if (rc) {
		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
			vdev->irq, rc);
		/* Propagate the error; don't return 0 from the failure path */
		retrc = rc;
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc) {
		dev_err(dev, "Error %d enabling interrupts\n", rc);
		retrc = rc;
		goto req_irq_failed;
	}

	crq->cur = 0;
	spin_lock_init(&crq->lock);

	return retrc;

req_irq_failed:
	do {
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
	dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
map_failed:
	free_page((unsigned long)crq->msgs);
	return retrc;
}

/* debugfs for dump: reading the file sends REQUEST_DUMP_SIZE to the
 * server; the response handler requests the dump itself, and fw_done
 * completes once the dump data has been received and can be emitted.
 */
static int ibmvnic_dump_show(struct seq_file *seq, void *v)
{
	struct net_device *netdev = seq->private;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
	crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;

	init_completion(&adapter->fw_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);

	seq_write(seq, adapter->dump_data, adapter->dump_data_size);

	dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
			 DMA_BIDIRECTIONAL);

	kfree(adapter->dump_data);

	return 0;
}

static int ibmvnic_dump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ibmvnic_dump_show, inode->i_private);
}

static const struct file_operations ibmvnic_dump_ops = {
	.owner		= THIS_MODULE,
	.open		= ibmvnic_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

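/* Worker scheduled when the server initiates the handshake, e.g. after
 * a failover: redo version exchange, capability negotiation and sub-CRQ
 * setup, then restart the existing netdev or register a new one.
 */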
static void handle_crq_init_rsp(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       vnic_crq_init);
	struct device *dev = &adapter->vdev->dev;
	struct net_device *netdev = adapter->netdev;
	unsigned long timeout = msecs_to_jiffies(30000);
	bool restart = false;
	int rc;

	if (adapter->failover) {
		release_sub_crqs(adapter);
		if (netif_running(netdev)) {
			netif_tx_disable(netdev);
			ibmvnic_close(netdev);
			restart = true;
		}
	}

	reinit_completion(&adapter->init_done);
	send_version_xchg(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
		dev_err(dev, "Passive init timeout\n");
		goto task_failed;
	}

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs_no_irqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout)) {
				dev_err(dev, "Passive init timeout\n");
				goto task_failed;
			}
		}
	} while (adapter->renegotiate);
	rc = init_sub_crq_irqs(adapter);

	if (rc)
		goto task_failed;

	netdev->real_num_tx_queues = adapter->req_tx_queues;
	netdev->mtu = adapter->req_mtu;

	if (adapter->failover) {
		adapter->failover = false;
		if (restart) {
			rc = ibmvnic_open(netdev);
			if (rc)
				goto restart_failed;
		}
		netif_carrier_on(netdev);
		return;
	}

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(dev, "failed to register netdev rc=%d\n", rc);
		goto register_failed;
	}
	dev_info(dev, "ibmvnic registered\n");

	return;

restart_failed:
	dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
register_failed:
	release_sub_crqs(adapter);
task_failed:
	dev_err(dev, "Passive initialization was not successful\n");
}

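/* Probe: allocate the netdev and adapter, bring up the main CRQ,
 * statistics buffer and debugfs entries, then drive the initialization
 * handshake. If the server does not answer within the timeout,
 * registration is deferred to the passive-init worker,
 * handle_crq_init_rsp().
 */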
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	unsigned long timeout = msecs_to_jiffies(30000);
	struct ibmvnic_adapter *adapter;
	struct net_device *netdev;
	unsigned char *mac_addr_p;
	struct dentry *ent;
	char buf[17]; /* debugfs name buf */
	int rc;

	dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev,
							VETH_MAC_ADDR, NULL);
	if (!mac_addr_p) {
		dev_err(&dev->dev,
			"(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
			__FILE__, __LINE__);
		/* Fail the probe; returning 0 here would leave the device
		 * bound with no netdev or drvdata behind it.
		 */
		return -EINVAL;
	}

	netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
				   IBMVNIC_MAX_TX_QUEUES);
	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);
	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->failover = false;

	ether_addr_copy(adapter->mac_addr, mac_addr_p);
	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmvnic_netdev_ops;
	netdev->ethtool_ops = &ibmvnic_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);

	INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
	INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);

	spin_lock_init(&adapter->stats_lock);

	rc = ibmvnic_init_crq_queue(adapter);
	if (rc) {
		dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
		goto free_netdev;
	}

	INIT_LIST_HEAD(&adapter->errors);
	INIT_LIST_HEAD(&adapter->inflight);
	spin_lock_init(&adapter->error_list_lock);
	spin_lock_init(&adapter->inflight_lock);

	adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
					      sizeof(struct ibmvnic_statistics),
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(&dev->dev, "Couldn't map stats buffer\n");
		rc = -ENOMEM;
		goto free_crq;
	}

	snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
	ent = debugfs_create_dir(buf, NULL);
	if (!ent || IS_ERR(ent)) {
		dev_info(&dev->dev, "debugfs create directory failed\n");
		adapter->debugfs_dir = NULL;
	} else {
		adapter->debugfs_dir = ent;
		ent = debugfs_create_file("dump", S_IRUGO,
					  adapter->debugfs_dir,
					  netdev, &ibmvnic_dump_ops);
		if (!ent || IS_ERR(ent)) {
			dev_info(&dev->dev,
				 "debugfs create dump file failed\n");
			adapter->debugfs_dump = NULL;
		} else {
			adapter->debugfs_dump = ent;
		}
	}

	init_completion(&adapter->init_done);
	ibmvnic_send_crq_init(adapter);
	if (!wait_for_completion_timeout(&adapter->init_done, timeout))
		return 0;

	do {
		if (adapter->renegotiate) {
			adapter->renegotiate = false;
			release_sub_crqs_no_irqs(adapter);

			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);
			if (!wait_for_completion_timeout(&adapter->init_done,
							 timeout))
				return 0;
		}
	} while (adapter->renegotiate);

	rc = init_sub_crq_irqs(adapter);
	if (rc) {
		dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
		goto free_debugfs;
	}

	netdev->real_num_tx_queues = adapter->req_tx_queues;
	netdev->mtu = adapter->req_mtu;

	rc = register_netdev(netdev);
	if (rc) {
		dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
		goto free_sub_crqs;
	}
	dev_info(&dev->dev, "ibmvnic registered\n");

	return 0;

free_sub_crqs:
	release_sub_crqs(adapter);
free_debugfs:
	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);
free_crq:
	ibmvnic_release_crq_queue(adapter);
free_netdev:
	free_netdev(netdev);
	return rc;
}

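/* Remove: unregister the netdev and undo everything probe and the RAS
 * handlers set up (sub-CRQs, main CRQ, debugfs, DMA mappings).
 */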
static int ibmvnic_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	unregister_netdev(netdev);

	release_sub_crqs(adapter);

	ibmvnic_release_crq_queue(adapter);

	if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
		debugfs_remove_recursive(adapter->debugfs_dir);

	dma_unmap_single(&dev->dev, adapter->stats_token,
			 sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);

	if (adapter->ras_comps)
		dma_free_coherent(&dev->dev,
				  adapter->ras_comp_num *
				  sizeof(struct ibmvnic_fw_component),
				  adapter->ras_comps, adapter->ras_comps_tok);

	kfree(adapter->ras_comp_int);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

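/* Report the desired IO entitlement, i.e. how much memory the driver
 * expects to keep long-term DMA mapped: the CRQ page, bounce and
 * statistics buffers, the sub-CRQ queues and all rx pool buffers.
 */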
static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmvnic_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret = 0;
	int i;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (!netdev)
		return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret += PAGE_SIZE; /* the crq message queue */
	ret += adapter->bounce_buffer_size;
	ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);

	for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
		ret += 4 * PAGE_SIZE; /* the scrq message queue */

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++)
		ret += adapter->rx_pool[i].size *
		    IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);

	return ret;
}

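/* PM resume callback: kick the rx sub-CRQ interrupt handlers in case
 * an interrupt was lost while the partition was suspended.
 */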
static int ibmvnic_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int i;

	/* kick the interrupt handlers just in case we lost an interrupt */
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);

	return 0;
}

static struct vio_device_id ibmvnic_device_table[] = {
	{"network", "IBM,vnic"},
	{"", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);

static const struct dev_pm_ops ibmvnic_pm_ops = {
	.resume = ibmvnic_resume
};

static struct vio_driver ibmvnic_driver = {
	.id_table	= ibmvnic_device_table,
	.probe		= ibmvnic_probe,
	.remove		= ibmvnic_remove,
	.get_desired_dma = ibmvnic_get_desired_dma,
	.name		= ibmvnic_driver_name,
	.pm		= &ibmvnic_pm_ops,
};

/* module functions */
static int __init ibmvnic_module_init(void)
{
	pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
		IBMVNIC_DRIVER_VERSION);

	return vio_register_driver(&ibmvnic_driver);
}

static void __exit ibmvnic_module_exit(void)
{
	vio_unregister_driver(&ibmvnic_driver);
}

module_init(ibmvnic_module_init);
module_exit(ibmvnic_module_exit);