1/**************************************************************************/
2/* */
3/* IBM System i and System p Virtual NIC Device Driver */
4/* Copyright (C) 2014 IBM Corp. */
5/* Santiago Leon (santi_leon@yahoo.com) */
6/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com) */
7/* John Allen (jallen@linux.vnet.ibm.com) */
8/* */
9/* This program is free software; you can redistribute it and/or modify */
10/* it under the terms of the GNU General Public License as published by */
11/* the Free Software Foundation; either version 2 of the License, or */
12/* (at your option) any later version. */
13/* */
14/* This program is distributed in the hope that it will be useful, */
15/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
16/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
17/* GNU General Public License for more details. */
18/* */
19/* You should have received a copy of the GNU General Public License */
20/* along with this program. */
21/* */
22/* This module contains the implementation of a virtual ethernet device */
23/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN */
24/* option of the RS/6000 Platform Architecture to interface with virtual */
25/* ethernet NICs that are presented to the partition by the hypervisor. */
26/* */
27/* Messages are passed between the VNIC driver and the VNIC server using */
28/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to */
29/* issue and receive commands that initiate communication with the server */
30/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but */
31/* are used by the driver to notify the server that a packet is */
32/* ready for transmission or that a buffer has been added to receive a */
33/* packet. Subsequently, sCRQs are used by the server to notify the */
34/* driver that a packet transmission has been completed or that a packet */
35/* has been received and placed in a waiting buffer. */
36/* */
37/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in */
38/* which skbs are DMA mapped and immediately unmapped when the transmit */
39/* or receive has been completed, the VNIC driver is required to use */
40/* "long term mapping". This entails that large, continuous DMA mapped */
41/* buffers are allocated on driver initialization and these buffers are */
42/* then continuously reused to pass skbs to and from the VNIC server. */
43/* */
44/**************************************************************************/
45
46#include <linux/module.h>
47#include <linux/moduleparam.h>
48#include <linux/types.h>
49#include <linux/errno.h>
50#include <linux/completion.h>
51#include <linux/ioport.h>
52#include <linux/dma-mapping.h>
53#include <linux/kernel.h>
54#include <linux/netdevice.h>
55#include <linux/etherdevice.h>
56#include <linux/skbuff.h>
57#include <linux/init.h>
58#include <linux/delay.h>
59#include <linux/mm.h>
60#include <linux/ethtool.h>
61#include <linux/proc_fs.h>
62#include <linux/in.h>
63#include <linux/ip.h>
64#include <linux/ipv6.h>
65#include <linux/irq.h>
66#include <linux/kthread.h>
67#include <linux/seq_file.h>
68#include <linux/debugfs.h>
69#include <linux/interrupt.h>
70#include <net/net_namespace.h>
71#include <asm/hvcall.h>
72#include <linux/atomic.h>
73#include <asm/vio.h>
74#include <asm/iommu.h>
75#include <linux/uaccess.h>
76#include <asm/firmware.h>
78
79#include "ibmvnic.h"
80
81static const char ibmvnic_driver_name[] = "ibmvnic";
82static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
83
84MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
85MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
86MODULE_LICENSE("GPL");
87MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
88
89static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
90static int ibmvnic_remove(struct vio_dev *);
91static void release_sub_crqs(struct ibmvnic_adapter *);
92static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
93static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
94static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
95static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
96static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
97 union sub_crq *sub_crq);
98static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
99static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
100static int enable_scrq_irq(struct ibmvnic_adapter *,
101 struct ibmvnic_sub_crq_queue *);
102static int disable_scrq_irq(struct ibmvnic_adapter *,
103 struct ibmvnic_sub_crq_queue *);
104static int pending_scrq(struct ibmvnic_adapter *,
105 struct ibmvnic_sub_crq_queue *);
106static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
107 struct ibmvnic_sub_crq_queue *);
108static int ibmvnic_poll(struct napi_struct *napi, int budget);
109static void send_map_query(struct ibmvnic_adapter *adapter);
110static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
111static void send_request_unmap(struct ibmvnic_adapter *, u8);
112
113struct ibmvnic_stat {
114 char name[ETH_GSTRING_LEN];
115 int offset;
116};
117
118#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
119 offsetof(struct ibmvnic_statistics, stat))
120#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
121
122static const struct ibmvnic_stat ibmvnic_stats[] = {
123 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
124 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
125 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
126 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
127 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
128 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
129 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
130 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
131 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
132 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
133 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
134 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
135 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
136 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
137 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
138 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
139 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
140 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
141 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
142 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
143 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
144 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
145};
146
147static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
148 unsigned long length, unsigned long *number,
149 unsigned long *irq)
150{
151 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
152 long rc;
153
154 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
155 *number = retbuf[0];
156 *irq = retbuf[1];
157
158 return rc;
159}
160
161/* net_device_ops functions */
162
163static void init_rx_pool(struct ibmvnic_adapter *adapter,
164 struct ibmvnic_rx_pool *rx_pool, int num, int index,
165 int buff_size, int active)
166{
167 netdev_dbg(adapter->netdev,
168 "Initializing rx_pool %d, %d buffs, %d bytes each\n",
169 index, num, buff_size);
170 rx_pool->size = num;
171 rx_pool->index = index;
172 rx_pool->buff_size = buff_size;
173 rx_pool->active = active;
174}
175
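/* Allocate one physically contiguous, DMA-coherent "long term buffer" and
 * register it with the VNIC server via a REQUEST_MAP command. The caller
 * then sleeps on adapter->fw_done, which the CRQ response path is expected
 * to complete once the map has been acknowledged.
 */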
176static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
177 struct ibmvnic_long_term_buff *ltb, int size)
178{
179 struct device *dev = &adapter->vdev->dev;
180
181 ltb->size = size;
182 ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
183 GFP_KERNEL);
184
185 if (!ltb->buff) {
186 dev_err(dev, "Couldn't alloc long term buffer\n");
187 return -ENOMEM;
188 }
189 ltb->map_id = adapter->map_id;
190 adapter->map_id++;
191 init_completion(&adapter->fw_done);
192 send_request_map(adapter, ltb->addr,
193 ltb->size, ltb->map_id);
194 wait_for_completion(&adapter->fw_done);
195 return 0;
196}
197
198static void free_long_term_buff(struct ibmvnic_adapter *adapter,
199 struct ibmvnic_long_term_buff *ltb)
200{
201 struct device *dev = &adapter->vdev->dev;
202
203 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
204 send_request_unmap(adapter, ltb->map_id);
205}
206
207static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
208 struct ibmvnic_rx_pool *pool)
209{
210 struct device *dev = &adapter->vdev->dev;
211 int i;
212
213 pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
214 if (!pool->free_map)
215 return -ENOMEM;
216
217 pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
218 GFP_KERNEL);
219
220 if (!pool->rx_buff) {
221 dev_err(dev, "Couldn't alloc rx buffers\n");
222 kfree(pool->free_map);
223 return -ENOMEM;
224 }
225
226 if (alloc_long_term_buff(adapter, &pool->long_term_buff,
227 pool->size * pool->buff_size)) {
228 kfree(pool->free_map);
229 kfree(pool->rx_buff);
230 return -ENOMEM;
231 }
232
233 for (i = 0; i < pool->size; ++i)
234 pool->free_map[i] = i;
235
236 atomic_set(&pool->available, 0);
237 pool->next_alloc = 0;
238 pool->next_free = 0;
239
240 return 0;
241}
242
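/* Refill an RX pool up to its configured size. Each new skb is paired with
 * a slot carved out of the pool's long term mapped buffer, and an rx_add
 * sub-CRQ descriptor pointing at that slot is posted to the server with
 * send_subcrq().
 */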
243static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
244 struct ibmvnic_rx_pool *pool)
245{
246 int count = pool->size - atomic_read(&pool->available);
247 struct device *dev = &adapter->vdev->dev;
248 int buffers_added = 0;
249 unsigned long lpar_rc;
250 union sub_crq sub_crq;
251 struct sk_buff *skb;
252 unsigned int offset;
253 dma_addr_t dma_addr;
254 unsigned char *dst;
255 u64 *handle_array;
256 int shift = 0;
257 int index;
258 int i;
259
260 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
261 be32_to_cpu(adapter->login_rsp_buf->
262 off_rxadd_subcrqs));
263
264 for (i = 0; i < count; ++i) {
265 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
266 if (!skb) {
267 dev_err(dev, "Couldn't replenish rx buff\n");
268 adapter->replenish_no_mem++;
269 break;
270 }
271
272 index = pool->free_map[pool->next_free];
273
274 if (pool->rx_buff[index].skb)
275 dev_err(dev, "Inconsistent free_map!\n");
276
277 /* Copy the skb to the long term mapped DMA buffer */
278 offset = index * pool->buff_size;
279 dst = pool->long_term_buff.buff + offset;
280 memset(dst, 0, pool->buff_size);
281 dma_addr = pool->long_term_buff.addr + offset;
282 pool->rx_buff[index].data = dst;
283
284 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
285 pool->rx_buff[index].dma = dma_addr;
286 pool->rx_buff[index].skb = skb;
287 pool->rx_buff[index].pool_index = pool->index;
288 pool->rx_buff[index].size = pool->buff_size;
289
290 memset(&sub_crq, 0, sizeof(sub_crq));
291 sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
292 sub_crq.rx_add.correlator =
293 cpu_to_be64((u64)&pool->rx_buff[index]);
294 sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
295 sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
296
297 /* The length field of the sCRQ is defined to be 24 bits so the
298 * buffer size needs to be left shifted by a byte before it is
299 * converted to big endian to prevent the last byte from being
300 * truncated.
301 */
302#ifdef __LITTLE_ENDIAN__
303 shift = 8;
304#endif
305 sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
306
307 lpar_rc = send_subcrq(adapter, handle_array[pool->index],
308 &sub_crq);
309 if (lpar_rc != H_SUCCESS)
310 goto failure;
311
312 buffers_added++;
313 adapter->replenish_add_buff_success++;
314 pool->next_free = (pool->next_free + 1) % pool->size;
315 }
316 atomic_add(buffers_added, &pool->available);
317 return;
318
319failure:
320 dev_info(dev, "replenish pools failure\n");
321 pool->free_map[pool->next_free] = index;
322 pool->rx_buff[index].skb = NULL;
323 if (!dma_mapping_error(dev, dma_addr))
324 dma_unmap_single(dev, dma_addr, pool->buff_size,
325 DMA_FROM_DEVICE);
326
327 dev_kfree_skb_any(skb);
328 adapter->replenish_add_buff_failure++;
329 atomic_add(buffers_added, &pool->available);
330}
331
332static void replenish_pools(struct ibmvnic_adapter *adapter)
333{
334 int i;
335
336 if (adapter->migrated)
337 return;
338
339 adapter->replenish_task_cycles++;
340 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
341 i++) {
342 if (adapter->rx_pool[i].active)
343 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
344 }
345}
346
347static void free_rx_pool(struct ibmvnic_adapter *adapter,
348 struct ibmvnic_rx_pool *pool)
349{
350 int i;
351
352 kfree(pool->free_map);
353 pool->free_map = NULL;
354
355 if (!pool->rx_buff)
356 return;
357
358 for (i = 0; i < pool->size; i++) {
359 if (pool->rx_buff[i].skb) {
360 dev_kfree_skb_any(pool->rx_buff[i].skb);
361 pool->rx_buff[i].skb = NULL;
362 }
363 }
364 kfree(pool->rx_buff);
365 pool->rx_buff = NULL;
366}
367
368static int ibmvnic_open(struct net_device *netdev)
369{
370 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
371 struct device *dev = &adapter->vdev->dev;
372 struct ibmvnic_tx_pool *tx_pool;
373 union ibmvnic_crq crq;
374 int rxadd_subcrqs;
375 u64 *size_array;
376 int tx_subcrqs;
377 int i, j;
378
379 rxadd_subcrqs =
380 be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
381 tx_subcrqs =
382 be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
383 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
384 be32_to_cpu(adapter->login_rsp_buf->
385 off_rxadd_buff_size));
386 adapter->map_id = 1;
387 adapter->napi = kcalloc(adapter->req_rx_queues,
388 sizeof(struct napi_struct), GFP_KERNEL);
389 if (!adapter->napi)
390 goto alloc_napi_failed;
391 for (i = 0; i < adapter->req_rx_queues; i++) {
392 netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
393 NAPI_POLL_WEIGHT);
394 napi_enable(&adapter->napi[i]);
395 }
396 adapter->rx_pool =
397 kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
398
399 if (!adapter->rx_pool)
400 goto rx_pool_arr_alloc_failed;
401 send_map_query(adapter);
402 for (i = 0; i < rxadd_subcrqs; i++) {
403 init_rx_pool(adapter, &adapter->rx_pool[i],
404 IBMVNIC_BUFFS_PER_POOL, i,
405 be64_to_cpu(size_array[i]), 1);
406 if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
407 dev_err(dev, "Couldn't alloc rx pool\n");
408 goto rx_pool_alloc_failed;
409 }
410 }
411 adapter->tx_pool =
412 kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
413
414 if (!adapter->tx_pool)
415 goto tx_pool_arr_alloc_failed;
416 for (i = 0; i < tx_subcrqs; i++) {
417 tx_pool = &adapter->tx_pool[i];
418 tx_pool->tx_buff =
419 kcalloc(adapter->max_tx_entries_per_subcrq,
420 sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
421 if (!tx_pool->tx_buff)
422 goto tx_pool_alloc_failed;
423
424 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
425 adapter->max_tx_entries_per_subcrq *
426 adapter->req_mtu))
427 goto tx_ltb_alloc_failed;
428
429 tx_pool->free_map =
430 kcalloc(adapter->max_tx_entries_per_subcrq,
431 sizeof(int), GFP_KERNEL);
432 if (!tx_pool->free_map)
433 goto tx_fm_alloc_failed;
434
435 for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
436 tx_pool->free_map[j] = j;
437
438 tx_pool->consumer_index = 0;
439 tx_pool->producer_index = 0;
440 }
441 adapter->bounce_buffer_size =
442 ((netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1) * PAGE_SIZE;
443 adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
444 GFP_KERNEL);
445 if (!adapter->bounce_buffer)
446 goto bounce_alloc_failed;
447
448 adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
449 adapter->bounce_buffer_size,
450 DMA_TO_DEVICE);
451 if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
452 dev_err(dev, "Couldn't map tx bounce buffer\n");
453 goto bounce_map_failed;
454 }
455 replenish_pools(adapter);
456
457 /* We're ready to receive frames, enable the sub-crq interrupts and
458 * set the logical link state to up
459 */
460 for (i = 0; i < adapter->req_rx_queues; i++)
461 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
462
463 for (i = 0; i < adapter->req_tx_queues; i++)
464 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
465
466 memset(&crq, 0, sizeof(crq));
467 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
468 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
469 crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
470 ibmvnic_send_crq(adapter, &crq);
471
472 netif_tx_start_all_queues(netdev);
473
474 return 0;
475
476bounce_map_failed:
477 kfree(adapter->bounce_buffer);
478bounce_alloc_failed:
479 i = tx_subcrqs - 1;
480 kfree(adapter->tx_pool[i].free_map);
481tx_fm_alloc_failed:
482 free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
483tx_ltb_alloc_failed:
484 kfree(adapter->tx_pool[i].tx_buff);
485tx_pool_alloc_failed:
486 for (j = 0; j < i; j++) {
487 kfree(adapter->tx_pool[j].tx_buff);
488 free_long_term_buff(adapter,
489 &adapter->tx_pool[j].long_term_buff);
490 kfree(adapter->tx_pool[j].free_map);
491 }
492 kfree(adapter->tx_pool);
493 adapter->tx_pool = NULL;
494tx_pool_arr_alloc_failed:
495 i = rxadd_subcrqs;
496rx_pool_alloc_failed:
497 for (j = 0; j < i; j++) {
498 free_rx_pool(adapter, &adapter->rx_pool[j]);
499 free_long_term_buff(adapter,
500 &adapter->rx_pool[j].long_term_buff);
501 }
502 kfree(adapter->rx_pool);
503 adapter->rx_pool = NULL;
504rx_pool_arr_alloc_failed:
505 for (i = 0; i < adapter->req_rx_queues; i++)
506 napi_disable(&adapter->napi[i]);
507alloc_napi_failed:
508 return -ENOMEM;
509}
510
511static int ibmvnic_close(struct net_device *netdev)
512{
513 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
514 struct device *dev = &adapter->vdev->dev;
515 union ibmvnic_crq crq;
516 int i;
517
518 adapter->closing = true;
519
520 for (i = 0; i < adapter->req_rx_queues; i++)
521 napi_disable(&adapter->napi[i]);
522
523 netif_tx_stop_all_queues(netdev);
524
525 if (adapter->bounce_buffer) {
526 if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
527 dma_unmap_single(&adapter->vdev->dev,
528 adapter->bounce_buffer_dma,
529 adapter->bounce_buffer_size,
530 DMA_TO_DEVICE);
531 adapter->bounce_buffer_dma = DMA_ERROR_CODE;
532 }
533 kfree(adapter->bounce_buffer);
534 adapter->bounce_buffer = NULL;
535 }
536
537 memset(&crq, 0, sizeof(crq));
538 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
539 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
540 crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
541 ibmvnic_send_crq(adapter, &crq);
542
543 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
544 i++) {
545 kfree(adapter->tx_pool[i].tx_buff);
546 free_long_term_buff(adapter,
547 &adapter->tx_pool[i].long_term_buff);
548 kfree(adapter->tx_pool[i].free_map);
549 }
550 kfree(adapter->tx_pool);
551 adapter->tx_pool = NULL;
552
553 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
554 i++) {
555 free_rx_pool(adapter, &adapter->rx_pool[i]);
556 free_long_term_buff(adapter,
557 &adapter->rx_pool[i].long_term_buff);
558 }
559 kfree(adapter->rx_pool);
560 adapter->rx_pool = NULL;
561
562 adapter->closing = false;
563
564 return 0;
565}
566
567/**
568 * build_hdr_data - creates L2/L3/L4 header data buffer
569 * @hdr_field - bitfield determining needed headers
570 * @skb - socket buffer
571 * @hdr_len - array of header lengths
572 * @hdr_data - buffer to store the assembled header data
573 *
574 * Reads hdr_field to determine which headers are needed by firmware.
575 * Builds a buffer containing these headers. Saves individual header
576 * lengths and total buffer length to be used to build descriptors.
577 */
578static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
579 int *hdr_len, u8 *hdr_data)
580{
581 int len = 0;
582 u8 *hdr;
583
584 hdr_len[0] = sizeof(struct ethhdr);
585
586 if (skb->protocol == htons(ETH_P_IP)) {
587 hdr_len[1] = ip_hdr(skb)->ihl * 4;
588 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
589 hdr_len[2] = tcp_hdrlen(skb);
590 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
591 hdr_len[2] = sizeof(struct udphdr);
592 } else if (skb->protocol == htons(ETH_P_IPV6)) {
593 hdr_len[1] = sizeof(struct ipv6hdr);
594 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
595 hdr_len[2] = tcp_hdrlen(skb);
596 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
597 hdr_len[2] = sizeof(struct udphdr);
598 }
599
600 memset(hdr_data, 0, 120);
601 if ((hdr_field >> 6) & 1) {
602 hdr = skb_mac_header(skb);
603 memcpy(hdr_data, hdr, hdr_len[0]);
604 len += hdr_len[0];
605 }
606
607 if ((hdr_field >> 5) & 1) {
608 hdr = skb_network_header(skb);
609 memcpy(hdr_data + len, hdr, hdr_len[1]);
610 len += hdr_len[1];
611 }
612
613 if ((hdr_field >> 4) & 1) {
614 hdr = skb_transport_header(skb);
615 memcpy(hdr_data + len, hdr, hdr_len[2]);
616 len += hdr_len[2];
617 }
618 return len;
619}
620
621/**
622 * create_hdr_descs - create header and header extension descriptors
623 * @hdr_field - bitfield determining needed headers
624 * @hdr_data - buffer containing header data
625 * @len - length of data buffer
626 * @hdr_len - array of individual header lengths
627 * @scrq_arr - descriptor array
628 *
629 * Creates header and, if needed, header extension descriptors and
630 * places them in a descriptor array, scrq_arr
631 */
632
633static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
634 union sub_crq *scrq_arr)
635{
636 union sub_crq hdr_desc;
637 int tmp_len = len;
638 u8 *data, *cur;
639 int tmp;
640
641 while (tmp_len > 0) {
642 cur = hdr_data + len - tmp_len;
643
644 memset(&hdr_desc, 0, sizeof(hdr_desc));
645 if (cur != hdr_data) {
646 data = hdr_desc.hdr_ext.data;
647 tmp = tmp_len > 29 ? 29 : tmp_len;
648 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
649 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
650 hdr_desc.hdr_ext.len = tmp;
651 } else {
652 data = hdr_desc.hdr.data;
653 tmp = tmp_len > 24 ? 24 : tmp_len;
654 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
655 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
656 hdr_desc.hdr.len = tmp;
657 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
658 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
659 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
660 hdr_desc.hdr.flag = hdr_field << 1;
661 }
662 memcpy(data, cur, tmp);
663 tmp_len -= tmp;
664 *scrq_arr = hdr_desc;
665 scrq_arr++;
666 }
667}
668
669/**
670 * build_hdr_descs_arr - build a header descriptor array
671 * @txbuff - tx buffer containing the socket buffer and descriptor array
672 * @num_entries - pointer to the number of descriptors to be sent,
673 * updated with the number of header descriptors added
674 * @hdr_field - bit field determining which headers will be sent
675 *
676 * This function will build a TX descriptor array with applicable
677 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
678 */
679
680static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
681 int *num_entries, u8 hdr_field)
682{
683 int hdr_len[3] = {0, 0, 0};
684 int tot_len, len;
685 u8 *hdr_data = txbuff->hdr_data;
686
687 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
688 txbuff->hdr_data);
689 len = tot_len;
690 len -= 24;
691 if (len > 0)
692 *num_entries += len % 29 ? len / 29 + 1 : len / 29;
693 create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
694 txbuff->indir_arr + 1);
695}
696
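/* Transmit path. The skb's linear data is copied into a slot of the queue's
 * long term mapped TX buffer and described by a single v1 TX sub-CRQ
 * descriptor. When the firmware has asked for L2/L3/L4 headers, an indirect
 * descriptor array is built (see build_hdr_descs_arr()) and sent with
 * send_subcrq_indirect() instead of a single send_subcrq().
 */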
697static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
698{
699 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
700 int queue_num = skb_get_queue_mapping(skb);
701 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
702 struct device *dev = &adapter->vdev->dev;
703 struct ibmvnic_tx_buff *tx_buff = NULL;
704 struct ibmvnic_tx_pool *tx_pool;
705 unsigned int tx_send_failed = 0;
706 unsigned int tx_map_failed = 0;
707 unsigned int tx_dropped = 0;
708 unsigned int tx_packets = 0;
709 unsigned int tx_bytes = 0;
710 dma_addr_t data_dma_addr;
711 struct netdev_queue *txq;
712 bool used_bounce = false;
713 unsigned long lpar_rc;
714 union sub_crq tx_crq;
715 unsigned int offset;
716 int num_entries = 1;
717 unsigned char *dst;
718 u64 *handle_array;
719 int index = 0;
720 int ret = 0;
721
722 tx_pool = &adapter->tx_pool[queue_num];
723 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
724 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
725 be32_to_cpu(adapter->login_rsp_buf->
726 off_txsubm_subcrqs));
727 if (adapter->migrated) {
728 tx_send_failed++;
729 tx_dropped++;
730 ret = NETDEV_TX_BUSY;
731 goto out;
732 }
733
734 index = tx_pool->free_map[tx_pool->consumer_index];
735 offset = index * adapter->req_mtu;
736 dst = tx_pool->long_term_buff.buff + offset;
737 memset(dst, 0, adapter->req_mtu);
738 skb_copy_from_linear_data(skb, dst, skb->len);
739 data_dma_addr = tx_pool->long_term_buff.addr + offset;
740
741 tx_pool->consumer_index =
742 (tx_pool->consumer_index + 1) %
743 adapter->max_tx_entries_per_subcrq;
744
745 tx_buff = &tx_pool->tx_buff[index];
746 tx_buff->skb = skb;
747 tx_buff->data_dma[0] = data_dma_addr;
748 tx_buff->data_len[0] = skb->len;
749 tx_buff->index = index;
750 tx_buff->pool_index = queue_num;
751 tx_buff->last_frag = true;
752 tx_buff->used_bounce = used_bounce;
753
754 memset(&tx_crq, 0, sizeof(tx_crq));
755 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
756 tx_crq.v1.type = IBMVNIC_TX_DESC;
757 tx_crq.v1.n_crq_elem = 1;
758 tx_crq.v1.n_sge = 1;
759 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
760 tx_crq.v1.correlator = cpu_to_be32(index);
761 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
762 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
763 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
764
765 if (adapter->vlan_header_insertion) {
766 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
767 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
768 }
769
770 if (skb->protocol == htons(ETH_P_IP)) {
771 if (ip_hdr(skb)->version == 4)
772 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
773 else if (ip_hdr(skb)->version == 6)
774 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
775
776 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
777 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
778 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
779 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
780 }
781
782 if (skb->ip_summed == CHECKSUM_PARTIAL) {
783 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
784 hdrs += 2;
785 }
786 /* determine if l2/3/4 headers are sent to firmware */
787 if ((*hdrs >> 7) & 1 &&
788 (skb->protocol == htons(ETH_P_IP) ||
789 skb->protocol == htons(ETH_P_IPV6))) {
790 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
791 tx_crq.v1.n_crq_elem = num_entries;
792 tx_buff->indir_arr[0] = tx_crq;
793 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
794 sizeof(tx_buff->indir_arr),
795 DMA_TO_DEVICE);
796 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
797 if (!firmware_has_feature(FW_FEATURE_CMO))
798 dev_err(dev, "tx: unable to map descriptor array\n");
799 tx_map_failed++;
800 tx_dropped++;
801 ret = NETDEV_TX_BUSY;
802 goto out;
803 }
804 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
805 (u64)tx_buff->indir_dma,
806 (u64)num_entries);
807 } else {
808 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
809 &tx_crq);
810 }
811 if (lpar_rc != H_SUCCESS) {
812 dev_err(dev, "tx failed with code %ld\n", lpar_rc);
813
814 if (tx_pool->consumer_index == 0)
815 tx_pool->consumer_index =
816 adapter->max_tx_entries_per_subcrq - 1;
817 else
818 tx_pool->consumer_index--;
819
820 tx_send_failed++;
821 tx_dropped++;
822 ret = NETDEV_TX_BUSY;
823 goto out;
824 }
825 tx_packets++;
826 tx_bytes += skb->len;
827 txq->trans_start = jiffies;
828 ret = NETDEV_TX_OK;
829
830out:
831 netdev->stats.tx_dropped += tx_dropped;
832 netdev->stats.tx_bytes += tx_bytes;
833 netdev->stats.tx_packets += tx_packets;
834 adapter->tx_send_failed += tx_send_failed;
835 adapter->tx_map_failed += tx_map_failed;
836
837 return ret;
838}
839
840static void ibmvnic_set_multi(struct net_device *netdev)
841{
842 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
843 struct netdev_hw_addr *ha;
844 union ibmvnic_crq crq;
845
846 memset(&crq, 0, sizeof(crq));
847 crq.request_capability.first = IBMVNIC_CRQ_CMD;
848 crq.request_capability.cmd = REQUEST_CAPABILITY;
849
850 if (netdev->flags & IFF_PROMISC) {
851 if (!adapter->promisc_supported)
852 return;
853 } else {
854 if (netdev->flags & IFF_ALLMULTI) {
855 /* Accept all multicast */
856 memset(&crq, 0, sizeof(crq));
857 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
858 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
859 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
860 ibmvnic_send_crq(adapter, &crq);
861 } else if (netdev_mc_empty(netdev)) {
862 /* Reject all multicast */
863 memset(&crq, 0, sizeof(crq));
864 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
865 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
866 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
867 ibmvnic_send_crq(adapter, &crq);
868 } else {
869 /* Accept one or more multicast(s) */
870 netdev_for_each_mc_addr(ha, netdev) {
871 memset(&crq, 0, sizeof(crq));
872 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
873 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
874 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
875 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
876 ha->addr);
877 ibmvnic_send_crq(adapter, &crq);
878 }
879 }
880 }
881}
882
883static int ibmvnic_set_mac(struct net_device *netdev, void *p)
884{
885 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
886 struct sockaddr *addr = p;
887 union ibmvnic_crq crq;
888
889 if (!is_valid_ether_addr(addr->sa_data))
890 return -EADDRNOTAVAIL;
891
892 memset(&crq, 0, sizeof(crq));
893 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
894 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
895 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
896 ibmvnic_send_crq(adapter, &crq);
897 /* netdev->dev_addr is changed in handle_change_mac_rsp function */
898 return 0;
899}
900
901static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
902{
903 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
904
905 if (new_mtu > adapter->req_mtu || new_mtu < adapter->min_mtu)
906 return -EINVAL;
907
908 netdev->mtu = new_mtu;
909 return 0;
910}
911
912static void ibmvnic_tx_timeout(struct net_device *dev)
913{
914 struct ibmvnic_adapter *adapter = netdev_priv(dev);
915 int rc;
916
917 /* Adapter timed out, resetting it */
918 release_sub_crqs(adapter);
919 rc = ibmvnic_reset_crq(adapter);
920 if (rc)
921 dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
922 else
923 ibmvnic_send_crq_init(adapter);
924}
925
926static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
927 struct ibmvnic_rx_buff *rx_buff)
928{
929 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
930
931 rx_buff->skb = NULL;
932
933 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
934 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
935
936 atomic_dec(&pool->available);
937}
938
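/* NAPI poll handler. Drains up to @budget completions from this queue's RX
 * sub-CRQ, copies each frame out of the long term buffer into its skb and
 * passes it up with napi_gro_receive(), replenishes the pool, then re-arms
 * the interrupt; if more completions raced in, polling restarts.
 */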
939static int ibmvnic_poll(struct napi_struct *napi, int budget)
940{
941 struct net_device *netdev = napi->dev;
942 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
943 int scrq_num = (int)(napi - adapter->napi);
944 int frames_processed = 0;
945restart_poll:
946 while (frames_processed < budget) {
947 struct sk_buff *skb;
948 struct ibmvnic_rx_buff *rx_buff;
949 union sub_crq *next;
950 u32 length;
951 u16 offset;
952 u8 flags = 0;
953
954 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
955 break;
956 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
957 rx_buff =
958 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
959 rx_comp.correlator);
960 /* do error checking */
961 if (next->rx_comp.rc) {
962 netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
963 /* free the entry */
964 next->rx_comp.first = 0;
965 remove_buff_from_pool(adapter, rx_buff);
966 break;
967 }
968
969 length = be32_to_cpu(next->rx_comp.len);
970 offset = be16_to_cpu(next->rx_comp.off_frame_data);
971 flags = next->rx_comp.flags;
972 skb = rx_buff->skb;
973 skb_copy_to_linear_data(skb, rx_buff->data + offset,
974 length);
975 skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
976 /* free the entry */
977 next->rx_comp.first = 0;
978 remove_buff_from_pool(adapter, rx_buff);
979
980 skb_put(skb, length);
981 skb->protocol = eth_type_trans(skb, netdev);
982
983 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
984 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
985 skb->ip_summed = CHECKSUM_UNNECESSARY;
986 }
987
988 length = skb->len;
989 napi_gro_receive(napi, skb); /* send it up */
990 netdev->stats.rx_packets++;
991 netdev->stats.rx_bytes += length;
992 frames_processed++;
993 }
994 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
995
996 if (frames_processed < budget) {
997 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
998 napi_complete(napi);
999 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
1000 napi_reschedule(napi)) {
1001 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
1002 goto restart_poll;
1003 }
1004 }
1005 return frames_processed;
1006}
1007
1008#ifdef CONFIG_NET_POLL_CONTROLLER
1009static void ibmvnic_netpoll_controller(struct net_device *dev)
1010{
1011 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1012 int i;
1013
1014 replenish_pools(netdev_priv(dev));
1015 for (i = 0; i < adapter->req_rx_queues; i++)
1016 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
1017 adapter->rx_scrq[i]);
1018}
1019#endif
1020
1021static const struct net_device_ops ibmvnic_netdev_ops = {
1022 .ndo_open = ibmvnic_open,
1023 .ndo_stop = ibmvnic_close,
1024 .ndo_start_xmit = ibmvnic_xmit,
1025 .ndo_set_rx_mode = ibmvnic_set_multi,
1026 .ndo_set_mac_address = ibmvnic_set_mac,
1027 .ndo_validate_addr = eth_validate_addr,
1028 .ndo_change_mtu = ibmvnic_change_mtu,
1029 .ndo_tx_timeout = ibmvnic_tx_timeout,
1030#ifdef CONFIG_NET_POLL_CONTROLLER
1031 .ndo_poll_controller = ibmvnic_netpoll_controller,
1032#endif
1033};
1034
1035/* ethtool functions */
1036
1037static int ibmvnic_get_settings(struct net_device *netdev,
1038 struct ethtool_cmd *cmd)
1039{
1040 cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
1041 SUPPORTED_FIBRE);
1042 cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
1043 ADVERTISED_FIBRE);
1044 ethtool_cmd_speed_set(cmd, SPEED_1000);
1045 cmd->duplex = DUPLEX_FULL;
1046 cmd->port = PORT_FIBRE;
1047 cmd->phy_address = 0;
1048 cmd->transceiver = XCVR_INTERNAL;
1049 cmd->autoneg = AUTONEG_ENABLE;
1050 cmd->maxtxpkt = 0;
1051 cmd->maxrxpkt = 1;
1052 return 0;
1053}
1054
1055static void ibmvnic_get_drvinfo(struct net_device *dev,
1056 struct ethtool_drvinfo *info)
1057{
1058 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
1059 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
1060}
1061
1062static u32 ibmvnic_get_msglevel(struct net_device *netdev)
1063{
1064 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1065
1066 return adapter->msg_enable;
1067}
1068
1069static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
1070{
1071 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1072
1073 adapter->msg_enable = data;
1074}
1075
1076static u32 ibmvnic_get_link(struct net_device *netdev)
1077{
1078 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1079
1080 /* Don't need to send a query because we request a logical link up at
1081 * init and then we wait for link state indications
1082 */
1083 return adapter->logical_link_state;
1084}
1085
1086static void ibmvnic_get_ringparam(struct net_device *netdev,
1087 struct ethtool_ringparam *ring)
1088{
1089 ring->rx_max_pending = 0;
1090 ring->tx_max_pending = 0;
1091 ring->rx_mini_max_pending = 0;
1092 ring->rx_jumbo_max_pending = 0;
1093 ring->rx_pending = 0;
1094 ring->tx_pending = 0;
1095 ring->rx_mini_pending = 0;
1096 ring->rx_jumbo_pending = 0;
1097}
1098
1099static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1100{
1101 int i;
1102
1103 if (stringset != ETH_SS_STATS)
1104 return;
1105
1106 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
1107 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
1108}
1109
1110static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
1111{
1112 switch (sset) {
1113 case ETH_SS_STATS:
1114 return ARRAY_SIZE(ibmvnic_stats);
1115 default:
1116 return -EOPNOTSUPP;
1117 }
1118}
1119
1120static void ibmvnic_get_ethtool_stats(struct net_device *dev,
1121 struct ethtool_stats *stats, u64 *data)
1122{
1123 struct ibmvnic_adapter *adapter = netdev_priv(dev);
1124 union ibmvnic_crq crq;
1125 int i;
1126
1127 memset(&crq, 0, sizeof(crq));
1128 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
1129 crq.request_statistics.cmd = REQUEST_STATISTICS;
1130 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
1131 crq.request_statistics.len =
1132 cpu_to_be32(sizeof(struct ibmvnic_statistics));
1133 init_completion(&adapter->stats_done);
1134 ibmvnic_send_crq(adapter, &crq);
1135
1136 /* Wait for data to be written */
1137 wait_for_completion(&adapter->stats_done);
1138
1139 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
1140 data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
1141}
1142
1143static const struct ethtool_ops ibmvnic_ethtool_ops = {
1144 .get_settings = ibmvnic_get_settings,
1145 .get_drvinfo = ibmvnic_get_drvinfo,
1146 .get_msglevel = ibmvnic_get_msglevel,
1147 .set_msglevel = ibmvnic_set_msglevel,
1148 .get_link = ibmvnic_get_link,
1149 .get_ringparam = ibmvnic_get_ringparam,
1150 .get_strings = ibmvnic_get_strings,
1151 .get_sset_count = ibmvnic_get_sset_count,
1152 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
1153};
1154
1155/* Routines for managing CRQs/sCRQs */
1156
1157static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
1158 struct ibmvnic_sub_crq_queue *scrq)
1159{
1160 struct device *dev = &adapter->vdev->dev;
1161 long rc;
1162
1163 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
1164
1165 /* Close the sub-crqs */
1166 do {
1167 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
1168 adapter->vdev->unit_address,
1169 scrq->crq_num);
1170 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
1171
1172 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1173 DMA_BIDIRECTIONAL);
1174 free_pages((unsigned long)scrq->msgs, 2);
1175 kfree(scrq);
1176}
1177
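/* Allocate and register a single sub-CRQ: four pages of message slots are
 * DMA-mapped and registered through H_REG_SUB_CRQ, which returns the queue
 * number and a hardware interrupt source that is then mapped to a Linux
 * irq.
 */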
1178static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
1179 *adapter)
1180{
1181 struct device *dev = &adapter->vdev->dev;
1182 struct ibmvnic_sub_crq_queue *scrq;
1183 int rc;
1184
1185 scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
1186 if (!scrq)
1187 return NULL;
1188
1189 scrq->msgs =
1190 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
1191 if (!scrq->msgs) {
1192 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
1193 goto zero_page_failed;
1194 }
1195
1196 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
1197 DMA_BIDIRECTIONAL);
1198 if (dma_mapping_error(dev, scrq->msg_token)) {
1199 dev_warn(dev, "Couldn't map crq queue messages page\n");
1200 goto map_failed;
1201 }
1202
1203 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
1204 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
1205
1206 if (rc == H_RESOURCE)
1207 rc = ibmvnic_reset_crq(adapter);
1208
1209 if (rc == H_CLOSED) {
1210 dev_warn(dev, "Partner adapter not ready, waiting.\n");
1211 } else if (rc) {
1212 dev_warn(dev, "Error %d registering sub-crq\n", rc);
1213 goto reg_failed;
1214 }
1215
1216 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
1217 if (scrq->irq == NO_IRQ) {
1218 dev_err(dev, "Error mapping irq\n");
1219 goto map_irq_failed;
1220 }
1221
1222 scrq->adapter = adapter;
1223 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
1224 scrq->cur = 0;
1225 scrq->rx_skb_top = NULL;
1226 spin_lock_init(&scrq->lock);
1227
1228 netdev_dbg(adapter->netdev,
1229 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
1230 scrq->crq_num, scrq->hw_irq, scrq->irq);
1231
1232 return scrq;
1233
1234map_irq_failed:
1235 do {
1236 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
1237 adapter->vdev->unit_address,
1238 scrq->crq_num);
1239 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
1240reg_failed:
1241 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
1242 DMA_BIDIRECTIONAL);
1243map_failed:
1244 free_pages((unsigned long)scrq->msgs, 2);
1245zero_page_failed:
1246 kfree(scrq);
1247
1248 return NULL;
1249}
1250
1251static void release_sub_crqs(struct ibmvnic_adapter *adapter)
1252{
1253 int i;
1254
1255 if (adapter->tx_scrq) {
1256 for (i = 0; i < adapter->req_tx_queues; i++)
1257 if (adapter->tx_scrq[i]) {
1258 free_irq(adapter->tx_scrq[i]->irq,
1259 adapter->tx_scrq[i]);
1260 release_sub_crq_queue(adapter,
1261 adapter->tx_scrq[i]);
1262 }
1263 adapter->tx_scrq = NULL;
1264 }
1265
1266 if (adapter->rx_scrq) {
1267 for (i = 0; i < adapter->req_rx_queues; i++)
1268 if (adapter->rx_scrq[i]) {
1269 free_irq(adapter->rx_scrq[i]->irq,
1270 adapter->rx_scrq[i]);
1271 release_sub_crq_queue(adapter,
1272 adapter->rx_scrq[i]);
1273 }
1274 adapter->rx_scrq = NULL;
1275 }
1276
1277 adapter->requested_caps = 0;
1278}
1279
1280static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
1281 struct ibmvnic_sub_crq_queue *scrq)
1282{
1283 struct device *dev = &adapter->vdev->dev;
1284 unsigned long rc;
1285
1286 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1287 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1288 if (rc)
1289 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
1290 scrq->hw_irq, rc);
1291 return rc;
1292}
1293
1294static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
1295 struct ibmvnic_sub_crq_queue *scrq)
1296{
1297 struct device *dev = &adapter->vdev->dev;
1298 unsigned long rc;
1299
1300 if (scrq->hw_irq > 0x100000000ULL) {
1301 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
1302 return 1;
1303 }
1304
1305 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1306 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
1307 if (rc)
1308 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
1309 scrq->hw_irq, rc);
1310 return rc;
1311}
1312
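/* Reap TX completions from a sub-CRQ: unmap the indirect descriptor array
 * if one was used, free the skb on the last fragment and return the buffer
 * index to the pool's free map so ibmvnic_xmit() can reuse it.
 */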
1313static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
1314 struct ibmvnic_sub_crq_queue *scrq)
1315{
1316 struct device *dev = &adapter->vdev->dev;
1317 struct ibmvnic_tx_buff *txbuff;
1318 union sub_crq *next;
1319 int index;
1320 int i, j;
1321 u8 first;
1322
1323restart_loop:
1324 while (pending_scrq(adapter, scrq)) {
1325 unsigned int pool = scrq->pool_index;
1326
1327 next = ibmvnic_next_scrq(adapter, scrq);
1328 for (i = 0; i < next->tx_comp.num_comps; i++) {
1329 if (next->tx_comp.rcs[i]) {
1330 dev_err(dev, "tx error %x\n",
1331 next->tx_comp.rcs[i]);
1332 continue;
1333 }
1334 index = be32_to_cpu(next->tx_comp.correlators[i]);
1335 txbuff = &adapter->tx_pool[pool].tx_buff[index];
1336
1337 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
1338 if (!txbuff->data_dma[j])
1339 continue;
1340
1341 txbuff->data_dma[j] = 0;
1342 txbuff->used_bounce = false;
1343 }
1344 /* if sub_crq was sent indirectly */
1345 first = txbuff->indir_arr[0].generic.first;
1346 if (first == IBMVNIC_CRQ_CMD) {
1347 dma_unmap_single(dev, txbuff->indir_dma,
1348 sizeof(txbuff->indir_arr),
1349 DMA_TO_DEVICE);
1350 }
1351
1352 if (txbuff->last_frag)
1353 dev_kfree_skb_any(txbuff->skb);
1354
1355 adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
1356 producer_index] = index;
1357 adapter->tx_pool[pool].producer_index =
1358 (adapter->tx_pool[pool].producer_index + 1) %
1359 adapter->max_tx_entries_per_subcrq;
1360 }
1361 /* remove tx_comp scrq*/
1362 next->tx_comp.first = 0;
1363 }
1364
1365 enable_scrq_irq(adapter, scrq);
1366
1367 if (pending_scrq(adapter, scrq)) {
1368 disable_scrq_irq(adapter, scrq);
1369 goto restart_loop;
1370 }
1371
1372 return 0;
1373}
1374
1375static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
1376{
1377 struct ibmvnic_sub_crq_queue *scrq = instance;
1378 struct ibmvnic_adapter *adapter = scrq->adapter;
1379
1380 disable_scrq_irq(adapter, scrq);
1381 ibmvnic_complete_tx(adapter, scrq);
1382
1383 return IRQ_HANDLED;
1384}
1385
1386static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
1387{
1388 struct ibmvnic_sub_crq_queue *scrq = instance;
1389 struct ibmvnic_adapter *adapter = scrq->adapter;
1390
1391 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
1392 disable_scrq_irq(adapter, scrq);
1393 __napi_schedule(&adapter->napi[scrq->scrq_num]);
1394 }
1395
1396 return IRQ_HANDLED;
1397}
1398
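/* Negotiate and create the sub-CRQs. On the first pass the requested entry
 * counts are clamped to what fits in a four-page queue and the queue counts
 * start at the queried maxima; if fewer queues could be registered, the
 * requests are trimmed round-robin before the final REQUEST_CAPABILITY
 * CRQs are sent to the server.
 */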
1399static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1400{
1401 struct device *dev = &adapter->vdev->dev;
1402 struct ibmvnic_sub_crq_queue **allqueues;
1403 int registered_queues = 0;
1404 union ibmvnic_crq crq;
1405 int total_queues;
1406 int more = 0;
1407 int i, j;
1408 int rc;
1409
1410 if (!retry) {
1411 /* Sub-CRQ entries are 32 byte long */
1412 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
1413
1414 if (adapter->min_tx_entries_per_subcrq > entries_page ||
1415 adapter->min_rx_add_entries_per_subcrq > entries_page) {
1416 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
1417 goto allqueues_failed;
1418 }
1419
1420 /* Get the minimum between the queried max and the entries
1421 * that fit in our PAGE_SIZE
1422 */
1423 adapter->req_tx_entries_per_subcrq =
1424 adapter->max_tx_entries_per_subcrq > entries_page ?
1425 entries_page : adapter->max_tx_entries_per_subcrq;
1426 adapter->req_rx_add_entries_per_subcrq =
1427 adapter->max_rx_add_entries_per_subcrq > entries_page ?
1428 entries_page : adapter->max_rx_add_entries_per_subcrq;
1429
1430 /* Choosing the maximum number of queues supported by firmware*/
1431 adapter->req_tx_queues = adapter->max_tx_queues;
1432 adapter->req_rx_queues = adapter->max_rx_queues;
1433 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
1434
1435 adapter->req_mtu = adapter->max_mtu;
1436 }
1437
1438 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
1439
1440 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
1441 if (!allqueues)
1442 goto allqueues_failed;
1443
1444 for (i = 0; i < total_queues; i++) {
1445 allqueues[i] = init_sub_crq_queue(adapter);
1446 if (!allqueues[i]) {
1447 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
1448 break;
1449 }
1450 registered_queues++;
1451 }
1452
1453 /* Make sure we were able to register the minimum number of queues */
1454 if (registered_queues <
1455 adapter->min_tx_queues + adapter->min_rx_queues) {
1456 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
1457 goto tx_failed;
1458 }
1459
1460 /* Distribute the failed allocated queues*/
1461 for (i = 0; i < total_queues - registered_queues + more ; i++) {
1462 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
1463 switch (i % 3) {
1464 case 0:
1465 if (adapter->req_rx_queues > adapter->min_rx_queues)
1466 adapter->req_rx_queues--;
1467 else
1468 more++;
1469 break;
1470 case 1:
1471 if (adapter->req_tx_queues > adapter->min_tx_queues)
1472 adapter->req_tx_queues--;
1473 else
1474 more++;
1475 break;
1476 }
1477 }
1478
1479 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
1480 sizeof(*adapter->tx_scrq), GFP_ATOMIC);
1481 if (!adapter->tx_scrq)
1482 goto tx_failed;
1483
1484 for (i = 0; i < adapter->req_tx_queues; i++) {
1485 adapter->tx_scrq[i] = allqueues[i];
1486 adapter->tx_scrq[i]->pool_index = i;
1487 rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
1488 0, "ibmvnic_tx", adapter->tx_scrq[i]);
1489 if (rc) {
1490 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
1491 adapter->tx_scrq[i]->irq, rc);
1492 goto req_tx_irq_failed;
1493 }
1494 }
1495
1496 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
1497 sizeof(*adapter->rx_scrq), GFP_ATOMIC);
1498 if (!adapter->rx_scrq)
1499 goto rx_failed;
1500
1501 for (i = 0; i < adapter->req_rx_queues; i++) {
1502 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
1503 adapter->rx_scrq[i]->scrq_num = i;
1504 rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
1505 0, "ibmvnic_rx", adapter->rx_scrq[i]);
1506 if (rc) {
1507 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
1508 adapter->rx_scrq[i]->irq, rc);
1509 goto req_rx_irq_failed;
1510 }
1511 }
1512
1513 memset(&crq, 0, sizeof(crq));
1514 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1515 crq.request_capability.cmd = REQUEST_CAPABILITY;
1516
1517 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
1518 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
1519 ibmvnic_send_crq(adapter, &crq);
1520
1521 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
1522 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
1523 ibmvnic_send_crq(adapter, &crq);
1524
1525 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
1526 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
1527 ibmvnic_send_crq(adapter, &crq);
1528
1529 crq.request_capability.capability =
1530 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
1531 crq.request_capability.number =
1532 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
1533 ibmvnic_send_crq(adapter, &crq);
1534
1535 crq.request_capability.capability =
1536 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
1537 crq.request_capability.number =
1538 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
1539 ibmvnic_send_crq(adapter, &crq);
1540
1541 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
1542 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
1543 ibmvnic_send_crq(adapter, &crq);
1544
1545 if (adapter->netdev->flags & IFF_PROMISC) {
1546 if (adapter->promisc_supported) {
1547 crq.request_capability.capability =
1548 cpu_to_be16(PROMISC_REQUESTED);
1549 crq.request_capability.number = cpu_to_be64(1);
1550 ibmvnic_send_crq(adapter, &crq);
1551 }
1552 } else {
1553 crq.request_capability.capability =
1554 cpu_to_be16(PROMISC_REQUESTED);
1555 crq.request_capability.number = cpu_to_be64(0);
1556 ibmvnic_send_crq(adapter, &crq);
1557 }
1558
1559 kfree(allqueues);
1560
1561 return;
1562
1563req_rx_irq_failed:
1564 for (j = 0; j < i; j++)
1565 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
1566 i = adapter->req_tx_queues;
1567req_tx_irq_failed:
1568 for (j = 0; j < i; j++)
1569 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
1570 kfree(adapter->rx_scrq);
1571 adapter->rx_scrq = NULL;
1572rx_failed:
1573 kfree(adapter->tx_scrq);
1574 adapter->tx_scrq = NULL;
1575tx_failed:
1576 for (i = 0; i < registered_queues; i++)
1577 release_sub_crq_queue(adapter, allqueues[i]);
1578 kfree(allqueues);
1579allqueues_failed:
1580 ibmvnic_remove(adapter->vdev);
1581}
1582
1583static int pending_scrq(struct ibmvnic_adapter *adapter,
1584 struct ibmvnic_sub_crq_queue *scrq)
1585{
1586 union sub_crq *entry = &scrq->msgs[scrq->cur];
1587
1588 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
1589 return 1;
1590 else
1591 return 0;
1592}
1593
1594static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
1595 struct ibmvnic_sub_crq_queue *scrq)
1596{
1597 union sub_crq *entry;
1598 unsigned long flags;
1599
1600 spin_lock_irqsave(&scrq->lock, flags);
1601 entry = &scrq->msgs[scrq->cur];
1602 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1603 if (++scrq->cur == scrq->size)
1604 scrq->cur = 0;
1605 } else {
1606 entry = NULL;
1607 }
1608 spin_unlock_irqrestore(&scrq->lock, flags);
1609
1610 return entry;
1611}
1612
1613static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
1614{
1615 struct ibmvnic_crq_queue *queue = &adapter->crq;
1616 union ibmvnic_crq *crq;
1617
1618 crq = &queue->msgs[queue->cur];
1619 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
1620 if (++queue->cur == queue->size)
1621 queue->cur = 0;
1622 } else {
1623 crq = NULL;
1624 }
1625
1626 return crq;
1627}
1628
1629static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
1630 union sub_crq *sub_crq)
1631{
1632 unsigned int ua = adapter->vdev->unit_address;
1633 struct device *dev = &adapter->vdev->dev;
1634 u64 *u64_crq = (u64 *)sub_crq;
1635 int rc;
1636
1637 netdev_dbg(adapter->netdev,
1638 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
1639 (unsigned long int)cpu_to_be64(remote_handle),
1640 (unsigned long int)cpu_to_be64(u64_crq[0]),
1641 (unsigned long int)cpu_to_be64(u64_crq[1]),
1642 (unsigned long int)cpu_to_be64(u64_crq[2]),
1643 (unsigned long int)cpu_to_be64(u64_crq[3]));
1644
1645 /* Make sure the hypervisor sees the complete request */
1646 mb();
1647
1648 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
1649 cpu_to_be64(remote_handle),
1650 cpu_to_be64(u64_crq[0]),
1651 cpu_to_be64(u64_crq[1]),
1652 cpu_to_be64(u64_crq[2]),
1653 cpu_to_be64(u64_crq[3]));
1654
1655 if (rc) {
1656 if (rc == H_CLOSED)
1657 dev_warn(dev, "CRQ Queue closed\n");
1658 dev_err(dev, "Send error (rc=%d)\n", rc);
1659 }
1660
1661 return rc;
1662}
1663
1664static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
1665 u64 remote_handle, u64 ioba, u64 num_entries)
1666{
1667 unsigned int ua = adapter->vdev->unit_address;
1668 struct device *dev = &adapter->vdev->dev;
1669 int rc;
1670
1671 /* Make sure the hypervisor sees the complete request */
1672 mb();
1673 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
1674 cpu_to_be64(remote_handle),
1675 ioba, num_entries);
1676
1677 if (rc) {
1678 if (rc == H_CLOSED)
1679 dev_warn(dev, "CRQ Queue closed\n");
1680 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
1681 }
1682
1683 return rc;
1684}
1685
1686static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
1687 union ibmvnic_crq *crq)
1688{
1689 unsigned int ua = adapter->vdev->unit_address;
1690 struct device *dev = &adapter->vdev->dev;
1691 u64 *u64_crq = (u64 *)crq;
1692 int rc;
1693
1694 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
1695 (unsigned long int)cpu_to_be64(u64_crq[0]),
1696 (unsigned long int)cpu_to_be64(u64_crq[1]));
1697
1698 /* Make sure the hypervisor sees the complete request */
1699 mb();
1700
1701 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
1702 cpu_to_be64(u64_crq[0]),
1703 cpu_to_be64(u64_crq[1]));
1704
1705 if (rc) {
1706 if (rc == H_CLOSED)
1707 dev_warn(dev, "CRQ Queue closed\n");
1708 dev_warn(dev, "Send error (rc=%d)\n", rc);
1709 }
1710
1711 return rc;
1712}
1713
1714static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
1715{
1716 union ibmvnic_crq crq;
1717
1718 memset(&crq, 0, sizeof(crq));
1719 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1720 crq.generic.cmd = IBMVNIC_CRQ_INIT;
1721 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
1722
1723 return ibmvnic_send_crq(adapter, &crq);
1724}
1725
1726static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
1727{
1728 union ibmvnic_crq crq;
1729
1730 memset(&crq, 0, sizeof(crq));
1731 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
1732 crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
1733 netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");
1734
1735 return ibmvnic_send_crq(adapter, &crq);
1736}
1737
1738static int send_version_xchg(struct ibmvnic_adapter *adapter)
1739{
1740 union ibmvnic_crq crq;
1741
1742 memset(&crq, 0, sizeof(crq));
1743 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
1744 crq.version_exchange.cmd = VERSION_EXCHANGE;
1745 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
1746
1747 return ibmvnic_send_crq(adapter, &crq);
1748}
1749
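/* Build and send the LOGIN request. The login buffer lists the CRQ
 * numbers of all tx/rx sub-CRQs and the DMA address of the response
 * buffer the server should fill in; the response is processed in
 * handle_login_rsp().
 */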
1750static void send_login(struct ibmvnic_adapter *adapter)
1751{
1752 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
1753 struct ibmvnic_login_buffer *login_buffer;
1754 struct ibmvnic_inflight_cmd *inflight_cmd;
1755 struct device *dev = &adapter->vdev->dev;
1756 dma_addr_t rsp_buffer_token;
1757 dma_addr_t buffer_token;
1758 size_t rsp_buffer_size;
1759 union ibmvnic_crq crq;
1760 unsigned long flags;
1761 size_t buffer_size;
1762 __be64 *tx_list_p;
1763 __be64 *rx_list_p;
1764 int i;
1765
1766 buffer_size =
1767 sizeof(struct ibmvnic_login_buffer) +
1768 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);
1769
1770 login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
1771 if (!login_buffer)
1772 goto buf_alloc_failed;
1773
1774 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
1775 DMA_TO_DEVICE);
1776 if (dma_mapping_error(dev, buffer_token)) {
1777 dev_err(dev, "Couldn't map login buffer\n");
1778 goto buf_map_failed;
1779 }
1780
1781	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
1782 sizeof(u64) * adapter->req_tx_queues +
1783 sizeof(u64) * adapter->req_rx_queues +
1784 sizeof(u64) * adapter->req_rx_queues +
1785 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
1786
1787 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
1788 if (!login_rsp_buffer)
1789 goto buf_rsp_alloc_failed;
1790
1791 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
1792 rsp_buffer_size, DMA_FROM_DEVICE);
1793 if (dma_mapping_error(dev, rsp_buffer_token)) {
1794 dev_err(dev, "Couldn't map login rsp buffer\n");
1795 goto buf_rsp_map_failed;
1796 }
1797 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
1798 if (!inflight_cmd) {
1799 dev_err(dev, "Couldn't allocate inflight_cmd\n");
1800 goto inflight_alloc_failed;
1801 }
1802 adapter->login_buf = login_buffer;
1803 adapter->login_buf_token = buffer_token;
1804 adapter->login_buf_sz = buffer_size;
1805 adapter->login_rsp_buf = login_rsp_buffer;
1806 adapter->login_rsp_buf_token = rsp_buffer_token;
1807 adapter->login_rsp_buf_sz = rsp_buffer_size;
1808
1809 login_buffer->len = cpu_to_be32(buffer_size);
1810 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
1811 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
1812 login_buffer->off_txcomp_subcrqs =
1813 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
1814 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
1815 login_buffer->off_rxcomp_subcrqs =
1816 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
1817 sizeof(u64) * adapter->req_tx_queues);
1818 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
1819 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
1820
1821 tx_list_p = (__be64 *)((char *)login_buffer +
1822 sizeof(struct ibmvnic_login_buffer));
1823 rx_list_p = (__be64 *)((char *)login_buffer +
1824 sizeof(struct ibmvnic_login_buffer) +
1825 sizeof(u64) * adapter->req_tx_queues);
1826
1827 for (i = 0; i < adapter->req_tx_queues; i++) {
1828 if (adapter->tx_scrq[i]) {
1829 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
1830 crq_num);
1831 }
1832 }
1833
1834 for (i = 0; i < adapter->req_rx_queues; i++) {
1835 if (adapter->rx_scrq[i]) {
1836 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
1837 crq_num);
1838 }
1839 }
1840
1841 netdev_dbg(adapter->netdev, "Login Buffer:\n");
1842 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
1843 netdev_dbg(adapter->netdev, "%016lx\n",
1844 ((unsigned long int *)(adapter->login_buf))[i]);
1845 }
1846
1847 memset(&crq, 0, sizeof(crq));
1848 crq.login.first = IBMVNIC_CRQ_CMD;
1849 crq.login.cmd = LOGIN;
1850 crq.login.ioba = cpu_to_be32(buffer_token);
1851 crq.login.len = cpu_to_be32(buffer_size);
1852
1853 memcpy(&inflight_cmd->crq, &crq, sizeof(crq));
1854
1855 spin_lock_irqsave(&adapter->inflight_lock, flags);
1856 list_add_tail(&inflight_cmd->list, &adapter->inflight);
1857 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
1858
1859 ibmvnic_send_crq(adapter, &crq);
1860
1861 return;
1862
1863inflight_alloc_failed:
1864 dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
1865 DMA_FROM_DEVICE);
1866buf_rsp_map_failed:
1867 kfree(login_rsp_buffer);
1868buf_rsp_alloc_failed:
1869 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
1870buf_map_failed:
1871 kfree(login_buffer);
1872buf_alloc_failed:
1873 return;
1874}
1875
1876static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
1877 u32 len, u8 map_id)
1878{
1879 union ibmvnic_crq crq;
1880
1881 memset(&crq, 0, sizeof(crq));
1882 crq.request_map.first = IBMVNIC_CRQ_CMD;
1883 crq.request_map.cmd = REQUEST_MAP;
1884 crq.request_map.map_id = map_id;
1885 crq.request_map.ioba = cpu_to_be32(addr);
1886 crq.request_map.len = cpu_to_be32(len);
1887 ibmvnic_send_crq(adapter, &crq);
1888}
1889
1890static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
1891{
1892 union ibmvnic_crq crq;
1893
1894 memset(&crq, 0, sizeof(crq));
1895 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
1896 crq.request_unmap.cmd = REQUEST_UNMAP;
1897 crq.request_unmap.map_id = map_id;
1898 ibmvnic_send_crq(adapter, &crq);
1899}
1900
1901static void send_map_query(struct ibmvnic_adapter *adapter)
1902{
1903 union ibmvnic_crq crq;
1904
1905 memset(&crq, 0, sizeof(crq));
1906 crq.query_map.first = IBMVNIC_CRQ_CMD;
1907 crq.query_map.cmd = QUERY_MAP;
1908 ibmvnic_send_crq(adapter, &crq);
1909}
1910
1911/* Send a series of CRQs requesting various capabilities of the VNIC server */
1912static void send_cap_queries(struct ibmvnic_adapter *adapter)
1913{
1914 union ibmvnic_crq crq;
1915
1916 atomic_set(&adapter->running_cap_queries, 0);
1917 memset(&crq, 0, sizeof(crq));
1918 crq.query_capability.first = IBMVNIC_CRQ_CMD;
1919 crq.query_capability.cmd = QUERY_CAPABILITY;
1920
1921 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
1922 atomic_inc(&adapter->running_cap_queries);
1923 ibmvnic_send_crq(adapter, &crq);
1924
1925 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
1926 atomic_inc(&adapter->running_cap_queries);
1927 ibmvnic_send_crq(adapter, &crq);
1928
1929 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
1930 atomic_inc(&adapter->running_cap_queries);
1931 ibmvnic_send_crq(adapter, &crq);
1932
1933 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
1934 atomic_inc(&adapter->running_cap_queries);
1935 ibmvnic_send_crq(adapter, &crq);
1936
1937 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
1938 atomic_inc(&adapter->running_cap_queries);
1939 ibmvnic_send_crq(adapter, &crq);
1940
1941 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
1942 atomic_inc(&adapter->running_cap_queries);
1943 ibmvnic_send_crq(adapter, &crq);
1944
1945 crq.query_capability.capability =
1946 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
1947 atomic_inc(&adapter->running_cap_queries);
1948 ibmvnic_send_crq(adapter, &crq);
1949
1950 crq.query_capability.capability =
1951 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
1952 atomic_inc(&adapter->running_cap_queries);
1953 ibmvnic_send_crq(adapter, &crq);
1954
1955 crq.query_capability.capability =
1956 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
1957 atomic_inc(&adapter->running_cap_queries);
1958 ibmvnic_send_crq(adapter, &crq);
1959
1960 crq.query_capability.capability =
1961 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
1962 atomic_inc(&adapter->running_cap_queries);
1963 ibmvnic_send_crq(adapter, &crq);
1964
1965 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
1966 atomic_inc(&adapter->running_cap_queries);
1967 ibmvnic_send_crq(adapter, &crq);
1968
1969 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
1970 atomic_inc(&adapter->running_cap_queries);
1971 ibmvnic_send_crq(adapter, &crq);
1972
1973 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
1974 atomic_inc(&adapter->running_cap_queries);
1975 ibmvnic_send_crq(adapter, &crq);
1976
1977 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
1978 atomic_inc(&adapter->running_cap_queries);
1979 ibmvnic_send_crq(adapter, &crq);
1980
1981 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
1982 atomic_inc(&adapter->running_cap_queries);
1983 ibmvnic_send_crq(adapter, &crq);
1984
1985 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
1986 atomic_inc(&adapter->running_cap_queries);
1987 ibmvnic_send_crq(adapter, &crq);
1988
1989 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
1990 atomic_inc(&adapter->running_cap_queries);
1991 ibmvnic_send_crq(adapter, &crq);
1992
1993 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
1994 atomic_inc(&adapter->running_cap_queries);
1995 ibmvnic_send_crq(adapter, &crq);
1996
1997 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
1998 atomic_inc(&adapter->running_cap_queries);
1999 ibmvnic_send_crq(adapter, &crq);
2000
2001 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
2002 atomic_inc(&adapter->running_cap_queries);
2003 ibmvnic_send_crq(adapter, &crq);
2004
2005 crq.query_capability.capability =
2006 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
2007 atomic_inc(&adapter->running_cap_queries);
2008 ibmvnic_send_crq(adapter, &crq);
2009
2010 crq.query_capability.capability =
2011 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
2012 atomic_inc(&adapter->running_cap_queries);
2013 ibmvnic_send_crq(adapter, &crq);
2014
2015 crq.query_capability.capability =
2016 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
2017 atomic_inc(&adapter->running_cap_queries);
2018 ibmvnic_send_crq(adapter, &crq);
2019
2020 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
2021 atomic_inc(&adapter->running_cap_queries);
2022 ibmvnic_send_crq(adapter, &crq);
2023}
2024
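/* Process the server's IP offload capabilities, advertise the
 * corresponding checksum features on the net device, and send the
 * CONTROL_IP_OFFLOAD request enabling the selected offloads.
 */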
2025static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
2026{
2027 struct device *dev = &adapter->vdev->dev;
2028 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
2029 union ibmvnic_crq crq;
2030 int i;
2031
2032 dma_unmap_single(dev, adapter->ip_offload_tok,
2033 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
2034
2035 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
2036 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
2037 netdev_dbg(adapter->netdev, "%016lx\n",
2038 ((unsigned long int *)(buf))[i]);
2039
2040 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
2041 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
2042 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
2043 buf->tcp_ipv4_chksum);
2044 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
2045 buf->tcp_ipv6_chksum);
2046 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
2047 buf->udp_ipv4_chksum);
2048 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
2049 buf->udp_ipv6_chksum);
2050 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
2051 buf->large_tx_ipv4);
2052 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
2053 buf->large_tx_ipv6);
2054 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
2055 buf->large_rx_ipv4);
2056 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
2057 buf->large_rx_ipv6);
2058 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
2059 buf->max_ipv4_header_size);
2060 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
2061 buf->max_ipv6_header_size);
2062 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
2063 buf->max_tcp_header_size);
2064 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
2065 buf->max_udp_header_size);
2066 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
2067 buf->max_large_tx_size);
2068 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
2069 buf->max_large_rx_size);
2070 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
2071 buf->ipv6_extension_header);
2072 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
2073 buf->tcp_pseudosum_req);
2074 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
2075 buf->num_ipv6_ext_headers);
2076 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
2077 buf->off_ipv6_ext_headers);
2078
2079 adapter->ip_offload_ctrl_tok =
2080 dma_map_single(dev, &adapter->ip_offload_ctrl,
2081 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
2082
2083 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
2084 dev_err(dev, "Couldn't map ip offload control buffer\n");
2085 return;
2086 }
2087
2088 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
2089 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
2090 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
2091 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
2092 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
2093
2094 /* large_tx/rx disabled for now, additional features needed */
2095 adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
2096 adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
2097 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
2098 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
2099
2100 adapter->netdev->features = NETIF_F_GSO;
2101
2102 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
2103 adapter->netdev->features |= NETIF_F_IP_CSUM;
2104
2105 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
2106 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
2107
2108	if ((adapter->netdev->features &
2109 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
2110 adapter->netdev->features |= NETIF_F_RXCSUM;
2111
2112	memset(&crq, 0, sizeof(crq));
2113 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
2114 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
2115 crq.control_ip_offload.len =
2116 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
2117 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
2118 ibmvnic_send_crq(adapter, &crq);
2119}
2120
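/* Response to REQUEST_ERROR_INFO: locate the matching buffer on the
 * adapter's error list, dump its contents, then unmap and free it.
 */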
2121static void handle_error_info_rsp(union ibmvnic_crq *crq,
2122 struct ibmvnic_adapter *adapter)
2123{
2124 struct device *dev = &adapter->vdev->dev;
2125	struct ibmvnic_error_buff *error_buff, *tmp;
2126	unsigned long flags;
2127 bool found = false;
2128 int i;
2129
2130	if (crq->request_error_rsp.rc.code) {
2131 dev_info(dev, "Request Error Rsp returned with rc=%x\n",
2132 crq->request_error_rsp.rc.code);
2133 return;
2134 }
2135
2136 spin_lock_irqsave(&adapter->error_list_lock, flags);
2137	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
2138		if (error_buff->error_id == crq->request_error_rsp.error_id) {
2139 found = true;
2140 list_del(&error_buff->list);
2141 break;
2142 }
2143 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2144
2145 if (!found) {
2146 dev_err(dev, "Couldn't find error id %x\n",
2147 crq->request_error_rsp.error_id);
2148 return;
2149 }
2150
2151 dev_err(dev, "Detailed info for error id %x:",
2152 crq->request_error_rsp.error_id);
2153
2154 for (i = 0; i < error_buff->len; i++) {
2155 pr_cont("%02x", (int)error_buff->buff[i]);
2156 if (i % 8 == 7)
2157 pr_cont(" ");
2158 }
2159 pr_cont("\n");
2160
2161 dma_unmap_single(dev, error_buff->dma, error_buff->len,
2162 DMA_FROM_DEVICE);
2163 kfree(error_buff->buff);
2164 kfree(error_buff);
2165}
2166
2167static void handle_dump_size_rsp(union ibmvnic_crq *crq,
2168 struct ibmvnic_adapter *adapter)
2169{
2170 int len = be32_to_cpu(crq->request_dump_size_rsp.len);
2171 struct ibmvnic_inflight_cmd *inflight_cmd;
2172 struct device *dev = &adapter->vdev->dev;
2173 union ibmvnic_crq newcrq;
2174 unsigned long flags;
2175
2176 /* allocate and map buffer */
2177 adapter->dump_data = kmalloc(len, GFP_KERNEL);
2178 if (!adapter->dump_data) {
2179 complete(&adapter->fw_done);
2180 return;
2181 }
2182
2183 adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
2184 DMA_FROM_DEVICE);
2185
2186 if (dma_mapping_error(dev, adapter->dump_data_token)) {
2187 if (!firmware_has_feature(FW_FEATURE_CMO))
2188 dev_err(dev, "Couldn't map dump data\n");
2189 kfree(adapter->dump_data);
2190 complete(&adapter->fw_done);
2191 return;
2192 }
2193
2194 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2195 if (!inflight_cmd) {
2196 dma_unmap_single(dev, adapter->dump_data_token, len,
2197 DMA_FROM_DEVICE);
2198 kfree(adapter->dump_data);
2199 complete(&adapter->fw_done);
2200 return;
2201 }
2202
2203 memset(&newcrq, 0, sizeof(newcrq));
2204 newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
2205 newcrq.request_dump.cmd = REQUEST_DUMP;
2206 newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
2207 newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);
2208
2209 memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));
2210
2211 spin_lock_irqsave(&adapter->inflight_lock, flags);
2212 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2213 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2214
2215 ibmvnic_send_crq(adapter, &newcrq);
2216}
2217
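/* A firmware ERROR_INDICATION arrived: log it, allocate and DMA-map a
 * buffer for the detailed error data, and send REQUEST_ERROR_INFO so
 * the server can fill that buffer in.
 */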
2218static void handle_error_indication(union ibmvnic_crq *crq,
2219 struct ibmvnic_adapter *adapter)
2220{
2221 int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
2222 struct ibmvnic_inflight_cmd *inflight_cmd;
2223 struct device *dev = &adapter->vdev->dev;
2224 struct ibmvnic_error_buff *error_buff;
2225 union ibmvnic_crq new_crq;
2226 unsigned long flags;
2227
2228 dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2229 crq->error_indication.
2230 flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
2231 crq->error_indication.error_id,
2232 crq->error_indication.error_cause);
2233
2234 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
2235 if (!error_buff)
2236 return;
2237
2238 error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
2239 if (!error_buff->buff) {
2240 kfree(error_buff);
2241 return;
2242 }
2243
2244 error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
2245 DMA_FROM_DEVICE);
2246 if (dma_mapping_error(dev, error_buff->dma)) {
2247 if (!firmware_has_feature(FW_FEATURE_CMO))
2248 dev_err(dev, "Couldn't map error buffer\n");
2249 kfree(error_buff->buff);
2250 kfree(error_buff);
2251 return;
2252 }
2253
2254 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2255 if (!inflight_cmd) {
2256 dma_unmap_single(dev, error_buff->dma, detail_len,
2257 DMA_FROM_DEVICE);
2258 kfree(error_buff->buff);
2259 kfree(error_buff);
2260 return;
2261 }
2262
2263 error_buff->len = detail_len;
2264 error_buff->error_id = crq->error_indication.error_id;
2265
2266 spin_lock_irqsave(&adapter->error_list_lock, flags);
2267 list_add_tail(&error_buff->list, &adapter->errors);
2268 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2269
2270 memset(&new_crq, 0, sizeof(new_crq));
2271 new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
2272 new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
2273 new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
2274 new_crq.request_error_info.len = cpu_to_be32(detail_len);
2275 new_crq.request_error_info.error_id = crq->error_indication.error_id;
2276
2277	memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));
2278
2279 spin_lock_irqsave(&adapter->inflight_lock, flags);
2280 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2281 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2282
2283 ibmvnic_send_crq(adapter, &new_crq);
2284}
2285
2286static void handle_change_mac_rsp(union ibmvnic_crq *crq,
2287 struct ibmvnic_adapter *adapter)
2288{
2289 struct net_device *netdev = adapter->netdev;
2290 struct device *dev = &adapter->vdev->dev;
2291 long rc;
2292
2293 rc = crq->change_mac_addr_rsp.rc.code;
2294 if (rc) {
2295 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
2296 return;
2297 }
2298 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
2299 ETH_ALEN);
2300}
2301
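/* Record the outcome of one REQUEST_CAPABILITY exchange. On
 * PARTIALSUCCESS the server's value is adopted and the sub-CRQs are
 * released so the request can be retried; once all capabilities are
 * settled, the IP offload query is issued.
 */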
2302static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2303 struct ibmvnic_adapter *adapter)
2304{
2305 struct device *dev = &adapter->vdev->dev;
2306 u64 *req_value;
2307 char *name;
2308
2309 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
2310 case REQ_TX_QUEUES:
2311 req_value = &adapter->req_tx_queues;
2312 name = "tx";
2313 break;
2314 case REQ_RX_QUEUES:
2315 req_value = &adapter->req_rx_queues;
2316 name = "rx";
2317 break;
2318 case REQ_RX_ADD_QUEUES:
2319 req_value = &adapter->req_rx_add_queues;
2320 name = "rx_add";
2321 break;
2322 case REQ_TX_ENTRIES_PER_SUBCRQ:
2323 req_value = &adapter->req_tx_entries_per_subcrq;
2324 name = "tx_entries_per_subcrq";
2325 break;
2326 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
2327 req_value = &adapter->req_rx_add_entries_per_subcrq;
2328 name = "rx_add_entries_per_subcrq";
2329 break;
2330 case REQ_MTU:
2331 req_value = &adapter->req_mtu;
2332 name = "mtu";
2333 break;
2334 case PROMISC_REQUESTED:
2335 req_value = &adapter->promisc;
2336 name = "promisc";
2337 break;
2338 default:
2339 dev_err(dev, "Got invalid cap request rsp %d\n",
2340 crq->request_capability.capability);
2341 return;
2342 }
2343
2344 switch (crq->request_capability_rsp.rc.code) {
2345 case SUCCESS:
2346 break;
2347 case PARTIALSUCCESS:
2348 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
2349 *req_value,
2350 (long int)be32_to_cpu(crq->request_capability_rsp.
2351 number), name);
2352 release_sub_crqs(adapter);
2353 *req_value = be32_to_cpu(crq->request_capability_rsp.number);
2354 complete(&adapter->init_done);
2355 return;
2356 default:
2357 dev_err(dev, "Error %d in request cap rsp\n",
2358 crq->request_capability_rsp.rc.code);
2359 return;
2360 }
2361
2362 /* Done receiving requested capabilities, query IP offload support */
2363 if (++adapter->requested_caps == 7) {
2364 union ibmvnic_crq newcrq;
2365 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
2366 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
2367 &adapter->ip_offload_buf;
2368
2369 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
2370 buf_sz,
2371 DMA_FROM_DEVICE);
2372
2373 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
2374 if (!firmware_has_feature(FW_FEATURE_CMO))
2375 dev_err(dev, "Couldn't map offload buffer\n");
2376 return;
2377 }
2378
2379 memset(&newcrq, 0, sizeof(newcrq));
2380 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
2381 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
2382 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
2383 newcrq.query_ip_offload.ioba =
2384 cpu_to_be32(adapter->ip_offload_tok);
2385
2386 ibmvnic_send_crq(adapter, &newcrq);
2387 }
2388}
2389
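/* Unmap the login buffers, renegotiate with fewer queues if the server
 * rejected the request, sanity-check the response against the request,
 * and then start RAS component discovery.
 */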
2390static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
2391 struct ibmvnic_adapter *adapter)
2392{
2393 struct device *dev = &adapter->vdev->dev;
2394 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
2395 struct ibmvnic_login_buffer *login = adapter->login_buf;
2396 union ibmvnic_crq crq;
2397 int i;
2398
2399 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
2400 DMA_BIDIRECTIONAL);
2401 dma_unmap_single(dev, adapter->login_rsp_buf_token,
2402 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
2403
2404	/* If the number of queues requested can't be allocated by the
2405 * server, the login response will return with code 1. We will need
2406 * to resend the login buffer with fewer queues requested.
2407 */
2408 if (login_rsp_crq->generic.rc.code) {
2409 adapter->renegotiate = true;
2410 complete(&adapter->init_done);
2411 return 0;
2412 }
2413
2414	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
2415 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
2416 netdev_dbg(adapter->netdev, "%016lx\n",
2417 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
2418 }
2419
2420 /* Sanity checks */
2421 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
2422 (be32_to_cpu(login->num_rxcomp_subcrqs) *
2423 adapter->req_rx_add_queues !=
2424 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
2425 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
2426 ibmvnic_remove(adapter->vdev);
2427 return -EIO;
2428 }
2429 complete(&adapter->init_done);
2430
2431 memset(&crq, 0, sizeof(crq));
2432 crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
2433 crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
2434 ibmvnic_send_crq(adapter, &crq);
2435
2436 return 0;
2437}
2438
2439static void handle_request_map_rsp(union ibmvnic_crq *crq,
2440 struct ibmvnic_adapter *adapter)
2441{
2442 struct device *dev = &adapter->vdev->dev;
2443 u8 map_id = crq->request_map_rsp.map_id;
2444 int tx_subcrqs;
2445 int rx_subcrqs;
2446 long rc;
2447 int i;
2448
2449 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
2450 rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
2451
2452 rc = crq->request_map_rsp.rc.code;
2453 if (rc) {
2454 dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
2455 adapter->map_id--;
2456 /* need to find and zero tx/rx_pool map_id */
2457 for (i = 0; i < tx_subcrqs; i++) {
2458 if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
2459 adapter->tx_pool[i].long_term_buff.map_id = 0;
2460 }
2461 for (i = 0; i < rx_subcrqs; i++) {
2462 if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
2463 adapter->rx_pool[i].long_term_buff.map_id = 0;
2464 }
2465 }
2466 complete(&adapter->fw_done);
2467}
2468
2469static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
2470 struct ibmvnic_adapter *adapter)
2471{
2472 struct device *dev = &adapter->vdev->dev;
2473 long rc;
2474
2475 rc = crq->request_unmap_rsp.rc.code;
2476 if (rc)
2477 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
2478}
2479
2480static void handle_query_map_rsp(union ibmvnic_crq *crq,
2481 struct ibmvnic_adapter *adapter)
2482{
2483 struct net_device *netdev = adapter->netdev;
2484 struct device *dev = &adapter->vdev->dev;
2485 long rc;
2486
2487 rc = crq->query_map_rsp.rc.code;
2488 if (rc) {
2489 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
2490 return;
2491 }
2492 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
2493 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
2494 crq->query_map_rsp.free_pages);
2495}
2496
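/* Store the value returned for a single QUERY_CAPABILITY request;
 * init_done is completed once the last outstanding query has been
 * answered.
 */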
2497static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2498 struct ibmvnic_adapter *adapter)
2499{
2500 struct net_device *netdev = adapter->netdev;
2501 struct device *dev = &adapter->vdev->dev;
2502 long rc;
2503
2504 atomic_dec(&adapter->running_cap_queries);
2505 netdev_dbg(netdev, "Outstanding queries: %d\n",
2506 atomic_read(&adapter->running_cap_queries));
2507 rc = crq->query_capability.rc.code;
2508 if (rc) {
2509 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
2510 goto out;
2511 }
2512
2513 switch (be16_to_cpu(crq->query_capability.capability)) {
2514 case MIN_TX_QUEUES:
2515 adapter->min_tx_queues =
2516			be64_to_cpu(crq->query_capability.number);
2517		netdev_dbg(netdev, "min_tx_queues = %lld\n",
2518 adapter->min_tx_queues);
2519 break;
2520 case MIN_RX_QUEUES:
2521 adapter->min_rx_queues =
2522			be64_to_cpu(crq->query_capability.number);
2523		netdev_dbg(netdev, "min_rx_queues = %lld\n",
2524 adapter->min_rx_queues);
2525 break;
2526 case MIN_RX_ADD_QUEUES:
2527 adapter->min_rx_add_queues =
2528			be64_to_cpu(crq->query_capability.number);
2529		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
2530 adapter->min_rx_add_queues);
2531 break;
2532 case MAX_TX_QUEUES:
2533 adapter->max_tx_queues =
2534			be64_to_cpu(crq->query_capability.number);
2535		netdev_dbg(netdev, "max_tx_queues = %lld\n",
2536 adapter->max_tx_queues);
2537 break;
2538 case MAX_RX_QUEUES:
2539 adapter->max_rx_queues =
2540			be64_to_cpu(crq->query_capability.number);
2541		netdev_dbg(netdev, "max_rx_queues = %lld\n",
2542 adapter->max_rx_queues);
2543 break;
2544 case MAX_RX_ADD_QUEUES:
2545 adapter->max_rx_add_queues =
2546			be64_to_cpu(crq->query_capability.number);
2547		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
2548 adapter->max_rx_add_queues);
2549 break;
2550 case MIN_TX_ENTRIES_PER_SUBCRQ:
2551 adapter->min_tx_entries_per_subcrq =
2552			be64_to_cpu(crq->query_capability.number);
2553		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
2554 adapter->min_tx_entries_per_subcrq);
2555 break;
2556 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
2557 adapter->min_rx_add_entries_per_subcrq =
2558			be64_to_cpu(crq->query_capability.number);
2559		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
2560 adapter->min_rx_add_entries_per_subcrq);
2561 break;
2562 case MAX_TX_ENTRIES_PER_SUBCRQ:
2563 adapter->max_tx_entries_per_subcrq =
2564			be64_to_cpu(crq->query_capability.number);
2565		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
2566 adapter->max_tx_entries_per_subcrq);
2567 break;
2568 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
2569 adapter->max_rx_add_entries_per_subcrq =
2570			be64_to_cpu(crq->query_capability.number);
2571		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
2572 adapter->max_rx_add_entries_per_subcrq);
2573 break;
2574 case TCP_IP_OFFLOAD:
2575 adapter->tcp_ip_offload =
2576			be64_to_cpu(crq->query_capability.number);
2577		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
2578 adapter->tcp_ip_offload);
2579 break;
2580 case PROMISC_SUPPORTED:
2581 adapter->promisc_supported =
2582			be64_to_cpu(crq->query_capability.number);
2583		netdev_dbg(netdev, "promisc_supported = %lld\n",
2584 adapter->promisc_supported);
2585 break;
2586 case MIN_MTU:
2587		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
2588		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2589 break;
2590 case MAX_MTU:
2591		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
2592		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2593 break;
2594 case MAX_MULTICAST_FILTERS:
2595 adapter->max_multicast_filters =
2596			be64_to_cpu(crq->query_capability.number);
2597		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
2598 adapter->max_multicast_filters);
2599 break;
2600 case VLAN_HEADER_INSERTION:
2601 adapter->vlan_header_insertion =
2602			be64_to_cpu(crq->query_capability.number);
2603		if (adapter->vlan_header_insertion)
2604 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
2605 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
2606 adapter->vlan_header_insertion);
2607 break;
2608 case MAX_TX_SG_ENTRIES:
2609 adapter->max_tx_sg_entries =
2610			be64_to_cpu(crq->query_capability.number);
2611		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
2612 adapter->max_tx_sg_entries);
2613 break;
2614 case RX_SG_SUPPORTED:
2615 adapter->rx_sg_supported =
2616			be64_to_cpu(crq->query_capability.number);
2617		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
2618 adapter->rx_sg_supported);
2619 break;
2620 case OPT_TX_COMP_SUB_QUEUES:
2621 adapter->opt_tx_comp_sub_queues =
2622			be64_to_cpu(crq->query_capability.number);
2623		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
2624 adapter->opt_tx_comp_sub_queues);
2625 break;
2626 case OPT_RX_COMP_QUEUES:
2627 adapter->opt_rx_comp_queues =
2628			be64_to_cpu(crq->query_capability.number);
2629		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
2630 adapter->opt_rx_comp_queues);
2631 break;
2632 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
2633 adapter->opt_rx_bufadd_q_per_rx_comp_q =
2634			be64_to_cpu(crq->query_capability.number);
2635		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
2636 adapter->opt_rx_bufadd_q_per_rx_comp_q);
2637 break;
2638 case OPT_TX_ENTRIES_PER_SUBCRQ:
2639 adapter->opt_tx_entries_per_subcrq =
2640			be64_to_cpu(crq->query_capability.number);
2641		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
2642 adapter->opt_tx_entries_per_subcrq);
2643 break;
2644 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
2645 adapter->opt_rxba_entries_per_subcrq =
2646			be64_to_cpu(crq->query_capability.number);
2647		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
2648 adapter->opt_rxba_entries_per_subcrq);
2649 break;
2650 case TX_RX_DESC_REQ:
2651 adapter->tx_rx_desc_req = crq->query_capability.number;
2652 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
2653 adapter->tx_rx_desc_req);
2654 break;
2655
2656 default:
2657 netdev_err(netdev, "Got invalid cap rsp %d\n",
2658 crq->query_capability.capability);
2659 }
2660
2661out:
2662 if (atomic_read(&adapter->running_cap_queries) == 0)
2663 complete(&adapter->init_done);
2664 /* We're done querying the capabilities, initialize sub-crqs */
2665}
2666
2667static void handle_control_ras_rsp(union ibmvnic_crq *crq,
2668 struct ibmvnic_adapter *adapter)
2669{
2670 u8 correlator = crq->control_ras_rsp.correlator;
2671 struct device *dev = &adapter->vdev->dev;
2672 bool found = false;
2673 int i;
2674
2675 if (crq->control_ras_rsp.rc.code) {
2676 dev_warn(dev, "Control ras failed rc=%d\n",
2677 crq->control_ras_rsp.rc.code);
2678 return;
2679 }
2680
2681 for (i = 0; i < adapter->ras_comp_num; i++) {
2682 if (adapter->ras_comps[i].correlator == correlator) {
2683 found = true;
2684 break;
2685 }
2686 }
2687
2688 if (!found) {
2689 dev_warn(dev, "Correlator not found on control_ras_rsp\n");
2690 return;
2691 }
2692
2693 switch (crq->control_ras_rsp.op) {
2694 case IBMVNIC_TRACE_LEVEL:
2695 adapter->ras_comps[i].trace_level = crq->control_ras.level;
2696 break;
2697 case IBMVNIC_ERROR_LEVEL:
2698 adapter->ras_comps[i].error_check_level =
2699 crq->control_ras.level;
2700 break;
2701 case IBMVNIC_TRACE_PAUSE:
2702 adapter->ras_comp_int[i].paused = 1;
2703 break;
2704 case IBMVNIC_TRACE_RESUME:
2705 adapter->ras_comp_int[i].paused = 0;
2706 break;
2707 case IBMVNIC_TRACE_ON:
2708 adapter->ras_comps[i].trace_on = 1;
2709 break;
2710 case IBMVNIC_TRACE_OFF:
2711 adapter->ras_comps[i].trace_on = 0;
2712 break;
2713 case IBMVNIC_CHG_TRACE_BUFF_SZ:
2714 /* trace_buff_sz is 3 bytes, stuff it into an int */
2715 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
2716 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
2717 crq->control_ras_rsp.trace_buff_sz[0];
2718 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
2719 crq->control_ras_rsp.trace_buff_sz[1];
2720 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
2721 crq->control_ras_rsp.trace_buff_sz[2];
2722 break;
2723 default:
2724 dev_err(dev, "invalid op %d on control_ras_rsp",
2725 crq->control_ras_rsp.op);
2726 }
2727}
2728
2729static int ibmvnic_fw_comp_open(struct inode *inode, struct file *file)
2730{
2731 file->private_data = inode->i_private;
2732 return 0;
2733}
2734
2735static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
2736 loff_t *ppos)
2737{
2738 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2739 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2740 struct device *dev = &adapter->vdev->dev;
2741 struct ibmvnic_fw_trace_entry *trace;
2742 int num = ras_comp_int->num;
2743 union ibmvnic_crq crq;
2744 dma_addr_t trace_tok;
2745
2746 if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2747 return 0;
2748
2749 trace =
2750 dma_alloc_coherent(dev,
2751 be32_to_cpu(adapter->ras_comps[num].
2752 trace_buff_size), &trace_tok,
2753 GFP_KERNEL);
2754 if (!trace) {
2755 dev_err(dev, "Couldn't alloc trace buffer\n");
2756 return 0;
2757 }
2758
2759 memset(&crq, 0, sizeof(crq));
2760 crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
2761 crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
2762 crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
2763 crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
2764 crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
2765	init_completion(&adapter->fw_done);
2766	ibmvnic_send_crq(adapter, &crq);
2767
2768	wait_for_completion(&adapter->fw_done);
2769
2770 if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2771 len =
2772 be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
2773 *ppos;
2774
2775 copy_to_user(user_buf, &((u8 *)trace)[*ppos], len);
2776
2777 dma_free_coherent(dev,
2778 be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
2779 trace, trace_tok);
2780 *ppos += len;
2781 return len;
2782}
2783
2784static const struct file_operations trace_ops = {
2785 .owner = THIS_MODULE,
2786 .open = ibmvnic_fw_comp_open,
2787 .read = trace_read,
2788};
2789
2790static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
2791 loff_t *ppos)
2792{
2793 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2794 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2795 int num = ras_comp_int->num;
2796 char buff[5]; /* 1 or 0 plus \n and \0 */
2797 int size;
2798
2799 size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);
2800
2801 if (*ppos >= size)
2802 return 0;
2803
2804 copy_to_user(user_buf, buff, size);
2805 *ppos += size;
2806 return size;
2807}
2808
2809static ssize_t paused_write(struct file *file, const char __user *user_buf,
2810 size_t len, loff_t *ppos)
2811{
2812 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2813 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2814 int num = ras_comp_int->num;
2815 union ibmvnic_crq crq;
2816 unsigned long val;
2817 char buff[9]; /* decimal max int plus \n and \0 */
2818
2819 copy_from_user(buff, user_buf, sizeof(buff));
2820	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2821
2822 adapter->ras_comp_int[num].paused = val ? 1 : 0;
2823
2824 memset(&crq, 0, sizeof(crq));
2825 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2826 crq.control_ras.cmd = CONTROL_RAS;
2827 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2828 crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
2829 ibmvnic_send_crq(adapter, &crq);
2830
2831 return len;
2832}
2833
2834static const struct file_operations paused_ops = {
2835 .owner = THIS_MODULE,
2836 .open = ibmvnic_fw_comp_open,
2837 .read = paused_read,
2838 .write = paused_write,
2839};
2840
2841static ssize_t tracing_read(struct file *file, char __user *user_buf,
2842 size_t len, loff_t *ppos)
2843{
2844 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2845 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2846 int num = ras_comp_int->num;
2847 char buff[5]; /* 1 or 0 plus \n and \0 */
2848 int size;
2849
2850 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);
2851
2852 if (*ppos >= size)
2853 return 0;
2854
2855 copy_to_user(user_buf, buff, size);
2856 *ppos += size;
2857 return size;
2858}
2859
2860static ssize_t tracing_write(struct file *file, const char __user *user_buf,
2861 size_t len, loff_t *ppos)
2862{
2863 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2864 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2865 int num = ras_comp_int->num;
2866 union ibmvnic_crq crq;
2867 unsigned long val;
2868 char buff[9]; /* decimal max int plus \n and \0 */
2869
2870 copy_from_user(buff, user_buf, sizeof(buff));
2871	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2872
2873 memset(&crq, 0, sizeof(crq));
2874 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2875 crq.control_ras.cmd = CONTROL_RAS;
2876 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2877	crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
	ibmvnic_send_crq(adapter, &crq);
2878
2879 return len;
2880}
2881
2882static const struct file_operations tracing_ops = {
2883 .owner = THIS_MODULE,
2884 .open = ibmvnic_fw_comp_open,
2885 .read = tracing_read,
2886 .write = tracing_write,
2887};
2888
2889static ssize_t error_level_read(struct file *file, char __user *user_buf,
2890 size_t len, loff_t *ppos)
2891{
2892 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2893 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2894 int num = ras_comp_int->num;
2895 char buff[5]; /* decimal max char plus \n and \0 */
2896 int size;
2897
2898 size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);
2899
2900 if (*ppos >= size)
2901 return 0;
2902
2903 copy_to_user(user_buf, buff, size);
2904 *ppos += size;
2905 return size;
2906}
2907
2908static ssize_t error_level_write(struct file *file, const char __user *user_buf,
2909 size_t len, loff_t *ppos)
2910{
2911 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2912 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2913 int num = ras_comp_int->num;
2914 union ibmvnic_crq crq;
2915 unsigned long val;
2916 char buff[9]; /* decimal max int plus \n and \0 */
2917
2918 copy_from_user(buff, user_buf, sizeof(buff));
2919	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2920
2921 if (val > 9)
2922 val = 9;
2923
2924 memset(&crq, 0, sizeof(crq));
2925 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2926 crq.control_ras.cmd = CONTROL_RAS;
2927 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2928 crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
2929 crq.control_ras.level = val;
2930 ibmvnic_send_crq(adapter, &crq);
2931
2932 return len;
2933}
2934
2935static const struct file_operations error_level_ops = {
2936 .owner = THIS_MODULE,
2937 .open = ibmvnic_fw_comp_open,
2938 .read = error_level_read,
2939 .write = error_level_write,
2940};
2941
2942static ssize_t trace_level_read(struct file *file, char __user *user_buf,
2943 size_t len, loff_t *ppos)
2944{
2945 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2946 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2947 int num = ras_comp_int->num;
2948 char buff[5]; /* decimal max char plus \n and \0 */
2949 int size;
2950
2951 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);
2952 if (*ppos >= size)
2953 return 0;
2954
2955 copy_to_user(user_buf, buff, size);
2956 *ppos += size;
2957 return size;
2958}
2959
2960static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
2961 size_t len, loff_t *ppos)
2962{
2963 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2964 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2965 union ibmvnic_crq crq;
2966 unsigned long val;
2967 char buff[9]; /* decimal max int plus \n and \0 */
2968
2969 copy_from_user(buff, user_buf, sizeof(buff));
2970	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
2971 if (val > 9)
2972 val = 9;
2973
2974 memset(&crq, 0, sizeof(crq));
2975 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2976 crq.control_ras.cmd = CONTROL_RAS;
2977 crq.control_ras.correlator =
2978 adapter->ras_comps[ras_comp_int->num].correlator;
2979 crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
2980 crq.control_ras.level = val;
2981 ibmvnic_send_crq(adapter, &crq);
2982
2983 return len;
2984}
2985
2986static const struct file_operations trace_level_ops = {
2987 .owner = THIS_MODULE,
2988 .open = ibmvnic_fw_comp_open,
2989 .read = trace_level_read,
2990 .write = trace_level_write,
2991};
2992
2993static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
2994 size_t len, loff_t *ppos)
2995{
2996 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2997 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2998 int num = ras_comp_int->num;
2999 char buff[9]; /* decimal max int plus \n and \0 */
3000 int size;
3001
3002	size = sprintf(buff, "%d\n",
		       be32_to_cpu(adapter->ras_comps[num].trace_buff_size));
3003 if (*ppos >= size)
3004 return 0;
3005
3006 copy_to_user(user_buf, buff, size);
3007 *ppos += size;
3008 return size;
3009}
3010
3011static ssize_t trace_buff_size_write(struct file *file,
3012 const char __user *user_buf, size_t len,
3013 loff_t *ppos)
3014{
3015 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3016 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3017 union ibmvnic_crq crq;
3018 unsigned long val;
3019 char buff[9]; /* decimal max int plus \n and \0 */
3020
3021 copy_from_user(buff, user_buf, sizeof(buff));
3022	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
3023
3024 memset(&crq, 0, sizeof(crq));
3025 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3026 crq.control_ras.cmd = CONTROL_RAS;
3027 crq.control_ras.correlator =
3028 adapter->ras_comps[ras_comp_int->num].correlator;
3029 crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
3030 /* trace_buff_sz is 3 bytes, stuff an int into it */
3031 crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
3032 crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
3033 crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
3034 ibmvnic_send_crq(adapter, &crq);
3035
3036 return len;
3037}
3038
3039static const struct file_operations trace_size_ops = {
3040 .owner = THIS_MODULE,
3041 .open = ibmvnic_fw_comp_open,
3042 .read = trace_buff_size_read,
3043 .write = trace_buff_size_write,
3044};
3045
3046static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
3047 struct ibmvnic_adapter *adapter)
3048{
3049 struct device *dev = &adapter->vdev->dev;
3050 struct dentry *dir_ent;
3051 struct dentry *ent;
3052 int i;
3053
3054 debugfs_remove_recursive(adapter->ras_comps_ent);
3055
3056 adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
3057 adapter->debugfs_dir);
3058 if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
3059 dev_info(dev, "debugfs create ras_comps dir failed\n");
3060 return;
3061 }
3062
3063 for (i = 0; i < adapter->ras_comp_num; i++) {
3064 dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
3065 adapter->ras_comps_ent);
3066 if (!dir_ent || IS_ERR(dir_ent)) {
3067 dev_info(dev, "debugfs create %s dir failed\n",
3068 adapter->ras_comps[i].name);
3069 continue;
3070 }
3071
3072 adapter->ras_comp_int[i].adapter = adapter;
3073 adapter->ras_comp_int[i].num = i;
3074 adapter->ras_comp_int[i].desc_blob.data =
3075 &adapter->ras_comps[i].description;
3076 adapter->ras_comp_int[i].desc_blob.size =
3077 sizeof(adapter->ras_comps[i].description);
3078
3079 /* Don't need to remember the dentry's because the debugfs dir
3080 * gets removed recursively
3081 */
3082 ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
3083 &adapter->ras_comp_int[i].desc_blob);
3084 ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
3085 dir_ent, &adapter->ras_comp_int[i],
3086 &trace_size_ops);
3087 ent = debugfs_create_file("trace_level",
3088 S_IRUGO |
3089 (adapter->ras_comps[i].trace_level !=
3090 0xFF ? S_IWUSR : 0),
3091 dir_ent, &adapter->ras_comp_int[i],
3092 &trace_level_ops);
3093 ent = debugfs_create_file("error_level",
3094 S_IRUGO |
3095 (adapter->
3096 ras_comps[i].error_check_level !=
3097 0xFF ? S_IWUSR : 0),
3098 dir_ent, &adapter->ras_comp_int[i],
3099					  &error_level_ops);
3100 ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
3101 dir_ent, &adapter->ras_comp_int[i],
3102 &tracing_ops);
3103 ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
3104 dir_ent, &adapter->ras_comp_int[i],
3105 &paused_ops);
3106 ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
3107 &adapter->ras_comp_int[i],
3108 &trace_ops);
3109 }
3110}
3111
3112static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
3113 struct ibmvnic_adapter *adapter)
3114{
3115 int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
3116 struct device *dev = &adapter->vdev->dev;
3117 union ibmvnic_crq newcrq;
3118
3119 adapter->ras_comps = dma_alloc_coherent(dev, len,
3120 &adapter->ras_comps_tok,
3121 GFP_KERNEL);
3122 if (!adapter->ras_comps) {
3123 if (!firmware_has_feature(FW_FEATURE_CMO))
3124 dev_err(dev, "Couldn't alloc fw comps buffer\n");
3125 return;
3126 }
3127
3128 adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
3129 sizeof(struct ibmvnic_fw_comp_internal),
3130 GFP_KERNEL);
3131	if (!adapter->ras_comp_int) {
3132		dma_free_coherent(dev, len, adapter->ras_comps,
3133				  adapter->ras_comps_tok);
		return;
	}
3134
3135 memset(&newcrq, 0, sizeof(newcrq));
3136 newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
3137 newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
3138 newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
3139 newcrq.request_ras_comps.len = cpu_to_be32(len);
3140 ibmvnic_send_crq(adapter, &newcrq);
3141}
3142
3143static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
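/* Drop all commands still on the in-flight list, releasing any DMA
 * mappings and buffers they own; used when the CRQ connection is lost
 * or the partition migrates.
 */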
3144{
3145	struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
3146	struct device *dev = &adapter->vdev->dev;
3147	struct ibmvnic_error_buff *error_buff, *tmp2;
3148	unsigned long flags;
3149 unsigned long flags2;
3150
3151 spin_lock_irqsave(&adapter->inflight_lock, flags);
3152	list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
3153		switch (inflight_cmd->crq.generic.cmd) {
3154 case LOGIN:
3155 dma_unmap_single(dev, adapter->login_buf_token,
3156 adapter->login_buf_sz,
3157 DMA_BIDIRECTIONAL);
3158 dma_unmap_single(dev, adapter->login_rsp_buf_token,
3159 adapter->login_rsp_buf_sz,
3160 DMA_BIDIRECTIONAL);
3161 kfree(adapter->login_rsp_buf);
3162 kfree(adapter->login_buf);
3163 break;
3164 case REQUEST_DUMP:
3165 complete(&adapter->fw_done);
3166 break;
3167 case REQUEST_ERROR_INFO:
3168 spin_lock_irqsave(&adapter->error_list_lock, flags2);
3169			list_for_each_entry_safe(error_buff, tmp2,
3170 &adapter->errors, list) {
3171				dma_unmap_single(dev, error_buff->dma,
3172 error_buff->len,
3173 DMA_FROM_DEVICE);
3174 kfree(error_buff->buff);
3175 list_del(&error_buff->list);
3176 kfree(error_buff);
3177 }
3178 spin_unlock_irqrestore(&adapter->error_list_lock,
3179 flags2);
3180 break;
3181 }
3182 list_del(&inflight_cmd->list);
3183 kfree(inflight_cmd);
3184 }
3185 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
3186}
3187
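/* Top-level dispatcher for main-CRQ messages: transport events (init,
 * migration, failure) are handled directly, command responses are
 * routed to their specific handlers.
 */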
3188static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
3189 struct ibmvnic_adapter *adapter)
3190{
3191 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
3192 struct net_device *netdev = adapter->netdev;
3193 struct device *dev = &adapter->vdev->dev;
3194 long rc;
3195
3196 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
3197 ((unsigned long int *)crq)[0],
3198 ((unsigned long int *)crq)[1]);
3199 switch (gen_crq->first) {
3200 case IBMVNIC_CRQ_INIT_RSP:
3201 switch (gen_crq->cmd) {
3202 case IBMVNIC_CRQ_INIT:
3203 dev_info(dev, "Partner initialized\n");
3204 /* Send back a response */
3205 rc = ibmvnic_send_crq_init_complete(adapter);
3206 if (rc == 0)
3207 send_version_xchg(adapter);
3208 else
3209 dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
3210 break;
3211 case IBMVNIC_CRQ_INIT_COMPLETE:
3212 dev_info(dev, "Partner initialization complete\n");
3213 send_version_xchg(adapter);
3214 break;
3215 default:
3216 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
3217 }
3218 return;
3219 case IBMVNIC_CRQ_XPORT_EVENT:
3220 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
3221 dev_info(dev, "Re-enabling adapter\n");
3222 adapter->migrated = true;
3223 ibmvnic_free_inflight(adapter);
3224 release_sub_crqs(adapter);
3225 rc = ibmvnic_reenable_crq_queue(adapter);
3226 if (rc)
3227 dev_err(dev, "Error after enable rc=%ld\n", rc);
3228 adapter->migrated = false;
3229 rc = ibmvnic_send_crq_init(adapter);
3230 if (rc)
3231 dev_err(dev, "Error sending init rc=%ld\n", rc);
3232 } else {
3233 /* The adapter lost the connection */
3234 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
3235 gen_crq->cmd);
3236 ibmvnic_free_inflight(adapter);
3237 release_sub_crqs(adapter);
3238 }
3239 return;
3240 case IBMVNIC_CRQ_CMD_RSP:
3241 break;
3242 default:
3243 dev_err(dev, "Got an invalid msg type 0x%02x\n",
3244 gen_crq->first);
3245 return;
3246 }
3247
3248 switch (gen_crq->cmd) {
3249 case VERSION_EXCHANGE_RSP:
3250 rc = crq->version_exchange_rsp.rc.code;
3251 if (rc) {
3252 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
3253 break;
3254 }
3255 dev_info(dev, "Partner protocol version is %d\n",
3256 crq->version_exchange_rsp.version);
3257 if (be16_to_cpu(crq->version_exchange_rsp.version) <
3258 ibmvnic_version)
3259 ibmvnic_version =
3260 be16_to_cpu(crq->version_exchange_rsp.version);
3261 send_cap_queries(adapter);
3262 break;
3263 case QUERY_CAPABILITY_RSP:
3264 handle_query_cap_rsp(crq, adapter);
3265 break;
3266 case QUERY_MAP_RSP:
3267 handle_query_map_rsp(crq, adapter);
3268 break;
3269 case REQUEST_MAP_RSP:
3270 handle_request_map_rsp(crq, adapter);
3271 break;
3272 case REQUEST_UNMAP_RSP:
3273 handle_request_unmap_rsp(crq, adapter);
3274 break;
3275 case REQUEST_CAPABILITY_RSP:
3276 handle_request_cap_rsp(crq, adapter);
3277 break;
3278 case LOGIN_RSP:
3279 netdev_dbg(netdev, "Got Login Response\n");
3280 handle_login_rsp(crq, adapter);
3281 break;
3282 case LOGICAL_LINK_STATE_RSP:
3283 netdev_dbg(netdev, "Got Logical Link State Response\n");
3284 adapter->logical_link_state =
3285 crq->logical_link_state_rsp.link_state;
3286 break;
3287 case LINK_STATE_INDICATION:
3288 netdev_dbg(netdev, "Got Logical Link State Indication\n");
3289 adapter->phys_link_state =
3290 crq->link_state_indication.phys_link_state;
3291 adapter->logical_link_state =
3292 crq->link_state_indication.logical_link_state;
3293 break;
3294 case CHANGE_MAC_ADDR_RSP:
3295 netdev_dbg(netdev, "Got MAC address change Response\n");
3296 handle_change_mac_rsp(crq, adapter);
3297 break;
3298 case ERROR_INDICATION:
3299 netdev_dbg(netdev, "Got Error Indication\n");
3300 handle_error_indication(crq, adapter);
3301 break;
3302 case REQUEST_ERROR_RSP:
3303 netdev_dbg(netdev, "Got Error Detail Response\n");
3304 handle_error_info_rsp(crq, adapter);
3305 break;
3306 case REQUEST_STATISTICS_RSP:
3307 netdev_dbg(netdev, "Got Statistics Response\n");
3308 complete(&adapter->stats_done);
3309 break;
3310 case REQUEST_DUMP_SIZE_RSP:
3311 netdev_dbg(netdev, "Got Request Dump Size Response\n");
3312 handle_dump_size_rsp(crq, adapter);
3313 break;
3314 case REQUEST_DUMP_RSP:
3315 netdev_dbg(netdev, "Got Request Dump Response\n");
3316 complete(&adapter->fw_done);
3317 break;
3318 case QUERY_IP_OFFLOAD_RSP:
3319 netdev_dbg(netdev, "Got Query IP offload Response\n");
3320 handle_query_ip_offload_rsp(adapter);
3321 break;
3322 case MULTICAST_CTRL_RSP:
3323 netdev_dbg(netdev, "Got multicast control Response\n");
3324 break;
3325 case CONTROL_IP_OFFLOAD_RSP:
3326 netdev_dbg(netdev, "Got Control IP offload Response\n");
3327 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
3328 sizeof(adapter->ip_offload_ctrl),
3329 DMA_TO_DEVICE);
3330 /* We're done with the queries, perform the login */
3331 send_login(adapter);
3332 break;
3333 case REQUEST_RAS_COMP_NUM_RSP:
3334 netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
3335 if (crq->request_ras_comp_num_rsp.rc.code == 10) {
3336 netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
3337 break;
3338 }
3339 adapter->ras_comp_num =
3340 be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
3341 handle_request_ras_comp_num_rsp(crq, adapter);
3342 break;
3343 case REQUEST_RAS_COMPS_RSP:
3344 netdev_dbg(netdev, "Got Request RAS Comps Response\n");
3345 handle_request_ras_comps_rsp(crq, adapter);
3346 break;
3347 case CONTROL_RAS_RSP:
3348 netdev_dbg(netdev, "Got Control RAS Response\n");
3349 handle_control_ras_rsp(crq, adapter);
3350 break;
3351 case COLLECT_FW_TRACE_RSP:
3352 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
3353 complete(&adapter->fw_done);
3354 break;
3355 default:
3356 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
3357 gen_crq->cmd);
3358 }
3359}
3360
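/* Top-level CRQ interrupt handler.  Interrupts are disabled while the
 * queue is drained; after re-enabling them the queue is checked one
 * more time so that a message which raced with the enable is handled
 * now rather than waiting for the next interrupt.
 */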
3361static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
3362{
3363 struct ibmvnic_adapter *adapter = instance;
3364 struct ibmvnic_crq_queue *queue = &adapter->crq;
3365 struct vio_dev *vdev = adapter->vdev;
3366 union ibmvnic_crq *crq;
3367 unsigned long flags;
3368 bool done = false;
3369
3370 spin_lock_irqsave(&queue->lock, flags);
3371 vio_disable_interrupts(vdev);
3372 while (!done) {
3373 /* Pull all the valid messages off the CRQ */
3374 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
3375 ibmvnic_handle_crq(crq, adapter);
3376 crq->generic.first = 0;
3377 }
3378 vio_enable_interrupts(vdev);
3379 crq = ibmvnic_next_crq(adapter);
3380 if (crq) {
3381 vio_disable_interrupts(vdev);
3382 ibmvnic_handle_crq(crq, adapter);
3383 crq->generic.first = 0;
3384 } else {
3385 done = true;
3386 }
3387 }
3388 spin_unlock_irqrestore(&queue->lock, flags);
3389 return IRQ_HANDLED;
3390}
3391
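/* Ask the hypervisor to re-enable the main CRQ, e.g. after a partition
 * migration, retrying for as long as the hcall reports busy.
 */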
3392static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
3393{
3394 struct vio_dev *vdev = adapter->vdev;
3395 int rc;
3396
3397 do {
3398 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
3399 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
3400
3401 if (rc)
3402 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
3403
3404 return rc;
3405}
3406
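/* Reset the main CRQ: close it with H_FREE_CRQ, clear out the message
 * page and register it again with H_REG_CRQ.  H_CLOSED is not fatal
 * here; it only means the partner adapter is not ready yet.
 */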
3407static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
3408{
3409 struct ibmvnic_crq_queue *crq = &adapter->crq;
3410 struct device *dev = &adapter->vdev->dev;
3411 struct vio_dev *vdev = adapter->vdev;
3412 int rc;
3413
3414 /* Close the CRQ */
3415 do {
3416 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3417 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3418
3419 /* Clean out the queue */
3420 memset(crq->msgs, 0, PAGE_SIZE);
3421 crq->cur = 0;
3422
3423 /* And re-open it again */
3424 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3425 crq->msg_token, PAGE_SIZE);
3426
3427 if (rc == H_CLOSED)
3428 /* Adapter is good, but other end is not ready */
3429 dev_warn(dev, "Partner adapter not ready\n");
3430 else if (rc != 0)
3431 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
3432
3433 return rc;
3434}
3435
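/* Tear down the main CRQ: release the irq, close the queue with the
 * hypervisor, then unmap and free the page of CRQ messages.
 */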
3436static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
3437{
3438 struct ibmvnic_crq_queue *crq = &adapter->crq;
3439 struct vio_dev *vdev = adapter->vdev;
3440 long rc;
3441
3442 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
3443 free_irq(vdev->irq, adapter);
3444 do {
3445 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3446 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3447
3448 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
3449 DMA_BIDIRECTIONAL);
3450 free_page((unsigned long)crq->msgs);
3451}
3452
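/* Allocate and register the main CRQ: a single zeroed page of messages
 * is DMA mapped and handed to the hypervisor with H_REG_CRQ, then the
 * device interrupt is requested and enabled.  H_RESOURCE from the
 * registration usually means a previous owner (e.g. across kexec) still
 * holds the queue, so a CRQ reset is attempted instead.
 */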
3453static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
3454{
3455 struct ibmvnic_crq_queue *crq = &adapter->crq;
3456 struct device *dev = &adapter->vdev->dev;
3457 struct vio_dev *vdev = adapter->vdev;
3458 int rc, retrc = -ENOMEM;
3459
3460 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
3461 /* Should we allocate more than one page? */
3462
3463 if (!crq->msgs)
3464 return -ENOMEM;
3465
3466 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3467 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
3468 DMA_BIDIRECTIONAL);
3469 if (dma_mapping_error(dev, crq->msg_token))
3470 goto map_failed;
3471
3472 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3473 crq->msg_token, PAGE_SIZE);
3474
3475 if (rc == H_RESOURCE)
3476 /* maybe kexecing and resource is busy. try a reset */
3477 rc = ibmvnic_reset_crq(adapter);
3478 retrc = rc;
3479
3480 if (rc == H_CLOSED) {
3481 dev_warn(dev, "Partner adapter not ready\n");
3482 } else if (rc) {
3483 dev_warn(dev, "Error %d opening adapter\n", rc);
3484 goto reg_crq_failed;
3485 }
3486
3487	retrc = 0;
3488
3489	crq->cur = 0;
3490	spin_lock_init(&crq->lock);
3491
3492	netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
3493	rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
3494			 adapter);
3495	if (rc) {
3496		dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
3497			vdev->irq, rc);
3498		goto req_irq_failed;
3499	}
3500
3501	rc = vio_enable_interrupts(vdev);
3502	if (rc) {
3503		dev_err(dev, "Error %d enabling interrupts\n", rc);
3504		goto req_irq_failed;
3505	}
3506
3507 return retrc;
3508
3509req_irq_failed:
3510 do {
3511 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3512 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3513reg_crq_failed:
3514 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3515map_failed:
3516 free_page((unsigned long)crq->msgs);
3517 return retrc;
3518}
3519
3520/* debugfs for dump */
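/* Reading the debugfs "dump" file sends a REQUEST_DUMP_SIZE command and
 * sleeps on fw_done, which the REQUEST_DUMP_RSP handler completes; the
 * dump buffer prepared by those response handlers is then written to
 * the seq_file, unmapped and freed.
 */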
3521static int ibmvnic_dump_show(struct seq_file *seq, void *v)
3522{
3523 struct net_device *netdev = seq->private;
3524 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3525 struct device *dev = &adapter->vdev->dev;
3526 union ibmvnic_crq crq;
3527
3528	memset(&crq, 0, sizeof(crq));
3529	crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
3530	crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
3531	init_completion(&adapter->fw_done);
3532
3533	ibmvnic_send_crq(adapter, &crq);
3534	wait_for_completion(&adapter->fw_done);
3535
3536 seq_write(seq, adapter->dump_data, adapter->dump_data_size);
3537
3538 dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
3539 DMA_BIDIRECTIONAL);
3540
3541 kfree(adapter->dump_data);
3542
3543 return 0;
3544}
3545
3546static int ibmvnic_dump_open(struct inode *inode, struct file *file)
3547{
3548 return single_open(file, ibmvnic_dump_show, inode->i_private);
3549}
3550
3551static const struct file_operations ibmvnic_dump_ops = {
3552 .owner = THIS_MODULE,
3553 .open = ibmvnic_dump_open,
3554 .read = seq_read,
3555 .llseek = seq_lseek,
3556 .release = single_release,
3557};
3558
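/* Probe one VNIC device: read the MAC address from the device tree
 * attribute, allocate the net_device, bring up the main CRQ and then
 * drive CRQ initialization and capability negotiation to completion
 * before the netdev is registered.  The per-adapter debugfs entries are
 * best effort and their failure is not treated as fatal.
 */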
3559static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3560{
3561 struct ibmvnic_adapter *adapter;
3562 struct net_device *netdev;
3563 unsigned char *mac_addr_p;
3564 struct dentry *ent;
3565 char buf[16]; /* debugfs name buf */
3566 int rc;
3567
3568 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
3569 dev->unit_address);
3570
3571 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
3572 VETH_MAC_ADDR, NULL);
3573 if (!mac_addr_p) {
3574 dev_err(&dev->dev,
3575 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
3576 __FILE__, __LINE__);
3577		return -EINVAL;
3578 }
3579
3580 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
3581 IBMVNIC_MAX_TX_QUEUES);
3582 if (!netdev)
3583 return -ENOMEM;
3584
3585 adapter = netdev_priv(netdev);
3586 dev_set_drvdata(&dev->dev, netdev);
3587 adapter->vdev = dev;
3588 adapter->netdev = netdev;
3589
3590 ether_addr_copy(adapter->mac_addr, mac_addr_p);
3591 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3592 netdev->irq = dev->irq;
3593 netdev->netdev_ops = &ibmvnic_netdev_ops;
3594 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
3595 SET_NETDEV_DEV(netdev, &dev->dev);
3596
3597 spin_lock_init(&adapter->stats_lock);
3598
3599 rc = ibmvnic_init_crq_queue(adapter);
3600 if (rc) {
3601 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
3602 goto free_netdev;
3603 }
3604
3605 INIT_LIST_HEAD(&adapter->errors);
3606 INIT_LIST_HEAD(&adapter->inflight);
3607 spin_lock_init(&adapter->error_list_lock);
3608 spin_lock_init(&adapter->inflight_lock);
3609
3610 adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
3611 sizeof(struct ibmvnic_statistics),
3612 DMA_FROM_DEVICE);
3613 if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
3614 if (!firmware_has_feature(FW_FEATURE_CMO))
3615 dev_err(&dev->dev, "Couldn't map stats buffer\n");
3616 goto free_crq;
3617 }
3618
3619 snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
3620 ent = debugfs_create_dir(buf, NULL);
3621 if (!ent || IS_ERR(ent)) {
3622 dev_info(&dev->dev, "debugfs create directory failed\n");
3623 adapter->debugfs_dir = NULL;
3624 } else {
3625 adapter->debugfs_dir = ent;
3626 ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
3627 netdev, &ibmvnic_dump_ops);
3628 if (!ent || IS_ERR(ent)) {
3629 dev_info(&dev->dev,
3630 "debugfs create dump file failed\n");
3631 adapter->debugfs_dump = NULL;
3632 } else {
3633 adapter->debugfs_dump = ent;
3634 }
3635 }
3636	init_completion(&adapter->init_done);
3637	ibmvnic_send_crq_init(adapter);
3638
3639	wait_for_completion(&adapter->init_done);
3640
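	/* Capability negotiation: init_sub_crqs() requests the negotiated
	 * queue resources from the server.  If the exchange has to be
	 * restarted (the response handlers set adapter->renegotiate), the
	 * sub-CRQs are released, the capabilities are queried again and
	 * the whole sequence is retried.
	 */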
3641	do {
3642 adapter->renegotiate = false;
3643
3644		init_sub_crqs(adapter, 0);
3645 reinit_completion(&adapter->init_done);
3646 wait_for_completion(&adapter->init_done);
3647
3648 if (adapter->renegotiate) {
3649 release_sub_crqs(adapter);
3650 send_cap_queries(adapter);
3651
3652 reinit_completion(&adapter->init_done);
3653 wait_for_completion(&adapter->init_done);
3654 }
3655 } while (adapter->renegotiate);
3656
3657 /* if init_sub_crqs is partially successful, retry */
3658 while (!adapter->tx_scrq || !adapter->rx_scrq) {
3659 init_sub_crqs(adapter, 1);
3660
3661 reinit_completion(&adapter->init_done);
3662 wait_for_completion(&adapter->init_done);
3663 }
3664
3665 netdev->real_num_tx_queues = adapter->req_tx_queues;
3666
3667 rc = register_netdev(netdev);
3668 if (rc) {
3669 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
3670 goto free_debugfs;
3671 }
3672 dev_info(&dev->dev, "ibmvnic registered\n");
3673
3674 return 0;
3675
3676free_debugfs:
3677 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3678 debugfs_remove_recursive(adapter->debugfs_dir);
3679free_crq:
3680 ibmvnic_release_crq_queue(adapter);
3681free_netdev:
3682 free_netdev(netdev);
3683 return rc;
3684}
3685
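/* Remove path: unregister the netdev, release the sub-CRQs and the main
 * CRQ, then free the debugfs entries and the RAS component buffers.
 */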
3686static int ibmvnic_remove(struct vio_dev *dev)
3687{
3688 struct net_device *netdev = dev_get_drvdata(&dev->dev);
3689 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3690
3691 unregister_netdev(netdev);
3692
3693 release_sub_crqs(adapter);
3694
3695 ibmvnic_release_crq_queue(adapter);
3696
3697 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3698 debugfs_remove_recursive(adapter->debugfs_dir);
3699
3700 if (adapter->ras_comps)
3701 dma_free_coherent(&dev->dev,
3702 adapter->ras_comp_num *
3703 sizeof(struct ibmvnic_fw_component),
3704 adapter->ras_comps, adapter->ras_comps_tok);
3705
3706 kfree(adapter->ras_comp_int);
3707
3708 free_netdev(netdev);
3709 dev_set_drvdata(&dev->dev, NULL);
3710
3711 return 0;
3712}
3713
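/* Estimate the IO (DMA) entitlement this adapter wants: the CRQ page,
 * the bounce buffer, the statistics buffer, four pages per sub-CRQ and
 * the long term mapped buffers of every rx pool.  Before the netdev
 * exists (i.e. before probe) a fixed default is reported instead.
 */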
3714static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
3715{
3716 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
3717 struct ibmvnic_adapter *adapter;
3718 struct iommu_table *tbl;
3719 unsigned long ret = 0;
3720 int i;
3721
3722 tbl = get_iommu_table_base(&vdev->dev);
3723
3724	/* netdev inits at probe time along with the structures we need below */
3725 if (!netdev)
3726 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
3727
3728 adapter = netdev_priv(netdev);
3729
3730 ret += PAGE_SIZE; /* the crq message queue */
3731 ret += adapter->bounce_buffer_size;
3732 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
3733
3734 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
3735 ret += 4 * PAGE_SIZE; /* the scrq message queue */
3736
3737 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
3738 i++)
3739 ret += adapter->rx_pool[i].size *
3740 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
3741
3742 return ret;
3743}
3744
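/* On resume, kick every rx sub-CRQ interrupt handler in case an
 * interrupt was lost while the partition was suspended.
 */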
3745static int ibmvnic_resume(struct device *dev)
3746{
3747 struct net_device *netdev = dev_get_drvdata(dev);
3748 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3749 int i;
3750
3751 /* kick the interrupt handlers just in case we lost an interrupt */
3752 for (i = 0; i < adapter->req_rx_queues; i++)
3753 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
3754 adapter->rx_scrq[i]);
3755
3756 return 0;
3757}
3758
3759static struct vio_device_id ibmvnic_device_table[] = {
3760 {"network", "IBM,vnic"},
3761 {"", "" }
3762};
3763MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
3764
3765static const struct dev_pm_ops ibmvnic_pm_ops = {
3766 .resume = ibmvnic_resume
3767};
3768
3769static struct vio_driver ibmvnic_driver = {
3770 .id_table = ibmvnic_device_table,
3771 .probe = ibmvnic_probe,
3772 .remove = ibmvnic_remove,
3773 .get_desired_dma = ibmvnic_get_desired_dma,
3774 .name = ibmvnic_driver_name,
3775 .pm = &ibmvnic_pm_ops,
3776};
3777
3778/* module functions */
3779static int __init ibmvnic_module_init(void)
3780{
3781 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
3782 IBMVNIC_DRIVER_VERSION);
3783
3784 return vio_register_driver(&ibmvnic_driver);
3785}
3786
3787static void __exit ibmvnic_module_exit(void)
3788{
3789 vio_unregister_driver(&ibmvnic_driver);
3790}
3791
3792module_init(ibmvnic_module_init);
3793module_exit(ibmvnic_module_exit);