/**************************************************************************/
/*                                                                        */
/*  IBM System i and System p Virtual NIC Device Driver                   */
/*  Copyright (C) 2014 IBM Corp.                                          */
/*  Santiago Leon (santi_leon@yahoo.com)                                  */
/*  Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                           */
/*  John Allen (jallen@linux.vnet.ibm.com)                                */
/*                                                                        */
/*  This program is free software; you can redistribute it and/or modify  */
/*  it under the terms of the GNU General Public License as published by  */
/*  the Free Software Foundation; either version 2 of the License, or     */
/*  (at your option) any later version.                                   */
/*                                                                        */
/*  This program is distributed in the hope that it will be useful,       */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
/*  GNU General Public License for more details.                          */
/*                                                                        */
/*  You should have received a copy of the GNU General Public License     */
/*  along with this program.                                              */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA-mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/*                                                                        */
/**************************************************************************/
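
/* Reader's roadmap (added commentary, not functional code): a typical
 * startup conversation with the VNIC server, carried over the CRQ, looks
 * roughly like this; the exact command set and ordering are defined by
 * the protocol structures in ibmvnic.h:
 *
 *	driver				server
 *	------				------
 *	IBMVNIC_CRQ_INIT	 ->
 *				 <-	IBMVNIC_CRQ_INIT_COMPLETE
 *	VERSION_EXCHANGE	 ->
 *				 <-	VERSION_EXCHANGE_RSP
 *	REQUEST_CAPABILITY	 ->	(one per capability)
 *				 <-	REQUEST_CAPABILITY_RSP
 *	LOGIN			 ->
 *				 <-	LOGIN_RSP
 *	LOGICAL_LINK_STATE (up)	 ->
 *
 * Once logged in, data traffic moves to the sub-CRQs set up in
 * init_sub_crqs()/init_sub_crq_irqs() below.
 */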

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
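
/* Usage sketch (illustration only, not driver code): given a
 * struct ibmvnic_adapter *adapter whose firmware statistics have been
 * refreshed, the rx_packets counter can be read as
 *
 *	u64 rx = IBMVNIC_GET_STAT(adapter, IBMVNIC_STAT_OFF(rx_packets));
 *
 * IBMVNIC_STAT_OFF() folds the offset of the embedded
 * struct ibmvnic_statistics into the offset of the field itself, so
 * IBMVNIC_GET_STAT() can dereference it with plain pointer arithmetic.
 */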

static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};

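/* Hypercall calling-convention sketch (see asm/hvcall.h for the
 * authoritative definitions): plpar_hcall() returns up to
 * PLPAR_HCALL_BUFSIZE (4) output words in retbuf.  For H_REG_SUB_CRQ
 * the first word is the number assigned to the new sub-CRQ and the
 * second is its hardware interrupt source, which h_reg_sub_crq()
 * copies out for the caller.
 */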
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/* net_device_ops functions */

static void init_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *rx_pool, int num, int index,
			 int buff_size, int active)
{
	netdev_dbg(adapter->netdev,
		   "Initializing rx_pool %d, %d buffs, %d bytes each\n",
		   index, num, buff_size);
	rx_pool->size = num;
	rx_pool->index = index;
	rx_pool->buff_size = buff_size;
	rx_pool->active = active;
}

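/* Long term buffer handshake (reader's sketch): the buffer is allocated
 * coherently, assigned the next map_id, and advertised to the VNIC
 * server with a REQUEST_MAP CRQ.  The server's response is expected to
 * complete adapter->fw_done via the CRQ response path, which is what
 * the wait below blocks on.
 */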
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);

	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}
	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	/* Initialize the completion before the map request is sent so a
	 * fast response cannot complete a stale completion.
	 */
	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
	if (!adapter->failover)
		send_request_unmap(adapter, ltb->map_id);
}

static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	struct device *dev = &adapter->vdev->dev;
	int i;

	pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
	if (!pool->free_map)
		return -ENOMEM;

	pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
				GFP_KERNEL);

	if (!pool->rx_buff) {
		dev_err(dev, "Couldn't alloc rx buffers\n");
		kfree(pool->free_map);
		return -ENOMEM;
	}

	if (alloc_long_term_buff(adapter, &pool->long_term_buff,
				 pool->size * pool->buff_size)) {
		kfree(pool->free_map);
		kfree(pool->rx_buff);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->next_alloc = 0;
	pool->next_free = 0;

	return 0;
}

static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
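		/* Illustration (hypothetical 2048-byte pool buffer): on a
		 * little-endian host, 0x800 << 8 = 0x80000, and
		 * cpu_to_be32(0x80000) lays the bytes out as 00 08 00 00,
		 * so the size lands in the upper three bytes that form the
		 * 24-bit length field and only the padding byte is dropped.
		 */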
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->migrated)
		return;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void free_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (!pool->rx_buff)
		return;

	for (i = 0; i < pool->size; i++) {
		if (pool->rx_buff[i].skb) {
			dev_kfree_skb_any(pool->rx_buff[i].skb);
			pool->rx_buff[i].skb = NULL;
		}
	}
	kfree(pool->rx_buff);
	pool->rx_buff = NULL;
}

static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	union ibmvnic_crq crq;
	int rxadd_subcrqs;
	u64 *size_array;
	int tx_subcrqs;
	int i, j;

	rxadd_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	tx_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			     be32_to_cpu(adapter->login_rsp_buf->
					 off_rxadd_buff_size));
	adapter->map_id = 1;
	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		goto alloc_napi_failed;
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&adapter->napi[i]);
	}
	adapter->rx_pool =
	    kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);

	if (!adapter->rx_pool)
		goto rx_pool_arr_alloc_failed;
	send_map_query(adapter);
	for (i = 0; i < rxadd_subcrqs; i++) {
		init_rx_pool(adapter, &adapter->rx_pool[i],
			     IBMVNIC_BUFFS_PER_POOL, i,
			     be64_to_cpu(size_array[i]), 1);
		if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
			dev_err(dev, "Couldn't alloc rx pool\n");
			goto rx_pool_alloc_failed;
		}
	}
	adapter->tx_pool =
	    kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);

	if (!adapter->tx_pool)
		goto tx_pool_arr_alloc_failed;
	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
		if (!tx_pool->tx_buff)
			goto tx_pool_alloc_failed;

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->max_tx_entries_per_subcrq *
					 adapter->req_mtu))
			goto tx_ltb_alloc_failed;

		tx_pool->free_map =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map)
			goto tx_fm_alloc_failed;

		for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}
	adapter->bounce_buffer_size =
	    (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
	adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
					 GFP_KERNEL);
	if (!adapter->bounce_buffer)
		goto bounce_alloc_failed;

	adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
						    adapter->bounce_buffer_size,
						    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		dev_err(dev, "Couldn't map tx bounce buffer\n");
		goto bounce_map_failed;
	}
	replenish_pools(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++)
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);

	for (i = 0; i < adapter->req_tx_queues; i++)
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
	ibmvnic_send_crq(adapter, &crq);

	netif_tx_start_all_queues(netdev);

	return 0;

bounce_map_failed:
	kfree(adapter->bounce_buffer);
bounce_alloc_failed:
	i = tx_subcrqs - 1;
	kfree(adapter->tx_pool[i].free_map);
tx_fm_alloc_failed:
	free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
tx_ltb_alloc_failed:
	kfree(adapter->tx_pool[i].tx_buff);
tx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		kfree(adapter->tx_pool[j].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[j].long_term_buff);
		kfree(adapter->tx_pool[j].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
tx_pool_arr_alloc_failed:
	i = rxadd_subcrqs;
rx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		free_rx_pool(adapter, &adapter->rx_pool[j]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[j].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
rx_pool_arr_alloc_failed:
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);	/* undo napi_enable() above */
alloc_napi_failed:
	return -ENOMEM;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int i;

	adapter->closing = true;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	if (!adapter->failover)
		netif_tx_stop_all_queues(netdev);

	if (adapter->bounce_buffer) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->bounce_buffer_size,
					 DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
	ibmvnic_send_crq(adapter, &crq);

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	     i++) {
		kfree(adapter->tx_pool[i].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[i].long_term_buff);
		kfree(adapter->tx_pool[i].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		free_rx_pool(adapter, &adapter->rx_pool[i]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[i].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;

	adapter->closing = false;

	return 0;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field: bitfield determining needed headers
 * @skb: socket buffer
 * @hdr_len: array of header lengths to fill in
 * @hdr_data: buffer to write the header data into
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers.  Saves individual header
 * lengths and returns the total length of the header data, both of
 * which are used to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field: bitfield determining needed headers
 * @hdr_data: buffer containing header data
 * @len: length of data buffer
 * @hdr_len: array of individual header lengths
 * @scrq_arr: descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */

static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			     union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff: tx buffer holding the socket buffer and header staging area
 * @num_entries: number of descriptors to be sent, updated in place
 * @hdr_field: bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */

static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	len = tot_len;
	len -= 24;
	if (len > 0)
		/* update the caller's count, not the pointer itself */
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}

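/* TX path sketch (reader's note; the protocol definitions live in
 * ibmvnic.h): the skb's linear data is copied into the per-queue long
 * term buffer and a v1 TX descriptor pointing at that slot is built.
 * If the firmware wants L2/L3/L4 headers, that descriptor plus the
 * header descriptors from build_hdr_descs_arr() are handed over in one
 * H_SEND_SUB_CRQ_INDIRECT call; otherwise the single descriptor goes
 * out via H_SEND_SUB_CRQ.
 */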
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	bool used_bounce = false;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	tx_pool = &adapter->tx_pool[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_txsubm_subcrqs));
	if (adapter->migrated) {
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
	    adapter->max_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;
	tx_buff->used_bounce = used_bounce;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;
	tx_crq.v1.n_sge = 1;
	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (ip_hdr(skb)->version == 4)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		else if (ip_hdr(skb)->version == 6)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
		hdrs += 2;
	}
	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_BUSY;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->max_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}
	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}

static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int rc;

	/* Adapter timed out, resetting it */
	release_sub_crqs(adapter);
	rc = ibmvnic_reset_crq(adapter);
	if (rc)
		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
	else
		ibmvnic_send_crq_init(adapter);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}

static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;
restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		u32 length;
		u16 offset;
		u8 flags = 0;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			break;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);
		skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		length = skb->len;
		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}
	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete(napi);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}

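/* Reader's note on the rearm pattern in ibmvnic_poll() above: the queue
 * interrupt is re-enabled before napi_complete(), and pending_scrq() is
 * then re-checked.  A completion that slipped into that window is picked
 * up by napi_reschedule(), which disables the interrupt again and jumps
 * back to restart_poll, so the final completion is not left unhandled.
 */
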
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open = ibmvnic_open,
	.ndo_stop = ibmvnic_close,
	.ndo_start_xmit = ibmvnic_xmit,
	.ndo_set_rx_mode = ibmvnic_set_multi,
	.ndo_set_mac_address = ibmvnic_set_mac,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = ibmvnic_netpoll_controller,
#endif
};

/* ethtool functions */

static int ibmvnic_get_settings(struct net_device *netdev,
				struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			  SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
			    ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	/* Initialize the completion before the request goes out so the
	 * response cannot race ahead of the waiter.
	 */
	init_completion(&adapter->stats_done);

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));
	ibmvnic_send_crq(adapter, &crq);

	/* Wait for data to be written */
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_settings = ibmvnic_get_settings,
	.get_drvinfo = ibmvnic_get_drvinfo,
	.get_msglevel = ibmvnic_get_msglevel,
	.set_msglevel = ibmvnic_set_msglevel,
	.get_link = ibmvnic_get_link,
	.get_ringparam = ibmvnic_get_ringparam,
	.get_strings = ibmvnic_get_strings,
	.get_sset_count = ibmvnic_get_sset_count,
	.get_ethtool_stats = ibmvnic_get_ethtool_stats,
};

/* Routines for managing CRQs/sCRQs */

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}

static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
	if (!scrq)
		return NULL;

	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}
	/* Zero only after the allocation is known to have succeeded */
	memset(scrq->msgs, 0, 4 * PAGE_SIZE);

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	scrq->cur = 0;
	scrq->rx_skb_top = NULL;
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
			}
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i]) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
			}
		adapter->rx_scrq = NULL;
	}

	adapter->requested_caps = 0;
}

static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i])
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i])
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
		adapter->rx_scrq = NULL;
	}

	adapter->requested_caps = 0;
}

static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
				txbuff->used_bounce = false;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag)
				dev_kfree_skb_any(txbuff->skb);

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						     producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->max_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq*/
		next->tx_comp.first = 0;
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);

		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);

		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs_no_irqs(adapter);
	return rc;
}

static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	union ibmvnic_crq crq;
	int total_queues;
	int more = 0;
	int i;

	if (!retry) {
		/* Sub-CRQ entries are 32 byte long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			goto allqueues_failed;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues;
		adapter->req_rx_queues = adapter->opt_rx_comp_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->max_mtu;
	}

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
	if (!allqueues)
		goto allqueues_failed;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the failed allocated queues */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		ibmvnic_send_crq(adapter, &crq);
	}

	kfree(allqueues);

	return;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
allqueues_failed:
	ibmvnic_remove(adapter->vdev);
}

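/* Ring-cursor sketch (reader's note, not normative): each sub-CRQ is an
 * array of 32-byte entries.  An entry whose "first" byte carries
 * IBMVNIC_CRQ_CMD_RSP is owned by the driver; callers zero that byte
 * once the completion has been handled.  pending_scrq() peeks at the
 * entry under the cursor, and ibmvnic_next_scrq() advances the cursor
 * (with wrap-around) under the queue lock.
 */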
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}

static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)sub_crq;
	int rc;

	netdev_dbg(adapter->netdev,
		   "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(remote_handle),
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]),
		   (unsigned long int)cpu_to_be64(u64_crq[2]),
		   (unsigned long int)cpu_to_be64(u64_crq[3]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
				cpu_to_be64(remote_handle),
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]),
				cpu_to_be64(u64_crq[2]),
				cpu_to_be64(u64_crq[3]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

Thomas Falconad7775d2016-04-01 17:20:34 -05001705static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
1706 u64 remote_handle, u64 ioba, u64 num_entries)
1707{
1708 unsigned int ua = adapter->vdev->unit_address;
1709 struct device *dev = &adapter->vdev->dev;
1710 int rc;
1711
1712 /* Make sure the hypervisor sees the complete request */
1713 mb();
1714 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
1715 cpu_to_be64(remote_handle),
1716 ioba, num_entries);
1717
1718 if (rc) {
1719 if (rc == H_CLOSED)
1720 dev_warn(dev, "CRQ Queue closed\n");
1721 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
1722 }
1723
1724 return rc;
1725}
1726
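/* Post a single command on the main CRQ.  Callers build a zeroed
 * union ibmvnic_crq on the stack, fill in one command-specific view of
 * the union and hand it down, e.g.:
 *
 *	union ibmvnic_crq crq;
 *
 *	memset(&crq, 0, sizeof(crq));
 *	crq.generic.first = IBMVNIC_CRQ_CMD;
 *	crq.generic.cmd = <command>;
 *	ibmvnic_send_crq(adapter, &crq);
 *
 * exactly as the small senders below do.  The descriptor is only two
 * 64-bit words, so H_SEND_CRQ takes it directly in registers.
 */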
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
	netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}

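/* Build and send the LOGIN request.  The DMA-mapped login buffer is a
 * header followed by two arrays of sub-CRQ handles:
 *
 *	struct ibmvnic_login_buffer	(counts and offsets)
 *	__be64 tx_handles[req_tx_queues]
 *	__be64 rx_handles[req_rx_queues]
 *
 * The server writes its answer into a separately mapped response
 * buffer whose address and length are carried inside the request.  The
 * command is also queued on the in-flight list so that
 * ibmvnic_free_inflight() can unmap and free both buffers if the
 * adapter goes away before LOGIN_RSP arrives.
 */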
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	unsigned long flags;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}
	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dev_err(dev, "Couldn't allocate inflight_cmd\n");
		goto inflight_alloc_failed;
	}
	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);

	memcpy(&inflight_cmd->crq, &crq, sizeof(crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &crq);

	return;

inflight_alloc_failed:
	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
			 DMA_FROM_DEVICE);
buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}

static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     u32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}

static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

/* Send a series of CRQs requesting various capabilities of the VNIC server */
static void send_cap_queries(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	atomic_set(&adapter->running_cap_queries, 0);
	memset(&crq, 0, sizeof(crq));
	crq.query_capability.first = IBMVNIC_CRQ_CMD;
	crq.query_capability.cmd = QUERY_CAPABILITY;

	crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MIN_MTU);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MTU);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability =
	    cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);

	crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
	atomic_inc(&adapter->running_cap_queries);
	ibmvnic_send_crq(adapter, &crq);
}

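/* QUERY_IP_OFFLOAD response: the server has filled in the offload
 * capability buffer mapped in handle_request_cap_rsp().  Translate
 * what the server offers into netdev feature flags, then program the
 * choices back with a CONTROL_IP_OFFLOAD command.  Large send/receive
 * offload is deliberately left disabled here even when advertised.
 */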
static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->ip_offload_tok,
			 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);

	netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
	for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(buf))[i]);

	netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
	netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
		   buf->tcp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
		   buf->tcp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
		   buf->udp_ipv4_chksum);
	netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
		   buf->udp_ipv6_chksum);
	netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
		   buf->large_tx_ipv4);
	netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
		   buf->large_tx_ipv6);
	netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
		   buf->large_rx_ipv4);
	netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
		   buf->large_rx_ipv6);
	netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
		   buf->max_ipv4_header_size);
	netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
		   buf->max_ipv6_header_size);
	netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
		   buf->max_tcp_header_size);
	netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
		   buf->max_udp_header_size);
	netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
		   buf->max_large_tx_size);
	netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
		   buf->max_large_rx_size);
	netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
		   buf->ipv6_extension_header);
	netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
		   buf->tcp_pseudosum_req);
	netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
		   buf->num_ipv6_ext_headers);
	netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
		   buf->off_ipv6_ext_headers);

	adapter->ip_offload_ctrl_tok =
	    dma_map_single(dev, &adapter->ip_offload_ctrl,
			   sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
		dev_err(dev, "Couldn't map ip offload control buffer\n");
		return;
	}

	adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
	adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
	adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
	adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
	adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;

	/* large_tx/rx disabled for now, additional features needed */
	adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
	adapter->ip_offload_ctrl.large_rx_ipv6 = 0;

	adapter->netdev->features = NETIF_F_GSO;

	if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
		adapter->netdev->features |= NETIF_F_IP_CSUM;

	if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
		adapter->netdev->features |= NETIF_F_IPV6_CSUM;

	if ((adapter->netdev->features &
	     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
		adapter->netdev->features |= NETIF_F_RXCSUM;

	memset(&crq, 0, sizeof(crq));
	crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
	crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
	crq.control_ip_offload.len =
	    cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
	crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
	ibmvnic_send_crq(adapter, &crq);
}

static void handle_error_info_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp;
	unsigned long flags;
	bool found = false;
	int i;

	/* A non-zero rc means the error info request failed; only a
	 * successful response carries valid detail data.
	 */
	if (crq->request_error_rsp.rc.code) {
		dev_info(dev, "Request Error Rsp returned with rc=%x\n",
			 crq->request_error_rsp.rc.code);
		return;
	}

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
		if (error_buff->error_id == crq->request_error_rsp.error_id) {
			found = true;
			list_del(&error_buff->list);
			break;
		}
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	if (!found) {
		dev_err(dev, "Couldn't find error id %x\n",
			crq->request_error_rsp.error_id);
		return;
	}

	dev_err(dev, "Detailed info for error id %x:",
		crq->request_error_rsp.error_id);

	for (i = 0; i < error_buff->len; i++) {
		pr_cont("%02x", (int)error_buff->buff[i]);
		if (i % 8 == 7)
			pr_cont(" ");
	}
	pr_cont("\n");

	dma_unmap_single(dev, error_buff->dma, error_buff->len,
			 DMA_FROM_DEVICE);
	kfree(error_buff->buff);
	kfree(error_buff);
}

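/* REQUEST_DUMP_SIZE response: allocate and map a buffer of the size
 * the server reported, then ask for the actual dump with REQUEST_DUMP.
 * Every exit path completes adapter->fw_done, since the requester is
 * parked in wait_for_completion(); the follow-up command is tracked on
 * the in-flight list so it can be cleaned up if the adapter dies.
 */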
static void handle_dump_size_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	int len = be32_to_cpu(crq->request_dump_size_rsp.len);
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq newcrq;
	unsigned long flags;

	/* allocate and map buffer */
	adapter->dump_data = kmalloc(len, GFP_KERNEL);
	if (!adapter->dump_data) {
		complete(&adapter->fw_done);
		return;
	}

	adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
						  DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, adapter->dump_data_token)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map dump data\n");
		kfree(adapter->dump_data);
		complete(&adapter->fw_done);
		return;
	}

	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dma_unmap_single(dev, adapter->dump_data_token, len,
				 DMA_FROM_DEVICE);
		kfree(adapter->dump_data);
		complete(&adapter->fw_done);
		return;
	}

	memset(&newcrq, 0, sizeof(newcrq));
	newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
	newcrq.request_dump.cmd = REQUEST_DUMP;
	newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
	newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);

	memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &newcrq);
}

static void handle_error_indication(union ibmvnic_crq *crq,
				    struct ibmvnic_adapter *adapter)
{
	int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff;
	union ibmvnic_crq new_crq;
	unsigned long flags;

	dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
		crq->error_indication.flags & IBMVNIC_FATAL_ERROR ?
		"FATAL " : "",
		crq->error_indication.error_id,
		crq->error_indication.error_cause);

	error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
	if (!error_buff)
		return;

	error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
	if (!error_buff->buff) {
		kfree(error_buff);
		return;
	}

	error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, error_buff->dma)) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't map error buffer\n");
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dma_unmap_single(dev, error_buff->dma, detail_len,
				 DMA_FROM_DEVICE);
		kfree(error_buff->buff);
		kfree(error_buff);
		return;
	}

	error_buff->len = detail_len;
	error_buff->error_id = crq->error_indication.error_id;

	spin_lock_irqsave(&adapter->error_list_lock, flags);
	list_add_tail(&error_buff->list, &adapter->errors);
	spin_unlock_irqrestore(&adapter->error_list_lock, flags);

	memset(&new_crq, 0, sizeof(new_crq));
	new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
	new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
	new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
	new_crq.request_error_info.len = cpu_to_be32(detail_len);
	new_crq.request_error_info.error_id = crq->error_indication.error_id;

	/* Track the REQUEST_ERROR_INFO command itself, not the address of
	 * the incoming crq pointer, so cleanup can identify it later.
	 */
	memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &new_crq);
}

static void handle_change_mac_rsp(union ibmvnic_crq *crq,
				  struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->change_mac_addr_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
		return;
	}
	memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
	       ETH_ALEN);
}

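/* REQUEST_CAPABILITY response handler.  The server is allowed to grant
 * fewer resources than were requested: on PARTIALSUCCESS the returned
 * number is adopted as the new request value and sub-CRQ negotiation
 * restarts, so a request for eight tx queues may log
 * "req=8, rsp=4 in tx queue, retrying." and be retried with four.
 * Once all seven capabilities are acknowledged, the IP offload query
 * is kicked off.
 */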
static void handle_request_cap_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u64 *req_value;
	char *name;

	switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
	case REQ_TX_QUEUES:
		req_value = &adapter->req_tx_queues;
		name = "tx";
		break;
	case REQ_RX_QUEUES:
		req_value = &adapter->req_rx_queues;
		name = "rx";
		break;
	case REQ_RX_ADD_QUEUES:
		req_value = &adapter->req_rx_add_queues;
		name = "rx_add";
		break;
	case REQ_TX_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_tx_entries_per_subcrq;
		name = "tx_entries_per_subcrq";
		break;
	case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
		req_value = &adapter->req_rx_add_entries_per_subcrq;
		name = "rx_add_entries_per_subcrq";
		break;
	case REQ_MTU:
		req_value = &adapter->req_mtu;
		name = "mtu";
		break;
	case PROMISC_REQUESTED:
		req_value = &adapter->promisc;
		name = "promisc";
		break;
	default:
		dev_err(dev, "Got invalid cap request rsp %d\n",
			crq->request_capability.capability);
		return;
	}

	switch (crq->request_capability_rsp.rc.code) {
	case SUCCESS:
		break;
	case PARTIALSUCCESS:
		dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
			 *req_value,
			 (long int)be32_to_cpu(crq->request_capability_rsp.
					       number), name);
		release_sub_crqs_no_irqs(adapter);
		*req_value = be32_to_cpu(crq->request_capability_rsp.number);
		init_sub_crqs(adapter, 1);
		return;
	default:
		dev_err(dev, "Error %d in request cap rsp\n",
			crq->request_capability_rsp.rc.code);
		return;
	}

	/* Done receiving requested capabilities, query IP offload support */
	if (++adapter->requested_caps == 7) {
		union ibmvnic_crq newcrq;
		int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
		struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
		    &adapter->ip_offload_buf;

		adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
							 buf_sz,
							 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "Couldn't map offload buffer\n");
			return;
		}

		memset(&newcrq, 0, sizeof(newcrq));
		newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
		newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
		newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
		newcrq.query_ip_offload.ioba =
		    cpu_to_be32(adapter->ip_offload_tok);

		ibmvnic_send_crq(adapter, &newcrq);
	}
}

static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
			    struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
	struct ibmvnic_login_buffer *login = adapter->login_buf;
	union ibmvnic_crq crq;
	int i;

	dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, adapter->login_rsp_buf_token,
			 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);

	/* If the number of queues requested can't be allocated by the
	 * server, the login response will return with code 1. We will need
	 * to resend the login buffer with fewer queues requested.
	 */
	if (login_rsp_crq->generic.rc.code) {
		adapter->renegotiate = true;
		complete(&adapter->init_done);
		return 0;
	}

	netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
	for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_rsp_buf))[i]);
	}

	/* Sanity checks */
	if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
	    (be32_to_cpu(login->num_rxcomp_subcrqs) *
	     adapter->req_rx_add_queues !=
	     be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
		dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
		ibmvnic_remove(adapter->vdev);
		return -EIO;
	}
	complete(&adapter->init_done);

	memset(&crq, 0, sizeof(crq));
	crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
	crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
	ibmvnic_send_crq(adapter, &crq);

	return 0;
}

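/* REQUEST_MAP response: the server either accepted a long term mapped
 * buffer or rejected it.  On failure, the map_id that was handed out
 * for the buffer is zeroed in whichever tx/rx pool owns it so the id
 * is not treated as live; fw_done wakes the mapper in either case.
 */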
static void handle_request_map_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	u8 map_id = crq->request_map_rsp.map_id;
	int tx_subcrqs;
	int rx_subcrqs;
	long rc;
	int i;

	tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);

	rc = crq->request_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
		adapter->map_id--;
		/* need to find and zero tx/rx_pool map_id */
		for (i = 0; i < tx_subcrqs; i++) {
			if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
				adapter->tx_pool[i].long_term_buff.map_id = 0;
		}
		for (i = 0; i < rx_subcrqs; i++) {
			if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
				adapter->rx_pool[i].long_term_buff.map_id = 0;
		}
	}
	complete(&adapter->fw_done);
}

static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
				     struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->request_unmap_rsp.rc.code;
	if (rc)
		dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
}

static void handle_query_map_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	rc = crq->query_map_rsp.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
		return;
	}
	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
		   crq->query_map_rsp.free_pages);
}

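/* QUERY_CAPABILITY responses cache the server's min/max/opt values in
 * the adapter structure (a few also feed netdev limits such as
 * min_mtu/max_mtu).  When the last outstanding query completes,
 * running_cap_queries drops to zero and sub-CRQ setup can begin.
 */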
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
				 struct ibmvnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	atomic_dec(&adapter->running_cap_queries);
	netdev_dbg(netdev, "Outstanding queries: %d\n",
		   atomic_read(&adapter->running_cap_queries));
	rc = crq->query_capability.rc.code;
	if (rc) {
		dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
		goto out;
	}

	switch (be16_to_cpu(crq->query_capability.capability)) {
	case MIN_TX_QUEUES:
		adapter->min_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_queues = %lld\n",
			   adapter->min_tx_queues);
		break;
	case MIN_RX_QUEUES:
		adapter->min_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_queues = %lld\n",
			   adapter->min_rx_queues);
		break;
	case MIN_RX_ADD_QUEUES:
		adapter->min_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
			   adapter->min_rx_add_queues);
		break;
	case MAX_TX_QUEUES:
		adapter->max_tx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_queues = %lld\n",
			   adapter->max_tx_queues);
		break;
	case MAX_RX_QUEUES:
		adapter->max_rx_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_queues = %lld\n",
			   adapter->max_rx_queues);
		break;
	case MAX_RX_ADD_QUEUES:
		adapter->max_rx_add_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
			   adapter->max_rx_add_queues);
		break;
	case MIN_TX_ENTRIES_PER_SUBCRQ:
		adapter->min_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
			   adapter->min_tx_entries_per_subcrq);
		break;
	case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->min_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->min_rx_add_entries_per_subcrq);
		break;
	case MAX_TX_ENTRIES_PER_SUBCRQ:
		adapter->max_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
			   adapter->max_tx_entries_per_subcrq);
		break;
	case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
		adapter->max_rx_add_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
			   adapter->max_rx_add_entries_per_subcrq);
		break;
	case TCP_IP_OFFLOAD:
		adapter->tcp_ip_offload =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
			   adapter->tcp_ip_offload);
		break;
	case PROMISC_SUPPORTED:
		adapter->promisc_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "promisc_supported = %lld\n",
			   adapter->promisc_supported);
		break;
	case MIN_MTU:
		adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->min_mtu = adapter->min_mtu;
		netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
		break;
	case MAX_MTU:
		adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
		netdev->max_mtu = adapter->max_mtu;
		netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
		break;
	case MAX_MULTICAST_FILTERS:
		adapter->max_multicast_filters =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_multicast_filters = %lld\n",
			   adapter->max_multicast_filters);
		break;
	case VLAN_HEADER_INSERTION:
		adapter->vlan_header_insertion =
		    be64_to_cpu(crq->query_capability.number);
		if (adapter->vlan_header_insertion)
			netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
		netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
			   adapter->vlan_header_insertion);
		break;
	case MAX_TX_SG_ENTRIES:
		adapter->max_tx_sg_entries =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
			   adapter->max_tx_sg_entries);
		break;
	case RX_SG_SUPPORTED:
		adapter->rx_sg_supported =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "rx_sg_supported = %lld\n",
			   adapter->rx_sg_supported);
		break;
	case OPT_TX_COMP_SUB_QUEUES:
		adapter->opt_tx_comp_sub_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
			   adapter->opt_tx_comp_sub_queues);
		break;
	case OPT_RX_COMP_QUEUES:
		adapter->opt_rx_comp_queues =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
			   adapter->opt_rx_comp_queues);
		break;
	case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
		adapter->opt_rx_bufadd_q_per_rx_comp_q =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
			   adapter->opt_rx_bufadd_q_per_rx_comp_q);
		break;
	case OPT_TX_ENTRIES_PER_SUBCRQ:
		adapter->opt_tx_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
			   adapter->opt_tx_entries_per_subcrq);
		break;
	case OPT_RXBA_ENTRIES_PER_SUBCRQ:
		adapter->opt_rxba_entries_per_subcrq =
		    be64_to_cpu(crq->query_capability.number);
		netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
			   adapter->opt_rxba_entries_per_subcrq);
		break;
	case TX_RX_DESC_REQ:
		adapter->tx_rx_desc_req = crq->query_capability.number;
		netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
			   adapter->tx_rx_desc_req);
		break;

	default:
		netdev_err(netdev, "Got invalid cap rsp %d\n",
			   crq->query_capability.capability);
	}

out:
	/* We're done querying the capabilities, initialize sub-crqs */
	if (atomic_read(&adapter->running_cap_queries) == 0)
		init_sub_crqs(adapter, 0);
}

static void handle_control_ras_rsp(union ibmvnic_crq *crq,
				   struct ibmvnic_adapter *adapter)
{
	u8 correlator = crq->control_ras_rsp.correlator;
	struct device *dev = &adapter->vdev->dev;
	bool found = false;
	int i;

	if (crq->control_ras_rsp.rc.code) {
		dev_warn(dev, "Control ras failed rc=%d\n",
			 crq->control_ras_rsp.rc.code);
		return;
	}

	for (i = 0; i < adapter->ras_comp_num; i++) {
		if (adapter->ras_comps[i].correlator == correlator) {
			found = true;
			break;
		}
	}

	if (!found) {
		dev_warn(dev, "Correlator not found on control_ras_rsp\n");
		return;
	}

	switch (crq->control_ras_rsp.op) {
	case IBMVNIC_TRACE_LEVEL:
		adapter->ras_comps[i].trace_level = crq->control_ras.level;
		break;
	case IBMVNIC_ERROR_LEVEL:
		adapter->ras_comps[i].error_check_level =
		    crq->control_ras.level;
		break;
	case IBMVNIC_TRACE_PAUSE:
		adapter->ras_comp_int[i].paused = 1;
		break;
	case IBMVNIC_TRACE_RESUME:
		adapter->ras_comp_int[i].paused = 0;
		break;
	case IBMVNIC_TRACE_ON:
		adapter->ras_comps[i].trace_on = 1;
		break;
	case IBMVNIC_TRACE_OFF:
		adapter->ras_comps[i].trace_on = 0;
		break;
	case IBMVNIC_CHG_TRACE_BUFF_SZ:
		/* trace_buff_sz is 3 bytes, stuff it into an int */
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
		    crq->control_ras_rsp.trace_buff_sz[0];
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
		    crq->control_ras_rsp.trace_buff_sz[1];
		((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
		    crq->control_ras_rsp.trace_buff_sz[2];
		break;
	default:
		dev_err(dev, "invalid op %d on control_ras_rsp",
			crq->control_ras_rsp.op);
	}
}

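/* debugfs plumbing for the firmware RAS components.  Each component
 * gets its own directory (created in handle_request_ras_comps_rsp()
 * below) holding trace, trace_level, error_level, tracing, paused and
 * trace_buf_size files.  Assuming the adapter's debugfs root is under
 * the usual mount point, a session might look like:
 *
 *	echo 1 > /sys/kernel/debug/.../ras_comps/<component>/tracing
 *	cat /sys/kernel/debug/.../ras_comps/<component>/trace
 *
 * Writes are parsed as decimal integers and translated into
 * CONTROL_RAS commands; reading "trace" pulls the firmware trace
 * buffer over DMA with COLLECT_FW_TRACE.
 */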
static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
			  loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_fw_trace_entry *trace;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	dma_addr_t trace_tok;

	if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
		return 0;

	trace =
	    dma_alloc_coherent(dev,
			       be32_to_cpu(adapter->ras_comps[num].
					   trace_buff_size), &trace_tok,
			       GFP_KERNEL);
	if (!trace) {
		dev_err(dev, "Couldn't alloc trace buffer\n");
		return 0;
	}

	memset(&crq, 0, sizeof(crq));
	crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
	crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
	crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
	crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
	crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;

	/* Initialize the completion before sending the command so the
	 * response cannot win the race against the waiter below.
	 */
	init_completion(&adapter->fw_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);

	if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
		len =
		    be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
		    *ppos;

	if (copy_to_user(user_buf, &((u8 *)trace)[*ppos], len)) {
		dma_free_coherent(dev,
				  be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
				  trace, trace_tok);
		return -EFAULT;
	}

	dma_free_coherent(dev,
			  be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
			  trace, trace_tok);
	*ppos += len;
	return len;
}

static const struct file_operations trace_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= trace_read,
};

static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
			   loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* 1 or 0 plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}

static ssize_t paused_write(struct file *file, const char __user *user_buf,
			    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	size_t count;
	char buff[9]; /* decimal max int plus \n and \0 */

	count = min(len, sizeof(buff) - 1);
	if (copy_from_user(buff, user_buf, count))
		return -EFAULT;
	buff[count] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	adapter->ras_comp_int[num].paused = val ? 1 : 0;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations paused_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= paused_read,
	.write		= paused_write,
};

static ssize_t tracing_read(struct file *file, char __user *user_buf,
			    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* 1 or 0 plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}

static ssize_t tracing_write(struct file *file, const char __user *user_buf,
			     size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	size_t count;
	char buff[9]; /* decimal max int plus \n and \0 */

	count = min(len, sizeof(buff) - 1);
	if (copy_from_user(buff, user_buf, count))
		return -EFAULT;
	buff[count] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations tracing_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= tracing_read,
	.write		= tracing_write,
};

static ssize_t error_level_read(struct file *file, char __user *user_buf,
				size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* decimal max char plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);

	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}

static ssize_t error_level_write(struct file *file, const char __user *user_buf,
				 size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	union ibmvnic_crq crq;
	unsigned long val;
	size_t count;
	char buff[9]; /* decimal max int plus \n and \0 */

	count = min(len, sizeof(buff) - 1);
	if (copy_from_user(buff, user_buf, count))
		return -EFAULT;
	buff[count] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	if (val > 9)
		val = 9;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator = adapter->ras_comps[num].correlator;
	crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
	crq.control_ras.level = val;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations error_level_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= error_level_read,
	.write		= error_level_write,
};

static ssize_t trace_level_read(struct file *file, char __user *user_buf,
				size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[5]; /* decimal max char plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);
	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}

static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
				 size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	union ibmvnic_crq crq;
	unsigned long val;
	size_t count;
	char buff[9]; /* decimal max int plus \n and \0 */

	count = min(len, sizeof(buff) - 1);
	if (copy_from_user(buff, user_buf, count))
		return -EFAULT;
	buff[count] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;
	if (val > 9)
		val = 9;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator =
	    adapter->ras_comps[ras_comp_int->num].correlator;
	crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
	crq.control_ras.level = val;
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations trace_level_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= trace_level_read,
	.write		= trace_level_write,
};

static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
				    size_t len, loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	int num = ras_comp_int->num;
	char buff[9]; /* decimal max int plus \n and \0 */
	int size;

	size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_buff_size);
	if (*ppos >= size)
		return 0;

	if (copy_to_user(user_buf, buff, size))
		return -EFAULT;

	*ppos += size;
	return size;
}

static ssize_t trace_buff_size_write(struct file *file,
				     const char __user *user_buf, size_t len,
				     loff_t *ppos)
{
	struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
	struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
	union ibmvnic_crq crq;
	unsigned long val;
	size_t count;
	char buff[9]; /* decimal max int plus \n and \0 */

	count = min(len, sizeof(buff) - 1);
	if (copy_from_user(buff, user_buf, count))
		return -EFAULT;
	buff[count] = '\0';
	if (kstrtoul(buff, 10, &val))
		return -EINVAL;

	memset(&crq, 0, sizeof(crq));
	crq.control_ras.first = IBMVNIC_CRQ_CMD;
	crq.control_ras.cmd = CONTROL_RAS;
	crq.control_ras.correlator =
	    adapter->ras_comps[ras_comp_int->num].correlator;
	crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
	/* trace_buff_sz is 3 bytes, stuff an int into it */
	crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
	crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
	crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
	ibmvnic_send_crq(adapter, &crq);

	return len;
}

static const struct file_operations trace_size_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= trace_buff_size_read,
	.write		= trace_buff_size_write,
};

static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
					 struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct dentry *dir_ent;
	struct dentry *ent;
	int i;

	debugfs_remove_recursive(adapter->ras_comps_ent);

	adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
						    adapter->debugfs_dir);
	if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
		dev_info(dev, "debugfs create ras_comps dir failed\n");
		return;
	}

	for (i = 0; i < adapter->ras_comp_num; i++) {
		dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
					     adapter->ras_comps_ent);
		if (!dir_ent || IS_ERR(dir_ent)) {
			dev_info(dev, "debugfs create %s dir failed\n",
				 adapter->ras_comps[i].name);
			continue;
		}

		adapter->ras_comp_int[i].adapter = adapter;
		adapter->ras_comp_int[i].num = i;
		adapter->ras_comp_int[i].desc_blob.data =
		    &adapter->ras_comps[i].description;
		adapter->ras_comp_int[i].desc_blob.size =
		    sizeof(adapter->ras_comps[i].description);

		/* Don't need to remember the dentries because the debugfs
		 * dir gets removed recursively
		 */
		ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
					  &adapter->ras_comp_int[i].desc_blob);
		ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &trace_size_ops);
		ent = debugfs_create_file("trace_level",
					  S_IRUGO |
					  (adapter->ras_comps[i].trace_level !=
					   0xFF ? S_IWUSR : 0),
					  dir_ent, &adapter->ras_comp_int[i],
					  &trace_level_ops);
		ent = debugfs_create_file("error_level",
					  S_IRUGO |
					  (adapter->
					   ras_comps[i].error_check_level !=
					   0xFF ? S_IWUSR : 0),
					  dir_ent, &adapter->ras_comp_int[i],
					  &error_level_ops);
		ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &tracing_ops);
		ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
					  dir_ent, &adapter->ras_comp_int[i],
					  &paused_ops);
		ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
					  &adapter->ras_comp_int[i],
					  &trace_ops);
	}
}

static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
					    struct ibmvnic_adapter *adapter)
{
	int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq newcrq;

	adapter->ras_comps = dma_alloc_coherent(dev, len,
						&adapter->ras_comps_tok,
						GFP_KERNEL);
	if (!adapter->ras_comps) {
		if (!firmware_has_feature(FW_FEATURE_CMO))
			dev_err(dev, "Couldn't alloc fw comps buffer\n");
		return;
	}

	adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
					sizeof(struct ibmvnic_fw_comp_internal),
					GFP_KERNEL);
	if (!adapter->ras_comp_int) {
		dma_free_coherent(dev, len, adapter->ras_comps,
				  adapter->ras_comps_tok);
		return;
	}

	memset(&newcrq, 0, sizeof(newcrq));
	newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
	newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
	newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
	newcrq.request_ras_comps.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &newcrq);
}

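/* Drain the in-flight command list when the adapter is torn down or
 * the transport resets.  Commands that pinned DMA buffers (LOGIN,
 * REQUEST_ERROR_INFO) have those buffers unmapped and freed here, and
 * anyone blocked waiting on a dump is released via fw_done.
 */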
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_error_buff *error_buff, *tmp2;
	unsigned long flags;
	unsigned long flags2;

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
		switch (inflight_cmd->crq.generic.cmd) {
		case LOGIN:
			dma_unmap_single(dev, adapter->login_buf_token,
					 adapter->login_buf_sz,
					 DMA_BIDIRECTIONAL);
			dma_unmap_single(dev, adapter->login_rsp_buf_token,
					 adapter->login_rsp_buf_sz,
					 DMA_BIDIRECTIONAL);
			kfree(adapter->login_rsp_buf);
			kfree(adapter->login_buf);
			break;
		case REQUEST_DUMP:
			complete(&adapter->fw_done);
			break;
		case REQUEST_ERROR_INFO:
			spin_lock_irqsave(&adapter->error_list_lock, flags2);
			list_for_each_entry_safe(error_buff, tmp2,
						 &adapter->errors, list) {
				dma_unmap_single(dev, error_buff->dma,
						 error_buff->len,
						 DMA_FROM_DEVICE);
				kfree(error_buff->buff);
				list_del(&error_buff->list);
				kfree(error_buff);
			}
			spin_unlock_irqrestore(&adapter->error_list_lock,
					       flags2);
			break;
		}
		list_del(&inflight_cmd->list);
		kfree(inflight_cmd);
	}
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);
}

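/* Worker for transport events signalled through the CRQ (see
 * ibmvnic_handle_crq() below).  After a partition migration the CRQ
 * must be re-enabled and the init handshake restarted from scratch,
 * so in-flight commands and sub-CRQs belonging to the old connection
 * are discarded first.
 */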
static void ibmvnic_xport_event(struct work_struct *work)
{
	struct ibmvnic_adapter *adapter = container_of(work,
						       struct ibmvnic_adapter,
						       ibmvnic_xport);
	struct device *dev = &adapter->vdev->dev;
	long rc;

	ibmvnic_free_inflight(adapter);
	release_sub_crqs(adapter);
	if (adapter->migrated) {
		rc = ibmvnic_reenable_crq_queue(adapter);
		if (rc)
			dev_err(dev, "Error after enable rc=%ld\n", rc);
		adapter->migrated = false;
		rc = ibmvnic_send_crq_init(adapter);
		if (rc)
			dev_err(dev, "Error sending init rc=%ld\n", rc);
	}
}

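/* Top-level CRQ dispatcher.  The first byte of each entry selects the
 * message class (init handshake, transport event or command response)
 * and the second the specific command; command responses are farmed
 * out to the handle_*_rsp() helpers above.
 */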
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
			       struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_generic_crq *gen_crq = &crq->generic;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
		   ((unsigned long int *)crq)[0],
		   ((unsigned long int *)crq)[1]);
	switch (gen_crq->first) {
	case IBMVNIC_CRQ_INIT_RSP:
		switch (gen_crq->cmd) {
		case IBMVNIC_CRQ_INIT:
			dev_info(dev, "Partner initialized\n");
			/* Send back a response */
			rc = ibmvnic_send_crq_init_complete(adapter);
			if (!rc)
				schedule_work(&adapter->vnic_crq_init);
			else
				dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
			break;
		case IBMVNIC_CRQ_INIT_COMPLETE:
			dev_info(dev, "Partner initialization complete\n");
			send_version_xchg(adapter);
			break;
		default:
			dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
		}
		return;
	case IBMVNIC_CRQ_XPORT_EVENT:
		if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
			dev_info(dev, "Re-enabling adapter\n");
			adapter->migrated = true;
			schedule_work(&adapter->ibmvnic_xport);
		} else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
			dev_info(dev, "Backing device failover detected\n");
			netif_carrier_off(netdev);
			adapter->failover = true;
		} else {
			/* The adapter lost the connection */
			dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
				gen_crq->cmd);
			schedule_work(&adapter->ibmvnic_xport);
		}
		return;
	case IBMVNIC_CRQ_CMD_RSP:
		break;
	default:
		dev_err(dev, "Got an invalid msg type 0x%02x\n",
			gen_crq->first);
		return;
	}

	switch (gen_crq->cmd) {
	case VERSION_EXCHANGE_RSP:
		rc = crq->version_exchange_rsp.rc.code;
		if (rc) {
			dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
			break;
		}
		dev_info(dev, "Partner protocol version is %d\n",
			 crq->version_exchange_rsp.version);
		if (be16_to_cpu(crq->version_exchange_rsp.version) <
		    ibmvnic_version)
			ibmvnic_version =
			    be16_to_cpu(crq->version_exchange_rsp.version);
		send_cap_queries(adapter);
		break;
	case QUERY_CAPABILITY_RSP:
		handle_query_cap_rsp(crq, adapter);
		break;
	case QUERY_MAP_RSP:
		handle_query_map_rsp(crq, adapter);
		break;
	case REQUEST_MAP_RSP:
		handle_request_map_rsp(crq, adapter);
		break;
	case REQUEST_UNMAP_RSP:
		handle_request_unmap_rsp(crq, adapter);
		break;
	case REQUEST_CAPABILITY_RSP:
		handle_request_cap_rsp(crq, adapter);
		break;
	case LOGIN_RSP:
		netdev_dbg(netdev, "Got Login Response\n");
		handle_login_rsp(crq, adapter);
		break;
	case LOGICAL_LINK_STATE_RSP:
		netdev_dbg(netdev, "Got Logical Link State Response\n");
		adapter->logical_link_state =
		    crq->logical_link_state_rsp.link_state;
		break;
	case LINK_STATE_INDICATION:
		netdev_dbg(netdev, "Got Logical Link State Indication\n");
		adapter->phys_link_state =
		    crq->link_state_indication.phys_link_state;
		adapter->logical_link_state =
		    crq->link_state_indication.logical_link_state;
		break;
	case CHANGE_MAC_ADDR_RSP:
		netdev_dbg(netdev, "Got MAC address change Response\n");
		handle_change_mac_rsp(crq, adapter);
		break;
	case ERROR_INDICATION:
		netdev_dbg(netdev, "Got Error Indication\n");
		handle_error_indication(crq, adapter);
		break;
	case REQUEST_ERROR_RSP:
		netdev_dbg(netdev, "Got Error Detail Response\n");
		handle_error_info_rsp(crq, adapter);
		break;
3359 case REQUEST_STATISTICS_RSP:
3360 netdev_dbg(netdev, "Got Statistics Response\n");
3361 complete(&adapter->stats_done);
3362 break;
3363 case REQUEST_DUMP_SIZE_RSP:
3364 netdev_dbg(netdev, "Got Request Dump Size Response\n");
3365 handle_dump_size_rsp(crq, adapter);
3366 break;
3367 case REQUEST_DUMP_RSP:
3368 netdev_dbg(netdev, "Got Request Dump Response\n");
3369 complete(&adapter->fw_done);
3370 break;
3371 case QUERY_IP_OFFLOAD_RSP:
3372 netdev_dbg(netdev, "Got Query IP offload Response\n");
3373 handle_query_ip_offload_rsp(adapter);
3374 break;
3375 case MULTICAST_CTRL_RSP:
3376 netdev_dbg(netdev, "Got multicast control Response\n");
3377 break;
3378 case CONTROL_IP_OFFLOAD_RSP:
3379 netdev_dbg(netdev, "Got Control IP offload Response\n");
3380 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
3381 sizeof(adapter->ip_offload_ctrl),
3382 DMA_TO_DEVICE);
3383 /* We're done with the queries, perform the login */
3384 send_login(adapter);
3385 break;
3386 case REQUEST_RAS_COMP_NUM_RSP:
3387 netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
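		/* An rc of 10 here is taken to mean the server does not
		 * support RAS component queries.
		 */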
3388 if (crq->request_ras_comp_num_rsp.rc.code == 10) {
3389 netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
3390 break;
3391 }
3392 adapter->ras_comp_num =
3393 be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
3394 handle_request_ras_comp_num_rsp(crq, adapter);
3395 break;
3396 case REQUEST_RAS_COMPS_RSP:
3397 netdev_dbg(netdev, "Got Request RAS Comps Response\n");
3398 handle_request_ras_comps_rsp(crq, adapter);
3399 break;
3400 case CONTROL_RAS_RSP:
3401 netdev_dbg(netdev, "Got Control RAS Response\n");
3402 handle_control_ras_rsp(crq, adapter);
3403 break;
3404 case COLLECT_FW_TRACE_RSP:
3405 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
3406 complete(&adapter->fw_done);
3407 break;
3408 default:
3409 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
3410 gen_crq->cmd);
3411 }
3412}
3413
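/* CRQ interrupt handler.  Interrupts are disabled while the queue is
 * drained, then re-enabled and the queue checked once more; a message
 * that raced with the re-enable is handled and the drain repeated, so
 * no CRQ entry is left stranded behind a lost interrupt.
 */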
3414static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
3415{
3416 struct ibmvnic_adapter *adapter = instance;
3417 struct ibmvnic_crq_queue *queue = &adapter->crq;
3418 struct vio_dev *vdev = adapter->vdev;
3419 union ibmvnic_crq *crq;
3420 unsigned long flags;
3421 bool done = false;
3422
3423 spin_lock_irqsave(&queue->lock, flags);
3424 vio_disable_interrupts(vdev);
3425 while (!done) {
3426 /* Pull all the valid messages off the CRQ */
3427 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
3428 ibmvnic_handle_crq(crq, adapter);
3429 crq->generic.first = 0;
3430 }
3431 vio_enable_interrupts(vdev);
3432 crq = ibmvnic_next_crq(adapter);
3433 if (crq) {
3434 vio_disable_interrupts(vdev);
3435 ibmvnic_handle_crq(crq, adapter);
3436 crq->generic.first = 0;
3437 } else {
3438 done = true;
3439 }
3440 }
3441 spin_unlock_irqrestore(&queue->lock, flags);
3442 return IRQ_HANDLED;
3443}
3444
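/* Ask the hypervisor to re-enable the CRQ after a partition migration,
 * retrying for as long as it reports the operation busy or still in
 * progress.
 */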
3445static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
3446{
3447 struct vio_dev *vdev = adapter->vdev;
3448 int rc;
3449
3450 do {
3451 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
3452 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
3453
3454 if (rc)
3455 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
3456
3457 return rc;
3458}
3459
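/* Reset the CRQ by closing it, zeroing the message page, and
 * registering it with the hypervisor again.  H_CLOSED is not fatal
 * here; it only means the partner side has not opened its queue yet.
 */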
3460static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
3461{
3462 struct ibmvnic_crq_queue *crq = &adapter->crq;
3463 struct device *dev = &adapter->vdev->dev;
3464 struct vio_dev *vdev = adapter->vdev;
3465 int rc;
3466
3467 /* Close the CRQ */
3468 do {
3469 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3470 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3471
3472 /* Clean out the queue */
3473 memset(crq->msgs, 0, PAGE_SIZE);
3474 crq->cur = 0;
3475
3476 /* And re-open it again */
3477 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3478 crq->msg_token, PAGE_SIZE);
3479
3480 if (rc == H_CLOSED)
3481 /* Adapter is good, but other end is not ready */
3482 dev_warn(dev, "Partner adapter not ready\n");
3483 else if (rc != 0)
3484 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
3485
3486 return rc;
3487}
3488
3489static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
3490{
3491 struct ibmvnic_crq_queue *crq = &adapter->crq;
3492 struct vio_dev *vdev = adapter->vdev;
3493 long rc;
3494
3495 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
3496 free_irq(vdev->irq, adapter);
3497 do {
3498 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3499 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3500
3501 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
3502 DMA_BIDIRECTIONAL);
3503 free_page((unsigned long)crq->msgs);
3504}
3505
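/* Allocate and register the CRQ: a zeroed page of message slots is
 * DMA mapped and handed to the hypervisor, falling back to a CRQ
 * reset if H_REG_CRQ reports the resource busy (e.g. after a kexec),
 * and the CRQ interrupt is then requested and enabled.
 */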
3506static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
3507{
3508 struct ibmvnic_crq_queue *crq = &adapter->crq;
3509 struct device *dev = &adapter->vdev->dev;
3510 struct vio_dev *vdev = adapter->vdev;
3511 int rc, retrc = -ENOMEM;
3512
3513 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
3514 /* Should we allocate more than one page? */
3515
3516 if (!crq->msgs)
3517 return -ENOMEM;
3518
3519 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3520 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
3521 DMA_BIDIRECTIONAL);
3522 if (dma_mapping_error(dev, crq->msg_token))
3523 goto map_failed;
3524
3525 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3526 crq->msg_token, PAGE_SIZE);
3527
3528 if (rc == H_RESOURCE)
3529 /* maybe kexecing and resource is busy. try a reset */
3530 rc = ibmvnic_reset_crq(adapter);
3531 retrc = rc;
3532
3533 if (rc == H_CLOSED) {
3534 dev_warn(dev, "Partner adapter not ready\n");
3535 } else if (rc) {
3536 dev_warn(dev, "Error %d opening adapter\n", rc);
3537 goto reg_crq_failed;
3538 }
3539
3540 retrc = 0;
3541
3542 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
3543 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
3544 adapter);
3545 if (rc) {
3546 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
3547 vdev->irq, rc);
3548 goto req_irq_failed;
3549 }
3550
3551 rc = vio_enable_interrupts(vdev);
3552 if (rc) {
3553 dev_err(dev, "Error %d enabling interrupts\n", rc);
3554 goto req_irq_failed;
3555 }
3556
3557 crq->cur = 0;
3558 spin_lock_init(&crq->lock);
3559
3560 return retrc;
3561
3562req_irq_failed:
3563 do {
3564 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3565 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3566reg_crq_failed:
3567 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3568map_failed:
3569 free_page((unsigned long)crq->msgs);
3570 return retrc;
3571}
3572
3573/* debugfs for dump */
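/* Read handler for the debugfs dump file (typically
 * /sys/kernel/debug/ibmvnic_<unit-address>/dump): request the firmware
 * dump size, wait for the response handlers to collect the dump, then
 * copy it to the seq_file and release the DMA buffer that held it.
 */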
3574static int ibmvnic_dump_show(struct seq_file *seq, void *v)
3575{
3576 struct net_device *netdev = seq->private;
3577 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3578 struct device *dev = &adapter->vdev->dev;
3579 union ibmvnic_crq crq;
3580
	memset(&crq, 0, sizeof(crq));
	crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
	crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;

	/* Initialize the completion before sending the request so the
	 * response handler cannot signal it before we start waiting.
	 */
	init_completion(&adapter->fw_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->fw_done);
3588
3589 seq_write(seq, adapter->dump_data, adapter->dump_data_size);
3590
3591 dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
3592 DMA_BIDIRECTIONAL);
3593
3594 kfree(adapter->dump_data);
3595
3596 return 0;
3597}
3598
3599static int ibmvnic_dump_open(struct inode *inode, struct file *file)
3600{
3601 return single_open(file, ibmvnic_dump_show, inode->i_private);
3602}
3603
3604static const struct file_operations ibmvnic_dump_ops = {
3605 .owner = THIS_MODULE,
3606 .open = ibmvnic_dump_open,
3607 .read = seq_read,
3608 .llseek = seq_lseek,
3609 .release = single_release,
3610};
3611
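/* Work routine run when the partner initiates (or re-initiates) the
 * connection: after a failover the sub-CRQs and any running interface
 * are torn down first, the version exchange and capability negotiation
 * are then redriven, and the netdev is finally either restarted or
 * registered for the first time.
 */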
Thomas Falcon65dc6892016-07-06 15:35:18 -05003612static void handle_crq_init_rsp(struct work_struct *work)
3613{
3614 struct ibmvnic_adapter *adapter = container_of(work,
3615 struct ibmvnic_adapter,
3616 vnic_crq_init);
3617 struct device *dev = &adapter->vdev->dev;
3618 struct net_device *netdev = adapter->netdev;
3619 unsigned long timeout = msecs_to_jiffies(30000);
Thomas Falcondfad09a2016-08-18 11:37:51 -05003620 bool restart = false;
Thomas Falcon65dc6892016-07-06 15:35:18 -05003621 int rc;
3622
Thomas Falcondfad09a2016-08-18 11:37:51 -05003623 if (adapter->failover) {
3624 release_sub_crqs(adapter);
3625 if (netif_running(netdev)) {
3626 netif_tx_disable(netdev);
3627 ibmvnic_close(netdev);
3628 restart = true;
3629 }
3630 }
3631
	/* Prepare the completion before sending the version exchange so
	 * the response cannot race past the waiter.
	 */
	reinit_completion(&adapter->init_done);
	send_version_xchg(adapter);
3634 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3635 dev_err(dev, "Passive init timeout\n");
3636 goto task_failed;
3637 }
3638
3639 do {
3640 if (adapter->renegotiate) {
3641 adapter->renegotiate = false;
3642 release_sub_crqs_no_irqs(adapter);
			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);

3646 if (!wait_for_completion_timeout(&adapter->init_done,
3647 timeout)) {
3648 dev_err(dev, "Passive init timeout\n");
3649 goto task_failed;
3650 }
3651 }
3652 } while (adapter->renegotiate);
3653 rc = init_sub_crq_irqs(adapter);
3654
3655 if (rc)
3656 goto task_failed;
3657
3658 netdev->real_num_tx_queues = adapter->req_tx_queues;
Thomas Falcon87737f82016-10-17 15:28:10 -05003659 netdev->mtu = adapter->req_mtu;
Jarod Wilsond894be52016-10-20 13:55:16 -04003660 netdev->min_mtu = adapter->min_mtu;
3661 netdev->max_mtu = adapter->max_mtu;
Thomas Falcon65dc6892016-07-06 15:35:18 -05003662
Thomas Falcondfad09a2016-08-18 11:37:51 -05003663 if (adapter->failover) {
3664 adapter->failover = false;
3665 if (restart) {
3666 rc = ibmvnic_open(netdev);
3667 if (rc)
3668 goto restart_failed;
3669 }
3670 netif_carrier_on(netdev);
3671 return;
3672 }
3673
Thomas Falcon65dc6892016-07-06 15:35:18 -05003674 rc = register_netdev(netdev);
3675 if (rc) {
3676 dev_err(dev,
3677 "failed to register netdev rc=%d\n", rc);
3678 goto register_failed;
3679 }
3680 dev_info(dev, "ibmvnic registered\n");
3681
3682 return;
3683
Thomas Falcondfad09a2016-08-18 11:37:51 -05003684restart_failed:
3685 dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
Thomas Falcon65dc6892016-07-06 15:35:18 -05003686register_failed:
3687 release_sub_crqs(adapter);
3688task_failed:
3689 dev_err(dev, "Passive initialization was not successful\n");
3690}
3691
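/* Probe: read the MAC address from the device tree, allocate the
 * netdev, bring up the CRQ, map the statistics buffer, create the
 * debugfs entries, and drive the initial handshake.  A handshake
 * timeout is not fatal; the passive path in handle_crq_init_rsp()
 * finishes bring-up once the partner responds.
 */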
Thomas Falcon032c5e82015-12-21 11:26:06 -06003692static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3693{
Thomas Falconea22d512016-07-06 15:35:17 -05003694 unsigned long timeout = msecs_to_jiffies(30000);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003695 struct ibmvnic_adapter *adapter;
3696 struct net_device *netdev;
3697 unsigned char *mac_addr_p;
3698 struct dentry *ent;
Thomas Falcone1fac0a2016-11-11 11:00:46 -06003699 char buf[17]; /* debugfs name buf */
Thomas Falcon032c5e82015-12-21 11:26:06 -06003700 int rc;
3701
3702 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
3703 dev->unit_address);
3704
3705 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
3706 VETH_MAC_ADDR, NULL);
3707 if (!mac_addr_p) {
3708 dev_err(&dev->dev,
3709 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
3710 __FILE__, __LINE__);
		/* Without a MAC address the probe cannot succeed */
		return -EINVAL;
3712 }
3713
3714 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
3715 IBMVNIC_MAX_TX_QUEUES);
3716 if (!netdev)
3717 return -ENOMEM;
3718
3719 adapter = netdev_priv(netdev);
3720 dev_set_drvdata(&dev->dev, netdev);
3721 adapter->vdev = dev;
3722 adapter->netdev = netdev;
Thomas Falcondfad09a2016-08-18 11:37:51 -05003723 adapter->failover = false;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003724
3725 ether_addr_copy(adapter->mac_addr, mac_addr_p);
3726 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3727 netdev->irq = dev->irq;
3728 netdev->netdev_ops = &ibmvnic_netdev_ops;
3729 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
3730 SET_NETDEV_DEV(netdev, &dev->dev);
3731
Thomas Falcon65dc6892016-07-06 15:35:18 -05003732 INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
Thomas Falcon9888d7b2016-10-27 12:28:51 -05003733 INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
Thomas Falcon65dc6892016-07-06 15:35:18 -05003734
Thomas Falcon032c5e82015-12-21 11:26:06 -06003735 spin_lock_init(&adapter->stats_lock);
3736
3737 rc = ibmvnic_init_crq_queue(adapter);
3738 if (rc) {
3739 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
3740 goto free_netdev;
3741 }
3742
3743 INIT_LIST_HEAD(&adapter->errors);
3744 INIT_LIST_HEAD(&adapter->inflight);
3745 spin_lock_init(&adapter->error_list_lock);
3746 spin_lock_init(&adapter->inflight_lock);
3747
3748 adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
3749 sizeof(struct ibmvnic_statistics),
3750 DMA_FROM_DEVICE);
3751 if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
3752 if (!firmware_has_feature(FW_FEATURE_CMO))
3753 dev_err(&dev->dev, "Couldn't map stats buffer\n");
Wei Yongjun0e872032016-08-24 13:47:58 +00003754 rc = -ENOMEM;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003755 goto free_crq;
3756 }
3757
3758 snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
3759 ent = debugfs_create_dir(buf, NULL);
3760 if (!ent || IS_ERR(ent)) {
3761 dev_info(&dev->dev, "debugfs create directory failed\n");
3762 adapter->debugfs_dir = NULL;
3763 } else {
3764 adapter->debugfs_dir = ent;
3765 ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
3766 netdev, &ibmvnic_dump_ops);
3767 if (!ent || IS_ERR(ent)) {
3768 dev_info(&dev->dev,
3769 "debugfs create dump file failed\n");
3770 adapter->debugfs_dump = NULL;
3771 } else {
3772 adapter->debugfs_dump = ent;
3773 }
3774 }
	init_completion(&adapter->init_done);
	ibmvnic_send_crq_init(adapter);

	/* A timeout here is not fatal: handle_crq_init_rsp() completes
	 * initialization when the partner answers later.
	 */
	if (!wait_for_completion_timeout(&adapter->init_done, timeout))
		return 0;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003780
John Allen498cd8e2016-04-06 11:49:55 -05003781 do {
John Allen498cd8e2016-04-06 11:49:55 -05003782 if (adapter->renegotiate) {
Thomas Falconea22d512016-07-06 15:35:17 -05003783 adapter->renegotiate = false;
3784 release_sub_crqs_no_irqs(adapter);
			reinit_completion(&adapter->init_done);
			send_cap_queries(adapter);

Thomas Falconea22d512016-07-06 15:35:17 -05003788 if (!wait_for_completion_timeout(&adapter->init_done,
3789 timeout))
3790 return 0;
John Allen498cd8e2016-04-06 11:49:55 -05003791 }
3792 } while (adapter->renegotiate);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003793
Thomas Falconea22d512016-07-06 15:35:17 -05003794 rc = init_sub_crq_irqs(adapter);
3795 if (rc) {
3796 dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
3797 goto free_debugfs;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003798 }
3799
3800 netdev->real_num_tx_queues = adapter->req_tx_queues;
Thomas Falcon87737f82016-10-17 15:28:10 -05003801 netdev->mtu = adapter->req_mtu;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003802
3803 rc = register_netdev(netdev);
3804 if (rc) {
3805 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
Thomas Falconea22d512016-07-06 15:35:17 -05003806 goto free_sub_crqs;
Thomas Falcon032c5e82015-12-21 11:26:06 -06003807 }
3808 dev_info(&dev->dev, "ibmvnic registered\n");
3809
3810 return 0;
3811
Thomas Falconea22d512016-07-06 15:35:17 -05003812free_sub_crqs:
3813 release_sub_crqs(adapter);
Thomas Falcon032c5e82015-12-21 11:26:06 -06003814free_debugfs:
3815 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3816 debugfs_remove_recursive(adapter->debugfs_dir);
3817free_crq:
3818 ibmvnic_release_crq_queue(adapter);
3819free_netdev:
3820 free_netdev(netdev);
3821 return rc;
3822}
3823
3824static int ibmvnic_remove(struct vio_dev *dev)
3825{
3826 struct net_device *netdev = dev_get_drvdata(&dev->dev);
3827 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3828
3829 unregister_netdev(netdev);
3830
3831 release_sub_crqs(adapter);
3832
3833 ibmvnic_release_crq_queue(adapter);
3834
3835 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3836 debugfs_remove_recursive(adapter->debugfs_dir);
3837
Thomas Falconb7f193d2016-11-11 11:00:45 -06003838 dma_unmap_single(&dev->dev, adapter->stats_token,
3839 sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE);
3840
Thomas Falcon032c5e82015-12-21 11:26:06 -06003841 if (adapter->ras_comps)
3842 dma_free_coherent(&dev->dev,
3843 adapter->ras_comp_num *
3844 sizeof(struct ibmvnic_fw_component),
3845 adapter->ras_comps, adapter->ras_comps_tok);
3846
3847 kfree(adapter->ras_comp_int);
3848
3849 free_netdev(netdev);
3850 dev_set_drvdata(&dev->dev, NULL);
3851
3852 return 0;
3853}
3854
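/* Estimate the IO entitlement this device wants: the CRQ page, the
 * bounce buffer, the statistics buffer, four pages per sub-CRQ, and
 * the long term mapped buffers of every receive pool.
 */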
3855static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
3856{
3857 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
3858 struct ibmvnic_adapter *adapter;
3859 struct iommu_table *tbl;
3860 unsigned long ret = 0;
3861 int i;
3862
3863 tbl = get_iommu_table_base(&vdev->dev);
3864
	/* netdev inits at probe time along with the structures we need below */
3866 if (!netdev)
3867 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
3868
3869 adapter = netdev_priv(netdev);
3870
3871 ret += PAGE_SIZE; /* the crq message queue */
3872 ret += adapter->bounce_buffer_size;
3873 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
3874
3875 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
3876 ret += 4 * PAGE_SIZE; /* the scrq message queue */
3877
3878 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
3879 i++)
3880 ret += adapter->rx_pool[i].size *
3881 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
3882
3883 return ret;
3884}
3885
3886static int ibmvnic_resume(struct device *dev)
3887{
3888 struct net_device *netdev = dev_get_drvdata(dev);
3889 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3890 int i;
3891
3892 /* kick the interrupt handlers just in case we lost an interrupt */
3893 for (i = 0; i < adapter->req_rx_queues; i++)
3894 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
3895 adapter->rx_scrq[i]);
3896
3897 return 0;
3898}
3899
3900static struct vio_device_id ibmvnic_device_table[] = {
3901 {"network", "IBM,vnic"},
3902 {"", "" }
3903};
3904MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
3905
3906static const struct dev_pm_ops ibmvnic_pm_ops = {
3907 .resume = ibmvnic_resume
3908};
3909
3910static struct vio_driver ibmvnic_driver = {
3911 .id_table = ibmvnic_device_table,
3912 .probe = ibmvnic_probe,
3913 .remove = ibmvnic_remove,
3914 .get_desired_dma = ibmvnic_get_desired_dma,
3915 .name = ibmvnic_driver_name,
3916 .pm = &ibmvnic_pm_ops,
3917};
3918
3919/* module functions */
3920static int __init ibmvnic_module_init(void)
3921{
3922 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
3923 IBMVNIC_DRIVER_VERSION);
3924
3925 return vio_register_driver(&ibmvnic_driver);
3926}
3927
3928static void __exit ibmvnic_module_exit(void)
3929{
3930 vio_unregister_driver(&ibmvnic_driver);
3931}
3932
3933module_init(ibmvnic_module_init);
3934module_exit(ibmvnic_module_exit);