/**********************************************************************
* Author: Cavium, Inc.
*
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
* Copyright (c) 2003-2015 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT.  See the GNU General Public License for more
* details.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/net_tstamp.h>
#include <linux/if_vlan.h>
#include <linux/firmware.h>
#include <linux/ethtool.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
		 "Bitmask indicating which consoles have debug output redirected to syslog.");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

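/* Bump a per-IQ statistics counter; for example,
 * INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, tx_restart, 1)
 * expands to oct->instr_queue[iq_no]->stats.tx_restart += 1.
 */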
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)	\
	(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN];
module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");

static int conf_type;
module_param(conf_type, int, 0);
MODULE_PARM_DESC(conf_type, "select octeon configuration: 0 default, 1 ovs");

static int ptp_enable = 1;

/* Bit mask values for lio->ifstate */
#define   LIO_IFSTATE_DROQ_OPS             0x01
#define   LIO_IFSTATE_REGISTERED           0x02
#define   LIO_IFSTATE_RUNNING              0x04
#define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000

struct liquidio_if_cfg_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct liquidio_if_cfg_resp {
	u64 rh;
	struct liquidio_if_cfg_info cfg_info;
	u64 status;
};

struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

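/* Per-packet TX metadata packed into one 64-bit word. The #ifdef mirrors
 * the field order so that gso_size/gso_segs land at the same bit positions
 * within the u64 on both big- and little-endian hosts.
 */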
union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};

/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE (GSO_MAX_SIZE - OCTNIC_GSO_MAX_HEADER_SIZE)

/** Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
	/** List manipulation. Next and prev pointers. */
	struct list_head list;

	/** Size of the gather component at sg in bytes. */
	int sg_size;

	/** Number of bytes that sg was adjusted to make it 8B-aligned. */
	int adjust;

	/** Gather component that can accommodate max sized fragment list
	 * received from the IP layer.
	 */
	struct octeon_sg_entry *sg;

	u64 sg_dma_ptr;
};

/** This structure is used by NIC driver to store information required
 * to free the sk_buff when the packet has been fetched by Octeon.
 * Byte offsets below assume the worst case of a 64-bit system.
 */
struct octnet_buf_free_info {
	/** Bytes 1-8. Pointer to network device private structure. */
	struct lio *lio;

	/** Bytes 9-16. Pointer to sk_buff. */
	struct sk_buff *skb;

	/** Bytes 17-24. Pointer to gather list. */
	struct octnic_gather *g;

	/** Bytes 25-32. Physical address of skb->data or gather list. */
	u64 dptr;

	/** Bytes 33-47. Piggybacked soft command, if any */
	struct octeon_soft_command *sc;
};

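/* Two-phase probe handshake, one slot per Octeon device. liquidio_probe()
 * initializes both completions; octeon_destroy_resources() force-completes
 * them with *_ok = 0 so that no waiter is left hanging on teardown.
 */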
struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};

struct octeon_device_priv {
	/** Tasklet structures for this device. */
	struct tasklet_struct droq_tasklet;
	unsigned long napi_mask;
};

static int octeon_device_init(struct octeon_device *);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;

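/* DROQ bottom half: drain up to MAX_PACKET_BUDGET packets from each active
 * output queue and reschedule itself while any queue still has work left.
 */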
static void octeon_droq_bh(unsigned long pdev)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device *oct = (struct octeon_device *)pdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	/* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & (1ULL << q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
	}

	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}

static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & (1ULL << i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct,
								 oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}

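/* Byte Queue Limits (BQL) hooks: this completion report pairs with
 * octeon_report_sent_bytes_to_bql() below, letting the network stack
 * track in-flight bytes per TX queue.
 */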
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl)
{
	struct netdev_queue *netdev_queue = txq;

	netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
}

void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb = NULL;
	struct octeon_soft_command *sc;

	switch (reqtype) {
	case REQTYPE_NORESP_NET:
	case REQTYPE_NORESP_NET_SG:
		finfo = buf;
		skb = finfo->skb;
		break;

	case REQTYPE_RESP_NET_SG:
	case REQTYPE_RESP_NET:
		sc = buf;
		skb = sc->callback_arg;
		break;

	default:
		return;
	}

	(*pkts_compl)++;
	*bytes_compl += skb->len;
}

void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct octeon_soft_command *sc;
	struct netdev_queue *txq;

	switch (reqtype) {
	case REQTYPE_NORESP_NET:
	case REQTYPE_NORESP_NET_SG:
		finfo = buf;
		skb = finfo->skb;
		break;

	case REQTYPE_RESP_NET_SG:
	case REQTYPE_RESP_NET:
		sc = buf;
		skb = sc->callback_arg;
		break;

	default:
		return;
	}

	txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
	netdev_tx_sent_queue(txq, skb->len);
}

int octeon_console_debug_enabled(u32 console)
{
	return (console_bitmask >> (console)) & 0x1;
}

/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}

/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static int wait_for_pending_requests(struct octeon_device *oct)
{
	int i, pcount = 0;

	for (i = 0; i < 100; i++) {
		pcount =
			atomic_read(&oct->response_list
				[OCTEON_ORDERED_SC_LIST].pending_req_count);
		if (pcount)
			schedule_timeout_uninterruptible(HZ / 10);
		else
			break;
	}

	if (pcount)
		return 1;

	return 0;
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(100);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & (1ULL << i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}

/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos = 0x100;
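	/* 0x100 is the first PCIe extended-capability offset, which is
	 * where the AER capability conventionally sits; using
	 * pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR) would locate
	 * it without that assumption.
	 */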
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask;        /* Clear corresponding nonfatal bits */
	else
		status &= mask;         /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}

/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts */
	oct->fn_list.disable_interrupt(oct->chip);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* cn63xx_cleanup_aer_uncorrect_error_status(oct->pci_dev); */
	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery but only
	 * for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev)
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev)
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev)
{
	/* Nothing to be done here. */
}

#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state)
{
	return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev)
{
	return 0;
}
#endif

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled	= liquidio_pcie_mmio_enabled,
	.slot_reset	= liquidio_pcie_slot_reset,
	.resume		= liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
	{       /* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static struct pci_driver liquidio_pci_driver = {
	.name		= "LiquidIO",
	.id_table	= liquidio_pci_tbl,
	.probe		= liquidio_probe,
	.remove		= liquidio_remove,
	.err_handler	= &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
	.suspend	= liquidio_suspend,
	.resume		= liquidio_resume,
#endif

};

/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * \brief check interface state
 * @param lio per-network private data
 * @param state_flag flag state to check
 */
static inline int ifstate_check(struct lio *lio, int state_flag)
{
	return atomic_read(&lio->ifstate) & state_flag;
}

/**
 * \brief set interface state
 * @param lio per-network private data
 * @param state_flag flag state to set
 */
static inline void ifstate_set(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
}

/**
 * \brief clear interface state
 * @param lio per-network private data
 * @param state_flag flag state to clear
 */
static inline void ifstate_reset(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}
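
/* Note: ifstate_set() and ifstate_reset() above are read-modify-write
 * sequences, not atomic RMW operations; concurrent updaters of
 * lio->ifstate must be serialized externally.
 */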

/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void txqs_stop(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_stop_subqueue(netdev, i);
	} else {
		netif_stop_queue(netdev);
	}
}

/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void txqs_start(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_start_subqueue(netdev, i);
	} else {
		netif_start_queue(netdev);
	}
}

/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void txqs_wake(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++) {
			int qno = lio->linfo.txpciq[i %
				(lio->linfo.num_txpciq)].s.q_no;

			if (__netif_subqueue_stopped(netdev, i)) {
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
							  tx_restart, 1);
				netif_wake_subqueue(netdev, i);
			}
		}
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		netif_wake_queue(netdev);
	}
}

/**
 * \brief Stop Tx queue
 * @param netdev network device
 */
static void stop_txq(struct net_device *netdev)
{
	txqs_stop(netdev);
}

/**
 * \brief Start Tx queue
 * @param netdev network device
 */
static void start_txq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->linfo.link.s.link_up) {
		txqs_start(netdev);
		return;
	}
}

/**
 * \brief Wake a queue
 * @param netdev network device
 * @param q which queue to wake
 */
static inline void wake_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_wake_subqueue(netdev, q);
	else
		netif_wake_queue(netdev);
}

/**
 * \brief Stop a queue
 * @param netdev network device
 * @param q which queue to stop
 */
static inline void stop_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_stop_subqueue(netdev, q);
	else
		netif_stop_queue(netdev);
}

/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
	int ret_val = 0;

	if (netif_is_multiqueue(lio->netdev)) {
		int numqs = lio->netdev->num_tx_queues;
		int q, iq = 0;

		/* check each sub-queue state */
		for (q = 0; q < numqs; q++) {
			iq = lio->linfo.txpciq[q %
				(lio->linfo.num_txpciq)].s.q_no;
			if (octnet_iq_is_full(lio->oct_dev, iq))
				continue;
			if (__netif_subqueue_stopped(lio->netdev, q)) {
				wake_q(lio->netdev, q);
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
							  tx_restart, 1);
				ret_val++;
			}
		}
	} else {
		if (octnet_iq_is_full(lio->oct_dev, lio->txq))
			return 0;
		wake_q(lio->netdev, lio->txq);
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		ret_val = 1;
	}
	return ret_val;
}

/**
 * Remove the node at the head of the list. The list would be empty at
 * the end of this call if there are no more nodes in the list.
 */
static inline struct list_head *list_delete_head(struct list_head *root)
{
	struct list_head *node;

	if ((root->prev == root) && (root->next == root))
		node = NULL;
	else
		node = root->next;

	if (node)
		list_del(node);

	return node;
}

/**
 * \brief Delete gather lists
 * @param lio per-network private data
 */
static void delete_glists(struct lio *lio)
{
	struct octnic_gather *g;
	int i;

	if (!lio->glist)
		return;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		do {
			g = (struct octnic_gather *)
				list_delete_head(&lio->glist[i]);
			if (g) {
				if (g->sg) {
					dma_unmap_single(&lio->oct_dev->
							 pci_dev->dev,
							 g->sg_dma_ptr,
							 g->sg_size,
							 DMA_TO_DEVICE);
					kfree((void *)((unsigned long)g->sg -
						       g->adjust));
				}
				kfree(g);
			}
		} while (g);
	}

	kfree((void *)lio->glist);
}

/**
 * \brief Setup gather lists
 * @param lio per-network private data
 */
static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	int i, j;
	struct octnic_gather *g;

	lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
				  GFP_KERNEL);
	if (!lio->glist_lock)
		return 1;

	lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
			     GFP_KERNEL);
	if (!lio->glist) {
		kfree((void *)lio->glist_lock);
		return 1;
	}

	for (i = 0; i < num_iqs; i++) {
		int numa_node = cpu_to_node(i % num_online_cpus());

		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
					 numa_node);
			if (!g)
				g = kzalloc(sizeof(*g), GFP_KERNEL);
			if (!g)
				break;

			g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
				      OCT_SG_ENTRY_SIZE);

			g->sg = kmalloc_node(g->sg_size + 8,
					     GFP_KERNEL, numa_node);
			if (!g->sg)
				g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
			if (!g->sg) {
				kfree(g);
				break;
			}

			/* The gather component should be aligned on 64-bit
			 * boundary
			 */
			if (((unsigned long)g->sg) & 7) {
				g->adjust = 8 - (((unsigned long)g->sg) & 7);
				g->sg = (struct octeon_sg_entry *)
					((unsigned long)g->sg + g->adjust);
			}
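			/* g->adjust must be subtracted back out before any
			 * kfree() of g->sg; see the error path below and
			 * delete_glists() above.
			 */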
			g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
						       g->sg, g->sg_size,
						       DMA_TO_DEVICE);
			if (dma_mapping_error(&oct->pci_dev->dev,
					      g->sg_dma_ptr)) {
				kfree((void *)((unsigned long)g->sg -
					       g->adjust));
				kfree(g);
				break;
			}

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			delete_glists(lio);
			return 1;
		}
	}

	return 0;
}
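
/* Gather components are taken from lio->glist[iq] with list_delete_head()
 * in the transmit path (not shown in this excerpt) and returned with
 * list_add_tail() in free_netsgbuf()/free_netsgbuf_with_resp() below.
 */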

/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);

	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			netif_carrier_on(netdev);
			/* start_txq(netdev); */
			txqs_wake(netdev);
		} else {
			netif_carrier_off(netdev);
			stop_txq(netdev);
		}
	}
}

/* Runs in interrupt context. */
static void update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct net_device *netdev;
	struct lio *lio;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];

	/*octeon_update_iq_read_idx(oct, iq);*/

	netdev = oct->props[iq->ifidx].netdev;

	/* This is needed because the first IQ does not have
	 * a netdev associated with it.
	 */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);
	if (netif_is_multiqueue(netdev)) {
		if (__netif_subqueue_stopped(netdev, iq->q_index) &&
		    lio->linfo.link.s.link_up &&
		    (!octnet_iq_is_full(oct, iq_num))) {
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
						  tx_restart, 1);
			netif_wake_subqueue(netdev, iq->q_index);
		} else {
			if (!octnet_iq_is_full(oct, lio->txq)) {
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
							  lio->txq,
							  tx_restart, 1);
				wake_q(netdev, lio->txq);
			}
		}
	}
}

/**
 * \brief Droq packet processor scheduler
 * @param oct octeon device
 */
static
void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	u64 oq_no;
	struct octeon_droq *droq;

	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
		     oq_no++) {
			if (!(oct->droq_intr & (1ULL << oq_no)))
				continue;

			droq = oct->droq[oq_no];

			if (droq->ops.poll_mode) {
				droq->ops.napi_fn(droq);
				oct_priv->napi_mask |= (1 << oq_no);
			} else {
				tasklet_schedule(&oct_priv->droq_tasklet);
			}
		}
	}
}

/**
 * \brief Interrupt handler for octeon
 * @param irq unused
 * @param dev octeon device
 */
static
irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	irqreturn_t ret;

	/* Disable our interrupts for the duration of ISR */
	oct->fn_list.disable_interrupt(oct->chip);

	ret = oct->fn_list.process_interrupt_regs(oct);

	if (ret == IRQ_HANDLED)
		liquidio_schedule_droq_pkt_handlers(oct);

	/* Re-enable our interrupts */
	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
		oct->fn_list.enable_interrupt(oct->chip);

	return ret;
}

/**
 * \brief Setup interrupt for octeon device
 * @param oct octeon device
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
static int octeon_setup_interrupt(struct octeon_device *oct)
{
	int irqret, err;

	err = pci_enable_msi(oct->pci_dev);
	if (err)
		dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
			 err);
	else
		oct->flags |= LIO_FLAG_MSI_ENABLED;

	irqret = request_irq(oct->pci_dev->irq, liquidio_intr_handler,
			     IRQF_SHARED, "octeon", oct);
	if (irqret) {
		if (oct->flags & LIO_FLAG_MSI_ENABLED)
			pci_disable_msi(oct->pci_dev);
		dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
			irqret);
		return 1;
	}

	return 0;
}

/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}

/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	int i;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	struct handshake *hs;

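	/* Tear down in reverse order of initialization: each case falls
	 * through, so a device in a later state also unwinds every earlier
	 * stage.
	 */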
	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:

		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fallthrough */
	case OCT_DEV_HOST_OK:

		/* fallthrough */
	case OCT_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		octeon_remove_consoles(oct);

		/* fallthrough */
	case OCT_DEV_IO_QUEUES_DONE:
		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct->chip);

		/* Release the interrupt line */
		free_irq(oct->pci_dev->irq, oct);

		if (oct->flags & LIO_FLAG_MSI_ENABLED)
			pci_disable_msi(oct->pci_dev);

		/* fallthrough */
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & (1ULL << i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

		/* fallthrough */
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		/* fallthrough */
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		/* fallthrough */
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & (1ULL << i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}

		/* fallthrough */
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		/* fallthrough */
	case OCT_DEV_PCI_MAP_DONE:

		/* Soft reset the octeon device before exiting */
		oct->fn_list.soft_reset(oct);

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		/* fallthrough */
	case OCT_DEV_BEGIN_STATE:
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* Nothing to be done here either */
		break;
	} /* end switch(oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}

/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octnic_ctrl_pkt nctrl;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
	nctrl.ncmd.s.param1 = start_stop;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)lio->netdev;

	if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl) < 0)
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
}

/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct lio *lio;
	struct napi_struct *napi, *n;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	send_rx_ctrl_cmd(lio, 0);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		txqs_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;
	}

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}

/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
	int i, j;
	struct lio *lio;

	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
		return 1;
	}

	spin_lock_bh(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = OCT_DRV_OFFLINE;
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	for (i = 0; i < oct->ifcount; i++) {
		lio = GET_LIO(oct->props[i].netdev);
		for (j = 0; j < lio->linfo.num_rxpciq; j++)
			octeon_unregister_droq_ops(oct,
						   lio->linfo.rxpciq[j].s.q_no);
	}

	for (i = 0; i < oct->ifcount; i++)
		liquidio_destroy_nic_device(oct, i);

	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
	return 0;
}

/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
	struct octeon_device *oct_dev = pci_get_drvdata(pdev);

	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
		liquidio_stop_nic_module(oct_dev);

	/* Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}

/**
 * \brief Identify the Octeon device and map the BAR address space
 * @param oct octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
	u32 dev_id, rev_id;
	int ret = 1;
	char *s;

	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	switch (dev_id) {
	case OCTEON_CN68XX_PCIID:
		oct->chip_id = OCTEON_CN68XX;
		ret = lio_setup_cn68xx_octeon_device(oct);
		s = "CN68XX";
		break;

	case OCTEON_CN66XX_PCIID:
		oct->chip_id = OCTEON_CN66XX;
		ret = lio_setup_cn66xx_octeon_device(oct);
		s = "CN66XX";
		break;

	default:
		s = "?";
		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
			dev_id);
	}

	if (!ret)
		dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
			 OCTEON_MAJOR_REV(oct),
			 OCTEON_MINOR_REV(oct),
			 octeon_get_conf(oct)->card_name,
			 LIQUIDIO_VERSION);

	return ret;
}

/**
 * \brief PCI initialization for each Octeon device.
 * @param oct octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
	/* setup PCI stuff first */
	if (pci_enable_device(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
		return 1;
	}

	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
		return 1;
	}

	/* Enable PCI DMA Master. */
	pci_set_master(oct->pci_dev);

	return 0;
}

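/* Map an skb to its input-queue (and gather-list) index. */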
static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
{
	int q = 0;

	if (netif_is_multiqueue(lio->netdev))
		q = skb->queue_mapping % lio->linfo.num_txpciq;

	return q;
}

/**
 * \brief Check Tx queue state for a given network buffer
 * @param lio per-network private data
 * @param skb network buffer
 */
static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
{
	int q = 0, iq = 0;

	if (netif_is_multiqueue(lio->netdev)) {
		q = skb->queue_mapping;
		iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
	} else {
		iq = lio->txq;
		q = iq;
	}

	if (octnet_iq_is_full(lio->oct_dev, iq))
		return 0;

	if (__netif_subqueue_stopped(lio->netdev, q)) {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
		wake_q(lio->netdev, q);
	}
	return 1;
}

/**
 * \brief Unmap and free network buffer
 * @param buf buffer
 */
static void free_netbuf(void *buf)
{
	struct sk_buff *skb;
	struct octnet_buf_free_info *finfo;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
			 DMA_TO_DEVICE);

	check_txq_state(lio, skb);

	tx_buffer_free(skb);
}

/**
 * \brief Unmap and free gather buffer
 * @param buf buffer
 */
static void free_netsgbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

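	/* Each octeon_sg_entry carries four DMA pointers; slot 0 of entry 0
	 * held the linear part of the skb (unmapped above), so fragment
	 * i - 1 lives at g->sg[i >> 2].ptr[i & 3].
	 */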
	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
				g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);

	iq = skb_iq(lio, skb);
	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	check_txq_state(lio, skb); /* mq support: sub-queue state check */

	tx_buffer_free(skb);
}

/**
 * \brief Unmap and free gather buffer with response
 * @param buf buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
	struct octeon_soft_command *sc;
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	sc = (struct octeon_soft_command *)buf;
	skb = (struct sk_buff *)sc->callback_arg;
	finfo = (struct octnet_buf_free_info *)&skb->cb;

	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
				g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);

	iq = skb_iq(lio, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */

	check_txq_state(lio, skb);
}

/**
 * \brief Adjust ptp frequency
 * @param ptp PTP clock info
 * @param ppb how much to adjust by, in parts-per-billion
 */
static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	u64 comp, delta;
	unsigned long flags;
	bool neg_adj = false;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* The hardware adds the clock compensation value to the
	 * PTP clock on every coprocessor clock cycle, so we
	 * compute the delta in terms of coprocessor clocks.
	 */
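	/* The compensation register holds nanoseconds per coprocessor tick
	 * in 32.32 fixed point (see liquidio_ptp_init()), so scaling it by
	 * ppb / 1e9 gives delta = ((u64)ppb << 32) / coproc_clock_rate.
	 */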
	delta = (u64)ppb << 32;
	do_div(delta, oct->coproc_clock_rate);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
	if (neg_adj)
		comp -= delta;
	else
		comp += delta;
	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * \brief Adjust ptp time
 * @param ptp PTP clock info
 * @param delta how much to adjust by, in nanosecs
 */
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio->ptp_adjust += delta;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * \brief Get hardware clock time, including any adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
				struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_irqsave(&lio->ptp_lock, flags);
	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
	ns += lio->ptp_adjust;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * \brief Set hardware clock time. Reset adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	ns = timespec_to_ns(ts);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
	lio->ptp_adjust = 0;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * \brief Enable/disable ancillary PTP features (not supported)
 * @param ptp PTP clock info
 * @param rq request
 * @param on is it on
 */
static int liquidio_ptp_enable(struct ptp_clock_info *ptp,
			       struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}

/**
 * \brief Open PTP clock source
 * @param netdev network device
 */
static void oct_ptp_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_init(&lio->ptp_lock);

	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
	lio->ptp_info.owner = THIS_MODULE;
	lio->ptp_info.max_adj = 250000000;
	lio->ptp_info.n_alarm = 0;
	lio->ptp_info.n_ext_ts = 0;
	lio->ptp_info.n_per_out = 0;
	lio->ptp_info.pps = 0;
	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
	lio->ptp_info.settime64 = liquidio_ptp_settime;
	lio->ptp_info.enable = liquidio_ptp_enable;

	lio->ptp_adjust = 0;

	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
					    &oct->pci_dev->dev);

	if (IS_ERR(lio->ptp_clock))
		lio->ptp_clock = NULL;
}

/**
 * \brief Init PTP clock
 * @param oct octeon device
 */
static void liquidio_ptp_init(struct octeon_device *oct)
{
	u64 clock_comp, cfg;

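	/* Program the compensation register with the number of nanoseconds
	 * per coprocessor clock tick, in 32.32 fixed-point format.
	 */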
	clock_comp = (u64)NSEC_PER_SEC << 32;
	do_div(clock_comp, oct->coproc_clock_rate);
	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);

	/* Enable */
	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
}

/**
 * \brief Load firmware to device
 * @param oct octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it
 */
static int load_firmware(struct octeon_device *oct)
{
	int ret = 0;
	const struct firmware *fw;
	char fw_name[LIO_MAX_FW_FILENAME_LEN];
	char *tmp_fw_type;

	if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
		    sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
		dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
		return ret;
	}

	if (fw_type[0] == '\0')
		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
	else
		tmp_fw_type = fw_type;

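	/* Build the firmware file name; with the default "nic" type this
	 * yields something like "liquidio/lio_210sv_nic.bin" (assuming the
	 * usual values of the LIO_FW_* macros).
	 */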
1807 sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
1808 octeon_get_conf(oct)->card_name, tmp_fw_type,
1809 LIO_FW_NAME_SUFFIX);
1810
1811 ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1812 if (ret) {
1813 dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.",
1814 fw_name);
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001815 release_firmware(fw);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001816 return ret;
1817 }
1818
1819 ret = octeon_download_firmware(oct, fw->data, fw->size);
1820
1821 release_firmware(fw);
1822
1823 return ret;
1824}
1825
1826/**
1827 * \brief Setup output queue
1828 * @param oct octeon device
1829 * @param q_no which queue
1830 * @param num_descs how many descriptors
1831 * @param desc_size size of each descriptor
1832 * @param app_ctx application context
1833 */
1834static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
1835 int desc_size, void *app_ctx)
1836{
1837 int ret_val = 0;
1838
1839 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
1840 /* droq creation and local register settings. */
1841 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
Amitoj Kaur Chawla08a965e2016-02-04 19:25:13 +05301842 if (ret_val < 0)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001843 return ret_val;
1844
1845 if (ret_val == 1) {
1846 dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
1847 return 0;
1848 }
1849 /* tasklet creation for the droq */
1850
1851 /* Enable the droq queues */
1852 octeon_set_droq_pkt_op(oct, q_no, 1);
1853
1854 /* Send Credit for Octeon Output queues. Credits are always
1855 * sent after the output queue is enabled.
1856 */
1857 writel(oct->droq[q_no]->max_count,
1858 oct->droq[q_no]->pkts_credit_reg);
1859
1860 return ret_val;
1861}
1862
1863/**
1864 * \brief Callback for getting interface configuration
1865 * @param oct octeon device
 * @param status status of request
1866 * @param buf pointer to resp structure
1867 */
1868static void if_cfg_callback(struct octeon_device *oct,
1869 u32 status,
1870 void *buf)
1871{
1872 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1873 struct liquidio_if_cfg_resp *resp;
1874 struct liquidio_if_cfg_context *ctx;
1875
1876 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1877 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
1878
1879 oct = lio_get_device(ctx->octeon_id);
1880 if (resp->status)
1881 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
1882 CVM_CAST64(resp->status));
1883 ACCESS_ONCE(ctx->cond) = 1;
1884
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001885 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
1886 resp->cfg_info.liquidio_firmware_version);
1887
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001888 /* This barrier is required to be sure that the response has been
1889 * written fully before waking up the handler
1890 */
1891 wmb();
1892
1893 wake_up_interruptible(&ctx->wc);
1894}
1895
1896/**
1897 * \brief Select queue based on hash
1898 * @param dev Net device
1899 * @param skb sk_buff structure
1900 * @returns selected queue number
1901 */
1902static u16 select_q(struct net_device *dev, struct sk_buff *skb,
1903 void *accel_priv, select_queue_fallback_t fallback)
1904{
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07001905 u32 qindex = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001906 struct lio *lio;
1907
1908 lio = GET_LIO(dev);
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07001909 qindex = skb_tx_hash(dev, skb);
1910
1911 return (u16)(qindex % (lio->linfo.num_txpciq));
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001912}
1913
1914/** Routine to push packets arriving on Octeon interface upto network layer.
1915 * @param oct_id - octeon device id.
1916 * @param skbuff - skbuff struct to be passed to network layer.
1917 * @param len - size of total data received.
1918 * @param rh - Control header associated with the packet
1919 * @param param - additional control data with the packet
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07001920 * @param arg - farg registered in droq_ops
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001921 */
1922static void
1923liquidio_push_packet(u32 octeon_id,
1924 void *skbuff,
1925 u32 len,
1926 union octeon_rh *rh,
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07001927 void *param,
1928 void *arg)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001929{
1930 struct napi_struct *napi = param;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001931 struct sk_buff *skb = (struct sk_buff *)skbuff;
1932 struct skb_shared_hwtstamps *shhwtstamps;
1933 u64 ns;
Raghu Vatsavayi0da0b772016-06-21 22:53:04 -07001934 u16 vtag = 0;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07001935 struct net_device *netdev = (struct net_device *)arg;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001936 struct octeon_droq *droq = container_of(param, struct octeon_droq,
1937 napi);
1938 if (netdev) {
1939 int packet_was_received;
1940 struct lio *lio = GET_LIO(netdev);
Raghu Vatsavayia5b37882016-06-14 16:54:48 -07001941 struct octeon_device *oct = lio->oct_dev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001942
1943 /* Do not proceed if the interface is not in RUNNING state. */
1944 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
1945 recv_buffer_free(skb);
1946 droq->stats.rx_dropped++;
1947 return;
1948 }
1949
1950 skb->dev = netdev;
1951
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07001952 skb_record_rx_queue(skb, droq->q_no);
Raghu Vatsavayicabeb132016-06-14 16:54:47 -07001953 if (likely(len > MIN_SKB_SIZE)) {
1954 struct octeon_skb_page_info *pg_info;
1955 unsigned char *va;
1956
1957 pg_info = ((struct octeon_skb_page_info *)(skb->cb));
1958 if (pg_info->page) {
1959 /* For Paged allocation use the frags */
1960 va = page_address(pg_info->page) +
1961 pg_info->page_offset;
1962 memcpy(skb->data, va, MIN_SKB_SIZE);
1963 skb_put(skb, MIN_SKB_SIZE);
1964 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1965 pg_info->page,
1966 pg_info->page_offset +
1967 MIN_SKB_SIZE,
1968 len - MIN_SKB_SIZE,
1969 LIO_RXBUFFER_SZ);
1970 }
1971 } else {
1972 struct octeon_skb_page_info *pg_info =
1973 ((struct octeon_skb_page_info *)(skb->cb));
1974 skb_copy_to_linear_data(skb, page_address(pg_info->page)
1975 + pg_info->page_offset, len);
1976 skb_put(skb, len);
1977 put_page(pg_info->page);
1978 }
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07001979
Raghu Vatsavayia5b37882016-06-14 16:54:48 -07001980 if (((oct->chip_id == OCTEON_CN66XX) ||
1981 (oct->chip_id == OCTEON_CN68XX)) &&
1982 ptp_enable) {
1983 if (rh->r_dh.has_hwtstamp) {
1984 /* timestamp is included from the hardware at
1985 * the beginning of the packet.
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001986 */
Raghu Vatsavayia5b37882016-06-14 16:54:48 -07001987 if (ifstate_check
1988 (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
1989 /* Nanoseconds are in the first 64-bits
1990 * of the packet.
1991 */
1992 memcpy(&ns, (skb->data), sizeof(ns));
1993 shhwtstamps = skb_hwtstamps(skb);
1994 shhwtstamps->hwtstamp =
1995 ns_to_ktime(ns +
1996 lio->ptp_adjust);
1997 }
1998 skb_pull(skb, sizeof(ns));
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001999 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002000 }
2001
2002 skb->protocol = eth_type_trans(skb, skb->dev);
2003
2004 if ((netdev->features & NETIF_F_RXCSUM) &&
2005 (rh->r_dh.csum_verified == CNNIC_CSUM_VERIFIED))
2006 /* checksum has already been verified */
2007 skb->ip_summed = CHECKSUM_UNNECESSARY;
2008 else
2009 skb->ip_summed = CHECKSUM_NONE;
2010
Raghu Vatsavayi0da0b772016-06-21 22:53:04 -07002011 /* inbound VLAN tag */
2012 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
2013 (rh->r_dh.vlan != 0)) {
2014 u16 vid = rh->r_dh.vlan;
2015 u16 priority = rh->r_dh.priority;
2016
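			/* Rebuild the 802.1Q TCI: the 3-bit PCP sits in
			 * bits 15:13 and the 12-bit VID in bits 11:0
			 * (the DEI bit 12 is left clear).
			 */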
2017 vtag = priority << 13 | vid;
2018 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
2019 }
2020
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002021 packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
2022
2023 if (packet_was_received) {
2024 droq->stats.rx_bytes_received += len;
2025 droq->stats.rx_pkts_received++;
2026 netdev->last_rx = jiffies;
2027 } else {
2028 droq->stats.rx_dropped++;
2029 netif_info(lio, rx_err, lio->netdev,
2030 "droq:%d error rx_dropped:%llu\n",
2031 droq->q_no, droq->stats.rx_dropped);
2032 }
2033
2034 } else {
2035 recv_buffer_free(skb);
2036 }
2037}
2038
2039/**
2040 * \brief wrapper for calling napi_schedule
2041 * @param param parameters to pass to napi_schedule
2042 *
2043 * Used when scheduling on different CPUs
2044 */
2045static void napi_schedule_wrapper(void *param)
2046{
2047 struct napi_struct *napi = param;
2048
2049 napi_schedule(napi);
2050}
2051
2052/**
2053 * \brief callback when receive interrupt occurs and we are in NAPI mode
2054 * @param arg pointer to octeon output queue
2055 */
2056static void liquidio_napi_drv_callback(void *arg)
2057{
2058 struct octeon_droq *droq = arg;
2059 int this_cpu = smp_processor_id();
2060
2061 if (droq->cpu_id == this_cpu) {
2062 napi_schedule(&droq->napi);
2063 } else {
2064 struct call_single_data *csd = &droq->csd;
2065
2066 csd->func = napi_schedule_wrapper;
2067 csd->info = &droq->napi;
2068 csd->flags = 0;
2069
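		/* Kick an async IPI so napi_schedule() runs on the CPU
		 * this droq was bound to during queue setup.
		 */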
2070 smp_call_function_single_async(droq->cpu_id, csd);
2071 }
2072}
2073
2074/**
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002075 * \brief Entry point for NAPI polling
2076 * @param napi NAPI structure
2077 * @param budget maximum number of items to process
2078 */
2079static int liquidio_napi_poll(struct napi_struct *napi, int budget)
2080{
2081 struct octeon_droq *droq;
2082 int work_done;
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002083 int tx_done = 0, iq_no;
2084 struct octeon_instr_queue *iq;
2085 struct octeon_device *oct;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002086
2087 droq = container_of(napi, struct octeon_droq, napi);
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002088 oct = droq->oct_dev;
2089 iq_no = droq->q_no;
2090 /* Handle Droq descriptors */
2091 work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
2092 POLL_EVENT_PROCESS_PKTS,
2093 budget);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002094
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002095 /* Flush the instruction queue */
2096 iq = oct->instr_queue[iq_no];
2097 if (iq) {
2098 /* Process iq buffers with in the budget limits */
2099 tx_done = octeon_flush_iq(oct, iq, 1, budget);
2100 /* Update iq read-index rather than waiting for next interrupt.
2101 * If tx_done is false, keep polling rather than completing NAPI.
2102 */
2103 update_txq_status(oct, iq_no);
2104 /*tx_done = (iq->flush_index == iq->octeon_read_index);*/
2105 } else {
2106 dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
2107 __func__, iq_no);
2108 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002109
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002110 if ((work_done < budget) && (tx_done)) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002111 napi_complete(napi);
2112 octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
2113 POLL_EVENT_ENABLE_INTR, 0);
2114 return 0;
2115 }
2116
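	/* Returning the full budget keeps this queue on the NAPI poll list
	 * while IQ work remains; otherwise report only the RX work done.
	 */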
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002117 return (!tx_done) ? (budget) : (work_done);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002118}
2119
2120/**
2121 * \brief Setup input and output queues
2122 * @param octeon_dev octeon device
2123 * @param net_device Net device
2124 *
2125 * Note: Queues are with respect to the octeon device. Thus
2126 * an input queue is for egress packets, and output queues
2127 * are for ingress packets.
2128 */
2129static inline int setup_io_queues(struct octeon_device *octeon_dev,
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002130 int ifidx)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002131{
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002132 struct octeon_droq_ops droq_ops;
2133 struct net_device *netdev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002134 static int cpu_id;
2135 static int cpu_id_modulus;
2136 struct octeon_droq *droq;
2137 struct napi_struct *napi;
2138 int q, q_no, retval = 0;
2139 struct lio *lio;
2140 int num_tx_descs;
2141
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002142 netdev = octeon_dev->props[ifidx].netdev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002143
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002144 lio = GET_LIO(netdev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002145
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002146 memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
2147
2148 droq_ops.fptr = liquidio_push_packet;
2149 droq_ops.farg = (void *)netdev;
2150
2151 droq_ops.poll_mode = 1;
2152 droq_ops.napi_fn = liquidio_napi_drv_callback;
2153 cpu_id = 0;
2154 cpu_id_modulus = num_present_cpus();
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002155
2156 /* set up DROQs. */
2157 for (q = 0; q < lio->linfo.num_rxpciq; q++) {
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07002158 q_no = lio->linfo.rxpciq[q].s.q_no;
2159 dev_dbg(&octeon_dev->pci_dev->dev,
2160 "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
2161 q, q_no);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002162 retval = octeon_setup_droq(octeon_dev, q_no,
2163 CFG_GET_NUM_RX_DESCS_NIC_IF
2164 (octeon_get_conf(octeon_dev),
2165 lio->ifidx),
2166 CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
2167 (octeon_get_conf(octeon_dev),
2168 lio->ifidx), NULL);
2169 if (retval) {
2170 dev_err(&octeon_dev->pci_dev->dev,
2171 " %s : Runtime DROQ(RxQ) creation failed.\n",
2172 __func__);
2173 return 1;
2174 }
2175
2176 droq = octeon_dev->droq[q_no];
2177 napi = &droq->napi;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002178 dev_dbg(&octeon_dev->pci_dev->dev,
2179 "netif_napi_add netdev:%llx oct:%llx\n",
2180 (u64)netdev,
2181 (u64)octeon_dev);
2182 netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002183
2184 /* designate a CPU for this droq */
2185 droq->cpu_id = cpu_id;
2186 cpu_id++;
2187 if (cpu_id >= cpu_id_modulus)
2188 cpu_id = 0;
2189
2190 octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
2191 }
2192
2193 /* set up IQs. */
2194 for (q = 0; q < lio->linfo.num_txpciq; q++) {
2195 num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
2196 (octeon_dev),
2197 lio->ifidx);
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002198 retval = octeon_setup_iq(octeon_dev, ifidx, q,
2199 lio->linfo.txpciq[q], num_tx_descs,
2200 netdev_get_tx_queue(netdev, q));
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002201 if (retval) {
2202 dev_err(&octeon_dev->pci_dev->dev,
2203 " %s : Runtime IQ(TxQ) creation failed.\n",
2204 __func__);
2205 return 1;
2206 }
2207 }
2208
2209 return 0;
2210}
2211
2212/**
2213 * \brief Poll routine for checking transmit queue status
2214 * @param work work_struct data structure
2215 */
2216static void octnet_poll_check_txq_status(struct work_struct *work)
2217{
2218 struct cavium_wk *wk = (struct cavium_wk *)work;
2219 struct lio *lio = (struct lio *)wk->ctxptr;
2220
2221 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
2222 return;
2223
2224 check_txq_status(lio);
2225 queue_delayed_work(lio->txq_status_wq.wq,
2226 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
2227}
2228
2229/**
2230 * \brief Sets up the txq poll check
2231 * @param netdev network device
2232 */
2233static inline void setup_tx_poll_fn(struct net_device *netdev)
2234{
2235 struct lio *lio = GET_LIO(netdev);
2236 struct octeon_device *oct = lio->oct_dev;
2237
Bhaktipriya Shridhar292b9da2016-06-08 01:47:59 +05302238 lio->txq_status_wq.wq = alloc_workqueue("txq-status",
2239 WQ_MEM_RECLAIM, 0);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002240 if (!lio->txq_status_wq.wq) {
2241 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
2242 return;
2243 }
2244 INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
2245 octnet_poll_check_txq_status);
2246 lio->txq_status_wq.wk.ctxptr = lio;
2247 queue_delayed_work(lio->txq_status_wq.wq,
2248 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
2249}
2250
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002251static inline void cleanup_tx_poll_fn(struct net_device *netdev)
2252{
2253 struct lio *lio = GET_LIO(netdev);
2254
2255 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
2256 destroy_workqueue(lio->txq_status_wq.wq);
2257}
2258
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002259/**
2260 * \brief Net device open for LiquidIO
2261 * @param netdev network device
2262 */
2263static int liquidio_open(struct net_device *netdev)
2264{
2265 struct lio *lio = GET_LIO(netdev);
2266 struct octeon_device *oct = lio->oct_dev;
2267 struct napi_struct *napi, *n;
2268
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002269 if (oct->props[lio->ifidx].napi_enabled == 0) {
2270 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
2271 napi_enable(napi);
2272
2273 oct->props[lio->ifidx].napi_enabled = 1;
2274 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002275
2276 oct_ptp_open(netdev);
2277
2278 ifstate_set(lio, LIO_IFSTATE_RUNNING);
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002279
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002280 setup_tx_poll_fn(netdev);
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002281
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002282 start_txq(netdev);
2283
2284 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002285
2286 /* tell Octeon to start forwarding packets to host */
2287 send_rx_ctrl_cmd(lio, 1);
2288
2289 /* Ready for link status updates */
2290 lio->intf_open = 1;
2291
2292 dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
2293 netdev->name);
2294
2295 return 0;
2296}
2297
2298/**
2299 * \brief Net device stop for LiquidIO
2300 * @param netdev network device
2301 */
2302static int liquidio_stop(struct net_device *netdev)
2303{
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002304 struct lio *lio = GET_LIO(netdev);
2305 struct octeon_device *oct = lio->oct_dev;
2306
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002307 ifstate_reset(lio, LIO_IFSTATE_RUNNING);
2308
2309 netif_tx_disable(netdev);
2310
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002311 /* Inform that netif carrier is down */
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002312 netif_carrier_off(netdev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002313 lio->intf_open = 0;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002314 lio->linfo.link.s.link_up = 0;
2315 lio->link_changes++;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002316
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002317 /* Pause for a moment and wait for Octeon to flush out (to the wire) any
2318 * egress packets that are in-flight.
2319 */
2320 set_current_state(TASK_INTERRUPTIBLE);
2321 schedule_timeout(msecs_to_jiffies(100));
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002322
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002323 /* Now it should be safe to tell Octeon that nic interface is down. */
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002324 send_rx_ctrl_cmd(lio, 0);
2325
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002326 cleanup_tx_poll_fn(netdev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002327
2328 if (lio->ptp_clock) {
2329 ptp_clock_unregister(lio->ptp_clock);
2330 lio->ptp_clock = NULL;
2331 }
2332
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002333 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
2334 module_put(THIS_MODULE);
2335
2336 return 0;
2337}
2338
2339void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
2340{
2341 struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
2342 struct net_device *netdev = (struct net_device *)nctrl->netpndev;
2343 struct lio *lio = GET_LIO(netdev);
2344 struct octeon_device *oct = lio->oct_dev;
2345
2346 switch (nctrl->ncmd.s.cmd) {
2347 case OCTNET_CMD_CHANGE_DEVFLAGS:
2348 case OCTNET_CMD_SET_MULTI_LIST:
2349 break;
2350
2351 case OCTNET_CMD_CHANGE_MACADDR:
2352 /* If command is successful, change the MACADDR. */
2353 netif_info(lio, probe, lio->netdev, " MACAddr changed to 0x%llx\n",
2354 CVM_CAST64(nctrl->udd[0]));
2355 dev_info(&oct->pci_dev->dev, "%s MACAddr changed to 0x%llx\n",
2356 netdev->name, CVM_CAST64(nctrl->udd[0]));
2357 memcpy(netdev->dev_addr, ((u8 *)&nctrl->udd[0]) + 2, ETH_ALEN);
2358 break;
2359
2360 case OCTNET_CMD_CHANGE_MTU:
2361 /* If command is successful, change the MTU. */
2362 netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
2363 netdev->mtu, nctrl->ncmd.s.param2);
2364 dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
2365 netdev->name, netdev->mtu,
2366 nctrl->ncmd.s.param2);
2367 netdev->mtu = nctrl->ncmd.s.param2;
2368 break;
2369
2370 case OCTNET_CMD_GPIO_ACCESS:
2371 netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
2372
2373 break;
2374
2375 case OCTNET_CMD_LRO_ENABLE:
2376 dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
2377 break;
2378
2379 case OCTNET_CMD_LRO_DISABLE:
2380 dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
2381 netdev->name);
2382 break;
2383
2384 case OCTNET_CMD_VERBOSE_ENABLE:
2385 dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
2386 break;
2387
2388 case OCTNET_CMD_VERBOSE_DISABLE:
2389 dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
2390 netdev->name);
2391 break;
2392
Raghu Vatsavayi63245f22016-06-21 22:53:05 -07002393 case OCTNET_CMD_ENABLE_VLAN_FILTER:
2394 dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
2395 netdev->name);
2396 break;
2397
2398 case OCTNET_CMD_ADD_VLAN_FILTER:
2399 dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
2400 netdev->name, nctrl->ncmd.s.param1);
2401 break;
2402
2403 case OCTNET_CMD_DEL_VLAN_FILTER:
2404 dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
2405 netdev->name, nctrl->ncmd.s.param1);
2406 break;
2407
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002408 case OCTNET_CMD_SET_SETTINGS:
2409 dev_info(&oct->pci_dev->dev, "%s settings changed\n",
2410 netdev->name);
2411
2412 break;
2413
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07002414 case OCTNET_CMD_SET_FLOW_CTL:
2415 netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
2416 break;
2417
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002418 default:
2419 dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
2420 nctrl->ncmd.s.cmd);
2421 }
2422}
2423
2424/**
2425 * \brief Converts a mask based on net device flags
2426 * @param netdev network device
2427 *
2428 * This routine generates a octnet_ifflags mask from the net device flags
2429 * received from the OS.
2430 */
2431static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
2432{
2433 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
2434
2435 if (netdev->flags & IFF_PROMISC)
2436 f |= OCTNET_IFFLAG_PROMISC;
2437
2438 if (netdev->flags & IFF_ALLMULTI)
2439 f |= OCTNET_IFFLAG_ALLMULTI;
2440
2441 if (netdev->flags & IFF_MULTICAST) {
2442 f |= OCTNET_IFFLAG_MULTICAST;
2443
2444 /* Accept all multicast addresses if there are more than we
2445 * can handle
2446 */
2447 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
2448 f |= OCTNET_IFFLAG_ALLMULTI;
2449 }
2450
2451 if (netdev->flags & IFF_BROADCAST)
2452 f |= OCTNET_IFFLAG_BROADCAST;
2453
2454 return f;
2455}
2456
2457/**
2458 * \brief Net device set_multicast_list
2459 * @param netdev network device
2460 */
2461static void liquidio_set_mcast_list(struct net_device *netdev)
2462{
2463 struct lio *lio = GET_LIO(netdev);
2464 struct octeon_device *oct = lio->oct_dev;
2465 struct octnic_ctrl_pkt nctrl;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002466 struct netdev_hw_addr *ha;
2467 u64 *mc;
2468 int ret, i;
2469 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
2470
2471 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2472
2473 /* Create a ctrl pkt command to be sent to core app. */
2474 nctrl.ncmd.u64 = 0;
2475 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002476 nctrl.ncmd.s.param1 = get_new_flags(netdev);
2477 nctrl.ncmd.s.param2 = mc_count;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002478 nctrl.ncmd.s.more = mc_count;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002479 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002480 nctrl.netpndev = (u64)netdev;
2481 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2482
2483 /* copy all the addresses into the udd */
2484 i = 0;
2485 mc = &nctrl.udd[0];
2486 netdev_for_each_mc_addr(ha, netdev) {
2487 *mc = 0;
2488 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
2489 /* no need to swap bytes */
2490
2491 if (++mc > &nctrl.udd[mc_count])
2492 break;
2493 }
2494
2495 /* This callback can run in atomic context, so we must not sleep
2496 * waiting for the response.
2497 */
2498 nctrl.wait_time = 0;
2499
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002500 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002501 if (ret < 0) {
2502 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2503 ret);
2504 }
2505}
2506
2507/**
2508 * \brief Net device set_mac_address
2509 * @param netdev network device
2510 */
2511static int liquidio_set_mac(struct net_device *netdev, void *p)
2512{
2513 int ret = 0;
2514 struct lio *lio = GET_LIO(netdev);
2515 struct octeon_device *oct = lio->oct_dev;
2516 struct sockaddr *addr = (struct sockaddr *)p;
2517 struct octnic_ctrl_pkt nctrl;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002518
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002519 if (!is_valid_ether_addr(addr->sa_data))
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002520 return -EADDRNOTAVAIL;
2521
2522 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2523
2524 nctrl.ncmd.u64 = 0;
2525 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002526 nctrl.ncmd.s.param1 = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002527 nctrl.ncmd.s.more = 1;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002528 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002529 nctrl.netpndev = (u64)netdev;
2530 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2531 nctrl.wait_time = 100;
2532
2533 nctrl.udd[0] = 0;
2534 /* The MAC Address is presented in network byte order. */
2535 memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2536
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002537 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002538 if (ret < 0) {
2539 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2540 return -ENOMEM;
2541 }
2542 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2543 memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2544
2545 return 0;
2546}
2547
2548/**
2549 * \brief Net device get_stats
2550 * @param netdev network device
2551 */
2552static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
2553{
2554 struct lio *lio = GET_LIO(netdev);
2555 struct net_device_stats *stats = &netdev->stats;
2556 struct octeon_device *oct;
2557 u64 pkts = 0, drop = 0, bytes = 0;
2558 struct oct_droq_stats *oq_stats;
2559 struct oct_iq_stats *iq_stats;
2560 int i, iq_no, oq_no;
2561
2562 oct = lio->oct_dev;
2563
2564 for (i = 0; i < lio->linfo.num_txpciq; i++) {
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07002565 iq_no = lio->linfo.txpciq[i].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002566 iq_stats = &oct->instr_queue[iq_no]->stats;
2567 pkts += iq_stats->tx_done;
2568 drop += iq_stats->tx_dropped;
2569 bytes += iq_stats->tx_tot_bytes;
2570 }
2571
2572 stats->tx_packets = pkts;
2573 stats->tx_bytes = bytes;
2574 stats->tx_dropped = drop;
2575
2576 pkts = 0;
2577 drop = 0;
2578 bytes = 0;
2579
2580 for (i = 0; i < lio->linfo.num_rxpciq; i++) {
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07002581 oq_no = lio->linfo.rxpciq[i].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002582 oq_stats = &oct->droq[oq_no]->stats;
2583 pkts += oq_stats->rx_pkts_received;
2584 drop += (oq_stats->rx_dropped +
2585 oq_stats->dropped_nodispatch +
2586 oq_stats->dropped_toomany +
2587 oq_stats->dropped_nomem);
2588 bytes += oq_stats->rx_bytes_received;
2589 }
2590
2591 stats->rx_bytes = bytes;
2592 stats->rx_packets = pkts;
2593 stats->rx_dropped = drop;
2594
2595 return stats;
2596}
2597
2598/**
2599 * \brief Net device change_mtu
2600 * @param netdev network device
2601 */
2602static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
2603{
2604 struct lio *lio = GET_LIO(netdev);
2605 struct octeon_device *oct = lio->oct_dev;
2606 struct octnic_ctrl_pkt nctrl;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002607 int max_frm_size = new_mtu + OCTNET_FRM_HEADER_SIZE;
2608 int ret = 0;
2609
2610 /* Limit the MTU to keep Ethernet frames between 64 bytes
2611 * and 65535 bytes
2612 */
2613 if ((max_frm_size < OCTNET_MIN_FRM_SIZE) ||
2614 (max_frm_size > OCTNET_MAX_FRM_SIZE)) {
2615 dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
2616 dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n",
2617 (OCTNET_MIN_FRM_SIZE - OCTNET_FRM_HEADER_SIZE),
2618 (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE));
2619 return -EINVAL;
2620 }
2621
2622 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2623
2624 nctrl.ncmd.u64 = 0;
2625 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002626 nctrl.ncmd.s.param1 = new_mtu;
2627 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002628 nctrl.wait_time = 100;
2629 nctrl.netpndev = (u64)netdev;
2630 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2631
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002632 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002633 if (ret < 0) {
2634 dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
2635 return -1;
2636 }
2637
2638 lio->mtu = new_mtu;
2639
2640 return 0;
2641}
2642
2643/**
2644 * \brief Handler for SIOCSHWTSTAMP ioctl
2645 * @param netdev network device
2646 * @param ifr interface request
2647 * @param cmd command
2648 */
2649static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2650{
2651 struct hwtstamp_config conf;
2652 struct lio *lio = GET_LIO(netdev);
2653
2654 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2655 return -EFAULT;
2656
2657 if (conf.flags)
2658 return -EINVAL;
2659
2660 switch (conf.tx_type) {
2661 case HWTSTAMP_TX_ON:
2662 case HWTSTAMP_TX_OFF:
2663 break;
2664 default:
2665 return -ERANGE;
2666 }
2667
2668 switch (conf.rx_filter) {
2669 case HWTSTAMP_FILTER_NONE:
2670 break;
2671 case HWTSTAMP_FILTER_ALL:
2672 case HWTSTAMP_FILTER_SOME:
2673 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2674 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2675 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2676 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2677 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2678 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2679 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2680 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2681 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2682 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2683 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2684 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2685 conf.rx_filter = HWTSTAMP_FILTER_ALL;
2686 break;
2687 default:
2688 return -ERANGE;
2689 }
2690
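	/* The hardware timestamps either every received packet or none, so
	 * all the PTP filter variants above collapse to HWTSTAMP_FILTER_ALL.
	 */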
2691 if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2692 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2693
2694 else
2695 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2696
2697 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2698}
2699
2700/**
2701 * \brief ioctl handler
2702 * @param netdev network device
2703 * @param ifr interface request
2704 * @param cmd command
2705 */
2706static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2707{
2708 switch (cmd) {
2709 case SIOCSHWTSTAMP:
2710 return hwtstamp_ioctl(netdev, ifr, cmd);
2711 default:
2712 return -EOPNOTSUPP;
2713 }
2714}
2715
2716/**
2717 * \brief handle a Tx timestamp response
2718 * @param status response status
2719 * @param buf pointer to skb
2720 */
2721static void handle_timestamp(struct octeon_device *oct,
2722 u32 status,
2723 void *buf)
2724{
2725 struct octnet_buf_free_info *finfo;
2726 struct octeon_soft_command *sc;
2727 struct oct_timestamp_resp *resp;
2728 struct lio *lio;
2729 struct sk_buff *skb = (struct sk_buff *)buf;
2730
2731 finfo = (struct octnet_buf_free_info *)skb->cb;
2732 lio = finfo->lio;
2733 sc = finfo->sc;
2734 oct = lio->oct_dev;
2735 resp = (struct oct_timestamp_resp *)sc->virtrptr;
2736
2737 if (status != OCTEON_REQUEST_DONE) {
2738 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2739 CVM_CAST64(status));
2740 resp->timestamp = 0;
2741 }
2742
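	/* The firmware writes the timestamp in Octeon (big-endian) byte
	 * order; swap the 8-byte word into host order before using it.
	 */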
2743 octeon_swap_8B_data(&resp->timestamp, 1);
2744
Colin Ian King19a6d152016-02-05 16:30:39 +00002745 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002746 struct skb_shared_hwtstamps ts;
2747 u64 ns = resp->timestamp;
2748
2749 netif_info(lio, tx_done, lio->netdev,
2750 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2751 skb, (unsigned long long)ns);
2752 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2753 skb_tstamp_tx(skb, &ts);
2754 }
2755
2756 octeon_free_soft_command(oct, sc);
Raghu Vatsavayicabeb132016-06-14 16:54:47 -07002757 tx_buffer_free(skb);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002758}
2759
2760/* \brief Send a data packet that will be timestamped
2761 * @param oct octeon device
2762 * @param ndata pointer to network data
2763 * @param finfo pointer to private network data
2764 */
2765static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2766 struct octnic_data_pkt *ndata,
2767 struct octnet_buf_free_info *finfo,
2768 int xmit_more)
2769{
2770 int retval;
2771 struct octeon_soft_command *sc;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002772 struct lio *lio;
2773 int ring_doorbell;
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07002774 u32 len;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002775
2776 lio = finfo->lio;
2777
2778 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2779 sizeof(struct oct_timestamp_resp));
2780 finfo->sc = sc;
2781
2782 if (!sc) {
2783 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2784 return IQ_SEND_FAILED;
2785 }
2786
2787 if (ndata->reqtype == REQTYPE_NORESP_NET)
2788 ndata->reqtype = REQTYPE_RESP_NET;
2789 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2790 ndata->reqtype = REQTYPE_RESP_NET_SG;
2791
2792 sc->callback = handle_timestamp;
2793 sc->callback_arg = finfo->skb;
2794 sc->iq_no = ndata->q_no;
2795
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07002796 len = (u32)((struct octeon_instr_ih2 *)(&sc->cmd.cmd2.ih2))->dlengsz;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002797
2798 ring_doorbell = !xmit_more;
2799 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07002800 sc, len, ndata->reqtype);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002801
Raghu Vatsavayiddc173a2016-06-14 16:54:43 -07002802 if (retval == IQ_SEND_FAILED) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002803 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2804 retval);
2805 octeon_free_soft_command(oct, sc);
2806 } else {
2807 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2808 }
2809
2810 return retval;
2811}
2812
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002813/** \brief Transmit network packets to the Octeon interface
2814 * @param skbuff skbuff struct to be passed to network layer.
2815 * @param netdev pointer to network device
2816 * @returns whether the packet was transmitted to the device okay or not
2817 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
2818 */
2819static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2820{
2821 struct lio *lio;
2822 struct octnet_buf_free_info *finfo;
2823 union octnic_cmd_setup cmdsetup;
2824 struct octnic_data_pkt ndata;
2825 struct octeon_device *oct;
2826 struct oct_iq_stats *stats;
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07002827 struct octeon_instr_irh *irh;
2828 union tx_info *tx_info;
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07002829 int status = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002830 int q_idx = 0, iq_no = 0;
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07002831 int xmit_more, j;
2832 u64 dptr = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002833 u32 tag = 0;
2834
2835 lio = GET_LIO(netdev);
2836 oct = lio->oct_dev;
2837
2838 if (netif_is_multiqueue(netdev)) {
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07002839 q_idx = skb->queue_mapping;
2840 q_idx = (q_idx % (lio->linfo.num_txpciq));
2841 tag = q_idx;
2842 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002843 } else {
2844 iq_no = lio->txq;
2845 }
2846
2847 stats = &oct->instr_queue[iq_no]->stats;
2848
2849 /* Check for all conditions in which the current packet cannot be
2850 * transmitted.
2851 */
2852 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002853 (!lio->linfo.link.s.link_up) ||
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002854 (skb->len <= 0)) {
2855 netif_info(lio, tx_err, lio->netdev,
2856 "Transmit failed link_status : %d\n",
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002857 lio->linfo.link.s.link_up);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002858 goto lio_xmit_failed;
2859 }
2860
2861 /* Use space in skb->cb to store info used to unmap and
2862 * free the buffers.
2863 */
2864 finfo = (struct octnet_buf_free_info *)skb->cb;
2865 finfo->lio = lio;
2866 finfo->skb = skb;
2867 finfo->sc = NULL;
2868
2869 /* Prepare the attributes for the data to be passed to OSI. */
2870 memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2871
2872 ndata.buf = (void *)finfo;
2873
2874 ndata.q_no = iq_no;
2875
2876 if (netif_is_multiqueue(netdev)) {
2877 if (octnet_iq_is_full(oct, ndata.q_no)) {
2878 /* defer sending if queue is full */
2879 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2880 ndata.q_no);
2881 stats->tx_iq_busy++;
2882 return NETDEV_TX_BUSY;
2883 }
2884 } else {
2885 if (octnet_iq_is_full(oct, lio->txq)) {
2886 /* defer sending if queue is full */
2887 stats->tx_iq_busy++;
2888 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2889 ndata.q_no);
2890 return NETDEV_TX_BUSY;
2891 }
2892 }
2893 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
2894 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no );
2895 */
2896
2897 ndata.datasize = skb->len;
2898
2899 cmdsetup.u64 = 0;
Raghu Vatsavayi7275ebf2016-06-14 16:54:49 -07002900 cmdsetup.s.iq_no = iq_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002901
Raghu Vatsavayi7275ebf2016-06-14 16:54:49 -07002902 if (skb->ip_summed == CHECKSUM_PARTIAL)
2903 cmdsetup.s.transport_csum = 1;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002904
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002905 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2906 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2907 cmdsetup.s.timestamp = 1;
2908 }
2909
2910 if (skb_shinfo(skb)->nr_frags == 0) {
2911 cmdsetup.s.u.datasize = skb->len;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002912 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002913 /* Offload checksum calculation for TCP/UDP packets */
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07002914 dptr = dma_map_single(&oct->pci_dev->dev,
2915 skb->data,
2916 skb->len,
2917 DMA_TO_DEVICE);
2918 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002919 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2920 __func__);
2921 return NETDEV_TX_BUSY;
2922 }
2923
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07002924 ndata.cmd.cmd2.dptr = dptr;
2925 finfo->dptr = dptr;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002926 ndata.reqtype = REQTYPE_NORESP_NET;
2927
2928 } else {
2929 int i, frags;
2930 struct skb_frag_struct *frag;
2931 struct octnic_gather *g;
2932
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07002933 spin_lock(&lio->glist_lock[q_idx]);
2934 g = (struct octnic_gather *)
2935 list_delete_head(&lio->glist[q_idx]);
2936 spin_unlock(&lio->glist_lock[q_idx]);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002937
2938 if (!g) {
2939 netif_info(lio, tx_err, lio->netdev,
2940 "Transmit scatter gather: glist null!\n");
2941 goto lio_xmit_failed;
2942 }
2943
2944 cmdsetup.s.gather = 1;
2945 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002946 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002947
2948 memset(g->sg, 0, g->sg_size);
2949
2950 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2951 skb->data,
2952 (skb->len - skb->data_len),
2953 DMA_TO_DEVICE);
2954 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2955 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2956 __func__);
2957 return NETDEV_TX_BUSY;
2958 }
2959 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2960
2961 frags = skb_shinfo(skb)->nr_frags;
2962 i = 1;
2963 while (frags--) {
2964 frag = &skb_shinfo(skb)->frags[i - 1];
2965
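			/* Each octeon gather-list entry carries four
			 * pointer/size slots, so fragment i maps to entry
			 * i >> 2, slot i & 3 (entry 0, slot 0 already holds
			 * the linear part of the skb).
			 */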
2966 g->sg[(i >> 2)].ptr[(i & 3)] =
2967 dma_map_page(&oct->pci_dev->dev,
2968 frag->page.p,
2969 frag->page_offset,
2970 frag->size,
2971 DMA_TO_DEVICE);
2972
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07002973 if (dma_mapping_error(&oct->pci_dev->dev,
2974 g->sg[i >> 2].ptr[i & 3])) {
2975 dma_unmap_single(&oct->pci_dev->dev,
2976 g->sg[0].ptr[0],
2977 skb->len - skb->data_len,
2978 DMA_TO_DEVICE);
2979 for (j = 1; j < i; j++) {
2980 frag = &skb_shinfo(skb)->frags[j - 1];
2981 dma_unmap_page(&oct->pci_dev->dev,
2982 g->sg[j >> 2].ptr[j & 3],
2983 frag->size,
2984 DMA_TO_DEVICE);
2985 }
2986 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2987 __func__);
2988 return NETDEV_TX_BUSY;
2989 }
2990
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002991 add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
2992 i++;
2993 }
2994
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07002995 dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
2996 g->sg_size, DMA_TO_DEVICE);
2997 dptr = g->sg_dma_ptr;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002998
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07002999 ndata.cmd.cmd2.dptr = dptr;
3000 finfo->dptr = dptr;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003001 finfo->g = g;
3002
3003 ndata.reqtype = REQTYPE_NORESP_NET_SG;
3004 }
3005
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07003006 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
3007 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003008
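	/* For TSO, pass the segment size and count to the firmware via
	 * tx_info in the command's ossp word; the NIC side performs the
	 * actual segmentation.
	 */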
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07003009 if (skb_shinfo(skb)->gso_size) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003010 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
3011 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07003012 stats->tx_gso++;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003013 }
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07003014
Raghu Vatsavayi0da0b772016-06-21 22:53:04 -07003015 /* HW insert VLAN tag */
3016 if (skb_vlan_tag_present(skb)) {
3017 irh->priority = skb_vlan_tag_get(skb) >> 13;
3018 irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
3019 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003020
3021 xmit_more = skb->xmit_more;
3022
3023 if (unlikely(cmdsetup.s.timestamp))
3024 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
3025 else
3026 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
3027 if (status == IQ_SEND_FAILED)
3028 goto lio_xmit_failed;
3029
3030 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
3031
3032 if (status == IQ_SEND_STOP)
3033 stop_q(lio->netdev, q_idx);
3034
Florian Westphal860e9532016-05-03 16:33:13 +02003035 netif_trans_update(netdev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003036
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07003037 if (skb_shinfo(skb)->gso_size)
3038 stats->tx_done += skb_shinfo(skb)->gso_segs;
3039 else
3040 stats->tx_done++;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003041 stats->tx_tot_bytes += skb->len;
3042
3043 return NETDEV_TX_OK;
3044
3045lio_xmit_failed:
3046 stats->tx_dropped++;
3047 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
3048 iq_no, stats->tx_dropped);
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07003049 if (dptr)
3050 dma_unmap_single(&oct->pci_dev->dev, dptr,
3051 ndata.datasize, DMA_TO_DEVICE);
Raghu Vatsavayicabeb132016-06-14 16:54:47 -07003052 tx_buffer_free(skb);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003053 return NETDEV_TX_OK;
3054}
3055
3056/** \brief Network device Tx timeout
3057 * @param netdev pointer to network device
3058 */
3059static void liquidio_tx_timeout(struct net_device *netdev)
3060{
3061 struct lio *lio;
3062
3063 lio = GET_LIO(netdev);
3064
3065 netif_info(lio, tx_err, lio->netdev,
3066 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
3067 netdev->stats.tx_dropped);
Florian Westphal860e9532016-05-03 16:33:13 +02003068 netif_trans_update(netdev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003069 txqs_wake(netdev);
3070}
3071
Raghu Vatsavayi63245f22016-06-21 22:53:05 -07003072static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
3073 __be16 proto __attribute__((unused)),
3074 u16 vid)
3075{
3076 struct lio *lio = GET_LIO(netdev);
3077 struct octeon_device *oct = lio->oct_dev;
3078 struct octnic_ctrl_pkt nctrl;
3079 int ret = 0;
3080
3081 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3082
3083 nctrl.ncmd.u64 = 0;
3084 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
3085 nctrl.ncmd.s.param1 = vid;
3086 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3087 nctrl.wait_time = 100;
3088 nctrl.netpndev = (u64)netdev;
3089 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3090
3091 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3092 if (ret < 0) {
3093 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
3094 ret);
3095 }
3096
3097 return ret;
3098}
3099
3100static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
3101 __be16 proto __attribute__((unused)),
3102 u16 vid)
3103{
3104 struct lio *lio = GET_LIO(netdev);
3105 struct octeon_device *oct = lio->oct_dev;
3106 struct octnic_ctrl_pkt nctrl;
3107 int ret = 0;
3108
3109 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3110
3111 nctrl.ncmd.u64 = 0;
3112 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
3113 nctrl.ncmd.s.param1 = vid;
3114 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3115 nctrl.wait_time = 100;
3116 nctrl.netpndev = (u64)netdev;
3117 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3118
3119 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3120 if (ret < 0) {
3121 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
3122 ret);
3123 }
3124 return ret;
3125}
3126
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003127int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003128{
3129 struct lio *lio = GET_LIO(netdev);
3130 struct octeon_device *oct = lio->oct_dev;
3131 struct octnic_ctrl_pkt nctrl;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003132 int ret = 0;
3133
3134 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3135
3136 nctrl.ncmd.u64 = 0;
3137 nctrl.ncmd.s.cmd = cmd;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003138 nctrl.ncmd.s.param1 = param1;
3139 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003140 nctrl.wait_time = 100;
3141 nctrl.netpndev = (u64)netdev;
3142 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3143
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003144 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003145 if (ret < 0) {
3146 dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
3147 ret);
3148 }
3149 return ret;
3150}
3151
3152/** \brief Net device fix features
3153 * @param netdev pointer to network device
3154 * @param request features requested
3155 * @returns updated features list
3156 */
3157static netdev_features_t liquidio_fix_features(struct net_device *netdev,
3158 netdev_features_t request)
3159{
3160 struct lio *lio = netdev_priv(netdev);
3161
3162 if ((request & NETIF_F_RXCSUM) &&
3163 !(lio->dev_capability & NETIF_F_RXCSUM))
3164 request &= ~NETIF_F_RXCSUM;
3165
3166 if ((request & NETIF_F_HW_CSUM) &&
3167 !(lio->dev_capability & NETIF_F_HW_CSUM))
3168 request &= ~NETIF_F_HW_CSUM;
3169
3170 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
3171 request &= ~NETIF_F_TSO;
3172
3173 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
3174 request &= ~NETIF_F_TSO6;
3175
3176 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
3177 request &= ~NETIF_F_LRO;
3178
3179 /* Disable LRO if RXCSUM is off */
3180 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
3181 (lio->dev_capability & NETIF_F_LRO))
3182 request &= ~NETIF_F_LRO;
3183
3184 return request;
3185}
3186
3187/** \brief Net device set features
3188 * @param netdev pointer to network device
3189 * @param features features to enable/disable
3190 */
3191static int liquidio_set_features(struct net_device *netdev,
3192 netdev_features_t features)
3193{
3194 struct lio *lio = netdev_priv(netdev);
3195
3196 if (!((netdev->features ^ features) & NETIF_F_LRO))
3197 return 0;
3198
3199 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003200 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3201 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003202 else if (!(features & NETIF_F_LRO) &&
3203 (lio->dev_capability & NETIF_F_LRO))
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003204 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
3205 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003206
3207 return 0;
3208}
3209
3210static struct net_device_ops lionetdevops = {
3211 .ndo_open = liquidio_open,
3212 .ndo_stop = liquidio_stop,
3213 .ndo_start_xmit = liquidio_xmit,
3214 .ndo_get_stats = liquidio_get_stats,
3215 .ndo_set_mac_address = liquidio_set_mac,
3216 .ndo_set_rx_mode = liquidio_set_mcast_list,
3217 .ndo_tx_timeout = liquidio_tx_timeout,
Raghu Vatsavayi63245f22016-06-21 22:53:05 -07003218
3219 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
3220 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003221 .ndo_change_mtu = liquidio_change_mtu,
3222 .ndo_do_ioctl = liquidio_ioctl,
3223 .ndo_fix_features = liquidio_fix_features,
3224 .ndo_set_features = liquidio_set_features,
3225};
3226
3227/** \brief Entry point for the liquidio module
3228 */
3229static int __init liquidio_init(void)
3230{
3231 int i;
3232 struct handshake *hs;
3233
3234 init_completion(&first_stage);
3235
3236 octeon_init_device_list(conf_type);
3237
3238 if (liquidio_init_pci())
3239 return -EINVAL;
3240
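	/* Give the first probed device up to a second to signal first_stage
	 * before walking the per-device handshake list below.
	 */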
3241 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3242
3243 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3244 hs = &handshake[i];
3245 if (hs->pci_dev) {
3246 wait_for_completion(&hs->init);
3247 if (!hs->init_ok) {
3248 /* init handshake failed */
3249 dev_err(&hs->pci_dev->dev,
3250 "Failed to init device\n");
3251 liquidio_deinit_pci();
3252 return -EIO;
3253 }
3254 }
3255 }
3256
3257 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3258 hs = &handshake[i];
3259 if (hs->pci_dev) {
3260 wait_for_completion_timeout(&hs->started,
3261 msecs_to_jiffies(30000));
3262 if (!hs->started_ok) {
3263 /* starter handshake failed */
3264 dev_err(&hs->pci_dev->dev,
3265 "Firmware failed to start\n");
3266 liquidio_deinit_pci();
3267 return -EIO;
3268 }
3269 }
3270 }
3271
3272 return 0;
3273}
3274
Raghu Vatsavayi5b173cf2015-06-12 18:11:50 -07003275static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003276{
3277 struct octeon_device *oct = (struct octeon_device *)buf;
3278 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003279 int gmxport = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003280 union oct_link_status *ls;
3281 int i;
3282
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003283 if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003284 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, gmxport=%d\n",
3285 recv_pkt->buffer_size[0],
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003286 recv_pkt->rh.r_nic_info.gmxport);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003287 goto nic_info_err;
3288 }
3289
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003290 gmxport = recv_pkt->rh.r_nic_info.gmxport;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003291 ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
3292
3293 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003294 for (i = 0; i < oct->ifcount; i++) {
3295 if (oct->props[i].gmxport == gmxport) {
3296 update_link_status(oct->props[i].netdev, ls);
3297 break;
3298 }
3299 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003300
3301nic_info_err:
3302 for (i = 0; i < recv_pkt->buffer_count; i++)
3303 recv_buffer_free(recv_pkt->buffer_ptr[i]);
3304 octeon_free_recv_info(recv_info);
3305 return 0;
3306}
3307
3308/**
3309 * \brief Setup network interfaces
3310 * @param octeon_dev octeon device
3311 *
3312 * Called during init time for each device. It assumes the NIC
3313 * is already up and running. The link information for each
3314 * interface is passed in link_info.
3315 */
3316static int setup_nic_devices(struct octeon_device *octeon_dev)
3317{
3318 struct lio *lio = NULL;
3319 struct net_device *netdev;
3320 u8 mac[6], i, j;
3321 struct octeon_soft_command *sc;
3322 struct liquidio_if_cfg_context *ctx;
3323 struct liquidio_if_cfg_resp *resp;
3324 struct octdev_props *props;
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07003325 int retval, num_iqueues, num_oqueues;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003326 int num_cpus = num_online_cpus();
3327 union oct_nic_if_cfg if_cfg;
3328 unsigned int base_queue;
3329 unsigned int gmx_port_id;
3330 u32 resp_size, ctx_size;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003331 u32 ifidx_or_pfnum;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003332
3333 /* This is to handle link status changes */
3334 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3335 OPCODE_NIC_INFO,
3336 lio_nic_info, octeon_dev);
3337
3338 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3339 * They are handled directly.
3340 */
3341 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3342 free_netbuf);
3343
3344 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3345 free_netsgbuf);
3346
3347 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3348 free_netsgbuf_with_resp);
3349
3350 for (i = 0; i < octeon_dev->ifcount; i++) {
3351 resp_size = sizeof(struct liquidio_if_cfg_resp);
3352 ctx_size = sizeof(struct liquidio_if_cfg_context);
3353 sc = (struct octeon_soft_command *)
3354 octeon_alloc_soft_command(octeon_dev, 0,
3355 resp_size, ctx_size);
3356 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3357 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
3358
3359 num_iqueues =
3360 CFG_GET_NUM_TXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
3361 num_oqueues =
3362 CFG_GET_NUM_RXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
3363 base_queue =
3364 CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i);
3365 gmx_port_id =
3366 CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003367 ifidx_or_pfnum = i;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003368 if (num_iqueues > num_cpus)
3369 num_iqueues = num_cpus;
3370 if (num_oqueues > num_cpus)
3371 num_oqueues = num_cpus;
3372 dev_dbg(&octeon_dev->pci_dev->dev,
3373 "requesting config for interface %d, iqs %d, oqs %d\n",
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003374 ifidx_or_pfnum, num_iqueues, num_oqueues);
		ACCESS_ONCE(ctx->cond) = 0;
		ctx->octeon_id = lio_get_device_id(octeon_dev);
		init_waitqueue_head(&ctx->wc);

		if_cfg.u64 = 0;
		if_cfg.s.num_iqueues = num_iqueues;
		if_cfg.s.num_oqueues = num_oqueues;
		if_cfg.s.base_queue = base_queue;
		if_cfg.s.gmx_port_id = gmx_port_id;

		sc->iq_no = 0;

		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
					    OPCODE_NIC_IF_CFG, 0,
					    if_cfg.u64, 0);

		sc->callback = if_cfg_callback;
		sc->callback_arg = sc;
		sc->wait_time = 1000;

		retval = octeon_send_soft_command(octeon_dev, sc);
		if (retval == IQ_SEND_FAILED) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed status: %x\n",
				retval);
			/* Soft instr is freed by driver in case of failure. */
			goto setup_nic_dev_fail;
		}

		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed out.
		 */
		sleep_cond(&ctx->wc, &ctx->cond);
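		/* A minimal sketch of what sleep_cond() amounts to (the real
		 * helper lives in octeon_main.h), assuming if_cfg_callback()
		 * sets ctx->cond and wakes ctx->wc when the firmware reply
		 * lands:
		 *
		 *	add_wait_queue(wq, &entry);
		 *	while (!ACCESS_ONCE(*condition)) {
		 *		set_current_state(TASK_INTERRUPTIBLE);
		 *		if (signal_pending(current))
		 *			break;
		 *		schedule();
		 *	}
		 *	set_current_state(TASK_RUNNING);
		 *	remove_wait_queue(wq, &entry);
		 */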
		retval = resp->status;
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
			goto setup_nic_dev_fail;
		}

		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
				    (sizeof(struct liquidio_if_cfg_info)) >> 3);
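		/* The response is byte-swapped in 64-bit chunks; ">> 3"
		 * converts the struct size in bytes into the count of 8-byte
		 * words that octeon_swap_8B_data() expects.
		 */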

		num_iqueues = hweight64(resp->cfg_info.iqmask);
		num_oqueues = hweight64(resp->cfg_info.oqmask);
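		/* Each set bit in the firmware's queue masks is one usable
		 * queue, so e.g. an iqmask of 0xf yields num_iqueues = 4.
		 */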

		if (!num_iqueues || !num_oqueues) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
				resp->cfg_info.iqmask,
				resp->cfg_info.oqmask);
			goto setup_nic_dev_fail;
		}
		dev_dbg(&octeon_dev->pci_dev->dev,
			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
			num_iqueues, num_oqueues);
		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);

		if (!netdev) {
			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
			goto setup_nic_dev_fail;
		}

		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);

		if (num_iqueues > 1)
			lionetdevops.ndo_select_queue = select_q;

		/* Associate the routines that will handle different
		 * netdev tasks.
		 */
		netdev->netdev_ops = &lionetdevops;

		lio = GET_LIO(netdev);

		memset(lio, 0, sizeof(struct lio));

		lio->ifidx = ifidx_or_pfnum;

		props = &octeon_dev->props[i];
		props->gmxport = resp->cfg_info.linfo.gmxport;
		props->netdev = netdev;

		lio->linfo.num_rxpciq = num_oqueues;
		lio->linfo.num_txpciq = num_iqueues;
		for (j = 0; j < num_oqueues; j++) {
			lio->linfo.rxpciq[j].u64 =
				resp->cfg_info.linfo.rxpciq[j].u64;
		}
		for (j = 0; j < num_iqueues; j++) {
			lio->linfo.txpciq[j].u64 =
				resp->cfg_info.linfo.txpciq[j].u64;
		}
		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;

		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

		lio->dev_capability = NETIF_F_HIGHDMA
				      | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				      | NETIF_F_SG | NETIF_F_RXCSUM
				      | NETIF_F_GRO
				      | NETIF_F_TSO | NETIF_F_TSO6
				      | NETIF_F_LRO;
		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);

		netdev->vlan_features = lio->dev_capability;
		/* Add any unchangeable hw features */
		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
				       NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_CTAG_TX;

		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);

		netdev->hw_features = lio->dev_capability;
		/* HW_VLAN_RX and HW_VLAN_FILTER are always on */
		netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
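		/* Note the asymmetry above: netdev->features holds what is
		 * enabled now, while netdev->hw_features holds what ethtool
		 * may toggle. Clearing CTAG_RX only from hw_features
		 * therefore leaves VLAN RX stripping permanently enabled.
		 */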

		/* Point to the properties for octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->octprops = props;
		lio->netdev = netdev;

		dev_dbg(&octeon_dev->pci_dev->dev,
			"if%d gmx: %d hw_addr: 0x%llx\n", i,
			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));

		/* 64-bit swap required on LE machines */
		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
		for (j = 0; j < 6; j++)
			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
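		/* After the swap, the 8-byte hw_addr carries the 6-byte MAC
		 * in its last six bytes, which is why the copy above starts
		 * at offset 2:
		 *
		 *	byte:	0  1  2   3   4   5   6   7
		 *		0  0  m0  m1  m2  m3  m4  m5
		 */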

		/* Copy MAC Address to OS network device structure */
		ether_addr_copy(netdev->dev_addr, mac);

		/* By default all interfaces on a single Octeon use the same
		 * tx and rx queues
		 */
		lio->txq = lio->linfo.txpciq[0].s.q_no;
		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
		if (setup_io_queues(octeon_dev, i)) {
			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
			goto setup_nic_dev_fail;
		}

		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);

		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);

		if (setup_glists(octeon_dev, lio, num_iqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Gather list allocation failed\n");
			goto setup_nic_dev_fail;
		}

		/* Register ethtool support */
		liquidio_set_ethtool_ops(netdev);

		if (netdev->features & NETIF_F_LRO)
			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

		liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);

		if ((debug != -1) && (debug & NETIF_MSG_HW))
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE, 0);

		/* Register the network device with the OS */
		if (register_netdev(netdev)) {
			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
			goto setup_nic_dev_fail;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		netif_carrier_off(netdev);
		lio->link_changes++;

		ifstate_set(lio, LIO_IFSTATE_REGISTERED);

		dev_dbg(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup successful\n", i);

		octeon_free_soft_command(octeon_dev, sc);
	}

	return 0;

setup_nic_dev_fail:

	octeon_free_soft_command(octeon_dev, sc);

	while (i--) {
		dev_err(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup failed\n", i);
		liquidio_destroy_nic_device(octeon_dev, i);
	}
	return -ENODEV;
}
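
/* For reference, the per-interface bring-up above is one instance of the
 * driver's generic soft-command request/response pattern; condensed, with
 * the error handling elided, it amounts to:
 *
 *	sc = octeon_alloc_soft_command(oct, 0, resp_size, ctx_size);
 *	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
 *				    OPCODE_NIC_IF_CFG, 0, if_cfg.u64, 0);
 *	sc->callback = if_cfg_callback;
 *	octeon_send_soft_command(oct, sc);
 *	sleep_cond(&ctx->wc, &ctx->cond);
 *	...consume resp...
 *	octeon_free_soft_command(oct, sc);
 */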

/**
 * \brief initialize the NIC
 * @param oct octeon device
 *
 * This initialization routine is called once the Octeon device application is
 * up and running.
 */
static int liquidio_init_nic_module(struct octeon_device *oct)
{
	struct oct_intrmod_cfg *intrmod_cfg;
	int i, retval = 0;
	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));

	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");

	/* Only the default iq and oq were initialized earlier; the if_cfg
	 * command run for each port below brings up the rest.
	 */
	oct->ifcount = num_nic_ports;

	memset(oct->props, 0,
	       sizeof(struct octdev_props) * num_nic_ports);

	for (i = 0; i < MAX_OCTEON_LINKS; i++)
		oct->props[i].gmxport = -1;

	retval = setup_nic_devices(oct);
	if (retval) {
		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
		goto octnet_init_failure;
	}

	liquidio_ptp_init(oct);

	/* Initialize interrupt moderation params */
	intrmod_cfg = &oct->intrmod;
	intrmod_cfg->rx_enable = 1;
	intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
	intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
	intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
	intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
	intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
	intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
	intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
	intrmod_cfg->tx_enable = 1;
	intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
	intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
	intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
	intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");

	return retval;

octnet_init_failure:

	oct->ifcount = 0;

	return retval;
}

/**
 * \brief starter callback that invokes the remaining initialization work after
 * the NIC is up and running.
 * @param work the work_struct embedded in the device's cavium_wk context
 */
static void nic_starter(struct work_struct *work)
{
	struct octeon_device *oct;
	struct cavium_wk *wk = (struct cavium_wk *)work;

	oct = (struct octeon_device *)wk->ctxptr;

	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
		return;

	/* If the status of the device is CORE_OK, the core
	 * application has reported its application type. Call
	 * any registered handlers now and move to the RUNNING
	 * state.
	 */
	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
		schedule_delayed_work(&oct->nic_poll_work.work,
				      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
		return;
	}

	atomic_set(&oct->status, OCT_DEV_RUNNING);

	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");

		if (liquidio_init_nic_module(oct))
			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
		else
			handshake[oct->octeon_id].started_ok = 1;
	} else {
		dev_err(&oct->pci_dev->dev,
			"Unexpected application running on NIC (%d). Check firmware.\n",
			oct->app_mode);
	}

	complete(&handshake[oct->octeon_id].started);
}

/**
 * \brief Device initialization for each Octeon device that is probed
 * @param octeon_dev octeon device
 */
static int octeon_device_init(struct octeon_device *octeon_dev)
{
	int j, ret;
	char bootcmd[] = "\n";
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)octeon_dev->priv;

	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);

	/* Enable access to the octeon device and make its DMA capability
	 * known to the OS.
	 */
	if (octeon_pci_os_setup(octeon_dev))
		return 1;

	/* Identify the Octeon type and map the BAR address space. */
	if (octeon_chip_specific_setup(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);

	octeon_dev->app_mode = CVM_DRV_INVALID_APP;

	/* Do a soft reset of the Octeon device. */
	if (octeon_dev->fn_list.soft_reset(octeon_dev))
		return 1;

	/* Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (octeon_init_dispatch_list(octeon_dev))
		return 1;

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_CORE_DRV_ACTIVE,
				    octeon_core_drv_init,
				    octeon_dev);

	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
			      LIQUIDIO_STARTER_POLL_INTERVAL_MS);

	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);

	octeon_set_io_queues_off(octeon_dev);

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (octeon_setup_instr_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev,
			"instruction queue initialization failed\n");
		/* On error, release any previously allocated queues */
		for (j = 0; j < octeon_dev->num_iqs; j++)
			octeon_delete_instr_queue(octeon_dev, j);
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize soft command buffer pool */
	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
		/* Release any previously allocated queues */
		for (j = 0; j < octeon_dev->num_oqs; j++)
			octeon_delete_droq(octeon_dev, j);
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);

	/* The input and output queue registers were setup earlier (the queues
	 * were not enabled). Any additional registers that need to be
	 * programmed should be done now.
	 */
	ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev,
			"Failed to configure device registers\n");
		return ret;
	}

	/* Initialize the tasklet that handles output queue packet processing. */
	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
	tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
		     (unsigned long)octeon_dev);

	/* Setup the interrupt handler and record the INT SUM register address. */
	octeon_setup_interrupt(octeon_dev);

	/* Enable Octeon device interrupts */
	octeon_dev->fn_list.enable_interrupt(octeon_dev->chip);

	/* Enable the input and output queues for this Octeon device */
	octeon_dev->fn_list.enable_io_queues(octeon_dev);

	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);

	dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");

	if (ddr_timeout == 0)
		dev_info(&octeon_dev->pci_dev->dev,
			 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");

	schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);

	/* Wait for the octeon to initialize DDR after the soft-reset. */
	ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev,
			"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
			ret);
		return 1;
	}

	if (octeon_wait_for_bootloader(octeon_dev, 1000) != 0) {
		dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
		return 1;
	}

	/* Divert U-Boot to take commands from the host instead. */
	ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);

	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
	ret = octeon_init_consoles(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
		return 1;
	}
	ret = octeon_add_console(octeon_dev, 0);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);

	dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
	ret = load_firmware(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
		return 1;
	}

	handshake[octeon_dev->octeon_id].init_ok = 1;
	complete(&handshake[octeon_dev->octeon_id].init);

	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);

	/* Send Credit for Octeon Output queues. Credits are always sent after
	 * the output queue is enabled.
	 */
	for (j = 0; j < octeon_dev->num_oqs; j++)
		writel(octeon_dev->droq[j]->max_count,
		       octeon_dev->droq[j]->pkts_credit_reg);
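	/* Writing max_count to pkts_credit_reg grants the hardware one
	 * credit per descriptor in the ring, i.e. every receive buffer is
	 * initially available; the DROQ refill path later returns credits
	 * as buffers are replenished.
	 */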
3863
3864 /* Packets can start arriving on the output queues from this point. */
3865
3866 return 0;
3867}
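
/* For reference, octeon_device_init() advances the device status through the
 * following states via the atomic_set() calls above:
 *
 *	OCT_DEV_BEGIN_STATE
 *	OCT_DEV_PCI_MAP_DONE
 *	OCT_DEV_DISPATCH_INIT_DONE
 *	OCT_DEV_INSTR_QUEUE_INIT_DONE
 *	OCT_DEV_SC_BUFF_POOL_INIT_DONE
 *	OCT_DEV_RESP_LIST_INIT_DONE
 *	OCT_DEV_DROQ_INIT_DONE
 *	OCT_DEV_IO_QUEUES_DONE
 *	OCT_DEV_CONSOLE_INIT_DONE
 *	OCT_DEV_HOST_OK
 *
 * The OPCODE_NIC_CORE_DRV_ACTIVE handler then moves it to OCT_DEV_CORE_OK
 * once the core application reports in, and nic_starter() finally sets
 * OCT_DEV_RUNNING.
 */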

/**
 * \brief Exits the module
 */
static void __exit liquidio_exit(void)
{
	liquidio_deinit_pci();

	pr_info("LiquidIO network module is now unloaded\n");
}

module_init(liquidio_init);
module_exit(liquidio_exit);