/**********************************************************************
* Author: Cavium, Inc.
*
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
* Copyright (c) 2003-2015 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/ptp_clock_kernel.h>
#include <net/vxlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)	\
	(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
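
/* Note: INCR_INSTRQUEUE_PKT_COUNT() above bumps a per-instruction-queue
 * statistics counter (e.g. tx_restart) on the given Octeon device; it is
 * used by the Tx stop/wake paths later in this file.
 */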

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN];
module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");

static int conf_type;
module_param(conf_type, int, 0);
MODULE_PARM_DESC(conf_type, "Select Octeon configuration: 0 (default) or 1 (OVS)");
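
/* Usage note (illustrative example, not part of the driver): these are
 * ordinary module parameters, so the firmware type and DDR timeout can be
 * chosen at load time, e.g.:
 *
 *	modprobe liquidio fw_type=nic ddr_timeout=5000
 *
 * The firmware images themselves are the files named in the
 * MODULE_FIRMWARE() lines above and are fetched via the kernel firmware
 * loader.
 */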

static int ptp_enable = 1;

/* Bit mask values for lio->ifstate */
#define LIO_IFSTATE_DROQ_OPS             0x01
#define LIO_IFSTATE_REGISTERED           0x02
#define LIO_IFSTATE_RUNNING              0x04
#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000

struct liquidio_if_cfg_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct liquidio_if_cfg_resp {
	u64 rh;
	struct liquidio_if_cfg_info cfg_info;
	u64 status;
};

struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};
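
/* tx_info above packs a packet's TSO metadata (gso_size and gso_segs) into a
 * single 64-bit word passed down with the command; the field order is
 * reversed for big-endian builds, presumably so the 64-bit value reaches the
 * Octeon firmware with the same layout on either host endianness.
 */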

/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

/** Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
	/** List manipulation. Next and prev pointers. */
	struct list_head list;

	/** Size of the gather component at sg in bytes. */
	int sg_size;

	/** Number of bytes that sg was adjusted to make it 8B-aligned. */
	int adjust;

	/** Gather component that can accommodate max sized fragment list
	 * received from the IP layer.
	 */
	struct octeon_sg_entry *sg;

	u64 sg_dma_ptr;
};

struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};

struct octeon_device_priv {
	/** Tasklet structures for this device. */
	struct tasklet_struct droq_tasklet;
	unsigned long napi_mask;
};

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;

static void octeon_droq_bh(unsigned long pdev)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device *oct = (struct octeon_device *)pdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	/* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & (1ULL << q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
		lio_enable_irq(oct->droq[q_no], NULL);

		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
			/* set time and cnt interrupt thresholds for this DROQ
			 * for NAPI
			 */
			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
			    0x5700000040ULL);
			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
		}
	}

	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}

static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & (1ULL << i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}

/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}

/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static int wait_for_pending_requests(struct octeon_device *oct)
{
	int i, pcount = 0;

	for (i = 0; i < 100; i++) {
		pcount =
			atomic_read(&oct->response_list
				[OCTEON_ORDERED_SC_LIST].pending_req_count);
		if (pcount)
			schedule_timeout_uninterruptible(HZ / 10);
		else
			break;
	}

	if (pcount)
		return 1;

	return 0;
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(100);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & (1ULL << i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}

/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask;        /* Clear corresponding nonfatal bits */
	else
		status &= mask;         /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}

/**
 * \brief Stop all PCI IO to a given device
 * @param dev Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* cn63xx_cleanup_aer_uncorrect_error_status(oct->pci_dev); */
	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery but only
	 * for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
	/* Nothing to be done here. */
}

#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
			    pm_message_t state __attribute__((unused)))
{
	return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
	return 0;
}
#endif

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled	= liquidio_pcie_mmio_enabled,
	.slot_reset	= liquidio_pcie_slot_reset,
	.resume		= liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
	{       /* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 23xx pf */
		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static struct pci_driver liquidio_pci_driver = {
	.name		= "LiquidIO",
	.id_table	= liquidio_pci_tbl,
	.probe		= liquidio_probe,
	.remove		= liquidio_remove,
	.err_handler	= &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
	.suspend	= liquidio_suspend,
	.resume		= liquidio_resume,
#endif
};

/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * \brief check interface state
 * @param lio per-network private data
 * @param state_flag flag state to check
 */
static inline int ifstate_check(struct lio *lio, int state_flag)
{
	return atomic_read(&lio->ifstate) & state_flag;
}

/**
 * \brief set interface state
 * @param lio per-network private data
 * @param state_flag flag state to set
 */
static inline void ifstate_set(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
}

/**
 * \brief clear interface state
 * @param lio per-network private data
 * @param state_flag flag state to clear
 */
static inline void ifstate_reset(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}

/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void txqs_stop(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_stop_subqueue(netdev, i);
	} else {
		netif_stop_queue(netdev);
	}
}

/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void txqs_start(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_start_subqueue(netdev, i);
	} else {
		netif_start_queue(netdev);
	}
}

/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void txqs_wake(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++) {
			int qno = lio->linfo.txpciq[i %
				(lio->linfo.num_txpciq)].s.q_no;

			if (__netif_subqueue_stopped(netdev, i)) {
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
							  tx_restart, 1);
				netif_wake_subqueue(netdev, i);
			}
		}
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		netif_wake_queue(netdev);
	}
}

/**
 * \brief Stop Tx queue
 * @param netdev network device
 */
static void stop_txq(struct net_device *netdev)
{
	txqs_stop(netdev);
}

/**
 * \brief Start Tx queue
 * @param netdev network device
 */
static void start_txq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->linfo.link.s.link_up) {
		txqs_start(netdev);
		return;
	}
}

/**
 * \brief Wake a queue
 * @param netdev network device
 * @param q which queue to wake
 */
static inline void wake_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_wake_subqueue(netdev, q);
	else
		netif_wake_queue(netdev);
}

/**
 * \brief Stop a queue
 * @param netdev network device
 * @param q which queue to stop
 */
static inline void stop_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_stop_subqueue(netdev, q);
	else
		netif_stop_queue(netdev);
}

/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
	int ret_val = 0;

	if (netif_is_multiqueue(lio->netdev)) {
		int numqs = lio->netdev->num_tx_queues;
		int q, iq = 0;

		/* check each sub-queue state */
		for (q = 0; q < numqs; q++) {
			iq = lio->linfo.txpciq[q %
				(lio->linfo.num_txpciq)].s.q_no;
			if (octnet_iq_is_full(lio->oct_dev, iq))
				continue;
			if (__netif_subqueue_stopped(lio->netdev, q)) {
				wake_q(lio->netdev, q);
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
							  tx_restart, 1);
				ret_val++;
			}
		}
	} else {
		if (octnet_iq_is_full(lio->oct_dev, lio->txq))
			return 0;
		wake_q(lio->netdev, lio->txq);
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		ret_val = 1;
	}
	return ret_val;
}

/**
 * Remove the node at the head of the list. The list would be empty at
 * the end of this call if there are no more nodes in the list.
 */
static inline struct list_head *list_delete_head(struct list_head *root)
{
	struct list_head *node;

	if ((root->prev == root) && (root->next == root))
		node = NULL;
	else
		node = root->next;

	if (node)
		list_del(node);

	return node;
}
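
/* Note: list_delete_head() above is equivalent to checking list_empty() and,
 * when the list is not empty, detaching the first node with list_del().
 * Callers cast the returned list_head back to the enclosing
 * struct octnic_gather, which works because it is the structure's first
 * member.
 */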

/**
 * \brief Delete gather lists
 * @param lio per-network private data
 */
static void delete_glists(struct lio *lio)
{
	struct octnic_gather *g;
	int i;

	if (!lio->glist)
		return;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		do {
			g = (struct octnic_gather *)
				list_delete_head(&lio->glist[i]);
			if (g) {
				if (g->sg) {
					dma_unmap_single(&lio->oct_dev->
							 pci_dev->dev,
							 g->sg_dma_ptr,
							 g->sg_size,
							 DMA_TO_DEVICE);
					kfree((void *)((unsigned long)g->sg -
						       g->adjust));
				}
				kfree(g);
			}
		} while (g);
	}

	kfree((void *)lio->glist);
}

/**
 * \brief Setup gather lists
 * @param lio per-network private data
 */
static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	int i, j;
	struct octnic_gather *g;

	lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
				  GFP_KERNEL);
	if (!lio->glist_lock)
		return 1;

	lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
			     GFP_KERNEL);
	if (!lio->glist) {
		kfree((void *)lio->glist_lock);
		return 1;
	}

	for (i = 0; i < num_iqs; i++) {
		int numa_node = cpu_to_node(i % num_online_cpus());

		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
					 numa_node);
			if (!g)
				g = kzalloc(sizeof(*g), GFP_KERNEL);
			if (!g)
				break;

			g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
				      OCT_SG_ENTRY_SIZE);

			g->sg = kmalloc_node(g->sg_size + 8,
					     GFP_KERNEL, numa_node);
			if (!g->sg)
				g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
			if (!g->sg) {
				kfree(g);
				break;
			}

			/* The gather component should be aligned on 64-bit
			 * boundary
			 */
			if (((unsigned long)g->sg) & 7) {
				g->adjust = 8 - (((unsigned long)g->sg) & 7);
				g->sg = (struct octeon_sg_entry *)
					((unsigned long)g->sg + g->adjust);
			}
			g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
						       g->sg, g->sg_size,
						       DMA_TO_DEVICE);
			if (dma_mapping_error(&oct->pci_dev->dev,
					      g->sg_dma_ptr)) {
				kfree((void *)((unsigned long)g->sg -
					       g->adjust));
				kfree(g);
				break;
			}

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			delete_glists(lio);
			return 1;
		}
	}

	return 0;
}
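
/* Worked example for the alignment fix-up in setup_glists() above
 * (illustrative addresses only): the buffer is allocated with 8 spare bytes,
 * so if kmalloc() returns 0x...1234 then (0x...1234 & 7) == 4, adjust becomes
 * 8 - 4 = 4, and sg is advanced to the 8-byte-aligned address 0x...1238.
 * delete_glists() and the error paths free (sg - adjust) to undo this.
 */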

/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);

	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			netif_carrier_on(netdev);
			/* start_txq(netdev); */
			txqs_wake(netdev);
		} else {
			netif_carrier_off(netdev);
			stop_txq(netdev);
		}
	}
}

/* Runs in interrupt context. */
static void update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct net_device *netdev;
	struct lio *lio;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];

	/*octeon_update_iq_read_idx(oct, iq);*/

	netdev = oct->props[iq->ifidx].netdev;

	/* This is needed because the first IQ does not have
	 * a netdev associated with it.
	 */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);
	if (netif_is_multiqueue(netdev)) {
		if (__netif_subqueue_stopped(netdev, iq->q_index) &&
		    lio->linfo.link.s.link_up &&
		    (!octnet_iq_is_full(oct, iq_num))) {
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
						  tx_restart, 1);
			netif_wake_subqueue(netdev, iq->q_index);
		} else {
			if (!octnet_iq_is_full(oct, lio->txq)) {
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
							  lio->txq,
							  tx_restart, 1);
				wake_q(netdev, lio->txq);
			}
		}
	}
}

static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
	struct octeon_device *oct = droq->oct_dev;
	struct octeon_device_priv *oct_priv =
	    (struct octeon_device_priv *)oct->priv;

	if (droq->ops.poll_mode) {
		droq->ops.napi_fn(droq);
	} else {
		if (ret & MSIX_PO_INT) {
			tasklet_schedule(&oct_priv->droq_tasklet);
			return 1;
		}
		/* this will be flushed periodically by check iq db */
		if (ret & MSIX_PI_INT)
			return 0;
	}
	return 0;
}

/**
 * \brief Droq packet processor scheduler
 * @param oct octeon device
 */
static
void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	u64 oq_no;
	struct octeon_droq *droq;

	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
		     oq_no++) {
			if (!(oct->droq_intr & (1ULL << oq_no)))
				continue;

			droq = oct->droq[oq_no];

			if (droq->ops.poll_mode) {
				droq->ops.napi_fn(droq);
				oct_priv->napi_mask |= (1 << oq_no);
			} else {
				tasklet_schedule(&oct_priv->droq_tasklet);
			}
		}
	}
}

static irqreturn_t
liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
{
	u64 ret;
	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
	struct octeon_device *oct = ioq_vector->oct_dev;
	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];

	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

	if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
		liquidio_schedule_msix_droq_pkt_handler(droq, ret);

	return IRQ_HANDLED;
}

/**
 * \brief Interrupt handler for octeon
 * @param irq unused
 * @param dev octeon device
 */
static
irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
					 void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	irqreturn_t ret;

	/* Disable our interrupts for the duration of ISR */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	ret = oct->fn_list.process_interrupt_regs(oct);

	if (ret == IRQ_HANDLED)
		liquidio_schedule_droq_pkt_handlers(oct);

	/* Re-enable our interrupts */
	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
		oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	return ret;
}

/**
 * \brief Setup interrupt for octeon device
 * @param oct octeon device
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
static int octeon_setup_interrupt(struct octeon_device *oct)
{
	int irqret, err;
	struct msix_entry *msix_entries;
	int i;
	int num_ioq_vectors;
	int num_alloc_ioq_vectors;

	if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
		oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
		/* one non ioq interrupt for handling sli_mac_pf_int_sum */
		oct->num_msix_irqs += 1;

		oct->msix_entries = kcalloc(
		    oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
		if (!oct->msix_entries)
			return 1;

		msix_entries = (struct msix_entry *)oct->msix_entries;
		/* Assumption: the PF MSI-X vectors start at pf_srn and run
		 * through trs, not from 0. If that changes, update this code.
		 */
		for (i = 0; i < oct->num_msix_irqs - 1; i++)
			msix_entries[i].entry = oct->sriov_info.pf_srn + i;
		msix_entries[oct->num_msix_irqs - 1].entry =
		    oct->sriov_info.trs;
		num_alloc_ioq_vectors = pci_enable_msix_range(
						oct->pci_dev, msix_entries,
						oct->num_msix_irqs,
						oct->num_msix_irqs);
		if (num_alloc_ioq_vectors < 0) {
			dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			return 1;
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

		num_ioq_vectors = oct->num_msix_irqs;

		/* For PF, there is one non-ioq interrupt handler */
		num_ioq_vectors -= 1;
		irqret = request_irq(msix_entries[num_ioq_vectors].vector,
				     liquidio_legacy_intr_handler, 0, "octeon",
				     oct);
		if (irqret) {
			dev_err(&oct->pci_dev->dev,
				"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
				irqret);
			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			return 1;
		}

		for (i = 0; i < num_ioq_vectors; i++) {
			irqret = request_irq(msix_entries[i].vector,
					     liquidio_msix_intr_handler, 0,
					     "octeon", &oct->ioq_vector[i]);
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				/* Free the non-ioq irq vector here. */
				free_irq(msix_entries[num_ioq_vectors].vector,
					 oct);

				while (i) {
					i--;
					/* clear the affinity mask */
					irq_set_affinity_hint(
						msix_entries[i].vector, NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
				}
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				oct->msix_entries = NULL;
				return 1;
			}
			oct->ioq_vector[i].vector = msix_entries[i].vector;
			/* assign the cpu mask for this msix interrupt vector */
			irq_set_affinity_hint(
					msix_entries[i].vector,
					(&oct->ioq_vector[i].affinity_mask));
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
			oct->octeon_id);
	} else {
		err = pci_enable_msi(oct->pci_dev);
		if (err)
			dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
				 err);
		else
			oct->flags |= LIO_FLAG_MSI_ENABLED;

		irqret = request_irq(oct->pci_dev->irq,
				     liquidio_legacy_intr_handler, IRQF_SHARED,
				     "octeon", oct);
		if (irqret) {
			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
			dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
				irqret);
			return 1;
		}
	}
	return 0;
}
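
/* Summary of the MSI-X layout programmed above (derived from the code, for
 * reference): on a CN23XX PF with MSI-X enabled, one vector per PF ring
 * services the IO queues (each with a CPU affinity hint), and the final
 * vector (entry trs) is handled by liquidio_legacy_intr_handler() for the
 * non-queue sli_mac_pf_int_sum events. Otherwise a single MSI or legacy
 * interrupt is shared for everything.
 */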

/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
	       const struct pci_device_id *ent __attribute__((unused)))
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	if (pdev->device == OCTEON_CN23XX_PF_VID)
		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}

/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	int i;
	struct msix_entry *msix_entries;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	struct handshake *hs;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:

		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fallthrough */
	case OCT_DEV_HOST_OK:

		/* fallthrough */
	case OCT_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		octeon_remove_consoles(oct);

		/* fallthrough */
	case OCT_DEV_IO_QUEUES_DONE:
		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* Disable interrupts  */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				/* clear the affinity_cpumask */
				irq_set_affinity_hint(msix_entries[i].vector,
						      NULL);
				free_irq(msix_entries[i].vector,
					 &oct->ioq_vector[i]);
			}
			/* non-iov vector's argument is oct struct */
			free_irq(msix_entries[i].vector, oct);

			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
		} else {
			/* Release the interrupt line */
			free_irq(oct->pci_dev->irq, oct);

			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
		}

		if (OCTEON_CN23XX_PF(oct))
			octeon_free_ioq_vector(oct);
		/* fallthrough */
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

		/* fallthrough */
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		/* fallthrough */
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		/* fallthrough */
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & (1ULL << i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}

		/* fallthrough */
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		/* fallthrough */
	case OCT_DEV_PCI_MAP_DONE:
		/* Soft reset the octeon device before exiting */
		if ((!OCTEON_CN23XX_PF(oct)) || !oct->octeon_id)
			oct->fn_list.soft_reset(oct);

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		/* fallthrough */
	case OCT_DEV_BEGIN_STATE:
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* Nothing to be done here either */
		break;
	}                       /* end switch (oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}

/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octnic_ctrl_pkt nctrl;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
	nctrl.ncmd.s.param1 = start_stop;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)lio->netdev;

	if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl) < 0)
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
}

/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct lio *lio;
	struct napi_struct *napi, *n;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	send_rx_ctrl_cmd(lio, 0);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		txqs_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;
	}

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}

/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
	int i, j;
	struct lio *lio;

	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
		return 1;
	}

	spin_lock_bh(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = OCT_DRV_OFFLINE;
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	for (i = 0; i < oct->ifcount; i++) {
		lio = GET_LIO(oct->props[i].netdev);
		for (j = 0; j < lio->linfo.num_rxpciq; j++)
			octeon_unregister_droq_ops(oct,
						   lio->linfo.rxpciq[j].s.q_no);
	}

	for (i = 0; i < oct->ifcount; i++)
		liquidio_destroy_nic_device(oct, i);

	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
	return 0;
}

/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
	struct octeon_device *oct_dev = pci_get_drvdata(pdev);

	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
		liquidio_stop_nic_module(oct_dev);

	/* Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}

/**
 * \brief Identify the Octeon device and map the BAR address space
 * @param oct octeon device
 */
1465static int octeon_chip_specific_setup(struct octeon_device *oct)
1466{
1467 u32 dev_id, rev_id;
1468 int ret = 1;
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001469 char *s;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001470
1471 pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1472 pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1473 oct->rev_id = rev_id & 0xff;
1474
1475 switch (dev_id) {
1476 case OCTEON_CN68XX_PCIID:
1477 oct->chip_id = OCTEON_CN68XX;
1478 ret = lio_setup_cn68xx_octeon_device(oct);
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001479 s = "CN68XX";
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001480 break;
1481
1482 case OCTEON_CN66XX_PCIID:
1483 oct->chip_id = OCTEON_CN66XX;
1484 ret = lio_setup_cn66xx_octeon_device(oct);
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001485 s = "CN66XX";
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001486 break;
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001487
Raghu Vatsavayi72c00912016-08-31 11:03:25 -07001488 case OCTEON_CN23XX_PCIID_PF:
1489 oct->chip_id = OCTEON_CN23XX_PF_VID;
1490 ret = setup_cn23xx_octeon_pf_device(oct);
1491 s = "CN23XX";
1492 break;
1493
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001494 default:
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001495 s = "?";
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001496 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1497 dev_id);
1498 }
1499
1500 if (!ret)
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001501 dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001502 OCTEON_MAJOR_REV(oct),
1503 OCTEON_MINOR_REV(oct),
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001504 octeon_get_conf(oct)->card_name,
1505 LIQUIDIO_VERSION);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001506
1507 return ret;
1508}
1509
1510/**
1511 * \brief PCI initialization for each Octeon device.
1512 * @param oct octeon device
1513 */
1514static int octeon_pci_os_setup(struct octeon_device *oct)
1515{
1516 /* setup PCI stuff first */
1517 if (pci_enable_device(oct->pci_dev)) {
1518 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1519 return 1;
1520 }
1521
1522 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1523 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
1524 return 1;
1525 }
1526
1527 /* Enable PCI DMA Master. */
1528 pci_set_master(oct->pci_dev);
1529
1530 return 0;
1531}
1532
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07001533static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
1534{
1535 int q = 0;
1536
1537 if (netif_is_multiqueue(lio->netdev))
1538 q = skb->queue_mapping % lio->linfo.num_txpciq;
1539
1540 return q;
1541}
1542
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001543/**
1544 * \brief Check Tx queue state for a given network buffer
1545 * @param lio per-network private data
1546 * @param skb network buffer
1547 */
1548static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
1549{
1550 int q = 0, iq = 0;
1551
1552 if (netif_is_multiqueue(lio->netdev)) {
1553 q = skb->queue_mapping;
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07001554 iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001555 } else {
1556 iq = lio->txq;
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07001557 q = iq;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001558 }
1559
1560 if (octnet_iq_is_full(lio->oct_dev, iq))
1561 return 0;
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07001562
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001563 if (__netif_subqueue_stopped(lio->netdev, q)) {
1564 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07001565 wake_q(lio->netdev, q);
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001566 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001567 return 1;
1568}
1569
1570/**
1571 * \brief Unmap and free network buffer
1572 * @param buf buffer
1573 */
1574static void free_netbuf(void *buf)
1575{
1576 struct sk_buff *skb;
1577 struct octnet_buf_free_info *finfo;
1578 struct lio *lio;
1579
1580 finfo = (struct octnet_buf_free_info *)buf;
1581 skb = finfo->skb;
1582 lio = finfo->lio;
1583
1584 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1585 DMA_TO_DEVICE);
1586
1587 check_txq_state(lio, skb);
1588
Raghu Vatsavayicabeb132016-06-14 16:54:47 -07001589 tx_buffer_free(skb);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001590}
1591
1592/**
1593 * \brief Unmap and free gather buffer
1594 * @param buf buffer
1595 */
1596static void free_netsgbuf(void *buf)
1597{
1598 struct octnet_buf_free_info *finfo;
1599 struct sk_buff *skb;
1600 struct lio *lio;
1601 struct octnic_gather *g;
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07001602 int i, frags, iq;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001603
1604 finfo = (struct octnet_buf_free_info *)buf;
1605 skb = finfo->skb;
1606 lio = finfo->lio;
1607 g = finfo->g;
1608 frags = skb_shinfo(skb)->nr_frags;
1609
1610 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1611 g->sg[0].ptr[0], (skb->len - skb->data_len),
1612 DMA_TO_DEVICE);
1613
1614 i = 1;
1615 while (frags--) {
1616 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1617
1618 pci_unmap_page((lio->oct_dev)->pci_dev,
1619 g->sg[(i >> 2)].ptr[(i & 3)],
1620 frag->size, DMA_TO_DEVICE);
1621 i++;
1622 }
1623
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07001624 dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
1625 g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001626
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07001627 iq = skb_iq(lio, skb);
1628 spin_lock(&lio->glist_lock[iq]);
1629 list_add_tail(&g->list, &lio->glist[iq]);
1630 spin_unlock(&lio->glist_lock[iq]);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001631
1632 check_txq_state(lio, skb); /* mq support: sub-queue state check */
1633
Raghu Vatsavayicabeb132016-06-14 16:54:47 -07001634 tx_buffer_free(skb);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001635}
1636
1637/**
1638 * \brief Unmap and free gather buffer with response
1639 * @param buf buffer
1640 */
1641static void free_netsgbuf_with_resp(void *buf)
1642{
1643 struct octeon_soft_command *sc;
1644 struct octnet_buf_free_info *finfo;
1645 struct sk_buff *skb;
1646 struct lio *lio;
1647 struct octnic_gather *g;
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07001648 int i, frags, iq;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001649
1650 sc = (struct octeon_soft_command *)buf;
1651 skb = (struct sk_buff *)sc->callback_arg;
1652 finfo = (struct octnet_buf_free_info *)&skb->cb;
1653
1654 lio = finfo->lio;
1655 g = finfo->g;
1656 frags = skb_shinfo(skb)->nr_frags;
1657
1658 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1659 g->sg[0].ptr[0], (skb->len - skb->data_len),
1660 DMA_TO_DEVICE);
1661
1662 i = 1;
1663 while (frags--) {
1664 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1665
1666 pci_unmap_page((lio->oct_dev)->pci_dev,
1667 g->sg[(i >> 2)].ptr[(i & 3)],
1668 frag->size, DMA_TO_DEVICE);
1669 i++;
1670 }
1671
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07001672 dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
1673 g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001674
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07001675 iq = skb_iq(lio, skb);
1676
1677 spin_lock(&lio->glist_lock[iq]);
1678 list_add_tail(&g->list, &lio->glist[iq]);
1679 spin_unlock(&lio->glist_lock[iq]);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001680
1681 /* Don't free the skb yet */
1682
1683 check_txq_state(lio, skb);
1684}
1685
1686/**
1687 * \brief Adjust ptp frequency
1688 * @param ptp PTP clock info
1689 * @param ppb how much to adjust by, in parts-per-billion
1690 */
1691static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1692{
1693 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1694 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1695 u64 comp, delta;
1696 unsigned long flags;
1697 bool neg_adj = false;
1698
1699 if (ppb < 0) {
1700 neg_adj = true;
1701 ppb = -ppb;
1702 }
1703
1704 /* The hardware adds the clock compensation value to the
1705 * PTP clock on every coprocessor clock cycle, so we
1706 * compute the delta in terms of coprocessor clocks.
1707 */
1708 delta = (u64)ppb << 32;
1709 do_div(delta, oct->coproc_clock_rate);
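	/* Illustration only (the clock rate here is hypothetical): with a
	 * 1 GHz coprocessor clock the nominal COMP value is 1 ns per tick in
	 * 32.32 fixed point (1 << 32), and a +1000 ppb request gives
	 * delta = (1000 << 32) / 1e9 ~= 4295, i.e. roughly 1e-6 ns extra per
	 * tick, which is 1000 ppb of the 1 ns step.
	 */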
1710
1711 spin_lock_irqsave(&lio->ptp_lock, flags);
1712 comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1713 if (neg_adj)
1714 comp -= delta;
1715 else
1716 comp += delta;
1717 lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1718 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1719
1720 return 0;
1721}
1722
1723/**
1724 * \brief Adjust ptp time
1725 * @param ptp PTP clock info
1726 * @param delta how much to adjust by, in nanosecs
1727 */
1728static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1729{
1730 unsigned long flags;
1731 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1732
1733 spin_lock_irqsave(&lio->ptp_lock, flags);
1734 lio->ptp_adjust += delta;
1735 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1736
1737 return 0;
1738}
1739
1740/**
1741 * \brief Get hardware clock time, including any adjustment
1742 * @param ptp PTP clock info
1743 * @param ts timespec
1744 */
1745static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1746 struct timespec64 *ts)
1747{
1748 u64 ns;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001749 unsigned long flags;
1750 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1751 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1752
1753 spin_lock_irqsave(&lio->ptp_lock, flags);
1754 ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1755 ns += lio->ptp_adjust;
1756 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1757
Kefeng Wang286af312016-01-27 17:34:37 +08001758 *ts = ns_to_timespec64(ns);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001759
1760 return 0;
1761}
1762
1763/**
1764 * \brief Set hardware clock time. Reset adjustment
1765 * @param ptp PTP clock info
1766 * @param ts timespec
1767 */
1768static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1769 const struct timespec64 *ts)
1770{
1771 u64 ns;
1772 unsigned long flags;
1773 struct lio *lio = container_of(ptp, struct lio, ptp_info);
1774 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1775
1776 ns = timespec_to_ns(ts);
1777
1778 spin_lock_irqsave(&lio->ptp_lock, flags);
1779 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1780 lio->ptp_adjust = 0;
1781 spin_unlock_irqrestore(&lio->ptp_lock, flags);
1782
1783 return 0;
1784}
1785
1786/**
 1787 * \brief PTP enable request handler (ancillary features not supported)
1788 * @param ptp PTP clock info
1789 * @param rq request
1790 * @param on is it on
1791 */
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -07001792static int
1793liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
1794 struct ptp_clock_request *rq __attribute__((unused)),
1795 int on __attribute__((unused)))
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001796{
1797 return -EOPNOTSUPP;
1798}
1799
1800/**
1801 * \brief Open PTP clock source
1802 * @param netdev network device
1803 */
1804static void oct_ptp_open(struct net_device *netdev)
1805{
1806 struct lio *lio = GET_LIO(netdev);
1807 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1808
1809 spin_lock_init(&lio->ptp_lock);
1810
1811 snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
1812 lio->ptp_info.owner = THIS_MODULE;
1813 lio->ptp_info.max_adj = 250000000;
1814 lio->ptp_info.n_alarm = 0;
1815 lio->ptp_info.n_ext_ts = 0;
1816 lio->ptp_info.n_per_out = 0;
1817 lio->ptp_info.pps = 0;
1818 lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
1819 lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1820 lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1821 lio->ptp_info.settime64 = liquidio_ptp_settime;
1822 lio->ptp_info.enable = liquidio_ptp_enable;
1823
1824 lio->ptp_adjust = 0;
1825
1826 lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1827 &oct->pci_dev->dev);
1828
1829 if (IS_ERR(lio->ptp_clock))
1830 lio->ptp_clock = NULL;
1831}
1832
1833/**
1834 * \brief Init PTP clock
1835 * @param oct octeon device
1836 */
1837static void liquidio_ptp_init(struct octeon_device *oct)
1838{
1839 u64 clock_comp, cfg;
1840
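	/* Nominal compensation value: nanoseconds advanced per coprocessor
	 * cycle, expressed in 32.32 fixed point (see the comment in
	 * liquidio_ptp_adjfreq above).
	 */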
1841 clock_comp = (u64)NSEC_PER_SEC << 32;
1842 do_div(clock_comp, oct->coproc_clock_rate);
1843 lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1844
1845 /* Enable */
1846 cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1847 lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1848}
1849
1850/**
1851 * \brief Load firmware to device
1852 * @param oct octeon device
1853 *
1854 * Maps device to firmware filename, requests firmware, and downloads it
1855 */
1856static int load_firmware(struct octeon_device *oct)
1857{
1858 int ret = 0;
1859 const struct firmware *fw;
1860 char fw_name[LIO_MAX_FW_FILENAME_LEN];
1861 char *tmp_fw_type;
1862
1863 if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
1864 sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
1865 dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
1866 return ret;
1867 }
1868
1869 if (fw_type[0] == '\0')
1870 tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1871 else
1872 tmp_fw_type = fw_type;
1873
1874 sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
1875 octeon_get_conf(oct)->card_name, tmp_fw_type,
1876 LIO_FW_NAME_SUFFIX);
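	/* For example, with the default "nic" firmware type this typically
	 * expands to something like "<fw_dir>lio_<card_name>_nic.bin"; the
	 * exact path depends on the LIO_FW_* macro definitions.
	 */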
1877
1878 ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
1879 if (ret) {
 1880		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
1881 fw_name);
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001882 release_firmware(fw);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001883 return ret;
1884 }
1885
1886 ret = octeon_download_firmware(oct, fw->data, fw->size);
1887
1888 release_firmware(fw);
1889
1890 return ret;
1891}
1892
1893/**
1894 * \brief Setup output queue
1895 * @param oct octeon device
1896 * @param q_no which queue
1897 * @param num_descs how many descriptors
1898 * @param desc_size size of each descriptor
1899 * @param app_ctx application context
1900 */
1901static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
1902 int desc_size, void *app_ctx)
1903{
1904 int ret_val = 0;
1905
1906 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
1907 /* droq creation and local register settings. */
1908 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
Amitoj Kaur Chawla08a965e2016-02-04 19:25:13 +05301909 if (ret_val < 0)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001910 return ret_val;
1911
1912 if (ret_val == 1) {
1913 dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
1914 return 0;
1915 }
1916 /* tasklet creation for the droq */
1917
1918 /* Enable the droq queues */
1919 octeon_set_droq_pkt_op(oct, q_no, 1);
1920
1921 /* Send Credit for Octeon Output queues. Credits are always
1922 * sent after the output queue is enabled.
1923 */
1924 writel(oct->droq[q_no]->max_count,
1925 oct->droq[q_no]->pkts_credit_reg);
1926
1927 return ret_val;
1928}
1929
1930/**
1931 * \brief Callback for getting interface configuration
1932 * @param status status of request
1933 * @param buf pointer to resp structure
1934 */
1935static void if_cfg_callback(struct octeon_device *oct,
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -07001936 u32 status __attribute__((unused)),
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001937 void *buf)
1938{
1939 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1940 struct liquidio_if_cfg_resp *resp;
1941 struct liquidio_if_cfg_context *ctx;
1942
1943 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1944 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
1945
1946 oct = lio_get_device(ctx->octeon_id);
1947 if (resp->status)
1948 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
1949 CVM_CAST64(resp->status));
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -07001950 WRITE_ONCE(ctx->cond, 1);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001951
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001952 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
1953 resp->cfg_info.liquidio_firmware_version);
1954
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001955 /* This barrier is required to be sure that the response has been
1956 * written fully before waking up the handler
1957 */
1958 wmb();
1959
1960 wake_up_interruptible(&ctx->wc);
1961}
1962
1963/**
1964 * \brief Select queue based on hash
1965 * @param dev Net device
1966 * @param skb sk_buff structure
1967 * @returns selected queue number
1968 */
1969static u16 select_q(struct net_device *dev, struct sk_buff *skb,
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -07001970 void *accel_priv __attribute__((unused)),
1971 select_queue_fallback_t fallback __attribute__((unused)))
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001972{
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07001973 u32 qindex = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001974 struct lio *lio;
1975
1976 lio = GET_LIO(dev);
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07001977 qindex = skb_tx_hash(dev, skb);
1978
1979 return (u16)(qindex % (lio->linfo.num_txpciq));
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001980}
1981
 1982/** Routine to push packets arriving on the Octeon interface up to the network layer.
1983 * @param oct_id - octeon device id.
1984 * @param skbuff - skbuff struct to be passed to network layer.
1985 * @param len - size of total data received.
1986 * @param rh - Control header associated with the packet
1987 * @param param - additional control data with the packet
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07001988 * @param arg - farg registered in droq_ops
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001989 */
1990static void
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -07001991liquidio_push_packet(u32 octeon_id __attribute__((unused)),
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001992 void *skbuff,
1993 u32 len,
1994 union octeon_rh *rh,
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07001995 void *param,
1996 void *arg)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001997{
1998 struct napi_struct *napi = param;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001999 struct sk_buff *skb = (struct sk_buff *)skbuff;
2000 struct skb_shared_hwtstamps *shhwtstamps;
2001 u64 ns;
Raghu Vatsavayi0da0b772016-06-21 22:53:04 -07002002 u16 vtag = 0;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002003 struct net_device *netdev = (struct net_device *)arg;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002004 struct octeon_droq *droq = container_of(param, struct octeon_droq,
2005 napi);
2006 if (netdev) {
2007 int packet_was_received;
2008 struct lio *lio = GET_LIO(netdev);
Raghu Vatsavayia5b37882016-06-14 16:54:48 -07002009 struct octeon_device *oct = lio->oct_dev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002010
2011 /* Do not proceed if the interface is not in RUNNING state. */
2012 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
2013 recv_buffer_free(skb);
2014 droq->stats.rx_dropped++;
2015 return;
2016 }
2017
2018 skb->dev = netdev;
2019
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07002020 skb_record_rx_queue(skb, droq->q_no);
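		/* For packets larger than MIN_SKB_SIZE, copy only the first
		 * MIN_SKB_SIZE bytes into the linear area and attach the rest
		 * of the receive page as a frag; smaller packets are copied
		 * in full and the page is released.
		 */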
Raghu Vatsavayicabeb132016-06-14 16:54:47 -07002021 if (likely(len > MIN_SKB_SIZE)) {
2022 struct octeon_skb_page_info *pg_info;
2023 unsigned char *va;
2024
2025 pg_info = ((struct octeon_skb_page_info *)(skb->cb));
2026 if (pg_info->page) {
2027 /* For Paged allocation use the frags */
2028 va = page_address(pg_info->page) +
2029 pg_info->page_offset;
2030 memcpy(skb->data, va, MIN_SKB_SIZE);
2031 skb_put(skb, MIN_SKB_SIZE);
2032 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
2033 pg_info->page,
2034 pg_info->page_offset +
2035 MIN_SKB_SIZE,
2036 len - MIN_SKB_SIZE,
2037 LIO_RXBUFFER_SZ);
2038 }
2039 } else {
2040 struct octeon_skb_page_info *pg_info =
2041 ((struct octeon_skb_page_info *)(skb->cb));
2042 skb_copy_to_linear_data(skb, page_address(pg_info->page)
2043 + pg_info->page_offset, len);
2044 skb_put(skb, len);
2045 put_page(pg_info->page);
2046 }
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07002047
Raghu Vatsavayia5b37882016-06-14 16:54:48 -07002048 if (((oct->chip_id == OCTEON_CN66XX) ||
2049 (oct->chip_id == OCTEON_CN68XX)) &&
2050 ptp_enable) {
2051 if (rh->r_dh.has_hwtstamp) {
2052 /* timestamp is included from the hardware at
2053 * the beginning of the packet.
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002054 */
Raghu Vatsavayia5b37882016-06-14 16:54:48 -07002055 if (ifstate_check
2056 (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
2057 /* Nanoseconds are in the first 64-bits
2058 * of the packet.
2059 */
2060 memcpy(&ns, (skb->data), sizeof(ns));
2061 shhwtstamps = skb_hwtstamps(skb);
2062 shhwtstamps->hwtstamp =
2063 ns_to_ktime(ns +
2064 lio->ptp_adjust);
2065 }
2066 skb_pull(skb, sizeof(ns));
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002067 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002068 }
2069
2070 skb->protocol = eth_type_trans(skb, skb->dev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002071 if ((netdev->features & NETIF_F_RXCSUM) &&
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07002072 (((rh->r_dh.encap_on) &&
2073 (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
2074 (!(rh->r_dh.encap_on) &&
2075 (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002076 /* checksum has already been verified */
2077 skb->ip_summed = CHECKSUM_UNNECESSARY;
2078 else
2079 skb->ip_summed = CHECKSUM_NONE;
2080
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07002081		/* Set the encapsulation field based on the status received
2082 * from the firmware
2083 */
2084 if (rh->r_dh.encap_on) {
2085 skb->encapsulation = 1;
2086 skb->csum_level = 1;
2087 droq->stats.rx_vxlan++;
2088 }
2089
Raghu Vatsavayi0da0b772016-06-21 22:53:04 -07002090 /* inbound VLAN tag */
2091 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
2092 (rh->r_dh.vlan != 0)) {
2093 u16 vid = rh->r_dh.vlan;
2094 u16 priority = rh->r_dh.priority;
2095
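			/* Assemble the 802.1Q TCI: PCP in bits 15:13, VID in
			 * bits 11:0; the DEI/CFI bit is left clear.
			 */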
2096 vtag = priority << 13 | vid;
2097 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
2098 }
2099
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002100 packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
2101
2102 if (packet_was_received) {
2103 droq->stats.rx_bytes_received += len;
2104 droq->stats.rx_pkts_received++;
2105 netdev->last_rx = jiffies;
2106 } else {
2107 droq->stats.rx_dropped++;
2108 netif_info(lio, rx_err, lio->netdev,
2109 "droq:%d error rx_dropped:%llu\n",
2110 droq->q_no, droq->stats.rx_dropped);
2111 }
2112
2113 } else {
2114 recv_buffer_free(skb);
2115 }
2116}
2117
2118/**
2119 * \brief wrapper for calling napi_schedule
2120 * @param param parameters to pass to napi_schedule
2121 *
2122 * Used when scheduling on different CPUs
2123 */
2124static void napi_schedule_wrapper(void *param)
2125{
2126 struct napi_struct *napi = param;
2127
2128 napi_schedule(napi);
2129}
2130
2131/**
2132 * \brief callback when receive interrupt occurs and we are in NAPI mode
2133 * @param arg pointer to octeon output queue
2134 */
2135static void liquidio_napi_drv_callback(void *arg)
2136{
2137 struct octeon_droq *droq = arg;
2138 int this_cpu = smp_processor_id();
2139
2140 if (droq->cpu_id == this_cpu) {
2141 napi_schedule(&droq->napi);
2142 } else {
2143 struct call_single_data *csd = &droq->csd;
2144
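		/* The droq is serviced by a different CPU; hand the NAPI
		 * schedule off to that core with an async cross-CPU call so
		 * RX processing stays on its designated CPU.
		 */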
2145 csd->func = napi_schedule_wrapper;
2146 csd->info = &droq->napi;
2147 csd->flags = 0;
2148
2149 smp_call_function_single_async(droq->cpu_id, csd);
2150 }
2151}
2152
2153/**
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002154 * \brief Entry point for NAPI polling
2155 * @param napi NAPI structure
2156 * @param budget maximum number of items to process
2157 */
2158static int liquidio_napi_poll(struct napi_struct *napi, int budget)
2159{
2160 struct octeon_droq *droq;
2161 int work_done;
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002162 int tx_done = 0, iq_no;
2163 struct octeon_instr_queue *iq;
2164 struct octeon_device *oct;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002165
2166 droq = container_of(napi, struct octeon_droq, napi);
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002167 oct = droq->oct_dev;
2168 iq_no = droq->q_no;
2169 /* Handle Droq descriptors */
2170 work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
2171 POLL_EVENT_PROCESS_PKTS,
2172 budget);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002173
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002174 /* Flush the instruction queue */
2175 iq = oct->instr_queue[iq_no];
2176 if (iq) {
2177 /* Process iq buffers with in the budget limits */
2178 tx_done = octeon_flush_iq(oct, iq, 1, budget);
2179 /* Update iq read-index rather than waiting for next interrupt.
 2180		 * Return without completing NAPI if tx_done is false.
2181 */
2182 update_txq_status(oct, iq_no);
2183 /*tx_done = (iq->flush_index == iq->octeon_read_index);*/
2184 } else {
2185 dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
2186 __func__, iq_no);
2187 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002188
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002189 if ((work_done < budget) && (tx_done)) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002190 napi_complete(napi);
2191 octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
2192 POLL_EVENT_ENABLE_INTR, 0);
2193 return 0;
2194 }
2195
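	/* If TX work is still pending, return the full budget so NAPI keeps
	 * polling this context; otherwise report the RX work actually done.
	 */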
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002196 return (!tx_done) ? (budget) : (work_done);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002197}
2198
2199/**
2200 * \brief Setup input and output queues
2201 * @param octeon_dev octeon device
Raghu Vatsavayia2c64b62016-07-03 13:56:55 -07002202 * @param ifidx Interface Index
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002203 *
2204 * Note: Queues are with respect to the octeon device. Thus
2205 * an input queue is for egress packets, and output queues
2206 * are for ingress packets.
2207 */
2208static inline int setup_io_queues(struct octeon_device *octeon_dev,
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002209 int ifidx)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002210{
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002211 struct octeon_droq_ops droq_ops;
2212 struct net_device *netdev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002213 static int cpu_id;
2214 static int cpu_id_modulus;
2215 struct octeon_droq *droq;
2216 struct napi_struct *napi;
2217 int q, q_no, retval = 0;
2218 struct lio *lio;
2219 int num_tx_descs;
2220
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002221 netdev = octeon_dev->props[ifidx].netdev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002222
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002223 lio = GET_LIO(netdev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002224
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002225 memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
2226
2227 droq_ops.fptr = liquidio_push_packet;
2228 droq_ops.farg = (void *)netdev;
2229
2230 droq_ops.poll_mode = 1;
2231 droq_ops.napi_fn = liquidio_napi_drv_callback;
2232 cpu_id = 0;
2233 cpu_id_modulus = num_present_cpus();
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002234
2235 /* set up DROQs. */
2236 for (q = 0; q < lio->linfo.num_rxpciq; q++) {
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07002237 q_no = lio->linfo.rxpciq[q].s.q_no;
2238 dev_dbg(&octeon_dev->pci_dev->dev,
2239 "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
2240 q, q_no);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002241 retval = octeon_setup_droq(octeon_dev, q_no,
2242 CFG_GET_NUM_RX_DESCS_NIC_IF
2243 (octeon_get_conf(octeon_dev),
2244 lio->ifidx),
2245 CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
2246 (octeon_get_conf(octeon_dev),
2247 lio->ifidx), NULL);
2248 if (retval) {
2249 dev_err(&octeon_dev->pci_dev->dev,
Raghu Vatsavayi32581242016-08-31 11:03:20 -07002250 "%s : Runtime DROQ(RxQ) creation failed.\n",
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002251 __func__);
2252 return 1;
2253 }
2254
2255 droq = octeon_dev->droq[q_no];
2256 napi = &droq->napi;
Raghu Vatsavayi1b7c55c2016-08-31 11:03:27 -07002257 dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n",
2258 (u64)netdev, (u64)octeon_dev, octeon_dev->pf_num);
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002259 netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002260
2261 /* designate a CPU for this droq */
2262 droq->cpu_id = cpu_id;
2263 cpu_id++;
2264 if (cpu_id >= cpu_id_modulus)
2265 cpu_id = 0;
2266
2267 octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
2268 }
2269
2270 /* set up IQs. */
2271 for (q = 0; q < lio->linfo.num_txpciq; q++) {
2272 num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
2273 (octeon_dev),
2274 lio->ifidx);
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002275 retval = octeon_setup_iq(octeon_dev, ifidx, q,
2276 lio->linfo.txpciq[q], num_tx_descs,
2277 netdev_get_tx_queue(netdev, q));
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002278 if (retval) {
2279 dev_err(&octeon_dev->pci_dev->dev,
2280 " %s : Runtime IQ(TxQ) creation failed.\n",
2281 __func__);
2282 return 1;
2283 }
2284 }
2285
2286 return 0;
2287}
2288
2289/**
2290 * \brief Poll routine for checking transmit queue status
2291 * @param work work_struct data structure
2292 */
2293static void octnet_poll_check_txq_status(struct work_struct *work)
2294{
2295 struct cavium_wk *wk = (struct cavium_wk *)work;
2296 struct lio *lio = (struct lio *)wk->ctxptr;
2297
2298 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
2299 return;
2300
2301 check_txq_status(lio);
2302 queue_delayed_work(lio->txq_status_wq.wq,
2303 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
2304}
2305
2306/**
2307 * \brief Sets up the txq poll check
2308 * @param netdev network device
2309 */
Raghu Vatsavayi5b07aee2016-08-31 11:03:28 -07002310static inline int setup_tx_poll_fn(struct net_device *netdev)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002311{
2312 struct lio *lio = GET_LIO(netdev);
2313 struct octeon_device *oct = lio->oct_dev;
2314
Bhaktipriya Shridhar292b9da2016-06-08 01:47:59 +05302315 lio->txq_status_wq.wq = alloc_workqueue("txq-status",
2316 WQ_MEM_RECLAIM, 0);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002317 if (!lio->txq_status_wq.wq) {
2318 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
Raghu Vatsavayi5b07aee2016-08-31 11:03:28 -07002319 return -1;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002320 }
2321 INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
2322 octnet_poll_check_txq_status);
2323 lio->txq_status_wq.wk.ctxptr = lio;
2324 queue_delayed_work(lio->txq_status_wq.wq,
2325 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
Raghu Vatsavayi5b07aee2016-08-31 11:03:28 -07002326 return 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002327}
2328
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002329static inline void cleanup_tx_poll_fn(struct net_device *netdev)
2330{
2331 struct lio *lio = GET_LIO(netdev);
2332
Raghu Vatsavayi5b07aee2016-08-31 11:03:28 -07002333 if (lio->txq_status_wq.wq) {
2334 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
2335 destroy_workqueue(lio->txq_status_wq.wq);
2336 }
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002337}
2338
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002339/**
2340 * \brief Net device open for LiquidIO
2341 * @param netdev network device
2342 */
2343static int liquidio_open(struct net_device *netdev)
2344{
2345 struct lio *lio = GET_LIO(netdev);
2346 struct octeon_device *oct = lio->oct_dev;
2347 struct napi_struct *napi, *n;
2348
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002349 if (oct->props[lio->ifidx].napi_enabled == 0) {
2350 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
2351 napi_enable(napi);
2352
2353 oct->props[lio->ifidx].napi_enabled = 1;
2354 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002355
2356 oct_ptp_open(netdev);
2357
2358 ifstate_set(lio, LIO_IFSTATE_RUNNING);
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002359
Raghu Vatsavayi5b07aee2016-08-31 11:03:28 -07002360 if (OCTEON_CN23XX_PF(oct)) {
2361 if (!oct->msix_on)
2362 if (setup_tx_poll_fn(netdev))
2363 return -1;
2364 } else {
2365 if (setup_tx_poll_fn(netdev))
2366 return -1;
2367 }
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002368
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002369 start_txq(netdev);
2370
2371 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002372
2373 /* tell Octeon to start forwarding packets to host */
2374 send_rx_ctrl_cmd(lio, 1);
2375
2376 /* Ready for link status updates */
2377 lio->intf_open = 1;
2378
2379 dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
2380 netdev->name);
2381
2382 return 0;
2383}
2384
2385/**
2386 * \brief Net device stop for LiquidIO
2387 * @param netdev network device
2388 */
2389static int liquidio_stop(struct net_device *netdev)
2390{
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002391 struct lio *lio = GET_LIO(netdev);
2392 struct octeon_device *oct = lio->oct_dev;
2393
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002394 ifstate_reset(lio, LIO_IFSTATE_RUNNING);
2395
2396 netif_tx_disable(netdev);
2397
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002398 /* Inform that netif carrier is down */
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002399 netif_carrier_off(netdev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002400 lio->intf_open = 0;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002401 lio->linfo.link.s.link_up = 0;
2402 lio->link_changes++;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002403
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002404 /* Pause for a moment and wait for Octeon to flush out (to the wire) any
2405 * egress packets that are in-flight.
2406 */
2407 set_current_state(TASK_INTERRUPTIBLE);
2408 schedule_timeout(msecs_to_jiffies(100));
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002409
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002410 /* Now it should be safe to tell Octeon that nic interface is down. */
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002411 send_rx_ctrl_cmd(lio, 0);
2412
Raghu Vatsavayi5b07aee2016-08-31 11:03:28 -07002413 if (OCTEON_CN23XX_PF(oct)) {
2414 if (!oct->msix_on)
2415 cleanup_tx_poll_fn(netdev);
2416 } else {
2417 cleanup_tx_poll_fn(netdev);
2418 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002419
2420 if (lio->ptp_clock) {
2421 ptp_clock_unregister(lio->ptp_clock);
2422 lio->ptp_clock = NULL;
2423 }
2424
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002425 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002426
2427 return 0;
2428}
2429
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002430/**
2431 * \brief Converts a mask based on net device flags
2432 * @param netdev network device
2433 *
2434 * This routine generates a octnet_ifflags mask from the net device flags
2435 * received from the OS.
2436 */
2437static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
2438{
2439 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
2440
2441 if (netdev->flags & IFF_PROMISC)
2442 f |= OCTNET_IFFLAG_PROMISC;
2443
2444 if (netdev->flags & IFF_ALLMULTI)
2445 f |= OCTNET_IFFLAG_ALLMULTI;
2446
2447 if (netdev->flags & IFF_MULTICAST) {
2448 f |= OCTNET_IFFLAG_MULTICAST;
2449
2450 /* Accept all multicast addresses if there are more than we
2451 * can handle
2452 */
2453 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
2454 f |= OCTNET_IFFLAG_ALLMULTI;
2455 }
2456
2457 if (netdev->flags & IFF_BROADCAST)
2458 f |= OCTNET_IFFLAG_BROADCAST;
2459
2460 return f;
2461}
2462
2463/**
2464 * \brief Net device set_multicast_list
2465 * @param netdev network device
2466 */
2467static void liquidio_set_mcast_list(struct net_device *netdev)
2468{
2469 struct lio *lio = GET_LIO(netdev);
2470 struct octeon_device *oct = lio->oct_dev;
2471 struct octnic_ctrl_pkt nctrl;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002472 struct netdev_hw_addr *ha;
2473 u64 *mc;
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -07002474 int ret;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002475 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
2476
2477 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2478
2479 /* Create a ctrl pkt command to be sent to core app. */
2480 nctrl.ncmd.u64 = 0;
2481 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002482 nctrl.ncmd.s.param1 = get_new_flags(netdev);
2483 nctrl.ncmd.s.param2 = mc_count;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002484 nctrl.ncmd.s.more = mc_count;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002485 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002486 nctrl.netpndev = (u64)netdev;
2487 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2488
2489 /* copy all the addresses into the udd */
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002490 mc = &nctrl.udd[0];
2491 netdev_for_each_mc_addr(ha, netdev) {
2492 *mc = 0;
2493 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
2494 /* no need to swap bytes */
2495
2496 if (++mc > &nctrl.udd[mc_count])
2497 break;
2498 }
2499
2500 /* Apparently, any activity in this call from the kernel has to
2501 * be atomic. So we won't wait for response.
2502 */
2503 nctrl.wait_time = 0;
2504
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002505 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002506 if (ret < 0) {
2507 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2508 ret);
2509 }
2510}
2511
2512/**
2513 * \brief Net device set_mac_address
2514 * @param netdev network device
2515 */
2516static int liquidio_set_mac(struct net_device *netdev, void *p)
2517{
2518 int ret = 0;
2519 struct lio *lio = GET_LIO(netdev);
2520 struct octeon_device *oct = lio->oct_dev;
2521 struct sockaddr *addr = (struct sockaddr *)p;
2522 struct octnic_ctrl_pkt nctrl;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002523
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002524 if (!is_valid_ether_addr(addr->sa_data))
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002525 return -EADDRNOTAVAIL;
2526
2527 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2528
2529 nctrl.ncmd.u64 = 0;
2530 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002531 nctrl.ncmd.s.param1 = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002532 nctrl.ncmd.s.more = 1;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002533 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002534 nctrl.netpndev = (u64)netdev;
2535 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2536 nctrl.wait_time = 100;
2537
2538 nctrl.udd[0] = 0;
2539 /* The MAC Address is presented in network byte order. */
2540 memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2541
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002542 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002543 if (ret < 0) {
2544 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2545 return -ENOMEM;
2546 }
2547 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2548 memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2549
2550 return 0;
2551}
2552
2553/**
2554 * \brief Net device get_stats
2555 * @param netdev network device
2556 */
2557static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
2558{
2559 struct lio *lio = GET_LIO(netdev);
2560 struct net_device_stats *stats = &netdev->stats;
2561 struct octeon_device *oct;
2562 u64 pkts = 0, drop = 0, bytes = 0;
2563 struct oct_droq_stats *oq_stats;
2564 struct oct_iq_stats *iq_stats;
2565 int i, iq_no, oq_no;
2566
2567 oct = lio->oct_dev;
2568
2569 for (i = 0; i < lio->linfo.num_txpciq; i++) {
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07002570 iq_no = lio->linfo.txpciq[i].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002571 iq_stats = &oct->instr_queue[iq_no]->stats;
2572 pkts += iq_stats->tx_done;
2573 drop += iq_stats->tx_dropped;
2574 bytes += iq_stats->tx_tot_bytes;
2575 }
2576
2577 stats->tx_packets = pkts;
2578 stats->tx_bytes = bytes;
2579 stats->tx_dropped = drop;
2580
2581 pkts = 0;
2582 drop = 0;
2583 bytes = 0;
2584
2585 for (i = 0; i < lio->linfo.num_rxpciq; i++) {
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07002586 oq_no = lio->linfo.rxpciq[i].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002587 oq_stats = &oct->droq[oq_no]->stats;
2588 pkts += oq_stats->rx_pkts_received;
2589 drop += (oq_stats->rx_dropped +
2590 oq_stats->dropped_nodispatch +
2591 oq_stats->dropped_toomany +
2592 oq_stats->dropped_nomem);
2593 bytes += oq_stats->rx_bytes_received;
2594 }
2595
2596 stats->rx_bytes = bytes;
2597 stats->rx_packets = pkts;
2598 stats->rx_dropped = drop;
2599
2600 return stats;
2601}
2602
2603/**
2604 * \brief Net device change_mtu
2605 * @param netdev network device
2606 */
2607static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
2608{
2609 struct lio *lio = GET_LIO(netdev);
2610 struct octeon_device *oct = lio->oct_dev;
2611 struct octnic_ctrl_pkt nctrl;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002612 int ret = 0;
2613
Raghu Vatsavayi4c2743f2016-07-03 13:56:53 -07002614 /* Limit the MTU to make sure the ethernet packets are between 68 bytes
2615 * and 16000 bytes
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002616 */
Raghu Vatsavayi4c2743f2016-07-03 13:56:53 -07002617 if ((new_mtu < LIO_MIN_MTU_SIZE) ||
2618 (new_mtu > LIO_MAX_MTU_SIZE)) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002619 dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
2620 dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n",
Raghu Vatsavayi4c2743f2016-07-03 13:56:53 -07002621 LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002622 return -EINVAL;
2623 }
2624
2625 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2626
2627 nctrl.ncmd.u64 = 0;
2628 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002629 nctrl.ncmd.s.param1 = new_mtu;
2630 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002631 nctrl.wait_time = 100;
2632 nctrl.netpndev = (u64)netdev;
2633 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2634
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002635 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002636 if (ret < 0) {
2637 dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
2638 return -1;
2639 }
2640
2641 lio->mtu = new_mtu;
2642
2643 return 0;
2644}
2645
2646/**
2647 * \brief Handler for SIOCSHWTSTAMP ioctl
2648 * @param netdev network device
2649 * @param ifr interface request
2651 */
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -07002652static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002653{
2654 struct hwtstamp_config conf;
2655 struct lio *lio = GET_LIO(netdev);
2656
2657 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2658 return -EFAULT;
2659
2660 if (conf.flags)
2661 return -EINVAL;
2662
2663 switch (conf.tx_type) {
2664 case HWTSTAMP_TX_ON:
2665 case HWTSTAMP_TX_OFF:
2666 break;
2667 default:
2668 return -ERANGE;
2669 }
2670
2671 switch (conf.rx_filter) {
2672 case HWTSTAMP_FILTER_NONE:
2673 break;
2674 case HWTSTAMP_FILTER_ALL:
2675 case HWTSTAMP_FILTER_SOME:
2676 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2677 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2678 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2679 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2680 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2681 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2682 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2683 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2684 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2685 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2686 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2687 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2688 conf.rx_filter = HWTSTAMP_FILTER_ALL;
2689 break;
2690 default:
2691 return -ERANGE;
2692 }
2693
2694 if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2695 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2696
2697 else
2698 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2699
2700 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2701}
2702
2703/**
2704 * \brief ioctl handler
2705 * @param netdev network device
2706 * @param ifr interface request
2707 * @param cmd command
2708 */
2709static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2710{
2711 switch (cmd) {
2712 case SIOCSHWTSTAMP:
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -07002713 return hwtstamp_ioctl(netdev, ifr);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002714 default:
2715 return -EOPNOTSUPP;
2716 }
2717}
2718
2719/**
2720 * \brief handle a Tx timestamp response
2721 * @param status response status
2722 * @param buf pointer to skb
2723 */
2724static void handle_timestamp(struct octeon_device *oct,
2725 u32 status,
2726 void *buf)
2727{
2728 struct octnet_buf_free_info *finfo;
2729 struct octeon_soft_command *sc;
2730 struct oct_timestamp_resp *resp;
2731 struct lio *lio;
2732 struct sk_buff *skb = (struct sk_buff *)buf;
2733
2734 finfo = (struct octnet_buf_free_info *)skb->cb;
2735 lio = finfo->lio;
2736 sc = finfo->sc;
2737 oct = lio->oct_dev;
2738 resp = (struct oct_timestamp_resp *)sc->virtrptr;
2739
2740 if (status != OCTEON_REQUEST_DONE) {
2741 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2742 CVM_CAST64(status));
2743 resp->timestamp = 0;
2744 }
2745
2746 octeon_swap_8B_data(&resp->timestamp, 1);
2747
Colin Ian King19a6d152016-02-05 16:30:39 +00002748 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002749 struct skb_shared_hwtstamps ts;
2750 u64 ns = resp->timestamp;
2751
2752 netif_info(lio, tx_done, lio->netdev,
2753 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2754 skb, (unsigned long long)ns);
2755 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2756 skb_tstamp_tx(skb, &ts);
2757 }
2758
2759 octeon_free_soft_command(oct, sc);
Raghu Vatsavayicabeb132016-06-14 16:54:47 -07002760 tx_buffer_free(skb);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002761}
2762
 2763/** \brief Send a data packet that will be timestamped
2764 * @param oct octeon device
2765 * @param ndata pointer to network data
2766 * @param finfo pointer to private network data
2767 */
2768static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2769 struct octnic_data_pkt *ndata,
Raghu Vatsavayi32581242016-08-31 11:03:20 -07002770 struct octnet_buf_free_info *finfo)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002771{
2772 int retval;
2773 struct octeon_soft_command *sc;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002774 struct lio *lio;
2775 int ring_doorbell;
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07002776 u32 len;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002777
2778 lio = finfo->lio;
2779
2780 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2781 sizeof(struct oct_timestamp_resp));
2782 finfo->sc = sc;
2783
2784 if (!sc) {
2785 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2786 return IQ_SEND_FAILED;
2787 }
2788
2789 if (ndata->reqtype == REQTYPE_NORESP_NET)
2790 ndata->reqtype = REQTYPE_RESP_NET;
2791 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2792 ndata->reqtype = REQTYPE_RESP_NET_SG;
2793
2794 sc->callback = handle_timestamp;
2795 sc->callback_arg = finfo->skb;
2796 sc->iq_no = ndata->q_no;
2797
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07002798 len = (u32)((struct octeon_instr_ih2 *)(&sc->cmd.cmd2.ih2))->dlengsz;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002799
Raghu Vatsavayi32581242016-08-31 11:03:20 -07002800 ring_doorbell = 1;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002801 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07002802 sc, len, ndata->reqtype);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002803
Raghu Vatsavayiddc173a2016-06-14 16:54:43 -07002804 if (retval == IQ_SEND_FAILED) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002805 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2806 retval);
2807 octeon_free_soft_command(oct, sc);
2808 } else {
2809 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2810 }
2811
2812 return retval;
2813}
2814
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002815/** \brief Transmit network packets to the Octeon interface
2816 * @param skbuff skbuff struct to be passed to network layer.
2817 * @param netdev pointer to network device
2818 * @returns whether the packet was transmitted to the device okay or not
2819 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
2820 */
2821static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2822{
2823 struct lio *lio;
2824 struct octnet_buf_free_info *finfo;
2825 union octnic_cmd_setup cmdsetup;
2826 struct octnic_data_pkt ndata;
2827 struct octeon_device *oct;
2828 struct oct_iq_stats *stats;
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07002829 struct octeon_instr_irh *irh;
2830 union tx_info *tx_info;
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07002831 int status = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002832 int q_idx = 0, iq_no = 0;
Raghu Vatsavayi32581242016-08-31 11:03:20 -07002833 int j;
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07002834 u64 dptr = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002835 u32 tag = 0;
2836
2837 lio = GET_LIO(netdev);
2838 oct = lio->oct_dev;
2839
2840 if (netif_is_multiqueue(netdev)) {
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07002841 q_idx = skb->queue_mapping;
2842 q_idx = (q_idx % (lio->linfo.num_txpciq));
2843 tag = q_idx;
2844 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002845 } else {
2846 iq_no = lio->txq;
2847 }
2848
2849 stats = &oct->instr_queue[iq_no]->stats;
2850
2851 /* Check for all conditions in which the current packet cannot be
2852 * transmitted.
2853 */
2854 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002855 (!lio->linfo.link.s.link_up) ||
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002856 (skb->len <= 0)) {
2857 netif_info(lio, tx_err, lio->netdev,
2858 "Transmit failed link_status : %d\n",
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002859 lio->linfo.link.s.link_up);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002860 goto lio_xmit_failed;
2861 }
2862
2863 /* Use space in skb->cb to store info used to unmap and
2864 * free the buffers.
2865 */
2866 finfo = (struct octnet_buf_free_info *)skb->cb;
2867 finfo->lio = lio;
2868 finfo->skb = skb;
2869 finfo->sc = NULL;
2870
2871 /* Prepare the attributes for the data to be passed to OSI. */
2872 memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2873
2874 ndata.buf = (void *)finfo;
2875
2876 ndata.q_no = iq_no;
2877
2878 if (netif_is_multiqueue(netdev)) {
2879 if (octnet_iq_is_full(oct, ndata.q_no)) {
2880 /* defer sending if queue is full */
2881 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2882 ndata.q_no);
2883 stats->tx_iq_busy++;
2884 return NETDEV_TX_BUSY;
2885 }
2886 } else {
2887 if (octnet_iq_is_full(oct, lio->txq)) {
2888 /* defer sending if queue is full */
2889 stats->tx_iq_busy++;
2890 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
Raghu Vatsavayia2c64b62016-07-03 13:56:55 -07002891 lio->txq);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002892 return NETDEV_TX_BUSY;
2893 }
2894 }
2895 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
Raghu Vatsavayia2c64b62016-07-03 13:56:55 -07002896 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002897 */
2898
2899 ndata.datasize = skb->len;
2900
2901 cmdsetup.u64 = 0;
Raghu Vatsavayi7275ebf2016-06-14 16:54:49 -07002902 cmdsetup.s.iq_no = iq_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002903
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07002904 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2905 if (skb->encapsulation) {
2906 cmdsetup.s.tnl_csum = 1;
2907 stats->tx_vxlan++;
2908 } else {
2909 cmdsetup.s.transport_csum = 1;
2910 }
2911 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002912 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2913 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2914 cmdsetup.s.timestamp = 1;
2915 }
2916
2917 if (skb_shinfo(skb)->nr_frags == 0) {
2918 cmdsetup.s.u.datasize = skb->len;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002919 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
Raghu Vatsavayia2c64b62016-07-03 13:56:55 -07002920
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002921 /* Offload checksum calculation for TCP/UDP packets */
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07002922 dptr = dma_map_single(&oct->pci_dev->dev,
2923 skb->data,
2924 skb->len,
2925 DMA_TO_DEVICE);
2926 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002927 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2928 __func__);
2929 return NETDEV_TX_BUSY;
2930 }
2931
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07002932 ndata.cmd.cmd2.dptr = dptr;
2933 finfo->dptr = dptr;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002934 ndata.reqtype = REQTYPE_NORESP_NET;
2935
2936 } else {
2937 int i, frags;
2938 struct skb_frag_struct *frag;
2939 struct octnic_gather *g;
2940
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07002941 spin_lock(&lio->glist_lock[q_idx]);
2942 g = (struct octnic_gather *)
2943 list_delete_head(&lio->glist[q_idx]);
2944 spin_unlock(&lio->glist_lock[q_idx]);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002945
2946 if (!g) {
2947 netif_info(lio, tx_err, lio->netdev,
2948 "Transmit scatter gather: glist null!\n");
2949 goto lio_xmit_failed;
2950 }
2951
2952 cmdsetup.s.gather = 1;
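		/* One gather pointer covers the linear skb data, plus one per
		 * page fragment.
		 */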
2953 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002954 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002955
2956 memset(g->sg, 0, g->sg_size);
2957
2958 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2959 skb->data,
2960 (skb->len - skb->data_len),
2961 DMA_TO_DEVICE);
2962 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2963 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2964 __func__);
2965 return NETDEV_TX_BUSY;
2966 }
2967 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2968
2969 frags = skb_shinfo(skb)->nr_frags;
2970 i = 1;
2971 while (frags--) {
2972 frag = &skb_shinfo(skb)->frags[i - 1];
2973
2974 g->sg[(i >> 2)].ptr[(i & 3)] =
2975 dma_map_page(&oct->pci_dev->dev,
2976 frag->page.p,
2977 frag->page_offset,
2978 frag->size,
2979 DMA_TO_DEVICE);
2980
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07002981 if (dma_mapping_error(&oct->pci_dev->dev,
2982 g->sg[i >> 2].ptr[i & 3])) {
2983 dma_unmap_single(&oct->pci_dev->dev,
2984 g->sg[0].ptr[0],
2985 skb->len - skb->data_len,
2986 DMA_TO_DEVICE);
2987 for (j = 1; j < i; j++) {
2988 frag = &skb_shinfo(skb)->frags[j - 1];
2989 dma_unmap_page(&oct->pci_dev->dev,
2990 g->sg[j >> 2].ptr[j & 3],
2991 frag->size,
2992 DMA_TO_DEVICE);
2993 }
2994 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2995 __func__);
2996 return NETDEV_TX_BUSY;
2997 }
2998
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002999 add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
3000 i++;
3001 }
3002
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07003003 dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
3004 g->sg_size, DMA_TO_DEVICE);
3005 dptr = g->sg_dma_ptr;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003006
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07003007 ndata.cmd.cmd2.dptr = dptr;
3008 finfo->dptr = dptr;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003009 finfo->g = g;
3010
3011 ndata.reqtype = REQTYPE_NORESP_NET_SG;
3012 }
3013
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07003014 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
3015 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003016
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07003017 if (skb_shinfo(skb)->gso_size) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003018 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
3019 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07003020 stats->tx_gso++;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003021 }
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07003022
Raghu Vatsavayi0da0b772016-06-21 22:53:04 -07003023 /* HW insert VLAN tag */
3024 if (skb_vlan_tag_present(skb)) {
3025 irh->priority = skb_vlan_tag_get(skb) >> 13;
3026 irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
3027 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003028
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003029 if (unlikely(cmdsetup.s.timestamp))
Raghu Vatsavayi32581242016-08-31 11:03:20 -07003030 status = send_nic_timestamp_pkt(oct, &ndata, finfo);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003031 else
Raghu Vatsavayi32581242016-08-31 11:03:20 -07003032 status = octnet_send_nic_data_pkt(oct, &ndata);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003033 if (status == IQ_SEND_FAILED)
3034 goto lio_xmit_failed;
3035
3036 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
3037
3038 if (status == IQ_SEND_STOP)
3039 stop_q(lio->netdev, q_idx);
3040
Florian Westphal860e9532016-05-03 16:33:13 +02003041 netif_trans_update(netdev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003042
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07003043 if (skb_shinfo(skb)->gso_size)
3044 stats->tx_done += skb_shinfo(skb)->gso_segs;
3045 else
3046 stats->tx_done++;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003047 stats->tx_tot_bytes += skb->len;
3048
3049 return NETDEV_TX_OK;
3050
3051lio_xmit_failed:
3052 stats->tx_dropped++;
3053 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
3054 iq_no, stats->tx_dropped);
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07003055 if (dptr)
3056 dma_unmap_single(&oct->pci_dev->dev, dptr,
3057 ndata.datasize, DMA_TO_DEVICE);
Raghu Vatsavayicabeb132016-06-14 16:54:47 -07003058 tx_buffer_free(skb);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003059 return NETDEV_TX_OK;
3060}
3061
3062/** \brief Network device Tx timeout
3063 * @param netdev pointer to network device
3064 */
3065static void liquidio_tx_timeout(struct net_device *netdev)
3066{
3067 struct lio *lio;
3068
3069 lio = GET_LIO(netdev);
3070
3071 netif_info(lio, tx_err, lio->netdev,
3072 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
3073 netdev->stats.tx_dropped);
Florian Westphal860e9532016-05-03 16:33:13 +02003074 netif_trans_update(netdev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003075 txqs_wake(netdev);
3076}
3077
Raghu Vatsavayi63245f22016-06-21 22:53:05 -07003078static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
3079 __be16 proto __attribute__((unused)),
3080 u16 vid)
3081{
3082 struct lio *lio = GET_LIO(netdev);
3083 struct octeon_device *oct = lio->oct_dev;
3084 struct octnic_ctrl_pkt nctrl;
3085 int ret = 0;
3086
3087 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3088
3089 nctrl.ncmd.u64 = 0;
3090 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
3091 nctrl.ncmd.s.param1 = vid;
3092 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3093 nctrl.wait_time = 100;
3094 nctrl.netpndev = (u64)netdev;
3095 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3096
3097 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3098 if (ret < 0) {
3099 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
3100 ret);
3101 }
3102
3103 return ret;
3104}
3105
3106static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
3107 __be16 proto __attribute__((unused)),
3108 u16 vid)
3109{
3110 struct lio *lio = GET_LIO(netdev);
3111 struct octeon_device *oct = lio->oct_dev;
3112 struct octnic_ctrl_pkt nctrl;
3113 int ret = 0;
3114
3115 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3116
3117 nctrl.ncmd.u64 = 0;
3118 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
3119 nctrl.ncmd.s.param1 = vid;
3120 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3121 nctrl.wait_time = 100;
3122 nctrl.netpndev = (u64)netdev;
3123 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3124
3125 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3126 if (ret < 0) {
 3127		dev_err(&oct->pci_dev->dev, "Kill VLAN filter failed in core (ret: 0x%x)\n",
3128 ret);
3129 }
3130 return ret;
3131}
3132
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07003133/** Sending command to enable/disable RX checksum offload
3134 * @param netdev pointer to network device
3135 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
 3136 * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE/
3137 * OCTNET_CMD_RXCSUM_DISABLE
3138 * @returns SUCCESS or FAILURE
3139 */
Nicholas Mc Guirec41419b2016-08-22 17:52:00 +02003140static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
3141 u8 rx_cmd)
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07003142{
3143 struct lio *lio = GET_LIO(netdev);
3144 struct octeon_device *oct = lio->oct_dev;
3145 struct octnic_ctrl_pkt nctrl;
3146 int ret = 0;
3147
	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

 3148	nctrl.ncmd.u64 = 0;
3149 nctrl.ncmd.s.cmd = command;
3150 nctrl.ncmd.s.param1 = rx_cmd;
3151 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3152 nctrl.wait_time = 100;
3153 nctrl.netpndev = (u64)netdev;
3154 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3155
3156 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3157 if (ret < 0) {
3158 dev_err(&oct->pci_dev->dev,
3159 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
3160 ret);
3161 }
3162 return ret;
3163}
3164
3165/** Sending command to add/delete VxLAN UDP port to firmware
3166 * @param netdev pointer to network device
3167 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
3168 * @param vxlan_port VxLAN port to be added or deleted
3169 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
3170 * OCTNET_CMD_VXLAN_PORT_DEL
3171 * @returns SUCCESS or FAILURE
3172 */
3173static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
3174 u16 vxlan_port, u8 vxlan_cmd_bit)
3175{
3176 struct lio *lio = GET_LIO(netdev);
3177 struct octeon_device *oct = lio->oct_dev;
3178 struct octnic_ctrl_pkt nctrl;
3179 int ret = 0;
3180
	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

 3181	nctrl.ncmd.u64 = 0;
3182 nctrl.ncmd.s.cmd = command;
3183 nctrl.ncmd.s.more = vxlan_cmd_bit;
3184 nctrl.ncmd.s.param1 = vxlan_port;
3185 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3186 nctrl.wait_time = 100;
3187 nctrl.netpndev = (u64)netdev;
3188 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3189
3190 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3191 if (ret < 0) {
3192 dev_err(&oct->pci_dev->dev,
3193 "VxLAN port add/delete failed in core (ret:0x%x)\n",
3194 ret);
3195 }
3196 return ret;
3197}
3198
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003199/** \brief Net device fix features
3200 * @param netdev pointer to network device
3201 * @param request features requested
3202 * @returns updated features list
3203 */
3204static netdev_features_t liquidio_fix_features(struct net_device *netdev,
3205 netdev_features_t request)
3206{
3207 struct lio *lio = netdev_priv(netdev);
3208
3209 if ((request & NETIF_F_RXCSUM) &&
3210 !(lio->dev_capability & NETIF_F_RXCSUM))
3211 request &= ~NETIF_F_RXCSUM;
3212
3213 if ((request & NETIF_F_HW_CSUM) &&
3214 !(lio->dev_capability & NETIF_F_HW_CSUM))
3215 request &= ~NETIF_F_HW_CSUM;
3216
3217 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
3218 request &= ~NETIF_F_TSO;
3219
3220 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
3221 request &= ~NETIF_F_TSO6;
3222
3223 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
3224 request &= ~NETIF_F_LRO;
3225
3226 /*Disable LRO if RXCSUM is off */
3227 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
3228 (lio->dev_capability & NETIF_F_LRO))
3229 request &= ~NETIF_F_LRO;
3230
3231 return request;
3232}

/** \brief Net device set features
 * @param netdev   pointer to network device
 * @param features features to enable/disable
 */
static int liquidio_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	struct lio *lio = netdev_priv(netdev);

	if (!((netdev->features ^ features) & NETIF_F_LRO))
		return 0;

	if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
	else if (!(features & NETIF_F_LRO) &&
		 (lio->dev_capability & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

	/* Tell the firmware to enable/disable Rx checksum offload when the
	 * setting is changed from ethtool
	 */
	if (!(netdev->features & NETIF_F_RXCSUM) &&
	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
	    (features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev,
					    OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
	else if ((netdev->features & NETIF_F_RXCSUM) &&
		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
		 !(features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_DISABLE);

	return 0;
}

static void liquidio_add_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_ADD);
}

static void liquidio_del_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_DEL);
}

static struct net_device_ops lionetdevops = {
	.ndo_open		= liquidio_open,
	.ndo_stop		= liquidio_stop,
	.ndo_start_xmit		= liquidio_xmit,
	.ndo_get_stats		= liquidio_get_stats,
	.ndo_set_mac_address	= liquidio_set_mac,
	.ndo_set_rx_mode	= liquidio_set_mcast_list,
	.ndo_tx_timeout		= liquidio_tx_timeout,

	.ndo_vlan_rx_add_vid	= liquidio_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= liquidio_vlan_rx_kill_vid,
	.ndo_change_mtu		= liquidio_change_mtu,
	.ndo_do_ioctl		= liquidio_ioctl,
	.ndo_fix_features	= liquidio_fix_features,
	.ndo_set_features	= liquidio_set_features,
	.ndo_udp_tunnel_add	= liquidio_add_vxlan_port,
	.ndo_udp_tunnel_del	= liquidio_del_vxlan_port,
};
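/* lionetdevops is left non-const because setup_nic_devices() below fills in
 * .ndo_select_queue at run time for interfaces created with more than one
 * input queue.
 */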

/** \brief Entry point for the liquidio module
 */
static int __init liquidio_init(void)
{
	int i;
	struct handshake *hs;

	init_completion(&first_stage);

	octeon_init_device_list(conf_type);

	if (liquidio_init_pci())
		return -EINVAL;

	wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));

	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
		hs = &handshake[i];
		if (hs->pci_dev) {
			wait_for_completion(&hs->init);
			if (!hs->init_ok) {
				/* init handshake failed */
				dev_err(&hs->pci_dev->dev,
					"Failed to init device\n");
				liquidio_deinit_pci();
				return -EIO;
			}
		}
	}

	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
		hs = &handshake[i];
		if (hs->pci_dev) {
			wait_for_completion_timeout(&hs->started,
						    msecs_to_jiffies(30000));
			if (!hs->started_ok) {
				/* starter handshake failed */
				dev_err(&hs->pci_dev->dev,
					"Firmware failed to start\n");
				liquidio_deinit_pci();
				return -EIO;
			}
		}
	}

	return 0;
}
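/* The two handshake waits above correspond to the per-device probe flow:
 * octeon_device_init() completes handshake[].init once the device (and, if
 * needed, its firmware) is up, and nic_starter() completes
 * handshake[].started after the NIC module has been initialized.
 */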

/** \brief Handler for NIC_INFO (link status) messages dispatched by firmware */
static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	int gmxport = 0;
	union oct_link_status *ls;
	int i;

	if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
			recv_pkt->buffer_size[0],
			recv_pkt->rh.r_nic_info.gmxport);
		goto nic_info_err;
	}

	gmxport = recv_pkt->rh.r_nic_info.gmxport;
	ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);

	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
	for (i = 0; i < oct->ifcount; i++) {
		if (oct->props[i].gmxport == gmxport) {
			update_link_status(oct->props[i].netdev, ls);
			break;
		}
	}

nic_info_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);
	return 0;
}

/**
 * \brief Setup network interfaces
 * @param octeon_dev octeon device
 *
 * Called during init time for each device. It assumes the NIC
 * is already up and running. The link information for each
 * interface is passed in link_info.
 */
static int setup_nic_devices(struct octeon_device *octeon_dev)
{
	struct lio *lio = NULL;
	struct net_device *netdev;
	u8 mac[6], i, j;
	struct octeon_soft_command *sc;
	struct liquidio_if_cfg_context *ctx;
	struct liquidio_if_cfg_resp *resp;
	struct octdev_props *props;
	int retval, num_iqueues, num_oqueues;
	union oct_nic_if_cfg if_cfg;
	unsigned int base_queue;
	unsigned int gmx_port_id;
	u32 resp_size, ctx_size, data_size;
	u32 ifidx_or_pfnum;
	struct lio_version *vdata;

	/* This is to handle link status changes */
	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_INFO,
				    lio_nic_info, octeon_dev);

	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
	 * They are handled directly.
	 */
	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
					free_netbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
					free_netsgbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
					free_netsgbuf_with_resp);

	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct liquidio_if_cfg_resp);
		ctx_size = sizeof(struct liquidio_if_cfg_context);
		data_size = sizeof(struct lio_version);
		sc = (struct octeon_soft_command *)
			octeon_alloc_soft_command(octeon_dev, data_size,
						  resp_size, ctx_size);
		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
		ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
		vdata = (struct lio_version *)sc->virtdptr;

		*((u64 *)vdata) = 0;
		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

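		/* Queue counts and the base queue come from the SR-IOV ring
		 * allocation on CN23XX PFs, and from the static device
		 * configuration on CN6XXX.
		 */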
		if (OCTEON_CN23XX_PF(octeon_dev)) {
			num_iqueues = octeon_dev->sriov_info.num_pf_rings;
			num_oqueues = octeon_dev->sriov_info.num_pf_rings;
			base_queue = octeon_dev->sriov_info.pf_srn;

			gmx_port_id = octeon_dev->pf_num;
			ifidx_or_pfnum = octeon_dev->pf_num;
		} else {
			num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			base_queue = CFG_GET_BASE_QUE_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			gmx_port_id = CFG_GET_GMXID_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			ifidx_or_pfnum = i;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"requesting config for interface %d, iqs %d, oqs %d\n",
			ifidx_or_pfnum, num_iqueues, num_oqueues);
		WRITE_ONCE(ctx->cond, 0);
		ctx->octeon_id = lio_get_device_id(octeon_dev);
		init_waitqueue_head(&ctx->wc);

		if_cfg.u64 = 0;
		if_cfg.s.num_iqueues = num_iqueues;
		if_cfg.s.num_oqueues = num_oqueues;
		if_cfg.s.base_queue = base_queue;
		if_cfg.s.gmx_port_id = gmx_port_id;

		sc->iq_no = 0;

		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
					    OPCODE_NIC_IF_CFG, 0,
					    if_cfg.u64, 0);

		sc->callback = if_cfg_callback;
		sc->callback_arg = sc;
		sc->wait_time = 3000;

		retval = octeon_send_soft_command(octeon_dev, sc);
		if (retval == IQ_SEND_FAILED) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed status: %x\n",
				retval);
			/* Soft instr is freed by driver in case of failure. */
			goto setup_nic_dev_fail;
		}

		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		sleep_cond(&ctx->wc, &ctx->cond);
		retval = resp->status;
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
			goto setup_nic_dev_fail;
		}

		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
				    (sizeof(struct liquidio_if_cfg_info)) >> 3);

		num_iqueues = hweight64(resp->cfg_info.iqmask);
		num_oqueues = hweight64(resp->cfg_info.oqmask);

		if (!(num_iqueues) || !(num_oqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
				resp->cfg_info.iqmask,
				resp->cfg_info.oqmask);
			goto setup_nic_dev_fail;
		}
		dev_dbg(&octeon_dev->pci_dev->dev,
			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
			num_iqueues, num_oqueues);
		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);

		if (!netdev) {
			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
			goto setup_nic_dev_fail;
		}

		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);

		if (num_iqueues > 1)
			lionetdevops.ndo_select_queue = select_q;

		/* Associate the routines that will handle different
		 * netdev tasks.
		 */
		netdev->netdev_ops = &lionetdevops;

		lio = GET_LIO(netdev);

		memset(lio, 0, sizeof(struct lio));

		lio->ifidx = ifidx_or_pfnum;

		props = &octeon_dev->props[i];
		props->gmxport = resp->cfg_info.linfo.gmxport;
		props->netdev = netdev;

		lio->linfo.num_rxpciq = num_oqueues;
		lio->linfo.num_txpciq = num_iqueues;
		for (j = 0; j < num_oqueues; j++) {
			lio->linfo.rxpciq[j].u64 =
				resp->cfg_info.linfo.rxpciq[j].u64;
		}
		for (j = 0; j < num_iqueues; j++) {
			lio->linfo.txpciq[j].u64 =
				resp->cfg_info.linfo.txpciq[j].u64;
		}
		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;

		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

		if (OCTEON_CN23XX_PF(octeon_dev) ||
		    OCTEON_CN6XXX(octeon_dev)) {
			lio->dev_capability = NETIF_F_HIGHDMA
					      | NETIF_F_IP_CSUM
					      | NETIF_F_IPV6_CSUM
					      | NETIF_F_SG | NETIF_F_RXCSUM
					      | NETIF_F_GRO
					      | NETIF_F_TSO | NETIF_F_TSO6
					      | NETIF_F_LRO;
		}
		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);

		/* Copy of transmit encapsulation capabilities:
		 * TSO, TSO6, Checksums for this device
		 */
		lio->enc_dev_capability = NETIF_F_IP_CSUM
					  | NETIF_F_IPV6_CSUM
					  | NETIF_F_GSO_UDP_TUNNEL
					  | NETIF_F_HW_CSUM | NETIF_F_SG
					  | NETIF_F_RXCSUM
					  | NETIF_F_TSO | NETIF_F_TSO6
					  | NETIF_F_LRO;

		netdev->hw_enc_features = (lio->enc_dev_capability &
					   ~NETIF_F_LRO);

		lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;

		netdev->vlan_features = lio->dev_capability;
		/* Add any unchangeable hw features */
		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
				       NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_CTAG_TX;

		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);

		netdev->hw_features = lio->dev_capability;
		/* HW_VLAN_RX and HW_VLAN_FILTER are always on */
		netdev->hw_features = netdev->hw_features &
			~NETIF_F_HW_VLAN_CTAG_RX;

		/* Point to the properties for octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->octprops = props;
		lio->netdev = netdev;

		dev_dbg(&octeon_dev->pci_dev->dev,
			"if%d gmx: %d hw_addr: 0x%llx\n", i,
			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));

		/* 64-bit swap required on LE machines */
		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
		for (j = 0; j < 6; j++)
			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));

		/* Copy MAC Address to OS network device structure */
		ether_addr_copy(netdev->dev_addr, mac);

		/* By default all interfaces on a single Octeon use the same
		 * tx and rx queues
		 */
		lio->txq = lio->linfo.txpciq[0].s.q_no;
		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
		if (setup_io_queues(octeon_dev, i)) {
			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
			goto setup_nic_dev_fail;
		}

		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);

		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);

		if (setup_glists(octeon_dev, lio, num_iqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Gather list allocation failed\n");
			goto setup_nic_dev_fail;
		}

		/* Register ethtool support */
		liquidio_set_ethtool_ops(netdev);
		octeon_dev->priv_flags = 0x0;

		if (netdev->features & NETIF_F_LRO)
			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

		liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);

		if ((debug != -1) && (debug & NETIF_MSG_HW))
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE, 0);

		/* Register the network device with the OS */
		if (register_netdev(netdev)) {
			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
			goto setup_nic_dev_fail;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		netif_carrier_off(netdev);
		lio->link_changes++;

		ifstate_set(lio, LIO_IFSTATE_REGISTERED);

		/* Tell the firmware to enable Rx checksum offload by default
		 * when the driver sets up this device
		 */
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
				     OCTNET_CMD_TXCSUM_ENABLE);

		dev_dbg(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup successful\n", i);

		octeon_free_soft_command(octeon_dev, sc);
	}

	return 0;

setup_nic_dev_fail:

	octeon_free_soft_command(octeon_dev, sc);

	while (i--) {
		dev_err(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup failed\n", i);
		liquidio_destroy_nic_device(octeon_dev, i);
	}
	return -ENODEV;
}

/**
 * \brief initialize the NIC
 * @param oct octeon device
 *
 * This initialization routine is called once the Octeon device application is
 * up and running
 */
static int liquidio_init_nic_module(struct octeon_device *oct)
{
	struct oct_intrmod_cfg *intrmod_cfg;
	int i, retval = 0;
	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));

	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");

	/* only default iq and oq were initialized
	 * initialize the rest as well
	 */
	/* run port_config command for each port */
	oct->ifcount = num_nic_ports;

	memset(oct->props, 0,
	       sizeof(struct octdev_props) * num_nic_ports);

	for (i = 0; i < MAX_OCTEON_LINKS; i++)
		oct->props[i].gmxport = -1;

	retval = setup_nic_devices(oct);
	if (retval) {
		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
		goto octnet_init_failure;
	}

	liquidio_ptp_init(oct);

	/* Initialize interrupt moderation params */
	intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
	intrmod_cfg->rx_enable = 1;
	intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
	intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
	intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
	intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
	intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
	intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
	intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
	intrmod_cfg->tx_enable = 1;
	intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
	intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
	intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
	intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");

	return retval;

octnet_init_failure:

	oct->ifcount = 0;

	return retval;
}

/**
 * \brief starter callback that invokes the remaining initialization work after
 * the NIC is up and running.
 * @param work the work_struct, embedded in a struct cavium_wk
 */
static void nic_starter(struct work_struct *work)
{
	struct octeon_device *oct;
	struct cavium_wk *wk = (struct cavium_wk *)work;

	oct = (struct octeon_device *)wk->ctxptr;

	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
		return;

	/* If the status of the device is CORE_OK, the core
	 * application has reported its application type. Call
	 * any registered handlers now and move to the RUNNING
	 * state.
	 */
	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
		schedule_delayed_work(&oct->nic_poll_work.work,
				      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
		return;
	}

	atomic_set(&oct->status, OCT_DEV_RUNNING);

	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");

		if (liquidio_init_nic_module(oct))
			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
		else
			handshake[oct->octeon_id].started_ok = 1;
	} else {
		dev_err(&oct->pci_dev->dev,
			"Unexpected application running on NIC (%d). Check firmware.\n",
			oct->app_mode);
	}

	complete(&handshake[oct->octeon_id].started);
}

/**
 * \brief Device initialization for each Octeon device that is probed
 * @param octeon_dev octeon device
 *
 * Advances octeon_dev->status through the OCT_DEV_* states as each
 * initialization stage completes.
 */
static int octeon_device_init(struct octeon_device *octeon_dev)
{
	int j, ret;
	int fw_loaded = 0;
	char bootcmd[] = "\n";
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)octeon_dev->priv;

	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);

	/* Enable access to the octeon device and make its DMA capability
	 * known to the OS.
	 */
	if (octeon_pci_os_setup(octeon_dev))
		return 1;

	/* Identify the Octeon type and map the BAR address space. */
	if (octeon_chip_specific_setup(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);

	octeon_dev->app_mode = CVM_DRV_INVALID_APP;

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		if (!cn23xx_fw_loaded(octeon_dev)) {
			fw_loaded = 0;
			/* Do a soft reset of the Octeon device. */
			if (octeon_dev->fn_list.soft_reset(octeon_dev))
				return 1;
			/* things might have changed */
			if (!cn23xx_fw_loaded(octeon_dev))
				fw_loaded = 0;
			else
				fw_loaded = 1;
		} else {
			fw_loaded = 1;
		}
	} else if (octeon_dev->fn_list.soft_reset(octeon_dev)) {
		return 1;
	}
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003865
3866 /* Initialize the dispatch mechanism used to push packets arriving on
3867 * Octeon Output queues.
3868 */
3869 if (octeon_init_dispatch_list(octeon_dev))
3870 return 1;
3871
3872 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3873 OPCODE_NIC_CORE_DRV_ACTIVE,
3874 octeon_core_drv_init,
3875 octeon_dev);
3876
3877 INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
3878 octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
3879 schedule_delayed_work(&octeon_dev->nic_poll_work.work,
3880 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3881
3882 atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
3883
3884 octeon_set_io_queues_off(octeon_dev);
3885
Raghu Vatsavayi3451b972016-08-31 11:03:26 -07003886 if (OCTEON_CN23XX_PF(octeon_dev)) {
3887 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
3888 if (ret) {
3889 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
3890 return ret;
3891 }
3892 }
3893
3894 /* Initialize soft command buffer pool
3895 */
3896 if (octeon_setup_sc_buffer_pool(octeon_dev)) {
3897 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
3898 return 1;
3899 }
3900 atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (octeon_setup_instr_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev,
			"instruction queue initialization failed\n");
		/* On error, release any previously allocated queues */
		for (j = 0; j < octeon_dev->num_iqs; j++)
			octeon_delete_instr_queue(octeon_dev, j);
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
		/* Release any previously allocated queues */
		for (j = 0; j < octeon_dev->num_oqs; j++)
			octeon_delete_droq(octeon_dev, j);
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		if (octeon_allocate_ioq_vector(octeon_dev)) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
			return 1;
		}

	} else {
		/* The input and output queue registers were setup earlier (the
		 * queues were not enabled). Any additional registers
		 * that need to be programmed should be done now.
		 */
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize the tasklet that handles output queue packet processing. */
	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
	tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
		     (unsigned long)octeon_dev);

	/* Setup the interrupt handler and record the INT SUM register address
	 */
	if (octeon_setup_interrupt(octeon_dev))
		return 1;

	/* Enable Octeon device interrupts */
	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);

	/* Enable the input and output queues for this Octeon device */
	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
		return ret;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);

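	/* If no usable firmware is resident (always the case for CN6XXX, and
	 * for CN23XX PFs when cn23xx_fw_loaded() reported none), boot the card
	 * from the host: wait for DDR init, take over the bootloader console,
	 * and download the firmware image.
	 */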
	if ((!OCTEON_CN23XX_PF(octeon_dev)) || !fw_loaded) {
		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
		if (!ddr_timeout) {
			dev_info(&octeon_dev->pci_dev->dev,
				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
		}

		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);

		/* Wait for the octeon to initialize DDR after the soft-reset. */
		while (!ddr_timeout) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (schedule_timeout(HZ / 10)) {
				/* user probably pressed Control-C */
				return 1;
			}
		}
		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
				ret);
			return 1;
		}

		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
			return 1;
		}

		/* Divert uboot to take commands from host instead. */
		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);

		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
		ret = octeon_init_consoles(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
			return 1;
		}
		ret = octeon_add_console(octeon_dev, 0);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
			return 1;
		}

		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);

		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
		ret = load_firmware(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
			return 1;
		}
		/* set bit 1 of SLI_SCRATCH_1 to indicate that firmware is
		 * loaded
		 */
		if (OCTEON_CN23XX_PF(octeon_dev))
			octeon_write_csr64(octeon_dev, CN23XX_SLI_SCRATCH1,
					   2ULL);
	}

	handshake[octeon_dev->octeon_id].init_ok = 1;
	complete(&handshake[octeon_dev->octeon_id].init);

	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);

	/* Send Credit for Octeon Output queues. Credits are always sent after
	 * the output queue is enabled.
	 */
	for (j = 0; j < octeon_dev->num_oqs; j++)
		writel(octeon_dev->droq[j]->max_count,
		       octeon_dev->droq[j]->pkts_credit_reg);

	/* Packets can start arriving on the output queues from this point. */
	return 0;
}

/**
 * \brief Exits the module
 */
static void __exit liquidio_exit(void)
{
	liquidio_deinit_pci();

	pr_info("LiquidIO network module is now unloaded\n");
}

module_init(liquidio_init);
module_exit(liquidio_exit);