/**********************************************************************
* Author: Cavium, Inc.
*
* Contact: support@cavium.com
* Please include "LiquidIO" in the subject.
*
* Copyright (c) 2003-2015 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT.  See the GNU General Public License for more
* details.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/ptp_clock_kernel.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits until ddr_timeout is set to a non-zero value before checking");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

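/* Bump a per-instruction-queue statistics counter by "count", e.g.
 * INCR_INSTRQUEUE_PKT_COUNT(oct, q_no, tx_restart, 1). The update is a plain
 * addition with no locking, so these counters are best-effort.
 */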
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)	\
	(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN];
module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");

static int conf_type;
module_param(conf_type, int, 0);
MODULE_PARM_DESC(conf_type, "Select Octeon configuration: 0 (default) or 1 (OVS)");

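/* Hardware PTP timestamping is on by default; this version provides no
 * module parameter to disable it at load time.
 */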
static int ptp_enable = 1;

/* Bit mask values for lio->ifstate */
#define   LIO_IFSTATE_DROQ_OPS             0x01
#define   LIO_IFSTATE_REGISTERED           0x02
#define   LIO_IFSTATE_RUNNING              0x04
#define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS		1000

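/** Context shared between an if_cfg soft command and its completion
 * callback: the issuing device's id, a wait queue the caller sleeps on,
 * and a condition flag the callback sets once the response has arrived.
 */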
struct liquidio_if_cfg_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct liquidio_if_cfg_resp {
	u64 rh;
	struct liquidio_if_cfg_info cfg_info;
	u64 status;
};

struct liquidio_rx_ctl_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};
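
/* The field order in tx_info.s is mirrored for big-endian builds so that
 * gso_size/gso_segs land in the same bit positions within the u64 on either
 * host endianness (the word is passed around as a single 64-bit value).
 */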

/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

/** Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
	/** List manipulation. Next and prev pointers. */
	struct list_head list;

	/** Size of the gather component at sg in bytes. */
	int sg_size;

	/** Number of bytes that sg was adjusted to make it 8B-aligned. */
	int adjust;

	/** Gather component that can accommodate max sized fragment list
	 * received from the IP layer.
	 */
	struct octeon_sg_entry *sg;

	u64 sg_dma_ptr;
};

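/** Two-stage handshake used during probe: "init" completes when low-level
 * device init is done and "started" when the NIC application on the card is
 * up; init_ok/started_ok record the outcome of each stage.
 */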
struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};

struct octeon_device_priv {
	/** Tasklet structures for this device. */
	struct tasklet_struct droq_tasklet;
	unsigned long napi_mask;
};

#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;

static void octeon_droq_bh(unsigned long pdev)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device *oct = (struct octeon_device *)pdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	/* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & (1ULL << q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
		lio_enable_irq(oct->droq[q_no], NULL);

		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
			/* set time and cnt interrupt thresholds for this DROQ
			 * for NAPI
			 */
			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
			    0x5700000040ULL);
			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
		}
	}

	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}

static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & (1ULL << i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}

/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}

/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static int wait_for_pending_requests(struct octeon_device *oct)
{
	int i, pcount = 0;

	for (i = 0; i < 100; i++) {
		pcount =
			atomic_read(&oct->response_list
				[OCTEON_ORDERED_SC_LIST].pending_req_count);
		if (pcount)
			schedule_timeout_uninterruptible(HZ / 10);
		else
			break;
	}

	if (pcount)
		return 1;

	return 0;
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(100);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & (1ULL << i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}

/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask; /* Clear corresponding nonfatal bits */
	else
		status &= mask; /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
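
/* Note: pos is hard-coded to 0x100, which assumes the AER capability is the
 * first entry in extended config space; a more defensive version would locate
 * it with pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR).
 */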

/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* cn63xx_cleanup_aer_uncorrect_error_status(oct->pci_dev); */
	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery,
	 * only for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
	/* Nothing to be done here. */
}

#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
			    pm_message_t state __attribute__((unused)))
{
	return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
	return 0;
}
#endif

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled	= liquidio_pcie_mmio_enabled,
	.slot_reset	= liquidio_pcie_slot_reset,
	.resume		= liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
	{       /* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 23xx pf */
		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static struct pci_driver liquidio_pci_driver = {
	.name		= "LiquidIO",
	.id_table	= liquidio_pci_tbl,
	.probe		= liquidio_probe,
	.remove		= liquidio_remove,
	.err_handler	= &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
	.suspend	= liquidio_suspend,
	.resume		= liquidio_resume,
#endif
#ifdef CONFIG_PCI_IOV
	.sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * \brief check interface state
 * @param lio per-network private data
 * @param state_flag flag state to check
 */
static inline int ifstate_check(struct lio *lio, int state_flag)
{
	return atomic_read(&lio->ifstate) & state_flag;
}

/**
 * \brief set interface state
 * @param lio per-network private data
 * @param state_flag flag state to set
 */
static inline void ifstate_set(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
}

/**
 * \brief clear interface state
 * @param lio per-network private data
 * @param state_flag flag state to clear
 */
static inline void ifstate_reset(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}
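
/* Note: each helper above is an atomic_read() followed by an atomic_set(),
 * so the read-modify-write is not atomic as a whole; the driver relies on
 * these being called from serialized paths.
 */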

/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void txqs_stop(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_stop_subqueue(netdev, i);
	} else {
		netif_stop_queue(netdev);
	}
}

/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void txqs_start(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_start_subqueue(netdev, i);
	} else {
		netif_start_queue(netdev);
	}
}

/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void txqs_wake(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++) {
			int qno = lio->linfo.txpciq[i %
				(lio->linfo.num_txpciq)].s.q_no;

			if (__netif_subqueue_stopped(netdev, i)) {
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
							  tx_restart, 1);
				netif_wake_subqueue(netdev, i);
			}
		}
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		netif_wake_queue(netdev);
	}
}

/**
 * \brief Stop Tx queue
 * @param netdev network device
 */
static void stop_txq(struct net_device *netdev)
{
	txqs_stop(netdev);
}

/**
 * \brief Start Tx queue
 * @param netdev network device
 */
static void start_txq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->linfo.link.s.link_up) {
		txqs_start(netdev);
		return;
	}
}

/**
 * \brief Wake a queue
 * @param netdev network device
 * @param q which queue to wake
 */
static inline void wake_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_wake_subqueue(netdev, q);
	else
		netif_wake_queue(netdev);
}

/**
 * \brief Stop a queue
 * @param netdev network device
 * @param q which queue to stop
 */
static inline void stop_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_stop_subqueue(netdev, q);
	else
		netif_stop_queue(netdev);
}

/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
	int ret_val = 0;

	if (netif_is_multiqueue(lio->netdev)) {
		int numqs = lio->netdev->num_tx_queues;
		int q, iq = 0;

		/* check each sub-queue state */
		for (q = 0; q < numqs; q++) {
			iq = lio->linfo.txpciq[q %
				(lio->linfo.num_txpciq)].s.q_no;
			if (octnet_iq_is_full(lio->oct_dev, iq))
				continue;
			if (__netif_subqueue_stopped(lio->netdev, q)) {
				wake_q(lio->netdev, q);
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
							  tx_restart, 1);
				ret_val++;
			}
		}
	} else {
		if (octnet_iq_is_full(lio->oct_dev, lio->txq))
			return 0;
		wake_q(lio->netdev, lio->txq);
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		ret_val = 1;
	}
	return ret_val;
}

/**
 * Remove the node at the head of the list. The list would be empty at
 * the end of this call if there are no more nodes in the list.
 */
static inline struct list_head *list_delete_head(struct list_head *root)
{
	struct list_head *node;

	if ((root->prev == root) && (root->next == root))
		node = NULL;
	else
		node = root->next;

	if (node)
		list_del(node);

	return node;
}
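
/* The (root->prev == root && root->next == root) test is the open-coded
 * equivalent of list_empty(root); NULL is returned instead of the list head
 * when there is nothing to remove.
 */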

/**
 * \brief Delete gather lists
 * @param lio per-network private data
 */
static void delete_glists(struct lio *lio)
{
	struct octnic_gather *g;
	int i;

	if (!lio->glist)
		return;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		do {
			g = (struct octnic_gather *)
			    list_delete_head(&lio->glist[i]);
			if (g) {
				if (g->sg) {
					dma_unmap_single(&lio->oct_dev->
							 pci_dev->dev,
							 g->sg_dma_ptr,
							 g->sg_size,
							 DMA_TO_DEVICE);
					kfree((void *)((unsigned long)g->sg -
						       g->adjust));
				}
				kfree(g);
			}
		} while (g);
	}

	kfree((void *)lio->glist);
	kfree((void *)lio->glist_lock);
}

/**
 * \brief Setup gather lists
 * @param lio per-network private data
 */
static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	int i, j;
	struct octnic_gather *g;

	lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
				  GFP_KERNEL);
	if (!lio->glist_lock)
		return 1;

	lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
			     GFP_KERNEL);
	if (!lio->glist) {
		kfree((void *)lio->glist_lock);
		return 1;
	}

	for (i = 0; i < num_iqs; i++) {
		int numa_node = cpu_to_node(i % num_online_cpus());

		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
					 numa_node);
			if (!g)
				g = kzalloc(sizeof(*g), GFP_KERNEL);
			if (!g)
				break;

			g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
				      OCT_SG_ENTRY_SIZE);

			g->sg = kmalloc_node(g->sg_size + 8,
					     GFP_KERNEL, numa_node);
			if (!g->sg)
				g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
			if (!g->sg) {
				kfree(g);
				break;
			}

			/* The gather component should be aligned on 64-bit
			 * boundary
			 */
			if (((unsigned long)g->sg) & 7) {
				g->adjust = 8 - (((unsigned long)g->sg) & 7);
				g->sg = (struct octeon_sg_entry *)
					((unsigned long)g->sg + g->adjust);
			}
			g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
						       g->sg, g->sg_size,
						       DMA_TO_DEVICE);
			if (dma_mapping_error(&oct->pci_dev->dev,
					      g->sg_dma_ptr)) {
				kfree((void *)((unsigned long)g->sg -
					       g->adjust));
				kfree(g);
				break;
			}

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			delete_glists(lio);
			return 1;
		}
	}

	return 0;
}
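
/* Each scatter list is over-allocated by 8 bytes so it can be nudged up to
 * the next 8-byte boundary; g->adjust remembers the shift so the original
 * pointer can be recovered at kfree() time.
 */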

/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}

/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	rtnl_lock();
	call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
	rtnl_unlock();
}

/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}

static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);

	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			netif_carrier_on(netdev);
			/* start_txq(netdev); */
			txqs_wake(netdev);
		} else {
			netif_carrier_off(netdev);
			stop_txq(netdev);
		}
	}
}

/* Runs in interrupt context. */
static void update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct net_device *netdev;
	struct lio *lio;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];

	netdev = oct->props[iq->ifidx].netdev;

	/* This is needed because the first IQ does not have
	 * a netdev associated with it.
	 */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);
	if (netif_is_multiqueue(netdev)) {
		if (__netif_subqueue_stopped(netdev, iq->q_index) &&
		    lio->linfo.link.s.link_up &&
		    (!octnet_iq_is_full(oct, iq_num))) {
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
						  tx_restart, 1);
			netif_wake_subqueue(netdev, iq->q_index);
		} else {
			if (!octnet_iq_is_full(oct, lio->txq)) {
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
							  lio->txq,
							  tx_restart, 1);
				wake_q(netdev, lio->txq);
			}
		}
	}
}

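/**
 * \brief Schedule DROQ processing for an MSI-X interrupt
 * @param droq output queue that raised the interrupt
 * @param ret interrupt cause bits from the chip-specific handler
 *
 * Runs NAPI directly when the queue is in poll mode; otherwise defers
 * output-queue work to the droq tasklet. Input-queue doorbell interrupts
 * (MSIX_PI_INT) are left to the periodic IQ doorbell flush.
 */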
static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
	struct octeon_device *oct = droq->oct_dev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	if (droq->ops.poll_mode) {
		droq->ops.napi_fn(droq);
	} else {
		if (ret & MSIX_PO_INT) {
			tasklet_schedule(&oct_priv->droq_tasklet);
			return 1;
		}
		/* this will be flushed periodically by check iq db */
		if (ret & MSIX_PI_INT)
			return 0;
	}
	return 0;
}

/**
 * \brief Droq packet processor scheduler
 * @param oct octeon device
 */
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	u64 oq_no;
	struct octeon_droq *droq;

	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
		     oq_no++) {
			if (!(oct->droq_intr & (1ULL << oq_no)))
				continue;

			droq = oct->droq[oq_no];

			if (droq->ops.poll_mode) {
				droq->ops.napi_fn(droq);
				oct_priv->napi_mask |= (1 << oq_no);
			} else {
				tasklet_schedule(&oct_priv->droq_tasklet);
			}
		}
	}
}

static irqreturn_t
liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
{
	u64 ret;
	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
	struct octeon_device *oct = ioq_vector->oct_dev;
	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];

	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

	if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
		liquidio_schedule_msix_droq_pkt_handler(droq, ret);

	return IRQ_HANDLED;
}

/**
 * \brief Interrupt handler for octeon
 * @param irq unused
 * @param dev octeon device
 */
static
irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
					 void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	irqreturn_t ret;

	/* Disable our interrupts for the duration of ISR */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	ret = oct->fn_list.process_interrupt_regs(oct);

	if (ret == IRQ_HANDLED)
		liquidio_schedule_droq_pkt_handlers(oct);

	/* Re-enable our interrupts */
	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
		oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	return ret;
}

/**
 * \brief Setup interrupt for octeon device
 * @param oct octeon device
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
static int octeon_setup_interrupt(struct octeon_device *oct)
{
	int irqret, err;
	struct msix_entry *msix_entries;
	int i;
	int num_ioq_vectors;
	int num_alloc_ioq_vectors;

	if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
		oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
		/* one non ioq interrupt for handling sli_mac_pf_int_sum */
		oct->num_msix_irqs += 1;

		oct->msix_entries = kcalloc(
		    oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
		if (!oct->msix_entries)
			return 1;

		msix_entries = (struct msix_entry *)oct->msix_entries;
		/* Assumption: the PF MSI-X vectors start at pf_srn and run
		 * through trs, not from 0. If that changes, change this code.
		 */
		for (i = 0; i < oct->num_msix_irqs - 1; i++)
			msix_entries[i].entry = oct->sriov_info.pf_srn + i;
		msix_entries[oct->num_msix_irqs - 1].entry =
		    oct->sriov_info.trs;
		num_alloc_ioq_vectors = pci_enable_msix_range(
						oct->pci_dev, msix_entries,
						oct->num_msix_irqs,
						oct->num_msix_irqs);
		if (num_alloc_ioq_vectors < 0) {
			dev_err(&oct->pci_dev->dev, "unable to allocate MSI-X interrupts\n");
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			return 1;
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

		num_ioq_vectors = oct->num_msix_irqs;

		/* For PF, there is one non-ioq interrupt handler */
		num_ioq_vectors -= 1;
		irqret = request_irq(msix_entries[num_ioq_vectors].vector,
				     liquidio_legacy_intr_handler, 0, "octeon",
				     oct);
		if (irqret) {
			dev_err(&oct->pci_dev->dev,
				"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
				irqret);
			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			return 1;
		}

		for (i = 0; i < num_ioq_vectors; i++) {
			irqret = request_irq(msix_entries[i].vector,
					     liquidio_msix_intr_handler, 0,
					     "octeon", &oct->ioq_vector[i]);
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				/* Free the non-ioq irq vector here. */
				free_irq(msix_entries[num_ioq_vectors].vector,
					 oct);

				while (i) {
					i--;
					/* clear the affinity mask */
					irq_set_affinity_hint(
						msix_entries[i].vector, NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
				}
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				oct->msix_entries = NULL;
				return 1;
			}
			oct->ioq_vector[i].vector = msix_entries[i].vector;
			/* assign the cpu mask for this msix interrupt vector */
			irq_set_affinity_hint(
			    msix_entries[i].vector,
			    (&oct->ioq_vector[i].affinity_mask));
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
			oct->octeon_id);
	} else {
		err = pci_enable_msi(oct->pci_dev);
		if (err)
			dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
				 err);
		else
			oct->flags |= LIO_FLAG_MSI_ENABLED;

		irqret = request_irq(oct->pci_dev->irq,
				     liquidio_legacy_intr_handler, IRQF_SHARED,
				     "octeon", oct);
		if (irqret) {
			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
			dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
				irqret);
			return 1;
		}
	}
	return 0;
}

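/**
 * \brief Kernel thread that monitors the firmware cores of a CN23XX NIC
 * @param param octeon device pointer
 *
 * Polls each core's CIU3 watchdog register and the crash mask in
 * CN23XX_SLI_SCRATCH2, logging each stuck or crashed core exactly once.
 * Once a core has gone bad, the module reference count is forced to zero
 * so that the driver can still be unloaded with rmmod.
 */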
static int liquidio_watchdog(void *param)
{
	u64 wdog;
	u16 mask_of_stuck_cores = 0;
	u16 mask_of_crashed_cores = 0;
	int core_num;
	u8 core_is_stuck[LIO_MAX_CORES];
	u8 core_crashed[LIO_MAX_CORES];
	struct octeon_device *oct = param;

	memset(core_is_stuck, 0, sizeof(core_is_stuck));
	memset(core_crashed, 0, sizeof(core_crashed));

	while (!kthread_should_stop()) {
		mask_of_crashed_cores =
		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

		for (core_num = 0; core_num < LIO_MAX_CORES; core_num++) {
			if (!core_is_stuck[core_num]) {
				wdog = lio_pci_readq(oct, CIU3_WDOG(core_num));

				/* look at watchdog state field */
				wdog &= CIU3_WDOG_MASK;
				if (wdog) {
					/* this watchdog timer has expired */
					core_is_stuck[core_num] =
						LIO_MONITOR_WDOG_EXPIRE;
					mask_of_stuck_cores |= (1 << core_num);
				}
			}

			if (!core_crashed[core_num])
				core_crashed[core_num] =
				    (mask_of_crashed_cores >> core_num) & 1;
		}

		if (mask_of_stuck_cores) {
			for (core_num = 0; core_num < LIO_MAX_CORES;
			     core_num++) {
				if (core_is_stuck[core_num] == 1) {
					dev_err(&oct->pci_dev->dev,
						"ERROR: Octeon core %d is stuck!\n",
						core_num);
					/* 2 means we have printk'd an error,
					 * so no need to repeat the same printk
					 */
					core_is_stuck[core_num] =
						LIO_MONITOR_CORE_STUCK_MSGD;
				}
			}
		}

		if (mask_of_crashed_cores) {
			for (core_num = 0; core_num < LIO_MAX_CORES;
			     core_num++) {
				if (core_crashed[core_num] == 1) {
					dev_err(&oct->pci_dev->dev,
						"ERROR: Octeon core %d crashed! See oct-fwdump for details.\n",
						core_num);
					/* 2 means we have printk'd an error,
					 * so no need to repeat the same printk
					 */
					core_crashed[core_num] =
						LIO_MONITOR_CORE_STUCK_MSGD;
				}
			}
		}
#ifdef CONFIG_MODULE_UNLOAD
		if (mask_of_stuck_cores || mask_of_crashed_cores) {
			/* make module refcount=0 so that rmmod will work */
			long refcount;

			refcount = module_refcount(THIS_MODULE);

			while (refcount > 0) {
				module_put(THIS_MODULE);
				refcount = module_refcount(THIS_MODULE);
			}

			/* compensate for and withstand an unlikely (but still
			 * possible) race condition
			 */
			while (refcount < 0) {
				try_module_get(THIS_MODULE);
				refcount = module_refcount(THIS_MODULE);
			}
		}
#endif
		/* sleep for two seconds */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(2 * HZ);
	}

	return 0;
}

/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
	       const struct pci_device_id *ent __attribute__((unused)))
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	if (pdev->device == OCTEON_CN23XX_PF_VID)
		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		complete(&hs->init);
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	if (OCTEON_CN23XX_PF(oct_dev)) {
		u64 scratch1;
		u8 bus, device, function;

		scratch1 = octeon_read_csr64(oct_dev, CN23XX_SLI_SCRATCH1);
		if (!(scratch1 & 4ULL)) {
			/* Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
			 * the lio watchdog kernel thread is running for this
			 * NIC. Each NIC gets one watchdog kernel thread.
			 */
			scratch1 |= 4ULL;
			octeon_write_csr64(oct_dev, CN23XX_SLI_SCRATCH1,
					   scratch1);

			bus = pdev->bus->number;
			device = PCI_SLOT(pdev->devfn);
			function = PCI_FUNC(pdev->devfn);
			oct_dev->watchdog_task = kthread_create(
			    liquidio_watchdog, oct_dev,
			    "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
			if (!IS_ERR(oct_dev->watchdog_task)) {
				wake_up_process(oct_dev->watchdog_task);
			} else {
				oct_dev->watchdog_task = NULL;
				dev_err(&oct_dev->pci_dev->dev,
					"failed to create kernel_thread\n");
				liquidio_remove(pdev);
				return -1;
			}
		}
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}

/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	int i;
	struct msix_entry *msix_entries;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	struct handshake *hs;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:

		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fallthrough */
	case OCT_DEV_HOST_OK:

		/* fallthrough */
	case OCT_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		octeon_remove_consoles(oct);

		/* fallthrough */
	case OCT_DEV_IO_QUEUES_DONE:
		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* fallthrough */
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				/* clear the affinity_cpumask */
				irq_set_affinity_hint(msix_entries[i].vector,
						      NULL);
				free_irq(msix_entries[i].vector,
					 &oct->ioq_vector[i]);
			}
			/* non-iov vector's argument is oct struct */
			free_irq(msix_entries[i].vector, oct);

			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
		} else {
			/* Release the interrupt line */
			free_irq(oct->pci_dev->irq, oct);

			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
		}

		/* fallthrough */
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		if (OCTEON_CN23XX_PF(oct))
			octeon_free_ioq_vector(oct);

		/* fallthrough */
	case OCT_DEV_MBOX_SETUP_DONE:
		if (OCTEON_CN23XX_PF(oct))
			oct->fn_list.free_mbox(oct);

		/* fallthrough */
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/* atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE); */
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

		/* fallthrough */
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		/* fallthrough */
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}
#ifdef CONFIG_PCI_IOV
		if (oct->sriov_info.sriov_enabled)
			pci_disable_sriov(oct->pci_dev);
#endif
		/* fallthrough */
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		/* fallthrough */
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		/* fallthrough */
	case OCT_DEV_PCI_MAP_DONE:
		/* Soft reset the octeon device before exiting */
		if ((!OCTEON_CN23XX_PF(oct)) || !oct->octeon_id)
			oct->fn_list.soft_reset(oct);

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		/* fallthrough */
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* fallthrough */
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}                       /* end switch (oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}
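
/* The switch above is a reverse teardown ladder: it keys on how far init
 * progressed and each case deliberately falls through to undo the earlier
 * stages, so a partially initialized device unwinds correctly.
 */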

/**
 * \brief Callback for rx ctrl
 * @param oct octeon device
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void rx_ctl_callback(struct octeon_device *oct,
			    u32 status,
			    void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_rx_ctl_context *ctx;

	ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (status)
		dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
			CVM_CAST64(status));
	WRITE_ONCE(ctx->cond, 1);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}

1566
1567/**
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001568 * \brief Send Rx control command
1569 * @param lio per-network private data
1570 * @param start_stop whether to start or stop
1571 */
1572static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
1573{
Raghu Vatsavayiafdf8412016-09-01 11:16:05 -07001574 struct octeon_soft_command *sc;
1575 struct liquidio_rx_ctl_context *ctx;
1576 union octnet_cmd *ncmd;
1577 int ctx_size = sizeof(struct liquidio_rx_ctl_context);
1578 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1579 int retval;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001580
Raghu Vatsavayiafdf8412016-09-01 11:16:05 -07001581 if (oct->props[lio->ifidx].rx_on == start_stop)
1582 return;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001583
Raghu Vatsavayiafdf8412016-09-01 11:16:05 -07001584 sc = (struct octeon_soft_command *)
1585 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1586 16, ctx_size);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001587
Raghu Vatsavayiafdf8412016-09-01 11:16:05 -07001588 ncmd = (union octnet_cmd *)sc->virtdptr;
1589 ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
1590
1591 WRITE_ONCE(ctx->cond, 0);
1592 ctx->octeon_id = lio_get_device_id(oct);
1593 init_waitqueue_head(&ctx->wc);
1594
1595 ncmd->u64 = 0;
1596 ncmd->s.cmd = OCTNET_CMD_RX_CTL;
1597 ncmd->s.param1 = start_stop;
1598
1599 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1600
1601 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1602
1603 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1604 OPCODE_NIC_CMD, 0, 0, 0);
1605
1606 sc->callback = rx_ctl_callback;
1607 sc->callback_arg = sc;
1608 sc->wait_time = 5000;
1609
1610 retval = octeon_send_soft_command(oct, sc);
1611 if (retval == IQ_SEND_FAILED) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001612 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
Raghu Vatsavayiafdf8412016-09-01 11:16:05 -07001613 } else {
1614 /* Sleep on a wait queue till the cond flag indicates that the
1615 * response arrived or timed-out.
1616 */
1617 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
1618 return;
1619 oct->props[lio->ifidx].rx_on = start_stop;
1620 }
1621
1622 octeon_free_soft_command(oct, sc);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001623}
1624
/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct lio *lio;
	struct napi_struct *napi, *n;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		liquidio_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;
	}

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	cleanup_link_status_change_wq(netdev);

	delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}

1676/**
1677 * \brief Stop complete NIC functionality
1678 * @param oct octeon device
1679 */
1680static int liquidio_stop_nic_module(struct octeon_device *oct)
1681{
1682 int i, j;
1683 struct lio *lio;
1684
1685 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
1686 if (!oct->ifcount) {
1687 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
1688 return 1;
1689 }
1690
Raghu Vatsavayi60441882016-06-21 22:53:08 -07001691 spin_lock_bh(&oct->cmd_resp_wqlock);
1692 oct->cmd_resp_state = OCT_DRV_OFFLINE;
1693 spin_unlock_bh(&oct->cmd_resp_wqlock);
1694
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001695 for (i = 0; i < oct->ifcount; i++) {
1696 lio = GET_LIO(oct->props[i].netdev);
1697 for (j = 0; j < lio->linfo.num_rxpciq; j++)
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07001698 octeon_unregister_droq_ops(oct,
1699 lio->linfo.rxpciq[j].s.q_no);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001700 }
1701
1702 for (i = 0; i < oct->ifcount; i++)
1703 liquidio_destroy_nic_device(oct, i);
1704
1705 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
1706 return 0;
1707}
1708
1709/**
1710 * \brief Cleans up resources at unload time
1711 * @param pdev PCI device structure
1712 */
1713static void liquidio_remove(struct pci_dev *pdev)
1714{
1715 struct octeon_device *oct_dev = pci_get_drvdata(pdev);
1716
1717 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
1718
Raghu Vatsavayi9ff1a9b2016-09-01 11:16:09 -07001719 if (oct_dev->watchdog_task)
1720 kthread_stop(oct_dev->watchdog_task);
1721
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001722 if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
1723 liquidio_stop_nic_module(oct_dev);
1724
	/* Reset the octeon device and clean up all memory allocated for
	 * the octeon device by the driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}

/**
 * \brief Identify the Octeon device and map the BAR address space
 * @param oct octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
	u32 dev_id, rev_id;
	int ret = 1;
	char *s;

	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	switch (dev_id) {
	case OCTEON_CN68XX_PCIID:
		oct->chip_id = OCTEON_CN68XX;
		ret = lio_setup_cn68xx_octeon_device(oct);
		s = "CN68XX";
		break;

	case OCTEON_CN66XX_PCIID:
		oct->chip_id = OCTEON_CN66XX;
		ret = lio_setup_cn66xx_octeon_device(oct);
		s = "CN66XX";
		break;

	case OCTEON_CN23XX_PCIID_PF:
		oct->chip_id = OCTEON_CN23XX_PF_VID;
		ret = setup_cn23xx_octeon_pf_device(oct);
		s = "CN23XX";
		break;

	default:
		s = "?";
		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
			dev_id);
	}

	if (!ret)
		dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
			 OCTEON_MAJOR_REV(oct),
			 OCTEON_MINOR_REV(oct),
			 octeon_get_conf(oct)->card_name,
			 LIQUIDIO_VERSION);

	return ret;
}

/**
 * \brief PCI initialization for each Octeon device.
 * @param oct octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
	/* setup PCI stuff first */
	if (pci_enable_device(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
		return 1;
	}

	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
		pci_disable_device(oct->pci_dev);
		return 1;
	}

	/* Enable PCI DMA Master. */
	pci_set_master(oct->pci_dev);

	return 0;
}

static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
{
	int q = 0;

	if (netif_is_multiqueue(lio->netdev))
		q = skb->queue_mapping % lio->linfo.num_txpciq;

	return q;
}

/**
 * \brief Check Tx queue state for a given network buffer
 * @param lio per-network private data
 * @param skb network buffer
 */
static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
{
	int q = 0, iq = 0;

	if (netif_is_multiqueue(lio->netdev)) {
		q = skb->queue_mapping;
		iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
	} else {
		iq = lio->txq;
		q = iq;
	}

	if (octnet_iq_is_full(lio->oct_dev, iq))
		return 0;

	if (__netif_subqueue_stopped(lio->netdev, q)) {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
		wake_q(lio->netdev, q);
	}
	return 1;
}

/**
 * \brief Unmap and free network buffer
 * @param buf buffer
 */
static void free_netbuf(void *buf)
{
	struct sk_buff *skb;
	struct octnet_buf_free_info *finfo;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
			 DMA_TO_DEVICE);

	check_txq_state(lio, skb);

	tx_buffer_free(skb);
}

/**
 * \brief Unmap and free gather buffer
 * @param buf buffer
 */
static void free_netsgbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
				g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);

	iq = skb_iq(lio, skb);
	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	check_txq_state(lio, skb); /* mq support: sub-queue state check */

	tx_buffer_free(skb);
}
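/* Illustrative helper (a sketch, not part of the driver): the gather list
 * packs four DMA addresses per octeon_sg_entry, which is why the unmap
 * loops above address logical fragment i as sg[i >> 2].ptr[i & 3]. The
 * helper name below is invented for illustration only.
 */
static inline u64 __attribute__((unused))
lio_gather_ptr_of(struct octnic_gather *g, int i)
{
	/* i == 0 is the linear skb area; i >= 1 are the page frags */
	return g->sg[i >> 2].ptr[i & 3];
}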

/**
 * \brief Unmap and free gather buffer with response
 * @param buf buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
	struct octeon_soft_command *sc;
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	sc = (struct octeon_soft_command *)buf;
	skb = (struct sk_buff *)sc->callback_arg;
	finfo = (struct octnet_buf_free_info *)&skb->cb;

	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
				g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);

	iq = skb_iq(lio, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */

	check_txq_state(lio, skb);
}

/**
 * \brief Adjust ptp frequency
 * @param ptp PTP clock info
 * @param ppb how much to adjust by, in parts-per-billion
 */
static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	u64 comp, delta;
	unsigned long flags;
	bool neg_adj = false;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* The hardware adds the clock compensation value to the
	 * PTP clock on every coprocessor clock cycle, so we
	 * compute the delta in terms of coprocessor clocks.
	 */
	delta = (u64)ppb << 32;
	do_div(delta, oct->coproc_clock_rate);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
	if (neg_adj)
		comp -= delta;
	else
		comp += delta;
	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}
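/* Worked example for the conversion above, with illustrative numbers: at
 * a coprocessor clock of 600 MHz, a request of ppb = 1000 gives
 * delta = (1000 << 32) / 600000000 ~= 7158, i.e. the per-cycle
 * compensation word moves by ~7158 units of 2^-32 ns.
 */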

/**
 * \brief Adjust ptp time
 * @param ptp PTP clock info
 * @param delta how much to adjust by, in nanosecs
 */
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio->ptp_adjust += delta;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * \brief Get hardware clock time, including any adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
				struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_irqsave(&lio->ptp_lock, flags);
	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
	ns += lio->ptp_adjust;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * \brief Set hardware clock time and reset the adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	ns = timespec_to_ns(ts);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
	lio->ptp_adjust = 0;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * \brief Request a PTP ancillary feature (none are supported)
 * @param ptp PTP clock info
 * @param rq request
 * @param on whether to enable or disable the feature
 */
static int
liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
		    struct ptp_clock_request *rq __attribute__((unused)),
		    int on __attribute__((unused)))
{
	return -EOPNOTSUPP;
}

/**
 * \brief Open PTP clock source
 * @param netdev network device
 */
static void oct_ptp_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_init(&lio->ptp_lock);

	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
	lio->ptp_info.owner = THIS_MODULE;
	lio->ptp_info.max_adj = 250000000;
	lio->ptp_info.n_alarm = 0;
	lio->ptp_info.n_ext_ts = 0;
	lio->ptp_info.n_per_out = 0;
	lio->ptp_info.pps = 0;
	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
	lio->ptp_info.settime64 = liquidio_ptp_settime;
	lio->ptp_info.enable = liquidio_ptp_enable;

	lio->ptp_adjust = 0;

	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
					    &oct->pci_dev->dev);

	if (IS_ERR(lio->ptp_clock))
		lio->ptp_clock = NULL;
}

/**
 * \brief Init PTP clock
 * @param oct octeon device
 */
static void liquidio_ptp_init(struct octeon_device *oct)
{
	u64 clock_comp, cfg;

	clock_comp = (u64)NSEC_PER_SEC << 32;
	do_div(clock_comp, oct->coproc_clock_rate);
	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);

	/* Enable */
	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
}
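/* Worked example, assuming an illustrative coproc_clock_rate of 600 MHz:
 * clock_comp = (10^9 << 32) / (600 * 10^6) = 7158278826, which is
 * 1.6667 ns per coprocessor cycle in 32.32 fixed point -- exactly the
 * period of a 600 MHz clock.
 */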

/**
 * \brief Load firmware to device
 * @param oct octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it
 */
static int load_firmware(struct octeon_device *oct)
{
	int ret = 0;
	const struct firmware *fw;
	char fw_name[LIO_MAX_FW_FILENAME_LEN];
	char *tmp_fw_type;

	if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
		    sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
		dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
		return ret;
	}

	if (fw_type[0] == '\0')
		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
	else
		tmp_fw_type = fw_type;

	sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
		octeon_get_conf(oct)->card_name, tmp_fw_type,
		LIO_FW_NAME_SUFFIX);

	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
			fw_name);
		release_firmware(fw);
		return ret;
	}

	ret = octeon_download_firmware(oct, fw->data, fw->size);

	release_firmware(fw);

	return ret;
}
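/* Example of the generated name, assuming the usual macro values
 * (LIO_FW_DIR "liquidio/", LIO_FW_BASE_NAME "lio_", LIO_FW_NAME_SUFFIX
 * ".bin"): a 210SV card with the default "nic" firmware type requests
 * "liquidio/lio_210sv_nic.bin" from the firmware loader.
 */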

/**
 * \brief Setup output queue
 * @param oct octeon device
 * @param q_no which queue
 * @param num_descs how many descriptors
 * @param desc_size size of each descriptor
 * @param app_ctx application context
 */
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
			     int desc_size, void *app_ctx)
{
	int ret_val = 0;

	dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
	/* droq creation and local register settings. */
	ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
	if (ret_val < 0)
		return ret_val;

	if (ret_val == 1) {
		dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
		return 0;
	}
	/* tasklet creation for the droq */

	/* Enable the droq queues */
	octeon_set_droq_pkt_op(oct, q_no, 1);

	/* Send Credit for Octeon Output queues. Credits are always
	 * sent after the output queue is enabled.
	 */
	writel(oct->droq[q_no]->max_count,
	       oct->droq[q_no]->pkts_credit_reg);

	return ret_val;
}

/**
 * \brief Callback for getting interface configuration
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void if_cfg_callback(struct octeon_device *oct,
			    u32 status __attribute__((unused)),
			    void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_if_cfg_resp *resp;
	struct liquidio_if_cfg_context *ctx;

	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (resp->status)
		dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
			CVM_CAST64(resp->status));
	WRITE_ONCE(ctx->cond, 1);

	snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
		 resp->cfg_info.liquidio_firmware_version);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}

/**
 * \brief Select queue based on hash
 * @param dev Net device
 * @param skb sk_buff structure
 * @returns selected queue number
 */
static u16 select_q(struct net_device *dev, struct sk_buff *skb,
		    void *accel_priv __attribute__((unused)),
		    select_queue_fallback_t fallback __attribute__((unused)))
{
	u32 qindex = 0;
	struct lio *lio;

	lio = GET_LIO(dev);
	qindex = skb_tx_hash(dev, skb);

	return (u16)(qindex % (lio->linfo.num_txpciq));
}

/** Routine to push packets arriving on the Octeon interface up to the
 * network layer.
 * @param octeon_id - octeon device id.
 * @param skbuff - skbuff struct to be passed to network layer.
 * @param len - size of total data received.
 * @param rh - Control header associated with the packet
 * @param param - additional control data with the packet
 * @param arg - farg registered in droq_ops
 */
static void
liquidio_push_packet(u32 octeon_id __attribute__((unused)),
		     void *skbuff,
		     u32 len,
		     union octeon_rh *rh,
		     void *param,
		     void *arg)
{
	struct napi_struct *napi = param;
	struct sk_buff *skb = (struct sk_buff *)skbuff;
	struct skb_shared_hwtstamps *shhwtstamps;
	u64 ns;
	u16 vtag = 0;
	struct net_device *netdev = (struct net_device *)arg;
	struct octeon_droq *droq = container_of(param, struct octeon_droq,
						napi);
	if (netdev) {
		int packet_was_received;
		struct lio *lio = GET_LIO(netdev);
		struct octeon_device *oct = lio->oct_dev;

		/* Do not proceed if the interface is not in RUNNING state. */
		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
			recv_buffer_free(skb);
			droq->stats.rx_dropped++;
			return;
		}

		skb->dev = netdev;

		skb_record_rx_queue(skb, droq->q_no);
		if (likely(len > MIN_SKB_SIZE)) {
			struct octeon_skb_page_info *pg_info;
			unsigned char *va;

			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
			if (pg_info->page) {
				/* For Paged allocation use the frags */
				va = page_address(pg_info->page) +
					pg_info->page_offset;
				memcpy(skb->data, va, MIN_SKB_SIZE);
				skb_put(skb, MIN_SKB_SIZE);
				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						pg_info->page,
						pg_info->page_offset +
						MIN_SKB_SIZE,
						len - MIN_SKB_SIZE,
						LIO_RXBUFFER_SZ);
			}
		} else {
			struct octeon_skb_page_info *pg_info =
				((struct octeon_skb_page_info *)(skb->cb));
			skb_copy_to_linear_data(skb, page_address(pg_info->page)
						+ pg_info->page_offset, len);
			skb_put(skb, len);
			put_page(pg_info->page);
		}

		if (((oct->chip_id == OCTEON_CN66XX) ||
		     (oct->chip_id == OCTEON_CN68XX)) &&
		    ptp_enable) {
			if (rh->r_dh.has_hwtstamp) {
				/* timestamp is included from the hardware at
				 * the beginning of the packet.
				 */
				if (ifstate_check
				    (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
					/* Nanoseconds are in the first 64-bits
					 * of the packet.
					 */
					memcpy(&ns, (skb->data), sizeof(ns));
					shhwtstamps = skb_hwtstamps(skb);
					shhwtstamps->hwtstamp =
						ns_to_ktime(ns +
							    lio->ptp_adjust);
				}
				skb_pull(skb, sizeof(ns));
			}
		}

		skb->protocol = eth_type_trans(skb, skb->dev);
		if ((netdev->features & NETIF_F_RXCSUM) &&
		    (((rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
		     (!(rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
			/* checksum has already been verified */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		/* Set the encapsulation fields on the basis of the status
		 * received from the firmware.
		 */
		if (rh->r_dh.encap_on) {
			skb->encapsulation = 1;
			skb->csum_level = 1;
			droq->stats.rx_vxlan++;
		}

		/* inbound VLAN tag */
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (rh->r_dh.vlan != 0)) {
			u16 vid = rh->r_dh.vlan;
			u16 priority = rh->r_dh.priority;

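			/* Rebuild the 802.1Q TCI: PCP in bits 15..13, VID in
			 * bits 11..0; e.g. priority 5 and VID 100 yield a
			 * vtag of 0xa064.
			 */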
			vtag = priority << 13 | vid;
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
		}

		packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;

		if (packet_was_received) {
			droq->stats.rx_bytes_received += len;
			droq->stats.rx_pkts_received++;
			netdev->last_rx = jiffies;
		} else {
			droq->stats.rx_dropped++;
			netif_info(lio, rx_err, lio->netdev,
				   "droq:%d error rx_dropped:%llu\n",
				   droq->q_no, droq->stats.rx_dropped);
		}

	} else {
		recv_buffer_free(skb);
	}
}

/**
 * \brief wrapper for calling napi_schedule
 * @param param parameters to pass to napi_schedule
 *
 * Used when scheduling on different CPUs
 */
static void napi_schedule_wrapper(void *param)
{
	struct napi_struct *napi = param;

	napi_schedule(napi);
}

/**
 * \brief callback when receive interrupt occurs and we are in NAPI mode
 * @param arg pointer to octeon output queue
 */
static void liquidio_napi_drv_callback(void *arg)
{
	struct octeon_device *oct;
	struct octeon_droq *droq = arg;
	int this_cpu = smp_processor_id();

	oct = droq->oct_dev;

	if (OCTEON_CN23XX_PF(oct) || droq->cpu_id == this_cpu) {
		napi_schedule_irqoff(&droq->napi);
	} else {
		struct call_single_data *csd = &droq->csd;

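		/* The DROQ has a designated CPU; queue an async IPI so
		 * napi_schedule() runs there instead of on this core.
		 */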
		csd->func = napi_schedule_wrapper;
		csd->info = &droq->napi;
		csd->flags = 0;

		smp_call_function_single_async(droq->cpu_id, csd);
	}
}

/**
 * \brief Entry point for NAPI polling
 * @param napi NAPI structure
 * @param budget maximum number of items to process
 */
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_droq *droq;
	int work_done;
	int tx_done = 0, iq_no;
	struct octeon_instr_queue *iq;
	struct octeon_device *oct;

	droq = container_of(napi, struct octeon_droq, napi);
	oct = droq->oct_dev;
	iq_no = droq->q_no;
	/* Handle Droq descriptors */
	work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
						 POLL_EVENT_PROCESS_PKTS,
						 budget);

	/* Flush the instruction queue */
	iq = oct->instr_queue[iq_no];
	if (iq) {
		/* Process iq buffers within the budget limits */
		tx_done = octeon_flush_iq(oct, iq, 1, budget);
		/* Update the iq read index rather than waiting for the next
		 * interrupt. The full budget is returned if tx_done is false.
		 */
		update_txq_status(oct, iq_no);
		/*tx_done = (iq->flush_index == iq->octeon_read_index);*/
	} else {
		dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
			__func__, iq_no);
	}

	if ((work_done < budget) && (tx_done)) {
		napi_complete(napi);
		octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
					     POLL_EVENT_ENABLE_INTR, 0);
		return 0;
	}

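	/* NAPI contract: returning less than budget (after napi_complete
	 * above) re-arms interrupts; returning the full budget keeps this
	 * poll function scheduled, which is what a Tx backlog forces below.
	 */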
	return (!tx_done) ? (budget) : (work_done);
}

/**
 * \brief Setup input and output queues
 * @param octeon_dev octeon device
 * @param ifidx Interface Index
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
static inline int setup_io_queues(struct octeon_device *octeon_dev,
				  int ifidx)
{
	struct octeon_droq_ops droq_ops;
	struct net_device *netdev;
	static int cpu_id;
	static int cpu_id_modulus;
	struct octeon_droq *droq;
	struct napi_struct *napi;
	int q, q_no, retval = 0;
	struct lio *lio;
	int num_tx_descs;

	netdev = octeon_dev->props[ifidx].netdev;

	lio = GET_LIO(netdev);

	memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));

	droq_ops.fptr = liquidio_push_packet;
	droq_ops.farg = (void *)netdev;

	droq_ops.poll_mode = 1;
	droq_ops.napi_fn = liquidio_napi_drv_callback;
	cpu_id = 0;
	cpu_id_modulus = num_present_cpus();

	/* set up DROQs. */
	for (q = 0; q < lio->linfo.num_rxpciq; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		dev_dbg(&octeon_dev->pci_dev->dev,
			"setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
			q, q_no);
		retval = octeon_setup_droq(octeon_dev, q_no,
					   CFG_GET_NUM_RX_DESCS_NIC_IF
					   (octeon_get_conf(octeon_dev),
					   lio->ifidx),
					   CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
					   (octeon_get_conf(octeon_dev),
					   lio->ifidx), NULL);
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"%s : Runtime DROQ(RxQ) creation failed.\n",
				__func__);
			return 1;
		}

		droq = octeon_dev->droq[q_no];
		napi = &droq->napi;
		dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n",
			(u64)netdev, (u64)octeon_dev, octeon_dev->pf_num);
		netif_napi_add(netdev, napi, liquidio_napi_poll, 64);

		/* designate a CPU for this droq */
		droq->cpu_id = cpu_id;
		cpu_id++;
		if (cpu_id >= cpu_id_modulus)
			cpu_id = 0;

		octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
	}

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		/* 23XX PF can receive control messages (via the first PF-owned
		 * droq) from the firmware even if the ethX interface is down,
		 * so that's why poll_mode must be off for the first droq.
		 */
		octeon_dev->droq[0]->ops.poll_mode = 0;
	}

	/* set up IQs. */
	for (q = 0; q < lio->linfo.num_txpciq; q++) {
		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
							   (octeon_dev),
							   lio->ifidx);
		retval = octeon_setup_iq(octeon_dev, ifidx, q,
					 lio->linfo.txpciq[q], num_tx_descs,
					 netdev_get_tx_queue(netdev, q));
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				" %s : Runtime IQ(TxQ) creation failed.\n",
				__func__);
			return 1;
		}
	}

	return 0;
}

/**
 * \brief Poll routine for checking transmit queue status
 * @param work work_struct data structure
 */
static void octnet_poll_check_txq_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
		return;

	check_txq_status(lio);
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}

/**
 * \brief Sets up the txq poll check
 * @param netdev network device
 */
static inline int setup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
						WQ_MEM_RECLAIM, 0);
	if (!lio->txq_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
			  octnet_poll_check_txq_status);
	lio->txq_status_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
	return 0;
}

static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->txq_status_wq.wq) {
		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
		destroy_workqueue(lio->txq_status_wq.wq);
	}
}

/**
 * \brief Net device open for LiquidIO
 * @param netdev network device
 */
static int liquidio_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct napi_struct *napi, *n;

	if (oct->props[lio->ifidx].napi_enabled == 0) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_enable(napi);

		oct->props[lio->ifidx].napi_enabled = 1;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 1;
	}

	oct_ptp_open(netdev);

	ifstate_set(lio, LIO_IFSTATE_RUNNING);

	/* Ready for link status updates */
	lio->intf_open = 1;

	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			if (setup_tx_poll_fn(netdev))
				return -1;
	} else {
		if (setup_tx_poll_fn(netdev))
			return -1;
	}

	start_txq(netdev);

	/* tell Octeon to start forwarding packets to host */
	send_rx_ctrl_cmd(lio, 1);

	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
		 netdev->name);

	return 0;
}

/**
 * \brief Net device stop for LiquidIO
 * @param netdev network device
 */
static int liquidio_stop(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	ifstate_reset(lio, LIO_IFSTATE_RUNNING);

	netif_tx_disable(netdev);

	/* Inform that netif carrier is down */
	netif_carrier_off(netdev);
	lio->intf_open = 0;
	lio->linfo.link.s.link_up = 0;
	lio->link_changes++;

	/* Pause for a moment and wait for Octeon to flush out (to the wire) any
	 * egress packets that are in-flight.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(100));

	/* Now it should be safe to tell Octeon that nic interface is down. */
	send_rx_ctrl_cmd(lio, 0);

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			cleanup_tx_poll_fn(netdev);
	} else {
		cleanup_tx_poll_fn(netdev);
	}

	if (lio->ptp_clock) {
		ptp_clock_unregister(lio->ptp_clock);
		lio->ptp_clock = NULL;
	}

	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);

	return 0;
}

/**
 * \brief Convert net device flags to an octnet_ifflags mask
 * @param netdev network device
 *
 * This routine generates an octnet_ifflags mask from the net device
 * flags received from the OS.
 */
static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
{
	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;

	if (netdev->flags & IFF_PROMISC)
		f |= OCTNET_IFFLAG_PROMISC;

	if (netdev->flags & IFF_ALLMULTI)
		f |= OCTNET_IFFLAG_ALLMULTI;

	if (netdev->flags & IFF_MULTICAST) {
		f |= OCTNET_IFFLAG_MULTICAST;

		/* Accept all multicast addresses if there are more than we
		 * can handle
		 */
		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
			f |= OCTNET_IFFLAG_ALLMULTI;
	}

	if (netdev->flags & IFF_BROADCAST)
		f |= OCTNET_IFFLAG_BROADCAST;

	return f;
}
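/* Example: an interface with IFF_PROMISC and IFF_BROADCAST set maps to
 * OCTNET_IFFLAG_UNICAST | OCTNET_IFFLAG_PROMISC | OCTNET_IFFLAG_BROADCAST;
 * one with more multicast addresses than MAX_OCTEON_MULTICAST_ADDR
 * additionally gets OCTNET_IFFLAG_ALLMULTI.
 */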

/**
 * \brief Net device set_multicast_list
 * @param netdev network device
 */
static void liquidio_set_mcast_list(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct netdev_hw_addr *ha;
	u64 *mc;
	int ret;
	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
	nctrl.ncmd.s.param1 = get_new_flags(netdev);
	nctrl.ncmd.s.param2 = mc_count;
	nctrl.ncmd.s.more = mc_count;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mc = &nctrl.udd[0];
	netdev_for_each_mc_addr(ha, netdev) {
		*mc = 0;
		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
		/* no need to swap bytes */

		if (++mc > &nctrl.udd[mc_count])
			break;
	}

	/* Apparently, any activity in this call from the kernel has to
	 * be atomic. So we won't wait for a response.
	 */
	nctrl.wait_time = 0;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
			ret);
	}
}

/**
 * \brief Net device set_mac_address
 * @param netdev network device
 */
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
	int ret = 0;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct sockaddr *addr = (struct sockaddr *)p;
	struct octnic_ctrl_pkt nctrl;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	nctrl.ncmd.s.param1 = 0;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
	nctrl.wait_time = 100;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
		return -ENOMEM;
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);

	return 0;
}

/**
 * \brief Net device get_stats
 * @param netdev network device
 */
static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct net_device_stats *stats = &netdev->stats;
	struct octeon_device *oct;
	u64 pkts = 0, drop = 0, bytes = 0;
	struct oct_droq_stats *oq_stats;
	struct oct_iq_stats *iq_stats;
	int i, iq_no, oq_no;

	oct = lio->oct_dev;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		iq_no = lio->linfo.txpciq[i].s.q_no;
		iq_stats = &oct->instr_queue[iq_no]->stats;
		pkts += iq_stats->tx_done;
		drop += iq_stats->tx_dropped;
		bytes += iq_stats->tx_tot_bytes;
	}

	stats->tx_packets = pkts;
	stats->tx_bytes = bytes;
	stats->tx_dropped = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < lio->linfo.num_rxpciq; i++) {
		oq_no = lio->linfo.rxpciq[i].s.q_no;
		oq_stats = &oct->droq[oq_no]->stats;
		pkts += oq_stats->rx_pkts_received;
		drop += (oq_stats->rx_dropped +
			 oq_stats->dropped_nodispatch +
			 oq_stats->dropped_toomany +
			 oq_stats->dropped_nomem);
		bytes += oq_stats->rx_bytes_received;
	}

	stats->rx_bytes = bytes;
	stats->rx_packets = pkts;
	stats->rx_dropped = drop;

	return stats;
}

/**
 * \brief Net device change_mtu
 * @param netdev network device
 */
static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
	nctrl.ncmd.s.param1 = new_mtu;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
		return -1;
	}

	lio->mtu = new_mtu;

	return 0;
}

/**
 * \brief Handler for SIOCSHWTSTAMP ioctl
 * @param netdev network device
 * @param ifr interface request
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config conf;
	struct lio *lio = GET_LIO(netdev);

	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
		return -EFAULT;

	if (conf.flags)
		return -EINVAL;

	switch (conf.tx_type) {
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

	switch (conf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		conf.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
	else
		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
}
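/* Illustrative userspace counterpart (a sketch, not part of the driver;
 * "eth0" and sock_fd are placeholders):
 *
 *	struct hwtstamp_config cfg = { 0 };
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_ALL;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Any PTP rx_filter requested is coalesced by the handler above into
 * HWTSTAMP_FILTER_ALL, and the possibly modified config is copied back.
 */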

/**
 * \brief ioctl handler
 * @param netdev network device
 * @param ifr interface request
 * @param cmd command
 */
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return hwtstamp_ioctl(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * \brief handle a Tx timestamp response
 * @param status response status
 * @param buf pointer to skb
 */
static void handle_timestamp(struct octeon_device *oct,
			     u32 status,
			     void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct octeon_soft_command *sc;
	struct oct_timestamp_resp *resp;
	struct lio *lio;
	struct sk_buff *skb = (struct sk_buff *)buf;

	finfo = (struct octnet_buf_free_info *)skb->cb;
	lio = finfo->lio;
	sc = finfo->sc;
	oct = lio->oct_dev;
	resp = (struct oct_timestamp_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_DONE) {
		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		resp->timestamp = 0;
	}

	octeon_swap_8B_data(&resp->timestamp, 1);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
		skb_tstamp_tx(skb, &ts);
	}

	octeon_free_soft_command(oct, sc);
	tx_buffer_free(skb);
}

/* \brief Send a data packet that will be timestamped
 * @param oct octeon device
 * @param ndata pointer to network data
 * @param finfo pointer to private network data
 */
static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
					 struct octnic_data_pkt *ndata,
					 struct octnet_buf_free_info *finfo)
{
	int retval;
	struct octeon_soft_command *sc;
	struct lio *lio;
	int ring_doorbell;
	u32 len;

	lio = finfo->lio;

	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
					    sizeof(struct oct_timestamp_resp));
	finfo->sc = sc;

	if (!sc) {
		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
		return IQ_SEND_FAILED;
	}

	if (ndata->reqtype == REQTYPE_NORESP_NET)
		ndata->reqtype = REQTYPE_RESP_NET;
	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
		ndata->reqtype = REQTYPE_RESP_NET_SG;

	sc->callback = handle_timestamp;
	sc->callback_arg = finfo->skb;
	sc->iq_no = ndata->q_no;

	if (OCTEON_CN23XX_PF(oct))
		len = (u32)((struct octeon_instr_ih3 *)
			    (&sc->cmd.cmd3.ih3))->dlengsz;
	else
		len = (u32)((struct octeon_instr_ih2 *)
			    (&sc->cmd.cmd2.ih2))->dlengsz;

	ring_doorbell = 1;

	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
				     sc, len, ndata->reqtype);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
			retval);
		octeon_free_soft_command(oct, sc);
	} else {
		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
	}

	return retval;
}

/** \brief Transmit network packets to the Octeon interface
 * @param skb sk_buff struct handed down from the network layer.
3104 * @param netdev pointer to network device
3105 * @returns whether the packet was transmitted to the device okay or not
3106 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
3107 */
3108static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
3109{
3110 struct lio *lio;
3111 struct octnet_buf_free_info *finfo;
3112 union octnic_cmd_setup cmdsetup;
3113 struct octnic_data_pkt ndata;
3114 struct octeon_device *oct;
3115 struct oct_iq_stats *stats;
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07003116 struct octeon_instr_irh *irh;
3117 union tx_info *tx_info;
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07003118 int status = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003119 int q_idx = 0, iq_no = 0;
Raghu Vatsavayi32581242016-08-31 11:03:20 -07003120 int j;
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07003121 u64 dptr = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003122 u32 tag = 0;
3123
3124 lio = GET_LIO(netdev);
3125 oct = lio->oct_dev;
3126
3127 if (netif_is_multiqueue(netdev)) {
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07003128 q_idx = skb->queue_mapping;
3129 q_idx = (q_idx % (lio->linfo.num_txpciq));
3130 tag = q_idx;
3131 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003132 } else {
3133 iq_no = lio->txq;
3134 }
3135
3136 stats = &oct->instr_queue[iq_no]->stats;
3137
3138 /* Check for all conditions in which the current packet cannot be
3139 * transmitted.
3140 */
3141 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003142 (!lio->linfo.link.s.link_up) ||
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003143 (skb->len <= 0)) {
3144 netif_info(lio, tx_err, lio->netdev,
3145 "Transmit failed link_status : %d\n",
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003146 lio->linfo.link.s.link_up);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003147 goto lio_xmit_failed;
3148 }
3149
3150 /* Use space in skb->cb to store info used to unmap and
3151 * free the buffers.
3152 */
3153 finfo = (struct octnet_buf_free_info *)skb->cb;
3154 finfo->lio = lio;
3155 finfo->skb = skb;
3156 finfo->sc = NULL;
3157
3158 /* Prepare the attributes for the data to be passed to OSI. */
3159 memset(&ndata, 0, sizeof(struct octnic_data_pkt));
3160
3161 ndata.buf = (void *)finfo;
3162
3163 ndata.q_no = iq_no;
3164
3165 if (netif_is_multiqueue(netdev)) {
3166 if (octnet_iq_is_full(oct, ndata.q_no)) {
3167 /* defer sending if queue is full */
3168 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
3169 ndata.q_no);
3170 stats->tx_iq_busy++;
3171 return NETDEV_TX_BUSY;
3172 }
3173 } else {
3174 if (octnet_iq_is_full(oct, lio->txq)) {
3175 /* defer sending if queue is full */
3176 stats->tx_iq_busy++;
3177 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
Raghu Vatsavayia2c64b62016-07-03 13:56:55 -07003178 lio->txq);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003179 return NETDEV_TX_BUSY;
3180 }
3181 }
3182 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
Raghu Vatsavayia2c64b62016-07-03 13:56:55 -07003183 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003184 */
3185
3186 ndata.datasize = skb->len;
3187
3188 cmdsetup.u64 = 0;
Raghu Vatsavayi7275ebf2016-06-14 16:54:49 -07003189 cmdsetup.s.iq_no = iq_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003190
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07003191 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3192 if (skb->encapsulation) {
3193 cmdsetup.s.tnl_csum = 1;
3194 stats->tx_vxlan++;
3195 } else {
3196 cmdsetup.s.transport_csum = 1;
3197 }
3198 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003199 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3200 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3201 cmdsetup.s.timestamp = 1;
3202 }
3203
3204 if (skb_shinfo(skb)->nr_frags == 0) {
3205 cmdsetup.s.u.datasize = skb->len;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003206 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
Raghu Vatsavayia2c64b62016-07-03 13:56:55 -07003207
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003208 /* Offload checksum calculation for TCP/UDP packets */
		dptr = dma_map_single(&oct->pci_dev->dev,
				      skb->data,
				      skb->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
				__func__);
			return NETDEV_TX_BUSY;
		}

		if (OCTEON_CN23XX_PF(oct))
			ndata.cmd.cmd3.dptr = dptr;
		else
			ndata.cmd.cmd2.dptr = dptr;
		finfo->dptr = dptr;
		ndata.reqtype = REQTYPE_NORESP_NET;

	} else {
		int i, frags;
		struct skb_frag_struct *frag;
		struct octnic_gather *g;

		spin_lock(&lio->glist_lock[q_idx]);
		g = (struct octnic_gather *)
			list_delete_head(&lio->glist[q_idx]);
		spin_unlock(&lio->glist_lock[q_idx]);

		if (!g) {
			netif_info(lio, tx_err, lio->netdev,
				   "Transmit scatter gather: glist null!\n");
			goto lio_xmit_failed;
		}

		cmdsetup.s.gather = 1;
		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		memset(g->sg, 0, g->sg_size);

		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
						 skb->data,
						 (skb->len - skb->data_len),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
				__func__);
			return NETDEV_TX_BUSY;
		}
		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);

		frags = skb_shinfo(skb)->nr_frags;
		i = 1;
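		/* Each octeon sg entry holds four pointer/size slots, so
		 * pointer i lands in entry (i >> 2), slot (i & 3).
		 */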
		while (frags--) {
			frag = &skb_shinfo(skb)->frags[i - 1];

			g->sg[(i >> 2)].ptr[(i & 3)] =
				dma_map_page(&oct->pci_dev->dev,
					     frag->page.p,
					     frag->page_offset,
					     frag->size,
					     DMA_TO_DEVICE);

			if (dma_mapping_error(&oct->pci_dev->dev,
					      g->sg[i >> 2].ptr[i & 3])) {
				dma_unmap_single(&oct->pci_dev->dev,
						 g->sg[0].ptr[0],
						 skb->len - skb->data_len,
						 DMA_TO_DEVICE);
				for (j = 1; j < i; j++) {
					frag = &skb_shinfo(skb)->frags[j - 1];
					dma_unmap_page(&oct->pci_dev->dev,
						       g->sg[j >> 2].ptr[j & 3],
						       frag->size,
						       DMA_TO_DEVICE);
				}
				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
					__func__);
				return NETDEV_TX_BUSY;
			}

			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
			i++;
		}

		dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
					   g->sg_size, DMA_TO_DEVICE);
		dptr = g->sg_dma_ptr;

		if (OCTEON_CN23XX_PF(oct))
			ndata.cmd.cmd3.dptr = dptr;
		else
			ndata.cmd.cmd2.dptr = dptr;
		finfo->dptr = dptr;
		finfo->g = g;

		ndata.reqtype = REQTYPE_NORESP_NET_SG;
	}

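	/* CN23XX parts use the cmd3 instruction format; older chips use
	 * cmd2. Pick the matching header and out-of-band tx_info words.
	 */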
	if (OCTEON_CN23XX_PF(oct)) {
		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
		tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
	} else {
		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
		tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
	}

	if (skb_shinfo(skb)->gso_size) {
		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
		stats->tx_gso++;
	}

	/* HW insert VLAN tag */
	if (skb_vlan_tag_present(skb)) {
		irh->priority = skb_vlan_tag_get(skb) >> 13;
		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
	}

	if (unlikely(cmdsetup.s.timestamp))
		status = send_nic_timestamp_pkt(oct, &ndata, finfo);
	else
		status = octnet_send_nic_data_pkt(oct, &ndata);
	if (status == IQ_SEND_FAILED)
		goto lio_xmit_failed;

	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");

	if (status == IQ_SEND_STOP)
		stop_q(lio->netdev, q_idx);

	netif_trans_update(netdev);

	if (skb_shinfo(skb)->gso_size)
		stats->tx_done += skb_shinfo(skb)->gso_segs;
	else
		stats->tx_done++;
	stats->tx_tot_bytes += skb->len;

	return NETDEV_TX_OK;

lio_xmit_failed:
	stats->tx_dropped++;
	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
		   iq_no, stats->tx_dropped);
	if (dptr)
		dma_unmap_single(&oct->pci_dev->dev, dptr,
				 ndata.datasize, DMA_TO_DEVICE);
	tx_buffer_free(skb);
	return NETDEV_TX_OK;
}

/** \brief Network device Tx timeout
 * @param netdev pointer to network device
 */
static void liquidio_tx_timeout(struct net_device *netdev)
{
	struct lio *lio;

	lio = GET_LIO(netdev);

	netif_info(lio, tx_err, lio->netdev,
		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
		   netdev->stats.tx_dropped);
	netif_trans_update(netdev);
	txqs_wake(netdev);
}

static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
				    __be16 proto __attribute__((unused)),
				    u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
			ret);
	}

	return ret;
}

static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
				     __be16 proto __attribute__((unused)),
				     u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
			ret);
	}
	return ret;
}

/** Sending command to enable/disable RX checksum offload
 * @param netdev pointer to network device
 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
 * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE/
 * OCTNET_CMD_RXCSUM_DISABLE
 * @returns SUCCESS or FAILURE
 */
static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
				       u8 rx_cmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.param1 = rx_cmd;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev,
			"DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
			ret);
	}
	return ret;
}

/** Sending command to add/delete VxLAN UDP port to firmware
 * @param netdev pointer to network device
 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
 * @param vxlan_port VxLAN port to be added or deleted
 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
 * OCTNET_CMD_VXLAN_PORT_DEL
 * @returns SUCCESS or FAILURE
 */
static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
				       u16 vxlan_port, u8 vxlan_cmd_bit)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.more = vxlan_cmd_bit;
	nctrl.ncmd.s.param1 = vxlan_port;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev,
			"VxLAN port add/delete failed in core (ret:0x%x)\n",
			ret);
	}
	return ret;
}

/** \brief Net device fix features
 * @param netdev pointer to network device
 * @param request features requested
 * @returns updated features list
 */
static netdev_features_t liquidio_fix_features(struct net_device *netdev,
					       netdev_features_t request)
{
	struct lio *lio = netdev_priv(netdev);

	if ((request & NETIF_F_RXCSUM) &&
	    !(lio->dev_capability & NETIF_F_RXCSUM))
		request &= ~NETIF_F_RXCSUM;

	if ((request & NETIF_F_HW_CSUM) &&
	    !(lio->dev_capability & NETIF_F_HW_CSUM))
		request &= ~NETIF_F_HW_CSUM;

	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
		request &= ~NETIF_F_TSO;

	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
		request &= ~NETIF_F_TSO6;

	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	/* Disable LRO if RXCSUM is off */
	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
	    (lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	return request;
}

/** \brief Net device set features
 * @param netdev pointer to network device
 * @param features features to enable/disable
 */
static int liquidio_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	struct lio *lio = netdev_priv(netdev);

	if (!((netdev->features ^ features) & NETIF_F_LRO))
		return 0;

	if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
	else if (!(features & NETIF_F_LRO) &&
		 (lio->dev_capability & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

	/* Sending command to firmware to enable/disable RX checksum
	 * offload settings using ethtool
	 */
	if (!(netdev->features & NETIF_F_RXCSUM) &&
	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
	    (features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev,
					    OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
	else if ((netdev->features & NETIF_F_RXCSUM) &&
		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
		 !(features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_DISABLE);

	return 0;
}

static void liquidio_add_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_ADD);
}

static void liquidio_del_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_DEL);
}

static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
				 u8 *mac, bool is_admin_assigned)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;

	if (!is_valid_ether_addr(mac))
		return -EINVAL;

	if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
		return -EINVAL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	/* vfidx is 0 based, but vf_num (param1) is 1 based */
	nctrl.ncmd.s.param1 = vfidx + 1;
	nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.cb_fn = 0;
	nctrl.wait_time = LIO_CMD_WAIT_TM;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);

	oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];

	octnet_send_nic_ctrl_pkt(oct, &nctrl);

	return 0;
}

static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int retval;

	retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
	if (!retval)
		cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);

	return retval;
}

static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
				u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	u16 vlantci;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (vlan >= VLAN_N_VID || qos > 7)
		return -EINVAL;

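	/* Compose the 802.1Q TCI: 12-bit VID with the 3-bit priority (qos)
	 * in the upper bits.
	 */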
	if (vlan)
		vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
	else
		vlantci = 0;

	if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
		return 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	if (vlan)
		nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
	else
		nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;

	nctrl.ncmd.s.param1 = vlantci;
	nctrl.ncmd.s.param2 =
	    vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
	nctrl.ncmd.s.more = 0;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.cb_fn = 0;
	nctrl.wait_time = LIO_CMD_WAIT_TM;

	octnet_send_nic_ctrl_pkt(oct, &nctrl);

	oct->sriov_info.vf_vlantci[vfidx] = vlantci;

	return 0;
}

static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
				  struct ifla_vf_info *ivi)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u8 *macaddr;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	ivi->vf = vfidx;
	macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
	ether_addr_copy(&ivi->mac[0], macaddr);
	ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
	ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
	ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
	return 0;
}

static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
				      int linkstate)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
		return 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
	nctrl.ncmd.s.param1 =
	    vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
	nctrl.ncmd.s.param2 = linkstate;
	nctrl.ncmd.s.more = 0;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.cb_fn = 0;
	nctrl.wait_time = LIO_CMD_WAIT_TM;

	octnet_send_nic_ctrl_pkt(oct, &nctrl);

	oct->sriov_info.vf_linkstate[vfidx] = linkstate;

	return 0;
}

static struct net_device_ops lionetdevops = {
	.ndo_open		= liquidio_open,
	.ndo_stop		= liquidio_stop,
	.ndo_start_xmit		= liquidio_xmit,
	.ndo_get_stats		= liquidio_get_stats,
	.ndo_set_mac_address	= liquidio_set_mac,
	.ndo_set_rx_mode	= liquidio_set_mcast_list,
	.ndo_tx_timeout		= liquidio_tx_timeout,

	.ndo_vlan_rx_add_vid	= liquidio_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= liquidio_vlan_rx_kill_vid,
	.ndo_change_mtu		= liquidio_change_mtu,
	.ndo_do_ioctl		= liquidio_ioctl,
	.ndo_fix_features	= liquidio_fix_features,
	.ndo_set_features	= liquidio_set_features,
	.ndo_udp_tunnel_add	= liquidio_add_vxlan_port,
	.ndo_udp_tunnel_del	= liquidio_del_vxlan_port,
	.ndo_set_vf_mac		= liquidio_set_vf_mac,
	.ndo_set_vf_vlan	= liquidio_set_vf_vlan,
	.ndo_get_vf_config	= liquidio_get_vf_config,
	.ndo_set_vf_link_state	= liquidio_set_vf_link_state,
};

/** \brief Entry point for the liquidio module
 */
static int __init liquidio_init(void)
{
	int i;
	struct handshake *hs;

	init_completion(&first_stage);

	octeon_init_device_list(conf_type);

	if (liquidio_init_pci())
		return -EINVAL;

	wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));

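	/* First pass: wait (without a timeout) for every probed device to
	 * finish its init handshake.
	 */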
	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
		hs = &handshake[i];
		if (hs->pci_dev) {
			wait_for_completion(&hs->init);
			if (!hs->init_ok) {
				/* init handshake failed */
				dev_err(&hs->pci_dev->dev,
					"Failed to init device\n");
				liquidio_deinit_pci();
				return -EIO;
			}
		}
	}

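	/* Second pass: give the firmware on each device up to 30 seconds
	 * to report that it has started.
	 */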
	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
		hs = &handshake[i];
		if (hs->pci_dev) {
			wait_for_completion_timeout(&hs->started,
						    msecs_to_jiffies(30000));
			if (!hs->started_ok) {
				/* starter handshake failed */
				dev_err(&hs->pci_dev->dev,
					"Firmware failed to start\n");
				liquidio_deinit_pci();
				return -EIO;
			}
		}
	}

	return 0;
}

static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	int gmxport = 0;
	union oct_link_status *ls;
	int i;

	if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
			recv_pkt->buffer_size[0],
			recv_pkt->rh.r_nic_info.gmxport);
		goto nic_info_err;
	}

	gmxport = recv_pkt->rh.r_nic_info.gmxport;
	ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);

	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
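	/* Find the interface attached to this GMX port and update its
	 * link state.
	 */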
	for (i = 0; i < oct->ifcount; i++) {
		if (oct->props[i].gmxport == gmxport) {
			update_link_status(oct->props[i].netdev, ls);
			break;
		}
	}

nic_info_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);
	return 0;
}

/**
 * \brief Setup network interfaces
 * @param octeon_dev octeon device
 *
 * Called during init time for each device. It assumes the NIC
 * is already up and running. The link information for each
 * interface is passed in link_info.
 */
static int setup_nic_devices(struct octeon_device *octeon_dev)
{
	struct lio *lio = NULL;
	struct net_device *netdev;
	u8 mac[6], i, j;
	struct octeon_soft_command *sc;
	struct liquidio_if_cfg_context *ctx;
	struct liquidio_if_cfg_resp *resp;
	struct octdev_props *props;
	int retval, num_iqueues, num_oqueues;
	union oct_nic_if_cfg if_cfg;
	unsigned int base_queue;
	unsigned int gmx_port_id;
	u32 resp_size, ctx_size, data_size;
	u32 ifidx_or_pfnum;
	struct lio_version *vdata;

	/* This is to handle link status changes */
	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_INFO,
				    lio_nic_info, octeon_dev);

	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
	 * They are handled directly.
	 */
	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
					free_netbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
					free_netsgbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
					free_netsgbuf_with_resp);

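	/* Bring up one network interface for each port the firmware
	 * reports on this device.
	 */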
	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct liquidio_if_cfg_resp);
		ctx_size = sizeof(struct liquidio_if_cfg_context);
		data_size = sizeof(struct lio_version);
		sc = (struct octeon_soft_command *)
			octeon_alloc_soft_command(octeon_dev, data_size,
						  resp_size, ctx_size);
		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
		ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
		vdata = (struct lio_version *)sc->virtdptr;

		*((u64 *)vdata) = 0;
		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

		if (OCTEON_CN23XX_PF(octeon_dev)) {
			num_iqueues = octeon_dev->sriov_info.num_pf_rings;
			num_oqueues = octeon_dev->sriov_info.num_pf_rings;
			base_queue = octeon_dev->sriov_info.pf_srn;

			gmx_port_id = octeon_dev->pf_num;
			ifidx_or_pfnum = octeon_dev->pf_num;
		} else {
			num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			base_queue = CFG_GET_BASE_QUE_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			gmx_port_id = CFG_GET_GMXID_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			ifidx_or_pfnum = i;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"requesting config for interface %d, iqs %d, oqs %d\n",
			ifidx_or_pfnum, num_iqueues, num_oqueues);
		WRITE_ONCE(ctx->cond, 0);
		ctx->octeon_id = lio_get_device_id(octeon_dev);
		init_waitqueue_head(&ctx->wc);

		if_cfg.u64 = 0;
		if_cfg.s.num_iqueues = num_iqueues;
		if_cfg.s.num_oqueues = num_oqueues;
		if_cfg.s.base_queue = base_queue;
		if_cfg.s.gmx_port_id = gmx_port_id;

		sc->iq_no = 0;

		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
					    OPCODE_NIC_IF_CFG, 0,
					    if_cfg.u64, 0);

		sc->callback = if_cfg_callback;
		sc->callback_arg = sc;
		sc->wait_time = 3000;

		retval = octeon_send_soft_command(octeon_dev, sc);
		if (retval == IQ_SEND_FAILED) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed status: %x\n",
				retval);
			/* Soft instr is freed by driver in case of failure. */
			goto setup_nic_dev_fail;
		}

		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
			dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
			goto setup_nic_wait_intr;
		}

		retval = resp->status;
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
			goto setup_nic_dev_fail;
		}

		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
				    (sizeof(struct liquidio_if_cfg_info)) >> 3);

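		/* The response carries bitmasks of the input/output queues
		 * the firmware actually allocated for this interface.
		 */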
		num_iqueues = hweight64(resp->cfg_info.iqmask);
		num_oqueues = hweight64(resp->cfg_info.oqmask);

		if (!(num_iqueues) || !(num_oqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
				resp->cfg_info.iqmask,
				resp->cfg_info.oqmask);
			goto setup_nic_dev_fail;
		}
		dev_dbg(&octeon_dev->pci_dev->dev,
			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
			num_iqueues, num_oqueues);
		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);

		if (!netdev) {
			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
			goto setup_nic_dev_fail;
		}

		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);

		if (num_iqueues > 1)
			lionetdevops.ndo_select_queue = select_q;

		/* Associate the routines that will handle different
		 * netdev tasks.
		 */
		netdev->netdev_ops = &lionetdevops;

		lio = GET_LIO(netdev);

		memset(lio, 0, sizeof(struct lio));

		lio->ifidx = ifidx_or_pfnum;

		props = &octeon_dev->props[i];
		props->gmxport = resp->cfg_info.linfo.gmxport;
		props->netdev = netdev;

		lio->linfo.num_rxpciq = num_oqueues;
		lio->linfo.num_txpciq = num_iqueues;
		for (j = 0; j < num_oqueues; j++) {
			lio->linfo.rxpciq[j].u64 =
				resp->cfg_info.linfo.rxpciq[j].u64;
		}
		for (j = 0; j < num_iqueues; j++) {
			lio->linfo.txpciq[j].u64 =
				resp->cfg_info.linfo.txpciq[j].u64;
		}
		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;

		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

		if (OCTEON_CN23XX_PF(octeon_dev) ||
		    OCTEON_CN6XXX(octeon_dev)) {
			lio->dev_capability = NETIF_F_HIGHDMA
					      | NETIF_F_IP_CSUM
					      | NETIF_F_IPV6_CSUM
					      | NETIF_F_SG | NETIF_F_RXCSUM
					      | NETIF_F_GRO
					      | NETIF_F_TSO | NETIF_F_TSO6
					      | NETIF_F_LRO;
		}
		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);

		/* Copy of transmit encapsulation capabilities:
		 * TSO, TSO6, Checksums for this device
		 */
		lio->enc_dev_capability = NETIF_F_IP_CSUM
					  | NETIF_F_IPV6_CSUM
					  | NETIF_F_GSO_UDP_TUNNEL
					  | NETIF_F_HW_CSUM | NETIF_F_SG
					  | NETIF_F_RXCSUM
					  | NETIF_F_TSO | NETIF_F_TSO6
					  | NETIF_F_LRO;

		netdev->hw_enc_features = (lio->enc_dev_capability &
					   ~NETIF_F_LRO);

		lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;

		netdev->vlan_features = lio->dev_capability;
		/* Add any unchangeable hw features */
		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
				       NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_CTAG_TX;

		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);

		netdev->hw_features = lio->dev_capability;
		/* HW_VLAN_RX and HW_VLAN_FILTER are always on */
		netdev->hw_features = netdev->hw_features &
				      ~NETIF_F_HW_VLAN_CTAG_RX;

		/* MTU range: 68 - 16000 */
		netdev->min_mtu = LIO_MIN_MTU_SIZE;
		netdev->max_mtu = LIO_MAX_MTU_SIZE;

		/* Point to the properties for octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->octprops = props;
		lio->netdev = netdev;

		dev_dbg(&octeon_dev->pci_dev->dev,
			"if%d gmx: %d hw_addr: 0x%llx\n", i,
			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));

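		/* Seed every VF with a random MAC for now; the administrator
		 * can override it later through ndo_set_vf_mac.
		 */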
		for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
			u8 vfmac[ETH_ALEN];

			random_ether_addr(&vfmac[0]);
			if (__liquidio_set_vf_mac(netdev, j,
						  &vfmac[0], false)) {
				dev_err(&octeon_dev->pci_dev->dev,
					"Error setting VF%d MAC address\n",
					j);
				goto setup_nic_dev_fail;
			}
		}

		/* 64-bit swap required on LE machines */
		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
		for (j = 0; j < 6; j++)
			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));

		/* Copy MAC Address to OS network device structure */

		ether_addr_copy(netdev->dev_addr, mac);

		/* By default all interfaces on a single Octeon use the same
		 * tx and rx queues
		 */
		lio->txq = lio->linfo.txpciq[0].s.q_no;
		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
		if (setup_io_queues(octeon_dev, i)) {
			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
			goto setup_nic_dev_fail;
		}

		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);

		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);

		if (setup_glists(octeon_dev, lio, num_iqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Gather list allocation failed\n");
			goto setup_nic_dev_fail;
		}

		/* Register ethtool support */
		liquidio_set_ethtool_ops(netdev);
		if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
		else
			octeon_dev->priv_flags = 0x0;

		if (netdev->features & NETIF_F_LRO)
			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

		liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);

		if ((debug != -1) && (debug & NETIF_MSG_HW))
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE, 0);

		if (setup_link_status_change_wq(netdev))
			goto setup_nic_dev_fail;

		/* Register the network device with the OS */
		if (register_netdev(netdev)) {
			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
			goto setup_nic_dev_fail;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		netif_carrier_off(netdev);
		lio->link_changes++;

		ifstate_set(lio, LIO_IFSTATE_REGISTERED);

		/* Sending command to firmware to enable Rx checksum offload
		 * by default at the time of setup of Liquidio driver for
		 * this device
		 */
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
				     OCTNET_CMD_TXCSUM_ENABLE);

		dev_dbg(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup successful\n", i);

		octeon_free_soft_command(octeon_dev, sc);
	}

	return 0;

setup_nic_dev_fail:

	octeon_free_soft_command(octeon_dev, sc);

setup_nic_wait_intr:

	while (i--) {
		dev_err(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup failed\n", i);
		liquidio_destroy_nic_device(octeon_dev, i);
	}
	return -ENODEV;
}

#ifdef CONFIG_PCI_IOV
static int octeon_enable_sriov(struct octeon_device *oct)
{
	unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
	struct pci_dev *vfdev;
	int err;
	u32 u;

	if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
		err = pci_enable_sriov(oct->pci_dev,
				       oct->sriov_info.num_vfs_alloced);
		if (err) {
			dev_err(&oct->pci_dev->dev,
				"OCTEON: Failed to enable PCI sriov: %d\n",
				err);
			oct->sriov_info.num_vfs_alloced = 0;
			return err;
		}
		oct->sriov_info.sriov_enabled = 1;

		/* init lookup table that maps DPI ring number to VF pci_dev
		 * struct pointer
		 */
		u = 0;
		vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
				       OCTEON_CN23XX_VF_VID, NULL);
		while (vfdev) {
			if (vfdev->is_virtfn &&
			    (vfdev->physfn == oct->pci_dev)) {
				oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
					vfdev;
				u += oct->sriov_info.rings_per_vf;
			}
			vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
					       OCTEON_CN23XX_VF_VID, vfdev);
		}
	}

	return num_vfs_alloced;
}

static int lio_pci_sriov_disable(struct octeon_device *oct)
{
	int u;

	if (pci_vfs_assigned(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
		return -EPERM;
	}

	pci_disable_sriov(oct->pci_dev);

	u = 0;
	while (u < MAX_POSSIBLE_VFS) {
		oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
		u += oct->sriov_info.rings_per_vf;
	}

	oct->sriov_info.num_vfs_alloced = 0;
	dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
		 oct->pf_num);

	return 0;
}

static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
{
	struct octeon_device *oct = pci_get_drvdata(dev);
	int ret = 0;

	if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
	    (oct->sriov_info.sriov_enabled)) {
		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
			 oct->pf_num, num_vfs);
		return 0;
	}

	if (!num_vfs) {
		ret = lio_pci_sriov_disable(oct);
	} else if (num_vfs > oct->sriov_info.max_vfs) {
		dev_err(&oct->pci_dev->dev,
			"OCTEON: Max allowed VFs:%d user requested:%d\n",
			oct->sriov_info.max_vfs, num_vfs);
		ret = -EPERM;
	} else {
		oct->sriov_info.num_vfs_alloced = num_vfs;
		ret = octeon_enable_sriov(oct);
		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
			 oct->pf_num, num_vfs);
	}

	return ret;
}
#endif

/**
 * \brief initialize the NIC
 * @param oct octeon device
 *
 * This initialization routine is called once the Octeon device application is
 * up and running
 */
static int liquidio_init_nic_module(struct octeon_device *oct)
{
	struct oct_intrmod_cfg *intrmod_cfg;
	int i, retval = 0;
	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));

	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");

	/* only default iq and oq were initialized
	 * initialize the rest as well
	 */
	/* run port_config command for each port */
	oct->ifcount = num_nic_ports;

	memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);

	for (i = 0; i < MAX_OCTEON_LINKS; i++)
		oct->props[i].gmxport = -1;

	retval = setup_nic_devices(oct);
	if (retval) {
		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
		goto octnet_init_failure;
	}

	liquidio_ptp_init(oct);

	/* Initialize interrupt moderation params */
	intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
	intrmod_cfg->rx_enable = 1;
	intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
	intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
	intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
	intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
	intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
	intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
	intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
	intrmod_cfg->tx_enable = 1;
	intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
	intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
	intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
	intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
	intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");

	return retval;

octnet_init_failure:

	oct->ifcount = 0;

	return retval;
}

/**
 * \brief starter callback that invokes the remaining initialization work after
 * the NIC is up and running.
 * @param work work struct that wraps the octeon device context
 */
static void nic_starter(struct work_struct *work)
{
	struct octeon_device *oct;
	struct cavium_wk *wk = (struct cavium_wk *)work;

	oct = (struct octeon_device *)wk->ctxptr;

	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
		return;

	/* If the status of the device is CORE_OK, the core
	 * application has reported its application type. Call
	 * any registered handlers now and move to the RUNNING
	 * state.
	 */
	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
		schedule_delayed_work(&oct->nic_poll_work.work,
				      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
		return;
	}

	atomic_set(&oct->status, OCT_DEV_RUNNING);

	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");

		if (liquidio_init_nic_module(oct))
			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
		else
			handshake[oct->octeon_id].started_ok = 1;
	} else {
		dev_err(&oct->pci_dev->dev,
			"Unexpected application running on NIC (%d). Check firmware.\n",
			oct->app_mode);
	}

	complete(&handshake[oct->octeon_id].started);
}

static int
octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	int i, notice, vf_idx;
	u64 *data, vf_num;

	notice = recv_pkt->rh.r.ossp;
	data = (u64 *)get_rbd(recv_pkt->buffer_ptr[0]);

	/* the first 64-bit word of data is the vf_num */
	vf_num = data[0];
	octeon_swap_8B_data(&vf_num, 1);
	vf_idx = (int)vf_num - 1;

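	/* A loaded VF driver pins this PF module (try_module_get below);
	 * the reference is dropped when the VF driver goes away.
	 */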
	if (notice == VF_DRV_LOADED) {
		if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
			oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
			dev_info(&oct->pci_dev->dev,
				 "driver for VF%d was loaded\n", vf_idx);
			try_module_get(THIS_MODULE);
		}
	} else if (notice == VF_DRV_REMOVED) {
		if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
			oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
			dev_info(&oct->pci_dev->dev,
				 "driver for VF%d was removed\n", vf_idx);
			module_put(THIS_MODULE);
		}
	} else if (notice == VF_DRV_MACADDR_CHANGED) {
		u8 *b = (u8 *)&data[1];

		oct->sriov_info.vf_macaddr[vf_idx] = data[1];
		dev_info(&oct->pci_dev->dev,
			 "VF driver changed VF%d's MAC address to %pM\n",
			 vf_idx, b + 2);
	}

	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);

	return 0;
}

/**
 * \brief Device initialization for each Octeon device that is probed
 * @param octeon_dev octeon device
 */
static int octeon_device_init(struct octeon_device *octeon_dev)
{
	int j, ret;
	int fw_loaded = 0;
	char bootcmd[] = "\n";
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)octeon_dev->priv;

	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
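
	/* Each successful stage below advances octeon_dev->status, so the
	 * teardown path can undo exactly the stages that completed.
	 */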

	/* Enable access to the octeon device and make its DMA capability
	 * known to the OS.
	 */
	if (octeon_pci_os_setup(octeon_dev))
		return 1;

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);

	/* Identify the Octeon type and map the BAR address space. */
	if (octeon_chip_specific_setup(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);

	octeon_dev->app_mode = CVM_DRV_INVALID_APP;

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		if (!cn23xx_fw_loaded(octeon_dev)) {
			fw_loaded = 0;
			/* Do a soft reset of the Octeon device. */
			if (octeon_dev->fn_list.soft_reset(octeon_dev))
				return 1;
			/* things might have changed */
			if (!cn23xx_fw_loaded(octeon_dev))
				fw_loaded = 0;
			else
				fw_loaded = 1;
		} else {
			fw_loaded = 1;
		}
	} else if (octeon_dev->fn_list.soft_reset(octeon_dev)) {
		return 1;
	}

	/* Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (octeon_init_dispatch_list(octeon_dev))
		return 1;

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_CORE_DRV_ACTIVE,
				    octeon_core_drv_init,
				    octeon_dev);

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_VF_DRV_NOTICE,
				    octeon_recv_vf_drv_notice, octeon_dev);
	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
			      LIQUIDIO_STARTER_POLL_INTERVAL_MS);

	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);

	octeon_set_io_queues_off(octeon_dev);

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize soft command buffer pool */
	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (octeon_setup_instr_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev,
			"instruction queue initialization failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
			return 1;
		}
		atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);

		if (octeon_allocate_ioq_vector(octeon_dev)) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
			return 1;
		}
		atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);

	} else {
		/* The input and output queue registers were setup earlier (the
		 * queues were not enabled). Any additional registers
		 * that need to be programmed should be done now.
		 */
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize the tasklet that handles output queue packet processing. */
	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
	tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
		     (unsigned long)octeon_dev);

	/* Setup the interrupt handler and record the INT SUM register address
	 */
	if (octeon_setup_interrupt(octeon_dev))
		return 1;

	/* Enable Octeon device interrupts */
	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);

	atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);

	/* Enable the input and output queues for this Octeon device */
	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
		return ret;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);

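	/* Firmware is not already running (or this is not a CN23XX with
	 * preloaded firmware): wait for DDR init, take over the bootloader
	 * console and push the firmware image to the board.
	 */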
	if ((!OCTEON_CN23XX_PF(octeon_dev)) || !fw_loaded) {
		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
		if (!ddr_timeout) {
			dev_info(&octeon_dev->pci_dev->dev,
				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
		}

		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);

		/* Wait for the octeon to initialize DDR after the soft-reset. */
		while (!ddr_timeout) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (schedule_timeout(HZ / 10)) {
				/* user probably pressed Control-C */
				return 1;
			}
		}
		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
				ret);
			return 1;
		}

		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
			return 1;
		}

		/* Divert uboot to take commands from host instead. */
		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);

		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
		ret = octeon_init_consoles(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
			return 1;
		}
		ret = octeon_add_console(octeon_dev, 0);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
			return 1;
		}

		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);

		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
		ret = load_firmware(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
			return 1;
		}
		/* set bit 1 of SLI_SCRATCH_1 to indicate that firmware is
		 * loaded
		 */
		if (OCTEON_CN23XX_PF(octeon_dev))
			octeon_write_csr64(octeon_dev, CN23XX_SLI_SCRATCH1,
					   2ULL);
	}

	handshake[octeon_dev->octeon_id].init_ok = 1;
	complete(&handshake[octeon_dev->octeon_id].init);

	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);

	/* Send Credit for Octeon Output queues. Credits are always sent after
	 * the output queue is enabled.
	 */
	for (j = 0; j < octeon_dev->num_oqs; j++)
		writel(octeon_dev->droq[j]->max_count,
		       octeon_dev->droq[j]->pkts_credit_reg);

	/* Packets can start arriving on the output queues from this point. */
	return 0;
}

/**
 * \brief Exits the module
 */
static void __exit liquidio_exit(void)
{
	liquidio_deinit_pci();

	pr_info("LiquidIO network module is now unloaded\n");
}

module_init(liquidio_init);
module_exit(liquidio_exit);