/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN];
module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");

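/* "debug" is a bitmask of NETIF_MSG_* values (-1 leaves the choice to the
 * driver, which presumably resolves it against DEFAULT_MSG_ENABLE via
 * netif_msg_init() elsewhere in this file).  "fw_type" is compared against
 * LIO_FW_NAME_TYPE_NONE in fw_type_is_none() below; when they match, the
 * teardown path skips the firmware soft reset.
 */
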
static int ptp_enable = 1;

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS		1000

struct liquidio_if_cfg_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct liquidio_if_cfg_resp {
	u64 rh;
	struct liquidio_if_cfg_info cfg_info;
	u64 status;
};

struct liquidio_rx_ctl_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

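/* The *_context structures above share one pattern used with soft commands:
 * they hold the device id, a wait queue and a completion flag ("cond").
 * The submitter sleeps on "wc" while the command's callback (for example
 * rx_ctl_callback() below) sets "cond" and wakes it up.
 */
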
struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};

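/* tx_info packs the TSO parameters of an outgoing skb (gso_size/gso_segs)
 * into a single 64-bit word; the endian-dependent layouts keep the field
 * positions identical when the word is handed to the firmware by the
 * transmit path (not part of this excerpt).
 */
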
/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_MAX_SG (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

/** Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
	/** List manipulation. Next and prev pointers. */
	struct list_head list;

	/** Size of the gather component at sg in bytes. */
	int sg_size;

	/** Number of bytes that sg was adjusted to make it 8B-aligned. */
	int adjust;

	/** Gather component that can accommodate max sized fragment list
	 * received from the IP layer.
	 */
	struct octeon_sg_entry *sg;

	dma_addr_t sg_dma_ptr;
};

struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};

struct octeon_device_priv {
	/** Tasklet structures for this device. */
	struct tasklet_struct droq_tasklet;
	unsigned long napi_mask;
};

#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
				      int linkstate);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;

static void octeon_droq_bh(unsigned long pdev)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device *oct = (struct octeon_device *)pdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
		lio_enable_irq(oct->droq[q_no], NULL);

		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
			/* set time and cnt interrupt thresholds for this DROQ
			 * for NAPI
			 */
			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
			    0x5700000040ULL);
			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
		}
	}

	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}

static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}

/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}

/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static int wait_for_pending_requests(struct octeon_device *oct)
{
	int i, pcount = 0;

	for (i = 0; i < 100; i++) {
		pcount =
			atomic_read(&oct->response_list
				[OCTEON_ORDERED_SC_LIST].pending_req_count);
		if (pcount)
			schedule_timeout_uninterruptible(HZ / 10);
		else
			break;
	}

	if (pcount)
		return 1;

	return 0;
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(100);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}

/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask; /* Clear corresponding nonfatal bits */
	else
		status &= mask; /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}

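/* pos = 0x100 hard-codes the usual location of the AER extended capability
 * at the start of extended config space; a more general lookup would be
 * pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR).
 */
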
/**
 * \brief Stop all PCI IO to a given device
 * @param dev Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery but only
	 * for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
	/* Nothing to be done here. */
}

#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
			    pm_message_t state __attribute__((unused)))
{
	return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
	return 0;
}
#endif

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled	= liquidio_pcie_mmio_enabled,
	.slot_reset	= liquidio_pcie_slot_reset,
	.resume		= liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
	{       /* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 23xx pf */
		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static struct pci_driver liquidio_pci_driver = {
	.name		= "LiquidIO",
	.id_table	= liquidio_pci_tbl,
	.probe		= liquidio_probe,
	.remove		= liquidio_remove,
	.err_handler	= &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
	.suspend	= liquidio_suspend,
	.resume		= liquidio_resume,
#endif
#ifdef CONFIG_PCI_IOV
	.sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void txqs_stop(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_stop_subqueue(netdev, i);
	} else {
		netif_stop_queue(netdev);
	}
}

/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void txqs_start(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_start_subqueue(netdev, i);
	} else {
		netif_start_queue(netdev);
	}
}

/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void txqs_wake(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++) {
			int qno = lio->linfo.txpciq[i %
				(lio->linfo.num_txpciq)].s.q_no;

			if (__netif_subqueue_stopped(netdev, i)) {
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
							  tx_restart, 1);
				netif_wake_subqueue(netdev, i);
			}
		}
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		netif_wake_queue(netdev);
	}
}

/**
 * \brief Stop Tx queue
 * @param netdev network device
 */
static void stop_txq(struct net_device *netdev)
{
	txqs_stop(netdev);
}

/**
 * \brief Start Tx queue
 * @param netdev network device
 */
static void start_txq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->linfo.link.s.link_up) {
		txqs_start(netdev);
		return;
	}
}

/**
 * \brief Wake a queue
 * @param netdev network device
 * @param q which queue to wake
 */
static inline void wake_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_wake_subqueue(netdev, q);
	else
		netif_wake_queue(netdev);
}

/**
 * \brief Stop a queue
 * @param netdev network device
 * @param q which queue to stop
 */
static inline void stop_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_stop_subqueue(netdev, q);
	else
		netif_stop_queue(netdev);
}

/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
	int ret_val = 0;

	if (netif_is_multiqueue(lio->netdev)) {
		int numqs = lio->netdev->num_tx_queues;
		int q, iq = 0;

		/* check each sub-queue state */
		for (q = 0; q < numqs; q++) {
			iq = lio->linfo.txpciq[q %
				(lio->linfo.num_txpciq)].s.q_no;
			if (octnet_iq_is_full(lio->oct_dev, iq))
				continue;
			if (__netif_subqueue_stopped(lio->netdev, q)) {
				wake_q(lio->netdev, q);
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
							  tx_restart, 1);
				ret_val++;
			}
		}
	} else {
		if (octnet_iq_is_full(lio->oct_dev, lio->txq))
			return 0;
		wake_q(lio->netdev, lio->txq);
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		ret_val = 1;
	}
	return ret_val;
}

/**
 * Remove the node at the head of the list. The list would be empty at
 * the end of this call if there are no more nodes in the list.
 */
static inline struct list_head *list_delete_head(struct list_head *root)
{
	struct list_head *node;

	if ((root->prev == root) && (root->next == root))
		node = NULL;
	else
		node = root->next;

	if (node)
		list_del(node);

	return node;
}

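/* The head test above, (root->prev == root) && (root->next == root), is just
 * the "list is empty" condition (equivalent to list_empty(root)); callers
 * loop until this helper returns NULL.
 */
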
/**
 * \brief Delete gather lists
 * @param lio per-network private data
 */
static void delete_glists(struct lio *lio)
{
	struct octnic_gather *g;
	int i;

	kfree(lio->glist_lock);
	lio->glist_lock = NULL;

	if (!lio->glist)
		return;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		do {
			g = (struct octnic_gather *)
				list_delete_head(&lio->glist[i]);
			if (g)
				kfree(g);
		} while (g);

		if (lio->glists_virt_base && lio->glists_virt_base[i] &&
		    lio->glists_dma_base && lio->glists_dma_base[i]) {
			lio_dma_free(lio->oct_dev,
				     lio->glist_entry_size * lio->tx_qsize,
				     lio->glists_virt_base[i],
				     lio->glists_dma_base[i]);
		}
	}

	kfree(lio->glists_virt_base);
	lio->glists_virt_base = NULL;

	kfree(lio->glists_dma_base);
	lio->glists_dma_base = NULL;

	kfree(lio->glist);
	lio->glist = NULL;
}

/**
 * \brief Setup gather lists
 * @param lio per-network private data
 */
static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	int i, j;
	struct octnic_gather *g;

	lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
				  GFP_KERNEL);
	if (!lio->glist_lock)
		return -ENOMEM;

	lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
			     GFP_KERNEL);
	if (!lio->glist) {
		kfree(lio->glist_lock);
		lio->glist_lock = NULL;
		return -ENOMEM;
	}

	lio->glist_entry_size =
		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

	/* allocate memory to store virtual and dma base address of
	 * per glist consistent memory
	 */
	lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
					GFP_KERNEL);
	lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
				       GFP_KERNEL);

	if (!lio->glists_virt_base || !lio->glists_dma_base) {
		delete_glists(lio);
		return -ENOMEM;
	}

	for (i = 0; i < num_iqs; i++) {
		int numa_node = dev_to_node(&oct->pci_dev->dev);

		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		lio->glists_virt_base[i] =
			lio_dma_alloc(oct,
				      lio->glist_entry_size * lio->tx_qsize,
				      &lio->glists_dma_base[i]);

		if (!lio->glists_virt_base[i]) {
			delete_glists(lio);
			return -ENOMEM;
		}

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
					 numa_node);
			if (!g)
				g = kzalloc(sizeof(*g), GFP_KERNEL);
			if (!g)
				break;

			g->sg = lio->glists_virt_base[i] +
				(j * lio->glist_entry_size);

			g->sg_dma_ptr = lio->glists_dma_base[i] +
					(j * lio->glist_entry_size);

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			delete_glists(lio);
			return -ENOMEM;
		}
	}

	return 0;
}

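/* setup_glists() reserves, per input queue, one DMA-coherent region of
 * (glist_entry_size * tx_qsize) bytes and carves it into tx_qsize
 * scatter/gather slots.  Each octnic_gather node records its slot's virtual
 * address (g->sg) and bus address (g->sg_dma_ptr) and sits on the per-queue
 * free list lio->glist[i], protected by lio->glist_lock[i].
 */
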
/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}

/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	rtnl_lock();
	call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
	rtnl_unlock();
}

/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}

static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);

	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			netif_carrier_on(netdev);
			txqs_wake(netdev);
		} else {
			netif_carrier_off(netdev);
			stop_txq(netdev);
		}
	}
}

/* Runs in interrupt context. */
static void update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct net_device *netdev;
	struct lio *lio;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];

	netdev = oct->props[iq->ifidx].netdev;

	/* This is needed because the first IQ does not have
	 * a netdev associated with it.
	 */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);
	if (netif_is_multiqueue(netdev)) {
		if (__netif_subqueue_stopped(netdev, iq->q_index) &&
		    lio->linfo.link.s.link_up &&
		    (!octnet_iq_is_full(oct, iq_num))) {
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
						  tx_restart, 1);
			netif_wake_subqueue(netdev, iq->q_index);
		}
	} else if (netif_queue_stopped(netdev) &&
		   lio->linfo.link.s.link_up &&
		   (!octnet_iq_is_full(oct, lio->txq))) {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
					  lio->txq, tx_restart, 1);
		netif_wake_queue(netdev);
	}
}

static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
	struct octeon_device *oct = droq->oct_dev;
	struct octeon_device_priv *oct_priv =
	    (struct octeon_device_priv *)oct->priv;

	if (droq->ops.poll_mode) {
		droq->ops.napi_fn(droq);
	} else {
		if (ret & MSIX_PO_INT) {
			tasklet_schedule(&oct_priv->droq_tasklet);
			return 1;
		}
		/* this will be flushed periodically by check iq db */
		if (ret & MSIX_PI_INT)
			return 0;
	}
	return 0;
}

/**
 * \brief Droq packet processor scheduler
 * @param oct octeon device
 */
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	u64 oq_no;
	struct octeon_droq *droq;

	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
		     oq_no++) {
			if (!(oct->droq_intr & BIT_ULL(oq_no)))
				continue;

			droq = oct->droq[oq_no];

			if (droq->ops.poll_mode) {
				droq->ops.napi_fn(droq);
				oct_priv->napi_mask |= (1 << oq_no);
			} else {
				tasklet_schedule(&oct_priv->droq_tasklet);
			}
		}
	}
}

static irqreturn_t
liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
{
	u64 ret;
	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
	struct octeon_device *oct = ioq_vector->oct_dev;
	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];

	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

	if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
		liquidio_schedule_msix_droq_pkt_handler(droq, ret);

	return IRQ_HANDLED;
}

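/* Each MSI-X IOQ vector is bound to exactly one octeon_ioq_vector, and
 * ioq_vector->droq_index selects the DROQ whose NAPI handler (or the tasklet
 * fallback) gets scheduled by the handler above.
 */
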
/**
 * \brief Interrupt handler for octeon
 * @param irq unused
 * @param dev octeon device
 */
static
irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
					 void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	irqreturn_t ret;

	/* Disable our interrupts for the duration of ISR */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	ret = oct->fn_list.process_interrupt_regs(oct);

	if (ret == IRQ_HANDLED)
		liquidio_schedule_droq_pkt_handlers(oct);

	/* Re-enable our interrupts */
	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
		oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	return ret;
}

/**
 * \brief Setup interrupt for octeon device
 * @param oct octeon device
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
static int octeon_setup_interrupt(struct octeon_device *oct)
{
	int irqret, err;
	struct msix_entry *msix_entries;
	int i;
	int num_ioq_vectors;
	int num_alloc_ioq_vectors;
	char *queue_irq_names = NULL;
	char *aux_irq_name = NULL;

	if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
		oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
		/* one non ioq interrupt for handling sli_mac_pf_int_sum */
		oct->num_msix_irqs += 1;

		/* allocate storage for the names assigned to each irq */
		oct->irq_name_storage =
			kcalloc((MAX_IOQ_INTERRUPTS_PER_PF + 1), INTRNAMSIZ,
				GFP_KERNEL);
		if (!oct->irq_name_storage) {
			dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
			return -ENOMEM;
		}

		queue_irq_names = oct->irq_name_storage;
		aux_irq_name = &queue_irq_names
			[IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];

		oct->msix_entries = kcalloc(
		    oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
		if (!oct->msix_entries) {
			dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return -ENOMEM;
		}

		msix_entries = (struct msix_entry *)oct->msix_entries;
		/* Assumption: the PF MSI-X vector entries run from pf_srn
		 * through trs, not from 0.  If that changes, update this
		 * code.
		 */
		for (i = 0; i < oct->num_msix_irqs - 1; i++)
			msix_entries[i].entry = oct->sriov_info.pf_srn + i;
		msix_entries[oct->num_msix_irqs - 1].entry =
			oct->sriov_info.trs;
		num_alloc_ioq_vectors = pci_enable_msix_range(
						oct->pci_dev, msix_entries,
						oct->num_msix_irqs,
						oct->num_msix_irqs);
		if (num_alloc_ioq_vectors < 0) {
			dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return num_alloc_ioq_vectors;
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

		num_ioq_vectors = oct->num_msix_irqs;

		/** For PF, there is one non-ioq interrupt handler */
		num_ioq_vectors -= 1;

		snprintf(aux_irq_name, INTRNAMSIZ,
			 "LiquidIO%u-pf%u-aux", oct->octeon_id, oct->pf_num);
		irqret = request_irq(msix_entries[num_ioq_vectors].vector,
				     liquidio_legacy_intr_handler, 0,
				     aux_irq_name, oct);
		if (irqret) {
			dev_err(&oct->pci_dev->dev,
				"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
				irqret);
			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return irqret;
		}

		for (i = 0; i < num_ioq_vectors; i++) {
			snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], INTRNAMSIZ,
				 "LiquidIO%u-pf%u-rxtx-%u",
				 oct->octeon_id, oct->pf_num, i);

			irqret = request_irq(msix_entries[i].vector,
					     liquidio_msix_intr_handler, 0,
					     &queue_irq_names[IRQ_NAME_OFF(i)],
					     &oct->ioq_vector[i]);
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				/** Freeing the non-ioq irq vector here. */
				free_irq(msix_entries[num_ioq_vectors].vector,
					 oct);

				while (i) {
					i--;
					/** clearing affinity mask. */
					irq_set_affinity_hint(
						msix_entries[i].vector, NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
				}
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				oct->msix_entries = NULL;
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				return irqret;
			}
			oct->ioq_vector[i].vector = msix_entries[i].vector;
			/* assign the cpu mask for this msix interrupt vector */
			irq_set_affinity_hint(
				msix_entries[i].vector,
				(&oct->ioq_vector[i].affinity_mask));
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
			oct->octeon_id);
	} else {
		err = pci_enable_msi(oct->pci_dev);
		if (err)
			dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
				 err);
		else
			oct->flags |= LIO_FLAG_MSI_ENABLED;

		/* allocate storage for the names assigned to the irq */
		oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage)
			return -ENOMEM;

		queue_irq_names = oct->irq_name_storage;

		snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
			 "LiquidIO%u-pf%u-rxtx-%u",
			 oct->octeon_id, oct->pf_num, 0);

		irqret = request_irq(oct->pci_dev->irq,
				     liquidio_legacy_intr_handler,
				     IRQF_SHARED,
				     &queue_irq_names[IRQ_NAME_OFF(0)], oct);
		if (irqret) {
			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
			dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
				irqret);
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return irqret;
		}
	}
	return 0;
}

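/* Resulting interrupt layout in the MSI-X case:
 *   vectors 0 .. num_pf_rings - 1: "LiquidIO<id>-pf<n>-rxtx-<i>", one per
 *       IOQ (MSI-X table entries pf_srn .. pf_srn + num_pf_rings - 1),
 *       served by liquidio_msix_intr_handler();
 *   vector num_pf_rings: "LiquidIO<id>-pf<n>-aux" (entry sriov_info.trs),
 *       the non-IOQ interrupt, served by liquidio_legacy_intr_handler().
 * With MSI or legacy INTx a single shared "rxtx-0" handler is used instead.
 */
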
static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
	struct octeon_device *other_oct;

	other_oct = lio_get_device(oct->octeon_id + 1);

	if (other_oct && other_oct->pci_dev) {
		int oct_busnum, other_oct_busnum;

		oct_busnum = oct->pci_dev->bus->number;
		other_oct_busnum = other_oct->pci_dev->bus->number;

		if (oct_busnum == other_oct_busnum) {
			int oct_slot, other_oct_slot;

			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

			if (oct_slot == other_oct_slot)
				return other_oct;
		}
	}

	return NULL;
}

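/* "The other" Octeon device is the companion device on the same physical
 * adapter: the one probed with the next consecutive id whose PCI bus and
 * slot match this device's, if such a device is present.
 */
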
static void disable_all_vf_links(struct octeon_device *oct)
{
	struct net_device *netdev;
	int max_vfs, vf, i;

	if (!oct)
		return;

	max_vfs = oct->sriov_info.max_vfs;

	for (i = 0; i < oct->ifcount; i++) {
		netdev = oct->props[i].netdev;
		if (!netdev)
			continue;

		for (vf = 0; vf < max_vfs; vf++)
			liquidio_set_vf_link_state(netdev, vf,
						   IFLA_VF_LINK_STATE_DISABLE);
	}
}

static int liquidio_watchdog(void *param)
{
	bool err_msg_was_printed[LIO_MAX_CORES];
	u16 mask_of_crashed_or_stuck_cores = 0;
	bool all_vf_links_are_disabled = false;
	struct octeon_device *oct = param;
	struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
	long refcount, vfs_referencing_pf;
	u64 vfs_mask1, vfs_mask2;
#endif
	int core;

	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

	while (!kthread_should_stop()) {
		/* sleep for a couple of seconds so that we don't hog the CPU */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(2000));

		mask_of_crashed_or_stuck_cores =
		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

		if (!mask_of_crashed_or_stuck_cores)
			continue;

		WRITE_ONCE(oct->cores_crashed, true);
		other_oct = get_other_octeon_device(oct);
		if (other_oct)
			WRITE_ONCE(other_oct->cores_crashed, true);

		for (core = 0; core < LIO_MAX_CORES; core++) {
			bool core_crashed_or_got_stuck;

			core_crashed_or_got_stuck =
						(mask_of_crashed_or_stuck_cores
						 >> core) & 1;

			if (core_crashed_or_got_stuck &&
			    !err_msg_was_printed[core]) {
				dev_err(&oct->pci_dev->dev,
					"ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
					core);
				err_msg_was_printed[core] = true;
			}
		}

		if (all_vf_links_are_disabled)
			continue;

		disable_all_vf_links(oct);
		disable_all_vf_links(other_oct);
		all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
		vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);

		vfs_referencing_pf  = hweight64(vfs_mask1);
		vfs_referencing_pf += hweight64(vfs_mask2);

		refcount = module_refcount(THIS_MODULE);
		if (refcount >= vfs_referencing_pf) {
			while (vfs_referencing_pf) {
				module_put(THIS_MODULE);
				vfs_referencing_pf--;
			}
		}
#endif
	}

	return 0;
}

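/* CN23XX_SLI_SCRATCH2 is read as a bitmask with one bit per Octeon core; a
 * set bit means that core has crashed or is stuck (presumably written by the
 * firmware).  The watchdog then marks the device, disables all VF links and
 * drops any module references still held on behalf of VF drivers.
 */
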
/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
	       const struct pci_device_id *ent __attribute__((unused)))
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	if (pdev->device == OCTEON_CN23XX_PF_VID)
		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		complete(&hs->init);
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	if (OCTEON_CN23XX_PF(oct_dev)) {
		u64 scratch1;
		u8 bus, device, function;

		scratch1 = octeon_read_csr64(oct_dev, CN23XX_SLI_SCRATCH1);
		if (!(scratch1 & 4ULL)) {
			/* Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
			 * the lio watchdog kernel thread is running for this
			 * NIC.  Each NIC gets one watchdog kernel thread.
			 */
			scratch1 |= 4ULL;
			octeon_write_csr64(oct_dev, CN23XX_SLI_SCRATCH1,
					   scratch1);

			bus = pdev->bus->number;
			device = PCI_SLOT(pdev->devfn);
			function = PCI_FUNC(pdev->devfn);
			oct_dev->watchdog_task = kthread_create(
			    liquidio_watchdog, oct_dev,
			    "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
			if (!IS_ERR(oct_dev->watchdog_task)) {
				wake_up_process(oct_dev->watchdog_task);
			} else {
				oct_dev->watchdog_task = NULL;
				dev_err(&oct_dev->pci_dev->dev,
					"failed to create kernel_thread\n");
				liquidio_remove(pdev);
				return -1;
			}
		}
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}

static bool fw_type_is_none(void)
{
	return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
		       sizeof(LIO_FW_NAME_TYPE_NONE)) == 0;
}

/**
 * \brief Destroy resources associated with octeon device
 * @param oct Pointer to Octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	int i, refcount;
	struct msix_entry *msix_entries;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	struct handshake *hs;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:

		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fallthrough */
	case OCT_DEV_HOST_OK:

		/* fallthrough */
	case OCT_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		octeon_remove_consoles(oct);

		/* fallthrough */
	case OCT_DEV_IO_QUEUES_DONE:
		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* fallthrough */
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				/* clear the affinity_cpumask */
				irq_set_affinity_hint(msix_entries[i].vector,
						      NULL);
				free_irq(msix_entries[i].vector,
					 &oct->ioq_vector[i]);
			}
			/* non-iov vector's argument is oct struct */
			free_irq(msix_entries[i].vector, oct);

			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
		} else {
			/* Release the interrupt line */
			free_irq(oct->pci_dev->irq, oct);

			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
		}

		kfree(oct->irq_name_storage);
		oct->irq_name_storage = NULL;

		/* fallthrough */
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		if (OCTEON_CN23XX_PF(oct))
			octeon_free_ioq_vector(oct);

		/* fallthrough */
	case OCT_DEV_MBOX_SETUP_DONE:
		if (OCTEON_CN23XX_PF(oct))
			oct->fn_list.free_mbox(oct);

		/* fallthrough */
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/* Wait for any pending operations */
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

		/* fallthrough */
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		/* fallthrough */
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}
#ifdef CONFIG_PCI_IOV
		if (oct->sriov_info.sriov_enabled)
			pci_disable_sriov(oct->pci_dev);
#endif
		/* fallthrough */
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		/* fallthrough */
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		/* fallthrough */
	case OCT_DEV_PCI_MAP_DONE:
		refcount = octeon_deregister_device(oct);

		if (!fw_type_is_none()) {
			/* Soft reset the octeon device before exiting.
			 * Implementation note: here, we reset the device
			 * if it is a CN6XXX OR the last CN23XX device.
			 */
			if (OCTEON_CN6XXX(oct) || !refcount)
				oct->fn_list.soft_reset(oct);
		}

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		/* fallthrough */
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* fallthrough */
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}                       /* end switch (oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}

1588/**
Raghu Vatsavayiafdf8412016-09-01 11:16:05 -07001589 * \brief Callback for rx ctrl
1590 * @param status status of request
1591 * @param buf pointer to resp structure
1592 */
1593static void rx_ctl_callback(struct octeon_device *oct,
1594 u32 status,
1595 void *buf)
1596{
1597 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1598 struct liquidio_rx_ctl_context *ctx;
1599
1600 ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
1601
1602 oct = lio_get_device(ctx->octeon_id);
1603 if (status)
1604 dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
1605 CVM_CAST64(status));
1606 WRITE_ONCE(ctx->cond, 1);
1607
1608 /* This barrier is required to be sure that the response has been
1609 * written fully before waking up the handler
1610 */
1611 wmb();
1612
1613 wake_up_interruptible(&ctx->wc);
1614}
1615
1616/**
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001617 * \brief Send Rx control command
1618 * @param lio per-network private data
1619 * @param start_stop whether to start or stop
1620 */
1621static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
1622{
Raghu Vatsavayiafdf8412016-09-01 11:16:05 -07001623 struct octeon_soft_command *sc;
1624 struct liquidio_rx_ctl_context *ctx;
1625 union octnet_cmd *ncmd;
1626 int ctx_size = sizeof(struct liquidio_rx_ctl_context);
1627 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1628 int retval;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001629
Raghu Vatsavayiafdf8412016-09-01 11:16:05 -07001630 if (oct->props[lio->ifidx].rx_on == start_stop)
1631 return;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001632
Raghu Vatsavayiafdf8412016-09-01 11:16:05 -07001633 sc = (struct octeon_soft_command *)
1634 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1635 16, ctx_size);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001636
Raghu Vatsavayiafdf8412016-09-01 11:16:05 -07001637 ncmd = (union octnet_cmd *)sc->virtdptr;
1638 ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;
1639
1640 WRITE_ONCE(ctx->cond, 0);
1641 ctx->octeon_id = lio_get_device_id(oct);
1642 init_waitqueue_head(&ctx->wc);
1643
1644 ncmd->u64 = 0;
1645 ncmd->s.cmd = OCTNET_CMD_RX_CTL;
1646 ncmd->s.param1 = start_stop;
1647
1648 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1649
1650 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1651
1652 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1653 OPCODE_NIC_CMD, 0, 0, 0);
1654
1655 sc->callback = rx_ctl_callback;
1656 sc->callback_arg = sc;
1657 sc->wait_time = 5000;
1658
1659 retval = octeon_send_soft_command(oct, sc);
1660 if (retval == IQ_SEND_FAILED) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001661 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
Raghu Vatsavayiafdf8412016-09-01 11:16:05 -07001662 } else {
1663		/* Sleep on a wait queue until the cond flag indicates that the
1664		 * response arrived or the wait timed out.
1665 */
1666 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
1667 return;
1668 oct->props[lio->ifidx].rx_on = start_stop;
1669 }
1670
1671 octeon_free_soft_command(oct, sc);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001672}
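
/* Note on the flow above (a summary of the existing behaviour, not new
 * logic): the rx ctrl request is a soft command carrying a small context
 * area.  rx_ctl_callback() runs when the response (or its timeout) is
 * processed; it records the status, sets ctx->cond and wakes ctx->wc.
 * send_rx_ctrl_cmd() sleeps in sleep_cond() until that flag flips, and
 * updates the cached rx_on state only once the wait completes without
 * being interrupted.
 */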
1673
1674/**
1675 * \brief Destroy NIC device interface
1676 * @param oct octeon device
1677 * @param ifidx which interface to destroy
1678 *
1679 * Cleanup associated with each interface for an Octeon device when NIC
1680 * module is being unloaded or if initialization fails during load.
1681 */
1682static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1683{
1684 struct net_device *netdev = oct->props[ifidx].netdev;
1685 struct lio *lio;
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07001686 struct napi_struct *napi, *n;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001687
1688 if (!netdev) {
1689 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
1690 __func__, ifidx);
1691 return;
1692 }
1693
1694 lio = GET_LIO(netdev);
1695
1696 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
1697
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001698 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
Raghu Vatsavayiafdf8412016-09-01 11:16:05 -07001699 liquidio_stop(netdev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001700
Felix Manlunas7cc61db2017-03-23 13:26:28 -07001701 if (fw_type_is_none()) {
1702 struct octnic_ctrl_pkt nctrl;
1703
1704 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1705 nctrl.ncmd.s.cmd = OCTNET_CMD_RESET_PF;
1706 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1707 octnet_send_nic_ctrl_pkt(oct, &nctrl);
1708 }
1709
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07001710 if (oct->props[lio->ifidx].napi_enabled == 1) {
1711 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1712 napi_disable(napi);
1713
1714 oct->props[lio->ifidx].napi_enabled = 0;
Raghu Vatsavayi7b6b6c92016-09-01 11:16:04 -07001715
1716 if (OCTEON_CN23XX_PF(oct))
1717 oct->droq[0]->ops.poll_mode = 0;
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07001718 }
1719
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001720 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1721 unregister_netdev(netdev);
1722
Raghu Vatsavayi7b6b6c92016-09-01 11:16:04 -07001723 cleanup_link_status_change_wq(netdev);
1724
Satanand Burla031d4f12017-03-22 11:31:13 -07001725 cleanup_rx_oom_poll_fn(netdev);
1726
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07001727 delete_glists(lio);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001728
1729 free_netdev(netdev);
1730
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07001731 oct->props[ifidx].gmxport = -1;
1732
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001733 oct->props[ifidx].netdev = NULL;
1734}
1735
1736/**
1737 * \brief Stop complete NIC functionality
1738 * @param oct octeon device
1739 */
1740static int liquidio_stop_nic_module(struct octeon_device *oct)
1741{
1742 int i, j;
1743 struct lio *lio;
1744
1745 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
1746 if (!oct->ifcount) {
1747 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
1748 return 1;
1749 }
1750
Raghu Vatsavayi60441882016-06-21 22:53:08 -07001751 spin_lock_bh(&oct->cmd_resp_wqlock);
1752 oct->cmd_resp_state = OCT_DRV_OFFLINE;
1753 spin_unlock_bh(&oct->cmd_resp_wqlock);
1754
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001755 for (i = 0; i < oct->ifcount; i++) {
1756 lio = GET_LIO(oct->props[i].netdev);
1757 for (j = 0; j < lio->linfo.num_rxpciq; j++)
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07001758 octeon_unregister_droq_ops(oct,
1759 lio->linfo.rxpciq[j].s.q_no);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001760 }
1761
1762 for (i = 0; i < oct->ifcount; i++)
1763 liquidio_destroy_nic_device(oct, i);
1764
1765 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
1766 return 0;
1767}
1768
1769/**
1770 * \brief Cleans up resources at unload time
1771 * @param pdev PCI device structure
1772 */
1773static void liquidio_remove(struct pci_dev *pdev)
1774{
1775 struct octeon_device *oct_dev = pci_get_drvdata(pdev);
1776
1777 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
1778
Raghu Vatsavayi9ff1a9b2016-09-01 11:16:09 -07001779 if (oct_dev->watchdog_task)
1780 kthread_stop(oct_dev->watchdog_task);
1781
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001782 if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
1783 liquidio_stop_nic_module(oct_dev);
1784
1785 /* Reset the octeon device and cleanup all memory allocated for
1786 * the octeon device by driver.
1787 */
1788 octeon_destroy_resources(oct_dev);
1789
1790 dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
1791
1792 /* This octeon device has been removed. Update the global
1793 * data structure to reflect this. Free the device structure.
1794 */
1795 octeon_free_device_mem(oct_dev);
1796}
1797
1798/**
1799 * \brief Identify the Octeon device and map the BAR address space
1800 * @param oct octeon device
1801 */
1802static int octeon_chip_specific_setup(struct octeon_device *oct)
1803{
1804 u32 dev_id, rev_id;
1805 int ret = 1;
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001806 char *s;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001807
1808 pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1809 pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1810 oct->rev_id = rev_id & 0xff;
1811
1812 switch (dev_id) {
1813 case OCTEON_CN68XX_PCIID:
1814 oct->chip_id = OCTEON_CN68XX;
1815 ret = lio_setup_cn68xx_octeon_device(oct);
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001816 s = "CN68XX";
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001817 break;
1818
1819 case OCTEON_CN66XX_PCIID:
1820 oct->chip_id = OCTEON_CN66XX;
1821 ret = lio_setup_cn66xx_octeon_device(oct);
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001822 s = "CN66XX";
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001823 break;
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001824
Raghu Vatsavayi72c00912016-08-31 11:03:25 -07001825 case OCTEON_CN23XX_PCIID_PF:
1826 oct->chip_id = OCTEON_CN23XX_PF_VID;
1827 ret = setup_cn23xx_octeon_pf_device(oct);
1828 s = "CN23XX";
1829 break;
1830
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001831 default:
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001832 s = "?";
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001833 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1834 dev_id);
1835 }
1836
1837 if (!ret)
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001838 dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001839 OCTEON_MAJOR_REV(oct),
1840 OCTEON_MINOR_REV(oct),
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07001841 octeon_get_conf(oct)->card_name,
1842 LIQUIDIO_VERSION);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001843
1844 return ret;
1845}
1846
1847/**
1848 * \brief PCI initialization for each Octeon device.
1849 * @param oct octeon device
1850 */
1851static int octeon_pci_os_setup(struct octeon_device *oct)
1852{
1853 /* setup PCI stuff first */
1854 if (pci_enable_device(oct->pci_dev)) {
1855 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1856 return 1;
1857 }
1858
1859 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1860 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
Raghu Vatsavayi515e7522016-11-14 15:54:44 -08001861 pci_disable_device(oct->pci_dev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001862 return 1;
1863 }
1864
1865 /* Enable PCI DMA Master. */
1866 pci_set_master(oct->pci_dev);
1867
1868 return 0;
1869}
1870
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07001871static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
1872{
1873 int q = 0;
1874
1875 if (netif_is_multiqueue(lio->netdev))
1876 q = skb->queue_mapping % lio->linfo.num_txpciq;
1877
1878 return q;
1879}
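
/* skb_iq() simply folds the skb's queue_mapping onto the interface's
 * transmit (instruction) queue range.  The buffer-free paths below use it
 * to return a gather-list entry to the per-IQ glist it was taken from.
 */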
1880
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001881/**
1882 * \brief Check Tx queue state for a given network buffer
1883 * @param lio per-network private data
1884 * @param skb network buffer
1885 */
1886static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
1887{
1888 int q = 0, iq = 0;
1889
1890 if (netif_is_multiqueue(lio->netdev)) {
1891 q = skb->queue_mapping;
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07001892 iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001893 } else {
1894 iq = lio->txq;
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07001895 q = iq;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001896 }
1897
1898 if (octnet_iq_is_full(lio->oct_dev, iq))
1899 return 0;
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07001900
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001901 if (__netif_subqueue_stopped(lio->netdev, q)) {
1902 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07001903 wake_q(lio->netdev, q);
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07001904 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001905 return 1;
1906}
1907
1908/**
1909 * \brief Unmap and free network buffer
1910 * @param buf buffer
1911 */
1912static void free_netbuf(void *buf)
1913{
1914 struct sk_buff *skb;
1915 struct octnet_buf_free_info *finfo;
1916 struct lio *lio;
1917
1918 finfo = (struct octnet_buf_free_info *)buf;
1919 skb = finfo->skb;
1920 lio = finfo->lio;
1921
1922 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1923 DMA_TO_DEVICE);
1924
1925 check_txq_state(lio, skb);
1926
Raghu Vatsavayicabeb132016-06-14 16:54:47 -07001927 tx_buffer_free(skb);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001928}
1929
1930/**
1931 * \brief Unmap and free gather buffer
1932 * @param buf buffer
1933 */
1934static void free_netsgbuf(void *buf)
1935{
1936 struct octnet_buf_free_info *finfo;
1937 struct sk_buff *skb;
1938 struct lio *lio;
1939 struct octnic_gather *g;
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07001940 int i, frags, iq;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001941
1942 finfo = (struct octnet_buf_free_info *)buf;
1943 skb = finfo->skb;
1944 lio = finfo->lio;
1945 g = finfo->g;
1946 frags = skb_shinfo(skb)->nr_frags;
1947
1948 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1949 g->sg[0].ptr[0], (skb->len - skb->data_len),
1950 DMA_TO_DEVICE);
1951
1952 i = 1;
1953 while (frags--) {
1954 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1955
1956 pci_unmap_page((lio->oct_dev)->pci_dev,
1957 g->sg[(i >> 2)].ptr[(i & 3)],
1958 frag->size, DMA_TO_DEVICE);
1959 i++;
1960 }
1961
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07001962 iq = skb_iq(lio, skb);
1963 spin_lock(&lio->glist_lock[iq]);
1964 list_add_tail(&g->list, &lio->glist[iq]);
1965 spin_unlock(&lio->glist_lock[iq]);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001966
1967 check_txq_state(lio, skb); /* mq support: sub-queue state check */
1968
Raghu Vatsavayicabeb132016-06-14 16:54:47 -07001969 tx_buffer_free(skb);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001970}
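
/* Illustrative note on the gather-list indexing above: the
 * g->sg[i >> 2].ptr[i & 3] expression reflects that each gather entry
 * holds four buffer pointers.  Slot 0 of entry 0 is the linear part of
 * the skb (unmapped just before the loop), and fragment i - 1 lands in
 * entry i / 4, slot i % 4.  For example, an skb with 5 frags uses entry 0
 * slots 1-3 (frags 0-2) and entry 1 slots 0-1 (frags 3-4).
 */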
1971
1972/**
1973 * \brief Unmap and free gather buffer with response
1974 * @param buf buffer
1975 */
1976static void free_netsgbuf_with_resp(void *buf)
1977{
1978 struct octeon_soft_command *sc;
1979 struct octnet_buf_free_info *finfo;
1980 struct sk_buff *skb;
1981 struct lio *lio;
1982 struct octnic_gather *g;
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07001983 int i, frags, iq;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07001984
1985 sc = (struct octeon_soft_command *)buf;
1986 skb = (struct sk_buff *)sc->callback_arg;
1987 finfo = (struct octnet_buf_free_info *)&skb->cb;
1988
1989 lio = finfo->lio;
1990 g = finfo->g;
1991 frags = skb_shinfo(skb)->nr_frags;
1992
1993 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1994 g->sg[0].ptr[0], (skb->len - skb->data_len),
1995 DMA_TO_DEVICE);
1996
1997 i = 1;
1998 while (frags--) {
1999 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
2000
2001 pci_unmap_page((lio->oct_dev)->pci_dev,
2002 g->sg[(i >> 2)].ptr[(i & 3)],
2003 frag->size, DMA_TO_DEVICE);
2004 i++;
2005 }
2006
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07002007 iq = skb_iq(lio, skb);
2008
2009 spin_lock(&lio->glist_lock[iq]);
2010 list_add_tail(&g->list, &lio->glist[iq]);
2011 spin_unlock(&lio->glist_lock[iq]);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002012
2013 /* Don't free the skb yet */
2014
2015 check_txq_state(lio, skb);
2016}
2017
2018/**
2019 * \brief Adjust ptp frequency
2020 * @param ptp PTP clock info
2021 * @param ppb how much to adjust by, in parts-per-billion
2022 */
2023static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
2024{
2025 struct lio *lio = container_of(ptp, struct lio, ptp_info);
2026 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
2027 u64 comp, delta;
2028 unsigned long flags;
2029 bool neg_adj = false;
2030
2031 if (ppb < 0) {
2032 neg_adj = true;
2033 ppb = -ppb;
2034 }
2035
2036 /* The hardware adds the clock compensation value to the
2037 * PTP clock on every coprocessor clock cycle, so we
2038 * compute the delta in terms of coprocessor clocks.
2039 */
2040 delta = (u64)ppb << 32;
2041 do_div(delta, oct->coproc_clock_rate);
2042
2043 spin_lock_irqsave(&lio->ptp_lock, flags);
2044 comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
2045 if (neg_adj)
2046 comp -= delta;
2047 else
2048 comp += delta;
2049 lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
2050 spin_unlock_irqrestore(&lio->ptp_lock, flags);
2051
2052 return 0;
2053}
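
/* Worked example for the adjustment above (illustrative figures, not a
 * statement about real board clocks): CN6XXX_MIO_PTP_CLOCK_COMP holds the
 * nanoseconds added to the PTP clock per coprocessor cycle, as a 32.32
 * fixed-point value.  With a 1 GHz coprocessor clock the nominal value is
 * 1.0 ns, i.e. 1 << 32.  For ppb = 100, delta = (100 << 32) / 1e9 ~= 429,
 * and adding that to the nominal value speeds the PTP clock up by
 * 429 / 2^32, about 100 parts per billion, which is the requested trim.
 */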
2054
2055/**
2056 * \brief Adjust ptp time
2057 * @param ptp PTP clock info
2058 * @param delta how much to adjust by, in nanosecs
2059 */
2060static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
2061{
2062 unsigned long flags;
2063 struct lio *lio = container_of(ptp, struct lio, ptp_info);
2064
2065 spin_lock_irqsave(&lio->ptp_lock, flags);
2066 lio->ptp_adjust += delta;
2067 spin_unlock_irqrestore(&lio->ptp_lock, flags);
2068
2069 return 0;
2070}
2071
2072/**
2073 * \brief Get hardware clock time, including any adjustment
2074 * @param ptp PTP clock info
2075 * @param ts timespec
2076 */
2077static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
2078 struct timespec64 *ts)
2079{
2080 u64 ns;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002081 unsigned long flags;
2082 struct lio *lio = container_of(ptp, struct lio, ptp_info);
2083 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
2084
2085 spin_lock_irqsave(&lio->ptp_lock, flags);
2086 ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
2087 ns += lio->ptp_adjust;
2088 spin_unlock_irqrestore(&lio->ptp_lock, flags);
2089
Kefeng Wang286af312016-01-27 17:34:37 +08002090 *ts = ns_to_timespec64(ns);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002091
2092 return 0;
2093}
2094
2095/**
2096 * \brief Set hardware clock time and reset the adjustment
2097 * @param ptp PTP clock info
2098 * @param ts timespec
2099 */
2100static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
2101 const struct timespec64 *ts)
2102{
2103 u64 ns;
2104 unsigned long flags;
2105 struct lio *lio = container_of(ptp, struct lio, ptp_info);
2106 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
2107
2108 ns = timespec_to_ns(ts);
2109
2110 spin_lock_irqsave(&lio->ptp_lock, flags);
2111 lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
2112 lio->ptp_adjust = 0;
2113 spin_unlock_irqrestore(&lio->ptp_lock, flags);
2114
2115 return 0;
2116}
2117
2118/**
2119 * \brief Enable or disable an ancillary PTP clock feature (not supported)
2120 * @param ptp PTP clock info
2121 * @param rq request
2122 * @param on whether to enable or disable the feature
2123 */
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -07002124static int
2125liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
2126 struct ptp_clock_request *rq __attribute__((unused)),
2127 int on __attribute__((unused)))
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002128{
2129 return -EOPNOTSUPP;
2130}
2131
2132/**
2133 * \brief Open PTP clock source
2134 * @param netdev network device
2135 */
2136static void oct_ptp_open(struct net_device *netdev)
2137{
2138 struct lio *lio = GET_LIO(netdev);
2139 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
2140
2141 spin_lock_init(&lio->ptp_lock);
2142
2143 snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
2144 lio->ptp_info.owner = THIS_MODULE;
2145 lio->ptp_info.max_adj = 250000000;
2146 lio->ptp_info.n_alarm = 0;
2147 lio->ptp_info.n_ext_ts = 0;
2148 lio->ptp_info.n_per_out = 0;
2149 lio->ptp_info.pps = 0;
2150 lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
2151 lio->ptp_info.adjtime = liquidio_ptp_adjtime;
2152 lio->ptp_info.gettime64 = liquidio_ptp_gettime;
2153 lio->ptp_info.settime64 = liquidio_ptp_settime;
2154 lio->ptp_info.enable = liquidio_ptp_enable;
2155
2156 lio->ptp_adjust = 0;
2157
2158 lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
2159 &oct->pci_dev->dev);
2160
2161 if (IS_ERR(lio->ptp_clock))
2162 lio->ptp_clock = NULL;
2163}
2164
2165/**
2166 * \brief Init PTP clock
2167 * @param oct octeon device
2168 */
2169static void liquidio_ptp_init(struct octeon_device *oct)
2170{
2171 u64 clock_comp, cfg;
2172
2173 clock_comp = (u64)NSEC_PER_SEC << 32;
2174 do_div(clock_comp, oct->coproc_clock_rate);
2175 lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
2176
2177 /* Enable */
2178 cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
2179 lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
2180}
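
/* A quick sanity check of the math above (assumed clock rates, for
 * illustration only): clock_comp is the per-cycle increment in 32.32
 * fixed point, (NSEC_PER_SEC << 32) / coproc_clock_rate.  At 1 GHz this
 * is exactly 1 << 32 (1.0 ns per cycle); at 600 MHz it is roughly
 * 0x1AAAAAAAA (about 1.667 ns per cycle).  liquidio_ptp_adjfreq() later
 * nudges this same register to trim the clock frequency.
 */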
2181
2182/**
2183 * \brief Load firmware to device
2184 * @param oct octeon device
2185 *
2186 * Maps device to firmware filename, requests firmware, and downloads it
2187 */
2188static int load_firmware(struct octeon_device *oct)
2189{
2190 int ret = 0;
2191 const struct firmware *fw;
2192 char fw_name[LIO_MAX_FW_FILENAME_LEN];
2193 char *tmp_fw_type;
2194
Felix Manlunas7cc61db2017-03-23 13:26:28 -07002195 if (fw_type_is_none()) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002196 dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
2197 return ret;
2198 }
2199
2200 if (fw_type[0] == '\0')
2201 tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
2202 else
2203 tmp_fw_type = fw_type;
2204
2205 sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
2206 octeon_get_conf(oct)->card_name, tmp_fw_type,
2207 LIO_FW_NAME_SUFFIX);
2208
2209 ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
2210 if (ret) {
2211		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
2212 fw_name);
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07002213 release_firmware(fw);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002214 return ret;
2215 }
2216
2217 ret = octeon_download_firmware(oct, fw->data, fw->size);
2218
2219 release_firmware(fw);
2220
2221 return ret;
2222}
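
/* For reference, the name built above concatenates directory, base name,
 * card name, firmware type and suffix.  Assuming the usual macro values
 * (LIO_FW_DIR "liquidio/", LIO_FW_BASE_NAME "lio_", LIO_FW_NAME_SUFFIX
 * ".bin"), a CN23XX card with the default "nic" firmware would request
 * something like "liquidio/lio_23xx_nic.bin" via request_firmware().
 */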
2223
2224/**
2225 * \brief Setup output queue
2226 * @param oct octeon device
2227 * @param q_no which queue
2228 * @param num_descs how many descriptors
2229 * @param desc_size size of each descriptor
2230 * @param app_ctx application context
2231 */
2232static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
2233 int desc_size, void *app_ctx)
2234{
2235 int ret_val = 0;
2236
2237 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
2238 /* droq creation and local register settings. */
2239 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
Amitoj Kaur Chawla08a965e2016-02-04 19:25:13 +05302240 if (ret_val < 0)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002241 return ret_val;
2242
2243 if (ret_val == 1) {
2244 dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
2245 return 0;
2246 }
2247 /* tasklet creation for the droq */
2248
2249 /* Enable the droq queues */
2250 octeon_set_droq_pkt_op(oct, q_no, 1);
2251
2252 /* Send Credit for Octeon Output queues. Credits are always
2253 * sent after the output queue is enabled.
2254 */
2255 writel(oct->droq[q_no]->max_count,
2256 oct->droq[q_no]->pkts_credit_reg);
2257
2258 return ret_val;
2259}
2260
2261/**
2262 * \brief Callback for getting interface configuration
2263 * @param status status of request
2264 * @param buf pointer to resp structure
2265 */
2266static void if_cfg_callback(struct octeon_device *oct,
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -07002267 u32 status __attribute__((unused)),
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002268 void *buf)
2269{
2270 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
2271 struct liquidio_if_cfg_resp *resp;
2272 struct liquidio_if_cfg_context *ctx;
2273
2274 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
Raghu Vatsavayi30136392016-09-01 11:16:11 -07002275 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002276
2277 oct = lio_get_device(ctx->octeon_id);
2278 if (resp->status)
Rick Farringtonc5b71e62017-03-17 11:23:08 -07002279 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n",
2280 CVM_CAST64(resp->status), status);
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -07002281 WRITE_ONCE(ctx->cond, 1);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002282
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07002283 snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
2284 resp->cfg_info.liquidio_firmware_version);
2285
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002286 /* This barrier is required to be sure that the response has been
2287 * written fully before waking up the handler
2288 */
2289 wmb();
2290
2291 wake_up_interruptible(&ctx->wc);
2292}
2293
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002294/** Routine to push packets arriving on the Octeon interface up to the network layer.
2295 * @param oct_id - octeon device id.
2296 * @param skbuff - skbuff struct to be passed to network layer.
2297 * @param len - size of total data received.
2298 * @param rh - Control header associated with the packet
2299 * @param param - additional control data with the packet
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002300 * @param arg - farg registered in droq_ops
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002301 */
2302static void
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -07002303liquidio_push_packet(u32 octeon_id __attribute__((unused)),
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002304 void *skbuff,
2305 u32 len,
2306 union octeon_rh *rh,
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002307 void *param,
2308 void *arg)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002309{
2310 struct napi_struct *napi = param;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002311 struct sk_buff *skb = (struct sk_buff *)skbuff;
2312 struct skb_shared_hwtstamps *shhwtstamps;
2313 u64 ns;
Raghu Vatsavayi0da0b772016-06-21 22:53:04 -07002314 u16 vtag = 0;
Prasad Kannegantide28c992017-01-09 14:42:40 -08002315 u32 r_dh_off;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002316 struct net_device *netdev = (struct net_device *)arg;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002317 struct octeon_droq *droq = container_of(param, struct octeon_droq,
2318 napi);
2319 if (netdev) {
2320 int packet_was_received;
2321 struct lio *lio = GET_LIO(netdev);
Raghu Vatsavayia5b37882016-06-14 16:54:48 -07002322 struct octeon_device *oct = lio->oct_dev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002323
2324 /* Do not proceed if the interface is not in RUNNING state. */
2325 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
2326 recv_buffer_free(skb);
2327 droq->stats.rx_dropped++;
2328 return;
2329 }
2330
2331 skb->dev = netdev;
2332
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07002333 skb_record_rx_queue(skb, droq->q_no);
Raghu Vatsavayicabeb132016-06-14 16:54:47 -07002334 if (likely(len > MIN_SKB_SIZE)) {
2335 struct octeon_skb_page_info *pg_info;
2336 unsigned char *va;
2337
2338 pg_info = ((struct octeon_skb_page_info *)(skb->cb));
2339 if (pg_info->page) {
2340 /* For Paged allocation use the frags */
2341 va = page_address(pg_info->page) +
2342 pg_info->page_offset;
2343 memcpy(skb->data, va, MIN_SKB_SIZE);
2344 skb_put(skb, MIN_SKB_SIZE);
2345 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
2346 pg_info->page,
2347 pg_info->page_offset +
2348 MIN_SKB_SIZE,
2349 len - MIN_SKB_SIZE,
2350 LIO_RXBUFFER_SZ);
2351 }
2352 } else {
2353 struct octeon_skb_page_info *pg_info =
2354 ((struct octeon_skb_page_info *)(skb->cb));
2355 skb_copy_to_linear_data(skb, page_address(pg_info->page)
2356 + pg_info->page_offset, len);
2357 skb_put(skb, len);
2358 put_page(pg_info->page);
2359 }
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07002360
Prasad Kannegantide28c992017-01-09 14:42:40 -08002361 r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
2362
Raghu Vatsavayia5b37882016-06-14 16:54:48 -07002363 if (((oct->chip_id == OCTEON_CN66XX) ||
2364 (oct->chip_id == OCTEON_CN68XX)) &&
2365 ptp_enable) {
2366 if (rh->r_dh.has_hwtstamp) {
2367 /* timestamp is included from the hardware at
2368 * the beginning of the packet.
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002369 */
Raghu Vatsavayia5b37882016-06-14 16:54:48 -07002370 if (ifstate_check
2371 (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
2372 /* Nanoseconds are in the first 64-bits
2373 * of the packet.
2374 */
Prasad Kannegantide28c992017-01-09 14:42:40 -08002375 memcpy(&ns, (skb->data + r_dh_off),
2376 sizeof(ns));
2377 r_dh_off -= BYTES_PER_DHLEN_UNIT;
Raghu Vatsavayia5b37882016-06-14 16:54:48 -07002378 shhwtstamps = skb_hwtstamps(skb);
2379 shhwtstamps->hwtstamp =
2380 ns_to_ktime(ns +
2381 lio->ptp_adjust);
2382 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002383 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002384 }
2385
Prasad Kannegantide28c992017-01-09 14:42:40 -08002386 if (rh->r_dh.has_hash) {
2387 __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
2388 u32 hash = be32_to_cpu(*hash_be);
2389
2390 skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
2391 r_dh_off -= BYTES_PER_DHLEN_UNIT;
2392 }
2393
2394 skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
2395
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002396 skb->protocol = eth_type_trans(skb, skb->dev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002397 if ((netdev->features & NETIF_F_RXCSUM) &&
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07002398 (((rh->r_dh.encap_on) &&
2399 (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
2400 (!(rh->r_dh.encap_on) &&
2401 (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002402 /* checksum has already been verified */
2403 skb->ip_summed = CHECKSUM_UNNECESSARY;
2404 else
2405 skb->ip_summed = CHECKSUM_NONE;
2406
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07002407 /* Setting Encapsulation field on basis of status received
2408 * from the firmware
2409 */
2410 if (rh->r_dh.encap_on) {
2411 skb->encapsulation = 1;
2412 skb->csum_level = 1;
2413 droq->stats.rx_vxlan++;
2414 }
2415
Raghu Vatsavayi0da0b772016-06-21 22:53:04 -07002416 /* inbound VLAN tag */
2417 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
2418 (rh->r_dh.vlan != 0)) {
2419 u16 vid = rh->r_dh.vlan;
2420 u16 priority = rh->r_dh.priority;
2421
2422 vtag = priority << 13 | vid;
2423 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
2424 }
2425
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002426 packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
2427
2428 if (packet_was_received) {
2429 droq->stats.rx_bytes_received += len;
2430 droq->stats.rx_pkts_received++;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002431 } else {
2432 droq->stats.rx_dropped++;
2433 netif_info(lio, rx_err, lio->netdev,
2434 "droq:%d error rx_dropped:%llu\n",
2435 droq->q_no, droq->stats.rx_dropped);
2436 }
2437
2438 } else {
2439 recv_buffer_free(skb);
2440 }
2441}
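
/* Note on the receive path above: the firmware prepends per-packet
 * metadata words (hardware timestamp, RSS hash) ahead of the payload;
 * r_dh_off indexes them and skb_pull() then strips all rh->r_dh.len
 * 8-byte words before the skb reaches the stack.  The VLAN rebuild packs
 * the tag the way the 802.1Q TCI is laid out: priority in bits 15-13 and
 * the VLAN ID in bits 11-0, so e.g. priority 5, VID 100 gives
 * vtag = (5 << 13) | 100 = 0xa064.
 */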
2442
2443/**
2444 * \brief wrapper for calling napi_schedule
2445 * @param param parameters to pass to napi_schedule
2446 *
2447 * Used when scheduling on different CPUs
2448 */
2449static void napi_schedule_wrapper(void *param)
2450{
2451 struct napi_struct *napi = param;
2452
2453 napi_schedule(napi);
2454}
2455
2456/**
2457 * \brief callback when receive interrupt occurs and we are in NAPI mode
2458 * @param arg pointer to octeon output queue
2459 */
2460static void liquidio_napi_drv_callback(void *arg)
2461{
Raghu Vatsavayi9ded1a52016-09-01 11:16:10 -07002462 struct octeon_device *oct;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002463 struct octeon_droq *droq = arg;
2464 int this_cpu = smp_processor_id();
2465
Raghu Vatsavayi9ded1a52016-09-01 11:16:10 -07002466 oct = droq->oct_dev;
2467
2468 if (OCTEON_CN23XX_PF(oct) || droq->cpu_id == this_cpu) {
2469 napi_schedule_irqoff(&droq->napi);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002470 } else {
2471 struct call_single_data *csd = &droq->csd;
2472
2473 csd->func = napi_schedule_wrapper;
2474 csd->info = &droq->napi;
2475 csd->flags = 0;
2476
2477 smp_call_function_single_async(droq->cpu_id, csd);
2478 }
2479}
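
/* Scheduling note for the callback above: each DROQ is assigned a CPU in
 * setup_io_queues().  On CN23XX PF, or when the interrupt already fired on
 * that CPU, NAPI is scheduled directly; otherwise the prefilled
 * call_single_data lets smp_call_function_single_async() bounce the
 * napi_schedule() over to the DROQ's designated CPU, keeping RX processing
 * on the core chosen for that queue.
 */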
2480
2481/**
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002482 * \brief Entry point for NAPI polling
2483 * @param napi NAPI structure
2484 * @param budget maximum number of items to process
2485 */
2486static int liquidio_napi_poll(struct napi_struct *napi, int budget)
2487{
2488 struct octeon_droq *droq;
2489 int work_done;
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002490 int tx_done = 0, iq_no;
2491 struct octeon_instr_queue *iq;
2492 struct octeon_device *oct;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002493
2494 droq = container_of(napi, struct octeon_droq, napi);
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002495 oct = droq->oct_dev;
2496 iq_no = droq->q_no;
2497 /* Handle Droq descriptors */
2498 work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
2499 POLL_EVENT_PROCESS_PKTS,
2500 budget);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002501
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002502 /* Flush the instruction queue */
2503 iq = oct->instr_queue[iq_no];
2504 if (iq) {
VSR Burru6069f3f2017-03-22 11:54:50 -07002505 if (atomic_read(&iq->instr_pending))
2506 /* Process iq buffers with in the budget limits */
2507 tx_done = octeon_flush_iq(oct, iq, budget);
2508 else
2509 tx_done = 1;
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002510 /* Update iq read-index rather than waiting for next interrupt.
2511 * Return back if tx_done is false.
2512 */
2513 update_txq_status(oct, iq_no);
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002514 } else {
2515 dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
2516 __func__, iq_no);
2517 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002518
Satanand Burlacdb478e2017-01-31 13:04:42 -08002519 /* force enable interrupt if reg cnts are high to avoid wraparound */
2520 if ((work_done < budget && tx_done) ||
Felix Manlunas76e0e702017-02-07 12:10:58 -08002521 (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
Satanand Burlacdb478e2017-01-31 13:04:42 -08002522 (droq->pkt_count >= MAX_REG_CNT)) {
2523 tx_done = 1;
Eric Dumazet6ad20162017-01-30 08:22:01 -08002524 napi_complete_done(napi, work_done);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002525 octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
2526 POLL_EVENT_ENABLE_INTR, 0);
2527 return 0;
2528 }
2529
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002530 return (!tx_done) ? (budget) : (work_done);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002531}
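
/* Return-value note for the poll routine above: returning the full budget
 * (when tx_done is false) keeps this queue on the NAPI poll list, while
 * the early-exit branch calls napi_complete_done() and re-arms the
 * interrupt via POLL_EVENT_ENABLE_INTR, handing the queue back to
 * interrupt mode.
 */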
2532
2533/**
2534 * \brief Setup input and output queues
2535 * @param octeon_dev octeon device
Raghu Vatsavayia2c64b62016-07-03 13:56:55 -07002536 * @param ifidx Interface Index
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002537 *
2538 * Note: Queues are with respect to the octeon device. Thus
2539 * an input queue is for egress packets, and output queues
2540 * are for ingress packets.
2541 */
2542static inline int setup_io_queues(struct octeon_device *octeon_dev,
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002543 int ifidx)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002544{
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002545 struct octeon_droq_ops droq_ops;
2546 struct net_device *netdev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002547 static int cpu_id;
2548 static int cpu_id_modulus;
2549 struct octeon_droq *droq;
2550 struct napi_struct *napi;
2551 int q, q_no, retval = 0;
2552 struct lio *lio;
2553 int num_tx_descs;
2554
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002555 netdev = octeon_dev->props[ifidx].netdev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002556
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002557 lio = GET_LIO(netdev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002558
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002559 memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
2560
2561 droq_ops.fptr = liquidio_push_packet;
2562 droq_ops.farg = (void *)netdev;
2563
2564 droq_ops.poll_mode = 1;
2565 droq_ops.napi_fn = liquidio_napi_drv_callback;
2566 cpu_id = 0;
2567 cpu_id_modulus = num_present_cpus();
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002568
2569 /* set up DROQs. */
2570 for (q = 0; q < lio->linfo.num_rxpciq; q++) {
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07002571 q_no = lio->linfo.rxpciq[q].s.q_no;
2572 dev_dbg(&octeon_dev->pci_dev->dev,
2573 "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
2574 q, q_no);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002575 retval = octeon_setup_droq(octeon_dev, q_no,
2576 CFG_GET_NUM_RX_DESCS_NIC_IF
2577 (octeon_get_conf(octeon_dev),
2578 lio->ifidx),
2579 CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
2580 (octeon_get_conf(octeon_dev),
2581 lio->ifidx), NULL);
2582 if (retval) {
2583 dev_err(&octeon_dev->pci_dev->dev,
Raghu Vatsavayi32581242016-08-31 11:03:20 -07002584 "%s : Runtime DROQ(RxQ) creation failed.\n",
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002585 __func__);
2586 return 1;
2587 }
2588
2589 droq = octeon_dev->droq[q_no];
2590 napi = &droq->napi;
Raghu Vatsavayi1b7c55c2016-08-31 11:03:27 -07002591 dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n",
2592 (u64)netdev, (u64)octeon_dev, octeon_dev->pf_num);
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002593 netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002594
2595 /* designate a CPU for this droq */
2596 droq->cpu_id = cpu_id;
2597 cpu_id++;
2598 if (cpu_id >= cpu_id_modulus)
2599 cpu_id = 0;
2600
2601 octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
2602 }
2603
Raghu Vatsavayi7b6b6c92016-09-01 11:16:04 -07002604 if (OCTEON_CN23XX_PF(octeon_dev)) {
2605 /* 23XX PF can receive control messages (via the first PF-owned
2606 * droq) from the firmware even if the ethX interface is down,
2607 * so that's why poll_mode must be off for the first droq.
2608 */
2609 octeon_dev->droq[0]->ops.poll_mode = 0;
2610 }
2611
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002612 /* set up IQs. */
2613 for (q = 0; q < lio->linfo.num_txpciq; q++) {
2614 num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
2615 (octeon_dev),
2616 lio->ifidx);
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002617 retval = octeon_setup_iq(octeon_dev, ifidx, q,
2618 lio->linfo.txpciq[q], num_tx_descs,
2619 netdev_get_tx_queue(netdev, q));
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002620 if (retval) {
2621 dev_err(&octeon_dev->pci_dev->dev,
2622 " %s : Runtime IQ(TxQ) creation failed.\n",
2623 __func__);
2624 return 1;
2625 }
Rick Farrington35ae57e2017-03-07 11:40:41 -08002626
2627 if (octeon_dev->ioq_vector) {
2628 struct octeon_ioq_vector *ioq_vector;
2629
2630 ioq_vector = &octeon_dev->ioq_vector[q];
2631 netif_set_xps_queue(netdev,
2632 &ioq_vector->affinity_mask,
2633 ioq_vector->iq_index);
2634 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002635 }
2636
2637 return 0;
2638}
2639
2640/**
2641 * \brief Poll routine for checking transmit queue status
2642 * @param work work_struct data structure
2643 */
2644static void octnet_poll_check_txq_status(struct work_struct *work)
2645{
2646 struct cavium_wk *wk = (struct cavium_wk *)work;
2647 struct lio *lio = (struct lio *)wk->ctxptr;
2648
2649 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
2650 return;
2651
2652 check_txq_status(lio);
2653 queue_delayed_work(lio->txq_status_wq.wq,
2654 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
2655}
2656
2657/**
2658 * \brief Sets up the txq poll check
2659 * @param netdev network device
2660 */
Raghu Vatsavayi5b07aee2016-08-31 11:03:28 -07002661static inline int setup_tx_poll_fn(struct net_device *netdev)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002662{
2663 struct lio *lio = GET_LIO(netdev);
2664 struct octeon_device *oct = lio->oct_dev;
2665
Bhaktipriya Shridhar292b9da2016-06-08 01:47:59 +05302666 lio->txq_status_wq.wq = alloc_workqueue("txq-status",
2667 WQ_MEM_RECLAIM, 0);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002668 if (!lio->txq_status_wq.wq) {
2669 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
Raghu Vatsavayi5b07aee2016-08-31 11:03:28 -07002670 return -1;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002671 }
2672 INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
2673 octnet_poll_check_txq_status);
2674 lio->txq_status_wq.wk.ctxptr = lio;
2675 queue_delayed_work(lio->txq_status_wq.wq,
2676 &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
Raghu Vatsavayi5b07aee2016-08-31 11:03:28 -07002677 return 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002678}
2679
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002680static inline void cleanup_tx_poll_fn(struct net_device *netdev)
2681{
2682 struct lio *lio = GET_LIO(netdev);
2683
Raghu Vatsavayi5b07aee2016-08-31 11:03:28 -07002684 if (lio->txq_status_wq.wq) {
2685 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
2686 destroy_workqueue(lio->txq_status_wq.wq);
2687 }
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002688}
2689
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002690/**
2691 * \brief Net device open for LiquidIO
2692 * @param netdev network device
2693 */
2694static int liquidio_open(struct net_device *netdev)
2695{
2696 struct lio *lio = GET_LIO(netdev);
2697 struct octeon_device *oct = lio->oct_dev;
2698 struct napi_struct *napi, *n;
2699
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002700 if (oct->props[lio->ifidx].napi_enabled == 0) {
2701 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
2702 napi_enable(napi);
2703
2704 oct->props[lio->ifidx].napi_enabled = 1;
Raghu Vatsavayi7b6b6c92016-09-01 11:16:04 -07002705
2706 if (OCTEON_CN23XX_PF(oct))
2707 oct->droq[0]->ops.poll_mode = 1;
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002708 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002709
Prasad Kanneganti9feb16a2017-01-03 11:27:33 -08002710 if ((oct->chip_id == OCTEON_CN66XX || oct->chip_id == OCTEON_CN68XX) &&
2711 ptp_enable)
2712 oct_ptp_open(netdev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002713
2714 ifstate_set(lio, LIO_IFSTATE_RUNNING);
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002715
Raghu Vatsavayi7b6b6c92016-09-01 11:16:04 -07002716 /* Ready for link status updates */
2717 lio->intf_open = 1;
2718
2719 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
2720
Raghu Vatsavayi5b07aee2016-08-31 11:03:28 -07002721 if (OCTEON_CN23XX_PF(oct)) {
2722 if (!oct->msix_on)
2723 if (setup_tx_poll_fn(netdev))
2724 return -1;
2725 } else {
2726 if (setup_tx_poll_fn(netdev))
2727 return -1;
2728 }
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002729
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002730 start_txq(netdev);
2731
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002732 /* tell Octeon to start forwarding packets to host */
2733 send_rx_ctrl_cmd(lio, 1);
2734
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002735 dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
2736 netdev->name);
2737
2738 return 0;
2739}
2740
2741/**
2742 * \brief Net device stop for LiquidIO
2743 * @param netdev network device
2744 */
2745static int liquidio_stop(struct net_device *netdev)
2746{
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002747 struct lio *lio = GET_LIO(netdev);
2748 struct octeon_device *oct = lio->oct_dev;
2749
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002750 ifstate_reset(lio, LIO_IFSTATE_RUNNING);
2751
2752 netif_tx_disable(netdev);
2753
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002754 /* Inform that netif carrier is down */
Raghu Vatsavayi9a96bde2016-06-21 22:53:06 -07002755 netif_carrier_off(netdev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002756 lio->intf_open = 0;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002757 lio->linfo.link.s.link_up = 0;
2758 lio->link_changes++;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002759
Felix Manlunascb2336b2017-01-11 17:09:02 -08002760 /* Tell Octeon that nic interface is down. */
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002761 send_rx_ctrl_cmd(lio, 0);
2762
Raghu Vatsavayi5b07aee2016-08-31 11:03:28 -07002763 if (OCTEON_CN23XX_PF(oct)) {
2764 if (!oct->msix_on)
2765 cleanup_tx_poll_fn(netdev);
2766 } else {
2767 cleanup_tx_poll_fn(netdev);
2768 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002769
2770 if (lio->ptp_clock) {
2771 ptp_clock_unregister(lio->ptp_clock);
2772 lio->ptp_clock = NULL;
2773 }
2774
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002775 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002776
2777 return 0;
2778}
2779
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002780/**
2781 * \brief Converts a mask based on net device flags
2782 * @param netdev network device
2783 *
2784 * This routine generates an octnet_ifflags mask from the net device flags
2785 * received from the OS.
2786 */
2787static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
2788{
2789 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
2790
2791 if (netdev->flags & IFF_PROMISC)
2792 f |= OCTNET_IFFLAG_PROMISC;
2793
2794 if (netdev->flags & IFF_ALLMULTI)
2795 f |= OCTNET_IFFLAG_ALLMULTI;
2796
2797 if (netdev->flags & IFF_MULTICAST) {
2798 f |= OCTNET_IFFLAG_MULTICAST;
2799
2800 /* Accept all multicast addresses if there are more than we
2801 * can handle
2802 */
2803 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
2804 f |= OCTNET_IFFLAG_ALLMULTI;
2805 }
2806
2807 if (netdev->flags & IFF_BROADCAST)
2808 f |= OCTNET_IFFLAG_BROADCAST;
2809
2810 return f;
2811}
2812
2813/**
2814 * \brief Net device set_multicast_list
2815 * @param netdev network device
2816 */
2817static void liquidio_set_mcast_list(struct net_device *netdev)
2818{
2819 struct lio *lio = GET_LIO(netdev);
2820 struct octeon_device *oct = lio->oct_dev;
2821 struct octnic_ctrl_pkt nctrl;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002822 struct netdev_hw_addr *ha;
2823 u64 *mc;
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -07002824 int ret;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002825 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
2826
2827 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2828
2829 /* Create a ctrl pkt command to be sent to core app. */
2830 nctrl.ncmd.u64 = 0;
2831 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002832 nctrl.ncmd.s.param1 = get_new_flags(netdev);
2833 nctrl.ncmd.s.param2 = mc_count;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002834 nctrl.ncmd.s.more = mc_count;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002835 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002836 nctrl.netpndev = (u64)netdev;
2837 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2838
2839 /* copy all the addresses into the udd */
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002840 mc = &nctrl.udd[0];
2841 netdev_for_each_mc_addr(ha, netdev) {
2842 *mc = 0;
2843 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
2844 /* no need to swap bytes */
2845
2846 if (++mc > &nctrl.udd[mc_count])
2847 break;
2848 }
2849
2850	/* Any activity in this call from the kernel has to be atomic,
2851	 * so we won't wait for the response.
2852 */
2853 nctrl.wait_time = 0;
2854
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002855 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002856 if (ret < 0) {
2857 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2858 ret);
2859 }
2860}
2861
2862/**
2863 * \brief Net device set_mac_address
2864 * @param netdev network device
2865 */
2866static int liquidio_set_mac(struct net_device *netdev, void *p)
2867{
2868 int ret = 0;
2869 struct lio *lio = GET_LIO(netdev);
2870 struct octeon_device *oct = lio->oct_dev;
2871 struct sockaddr *addr = (struct sockaddr *)p;
2872 struct octnic_ctrl_pkt nctrl;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002873
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002874 if (!is_valid_ether_addr(addr->sa_data))
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002875 return -EADDRNOTAVAIL;
2876
2877 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2878
2879 nctrl.ncmd.u64 = 0;
2880 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002881 nctrl.ncmd.s.param1 = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002882 nctrl.ncmd.s.more = 1;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002883 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002884 nctrl.netpndev = (u64)netdev;
2885 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2886 nctrl.wait_time = 100;
2887
2888 nctrl.udd[0] = 0;
2889 /* The MAC Address is presented in network byte order. */
2890 memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2891
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002892 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002893 if (ret < 0) {
2894 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2895 return -ENOMEM;
2896 }
2897 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2898 memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2899
2900 return 0;
2901}
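
/* Layout note for the command above: the 64-bit nctrl.udd[0] word carries
 * the MAC with bytes 0-1 left as zero and the six address bytes, kept in
 * network byte order, in bytes 2-7.  liquidio_set_mcast_list() packs each
 * multicast address into its udd[] entries the same way.
 */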
2902
2903/**
2904 * \brief Net device get_stats
2905 * @param netdev network device
2906 */
2907static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
2908{
2909 struct lio *lio = GET_LIO(netdev);
2910 struct net_device_stats *stats = &netdev->stats;
2911 struct octeon_device *oct;
2912 u64 pkts = 0, drop = 0, bytes = 0;
2913 struct oct_droq_stats *oq_stats;
2914 struct oct_iq_stats *iq_stats;
2915 int i, iq_no, oq_no;
2916
2917 oct = lio->oct_dev;
2918
2919 for (i = 0; i < lio->linfo.num_txpciq; i++) {
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07002920 iq_no = lio->linfo.txpciq[i].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002921 iq_stats = &oct->instr_queue[iq_no]->stats;
2922 pkts += iq_stats->tx_done;
2923 drop += iq_stats->tx_dropped;
2924 bytes += iq_stats->tx_tot_bytes;
2925 }
2926
2927 stats->tx_packets = pkts;
2928 stats->tx_bytes = bytes;
2929 stats->tx_dropped = drop;
2930
2931 pkts = 0;
2932 drop = 0;
2933 bytes = 0;
2934
2935 for (i = 0; i < lio->linfo.num_rxpciq; i++) {
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07002936 oq_no = lio->linfo.rxpciq[i].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002937 oq_stats = &oct->droq[oq_no]->stats;
2938 pkts += oq_stats->rx_pkts_received;
2939 drop += (oq_stats->rx_dropped +
2940 oq_stats->dropped_nodispatch +
2941 oq_stats->dropped_toomany +
2942 oq_stats->dropped_nomem);
2943 bytes += oq_stats->rx_bytes_received;
2944 }
2945
2946 stats->rx_bytes = bytes;
2947 stats->rx_packets = pkts;
2948 stats->rx_dropped = drop;
2949
2950 return stats;
2951}
2952
2953/**
2954 * \brief Net device change_mtu
2955 * @param netdev network device
2956 */
2957static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
2958{
2959 struct lio *lio = GET_LIO(netdev);
2960 struct octeon_device *oct = lio->oct_dev;
2961 struct octnic_ctrl_pkt nctrl;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002962 int ret = 0;
2963
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002964 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2965
2966 nctrl.ncmd.u64 = 0;
2967 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002968 nctrl.ncmd.s.param1 = new_mtu;
2969 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002970 nctrl.wait_time = 100;
2971 nctrl.netpndev = (u64)netdev;
2972 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2973
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07002974 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002975 if (ret < 0) {
2976 dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
2977 return -1;
2978 }
2979
2980 lio->mtu = new_mtu;
2981
2982 return 0;
2983}
2984
2985/**
2986 * \brief Handler for SIOCSHWTSTAMP ioctl
2987 * @param netdev network device
2988 * @param ifr interface request
2990 */
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -07002991static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07002992{
2993 struct hwtstamp_config conf;
2994 struct lio *lio = GET_LIO(netdev);
2995
2996 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2997 return -EFAULT;
2998
2999 if (conf.flags)
3000 return -EINVAL;
3001
3002 switch (conf.tx_type) {
3003 case HWTSTAMP_TX_ON:
3004 case HWTSTAMP_TX_OFF:
3005 break;
3006 default:
3007 return -ERANGE;
3008 }
3009
3010 switch (conf.rx_filter) {
3011 case HWTSTAMP_FILTER_NONE:
3012 break;
3013 case HWTSTAMP_FILTER_ALL:
3014 case HWTSTAMP_FILTER_SOME:
3015 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3016 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3017 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3018 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3019 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3020 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3021 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3022 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3023 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3024 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3025 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3026 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3027 conf.rx_filter = HWTSTAMP_FILTER_ALL;
3028 break;
3029 default:
3030 return -ERANGE;
3031 }
3032
3033 if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
3034 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
3035
3036 else
3037 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
3038
3039 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
3040}
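
/* Userspace sketch for the handler above (illustrative only; the ioctl and
 * struct are the standard kernel timestamping ABI, not driver-specific,
 * and "eth0"/sock_fd are placeholders):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Any rx_filter other than NONE is coerced to HWTSTAMP_FILTER_ALL and
 * copied back, so callers should re-read cfg after the ioctl.
 */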
3041
3042/**
3043 * \brief ioctl handler
3044 * @param netdev network device
3045 * @param ifr interface request
3046 * @param cmd command
3047 */
3048static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
3049{
Prasad Kanneganti9feb16a2017-01-03 11:27:33 -08003050 struct lio *lio = GET_LIO(netdev);
3051
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003052 switch (cmd) {
3053 case SIOCSHWTSTAMP:
Prasad Kanneganti9feb16a2017-01-03 11:27:33 -08003054 if ((lio->oct_dev->chip_id == OCTEON_CN66XX ||
3055 lio->oct_dev->chip_id == OCTEON_CN68XX) && ptp_enable)
3056 return hwtstamp_ioctl(netdev, ifr);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003057 default:
3058 return -EOPNOTSUPP;
3059 }
3060}
3061
3062/**
3063 * \brief handle a Tx timestamp response
3064 * @param status response status
3065 * @param buf pointer to skb
3066 */
3067static void handle_timestamp(struct octeon_device *oct,
3068 u32 status,
3069 void *buf)
3070{
3071 struct octnet_buf_free_info *finfo;
3072 struct octeon_soft_command *sc;
3073 struct oct_timestamp_resp *resp;
3074 struct lio *lio;
3075 struct sk_buff *skb = (struct sk_buff *)buf;
3076
3077 finfo = (struct octnet_buf_free_info *)skb->cb;
3078 lio = finfo->lio;
3079 sc = finfo->sc;
3080 oct = lio->oct_dev;
3081 resp = (struct oct_timestamp_resp *)sc->virtrptr;
3082
3083 if (status != OCTEON_REQUEST_DONE) {
3084 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
3085 CVM_CAST64(status));
3086 resp->timestamp = 0;
3087 }
3088
3089 octeon_swap_8B_data(&resp->timestamp, 1);
3090
Colin Ian King19a6d152016-02-05 16:30:39 +00003091 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003092 struct skb_shared_hwtstamps ts;
3093 u64 ns = resp->timestamp;
3094
3095 netif_info(lio, tx_done, lio->netdev,
3096 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
3097 skb, (unsigned long long)ns);
3098 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
3099 skb_tstamp_tx(skb, &ts);
3100 }
3101
3102 octeon_free_soft_command(oct, sc);
Raghu Vatsavayicabeb132016-06-14 16:54:47 -07003103 tx_buffer_free(skb);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003104}
3105
3106/** \brief Send a data packet that will be timestamped
3107 * @param oct octeon device
3108 * @param ndata pointer to network data
3109 * @param finfo pointer to private network data
3110 */
3111static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
3112 struct octnic_data_pkt *ndata,
Raghu Vatsavayi32581242016-08-31 11:03:20 -07003113 struct octnet_buf_free_info *finfo)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003114{
3115 int retval;
3116 struct octeon_soft_command *sc;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003117 struct lio *lio;
3118 int ring_doorbell;
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07003119 u32 len;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003120
3121 lio = finfo->lio;
3122
3123 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
3124 sizeof(struct oct_timestamp_resp));
3125 finfo->sc = sc;
3126
3127 if (!sc) {
3128 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
3129 return IQ_SEND_FAILED;
3130 }
3131
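	/* A timestamped packet needs a response from the firmware (the
	 * response buffer carries the Tx timestamp), so promote the request
	 * type to its RESP variant.
	 */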
3132 if (ndata->reqtype == REQTYPE_NORESP_NET)
3133 ndata->reqtype = REQTYPE_RESP_NET;
3134 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
3135 ndata->reqtype = REQTYPE_RESP_NET_SG;
3136
3137 sc->callback = handle_timestamp;
3138 sc->callback_arg = finfo->skb;
3139 sc->iq_no = ndata->q_no;
3140
Raghu Vatsavayi5b823512016-09-01 11:16:07 -07003141 if (OCTEON_CN23XX_PF(oct))
3142 len = (u32)((struct octeon_instr_ih3 *)
3143 (&sc->cmd.cmd3.ih3))->dlengsz;
3144 else
3145 len = (u32)((struct octeon_instr_ih2 *)
3146 (&sc->cmd.cmd2.ih2))->dlengsz;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003147
Raghu Vatsavayi32581242016-08-31 11:03:20 -07003148 ring_doorbell = 1;
Raghu Vatsavayi5b823512016-09-01 11:16:07 -07003149
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003150 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07003151 sc, len, ndata->reqtype);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003152
Raghu Vatsavayiddc173a2016-06-14 16:54:43 -07003153 if (retval == IQ_SEND_FAILED) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003154 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
3155 retval);
3156 octeon_free_soft_command(oct, sc);
3157 } else {
3158 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
3159 }
3160
3161 return retval;
3162}
3163
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003164/** \brief Transmit network packets to the Octeon interface
 3165 * @param skb skbuff struct containing the packet to transmit
3166 * @param netdev pointer to network device
3167 * @returns whether the packet was transmitted to the device okay or not
3168 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
3169 */
3170static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
3171{
3172 struct lio *lio;
3173 struct octnet_buf_free_info *finfo;
3174 union octnic_cmd_setup cmdsetup;
3175 struct octnic_data_pkt ndata;
3176 struct octeon_device *oct;
3177 struct oct_iq_stats *stats;
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07003178 struct octeon_instr_irh *irh;
3179 union tx_info *tx_info;
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07003180 int status = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003181 int q_idx = 0, iq_no = 0;
Raghu Vatsavayi32581242016-08-31 11:03:20 -07003182 int j;
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07003183 u64 dptr = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003184 u32 tag = 0;
3185
3186 lio = GET_LIO(netdev);
3187 oct = lio->oct_dev;
3188
3189 if (netif_is_multiqueue(netdev)) {
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07003190 q_idx = skb->queue_mapping;
3191 q_idx = (q_idx % (lio->linfo.num_txpciq));
3192 tag = q_idx;
3193 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003194 } else {
3195 iq_no = lio->txq;
3196 }
3197
3198 stats = &oct->instr_queue[iq_no]->stats;
3199
3200 /* Check for all conditions in which the current packet cannot be
3201 * transmitted.
3202 */
3203 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003204 (!lio->linfo.link.s.link_up) ||
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003205 (skb->len <= 0)) {
3206 netif_info(lio, tx_err, lio->netdev,
3207 "Transmit failed link_status : %d\n",
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003208 lio->linfo.link.s.link_up);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003209 goto lio_xmit_failed;
3210 }
3211
3212 /* Use space in skb->cb to store info used to unmap and
3213 * free the buffers.
3214 */
3215 finfo = (struct octnet_buf_free_info *)skb->cb;
3216 finfo->lio = lio;
3217 finfo->skb = skb;
3218 finfo->sc = NULL;
3219
3220 /* Prepare the attributes for the data to be passed to OSI. */
3221 memset(&ndata, 0, sizeof(struct octnic_data_pkt));
3222
3223 ndata.buf = (void *)finfo;
3224
3225 ndata.q_no = iq_no;
3226
3227 if (netif_is_multiqueue(netdev)) {
3228 if (octnet_iq_is_full(oct, ndata.q_no)) {
3229 /* defer sending if queue is full */
3230 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
3231 ndata.q_no);
3232 stats->tx_iq_busy++;
3233 return NETDEV_TX_BUSY;
3234 }
3235 } else {
3236 if (octnet_iq_is_full(oct, lio->txq)) {
3237 /* defer sending if queue is full */
3238 stats->tx_iq_busy++;
3239 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
Raghu Vatsavayia2c64b62016-07-03 13:56:55 -07003240 lio->txq);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003241 return NETDEV_TX_BUSY;
3242 }
3243 }
3244 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
Raghu Vatsavayia2c64b62016-07-03 13:56:55 -07003245 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003246 */
3247
3248 ndata.datasize = skb->len;
3249
3250 cmdsetup.u64 = 0;
Raghu Vatsavayi7275ebf2016-06-14 16:54:49 -07003251 cmdsetup.s.iq_no = iq_no;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003252
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07003253 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3254 if (skb->encapsulation) {
3255 cmdsetup.s.tnl_csum = 1;
3256 stats->tx_vxlan++;
3257 } else {
3258 cmdsetup.s.transport_csum = 1;
3259 }
3260 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003261 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3262 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3263 cmdsetup.s.timestamp = 1;
3264 }
3265
3266 if (skb_shinfo(skb)->nr_frags == 0) {
3267 cmdsetup.s.u.datasize = skb->len;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003268 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
Raghu Vatsavayia2c64b62016-07-03 13:56:55 -07003269
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003270 /* Map the linear skb data for DMA to the device */
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07003271 dptr = dma_map_single(&oct->pci_dev->dev,
3272 skb->data,
3273 skb->len,
3274 DMA_TO_DEVICE);
3275 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003276 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
3277 __func__);
3278 return NETDEV_TX_BUSY;
3279 }
3280
Raghu Vatsavayi5b823512016-09-01 11:16:07 -07003281 if (OCTEON_CN23XX_PF(oct))
3282 ndata.cmd.cmd3.dptr = dptr;
3283 else
3284 ndata.cmd.cmd2.dptr = dptr;
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07003285 finfo->dptr = dptr;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003286 ndata.reqtype = REQTYPE_NORESP_NET;
3287
3288 } else {
3289 int i, frags;
3290 struct skb_frag_struct *frag;
3291 struct octnic_gather *g;
3292
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07003293 spin_lock(&lio->glist_lock[q_idx]);
3294 g = (struct octnic_gather *)
3295 list_delete_head(&lio->glist[q_idx]);
3296 spin_unlock(&lio->glist_lock[q_idx]);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003297
3298 if (!g) {
3299 netif_info(lio, tx_err, lio->netdev,
3300 "Transmit scatter gather: glist null!\n");
3301 goto lio_xmit_failed;
3302 }
3303
3304 cmdsetup.s.gather = 1;
3305 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003306 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003307
3308 memset(g->sg, 0, g->sg_size);
3309
3310 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
3311 skb->data,
3312 (skb->len - skb->data_len),
3313 DMA_TO_DEVICE);
3314 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
3315 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
3316 __func__);
3317 return NETDEV_TX_BUSY;
3318 }
3319 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
3320
3321 frags = skb_shinfo(skb)->nr_frags;
3322 i = 1;
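	/* Each gather list entry holds four pointers and four sizes, so
	 * entry (i >> 2), slot (i & 3) maps fragment i; slot 0 of entry 0
	 * already holds the linear part of the skb.
	 */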
3323 while (frags--) {
3324 frag = &skb_shinfo(skb)->frags[i - 1];
3325
3326 g->sg[(i >> 2)].ptr[(i & 3)] =
3327 dma_map_page(&oct->pci_dev->dev,
3328 frag->page.p,
3329 frag->page_offset,
3330 frag->size,
3331 DMA_TO_DEVICE);
3332
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07003333 if (dma_mapping_error(&oct->pci_dev->dev,
3334 g->sg[i >> 2].ptr[i & 3])) {
3335 dma_unmap_single(&oct->pci_dev->dev,
3336 g->sg[0].ptr[0],
3337 skb->len - skb->data_len,
3338 DMA_TO_DEVICE);
3339 for (j = 1; j < i; j++) {
3340 frag = &skb_shinfo(skb)->frags[j - 1];
3341 dma_unmap_page(&oct->pci_dev->dev,
3342 g->sg[j >> 2].ptr[j & 3],
3343 frag->size,
3344 DMA_TO_DEVICE);
3345 }
3346 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
3347 __func__);
3348 return NETDEV_TX_BUSY;
3349 }
3350
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003351 add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
3352 i++;
3353 }
3354
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07003355 dptr = g->sg_dma_ptr;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003356
Raghu Vatsavayi5b823512016-09-01 11:16:07 -07003357 if (OCTEON_CN23XX_PF(oct))
3358 ndata.cmd.cmd3.dptr = dptr;
3359 else
3360 ndata.cmd.cmd2.dptr = dptr;
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07003361 finfo->dptr = dptr;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003362 finfo->g = g;
3363
3364 ndata.reqtype = REQTYPE_NORESP_NET_SG;
3365 }
3366
Raghu Vatsavayi5b823512016-09-01 11:16:07 -07003367 if (OCTEON_CN23XX_PF(oct)) {
3368 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
3369 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
3370 } else {
3371 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
3372 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
3373 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003374
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07003375 if (skb_shinfo(skb)->gso_size) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003376 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
3377 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07003378 stats->tx_gso++;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003379 }
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07003380
Raghu Vatsavayi0da0b772016-06-21 22:53:04 -07003381 /* HW insert VLAN tag */
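	/* Pass the 3-bit PCP and the 12-bit VID from the skb's VLAN TCI so
	 * the hardware can insert the 802.1Q tag on transmit.
	 */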
3382 if (skb_vlan_tag_present(skb)) {
3383 irh->priority = skb_vlan_tag_get(skb) >> 13;
3384 irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
3385 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003386
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003387 if (unlikely(cmdsetup.s.timestamp))
Raghu Vatsavayi32581242016-08-31 11:03:20 -07003388 status = send_nic_timestamp_pkt(oct, &ndata, finfo);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003389 else
Raghu Vatsavayi32581242016-08-31 11:03:20 -07003390 status = octnet_send_nic_data_pkt(oct, &ndata);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003391 if (status == IQ_SEND_FAILED)
3392 goto lio_xmit_failed;
3393
3394 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
3395
3396 if (status == IQ_SEND_STOP)
3397 stop_q(lio->netdev, q_idx);
3398
Florian Westphal860e9532016-05-03 16:33:13 +02003399 netif_trans_update(netdev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003400
Satanand Burla80c8eae2017-01-26 11:52:35 -08003401 if (tx_info->s.gso_segs)
3402 stats->tx_done += tx_info->s.gso_segs;
Raghu Vatsavayi1f164712016-06-21 22:53:11 -07003403 else
3404 stats->tx_done++;
Satanand Burla80c8eae2017-01-26 11:52:35 -08003405 stats->tx_tot_bytes += ndata.datasize;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003406
3407 return NETDEV_TX_OK;
3408
3409lio_xmit_failed:
3410 stats->tx_dropped++;
3411 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
3412 iq_no, stats->tx_dropped);
Raghu Vatsavayi6a885b62016-06-14 16:54:51 -07003413 if (dptr)
3414 dma_unmap_single(&oct->pci_dev->dev, dptr,
3415 ndata.datasize, DMA_TO_DEVICE);
Raghu Vatsavayicabeb132016-06-14 16:54:47 -07003416 tx_buffer_free(skb);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003417 return NETDEV_TX_OK;
3418}
3419
3420/** \brief Network device Tx timeout
3421 * @param netdev pointer to network device
3422 */
3423static void liquidio_tx_timeout(struct net_device *netdev)
3424{
3425 struct lio *lio;
3426
3427 lio = GET_LIO(netdev);
3428
3429 netif_info(lio, tx_err, lio->netdev,
3430 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
3431 netdev->stats.tx_dropped);
Florian Westphal860e9532016-05-03 16:33:13 +02003432 netif_trans_update(netdev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003433 txqs_wake(netdev);
3434}
3435
Raghu Vatsavayi63245f22016-06-21 22:53:05 -07003436static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
3437 __be16 proto __attribute__((unused)),
3438 u16 vid)
3439{
3440 struct lio *lio = GET_LIO(netdev);
3441 struct octeon_device *oct = lio->oct_dev;
3442 struct octnic_ctrl_pkt nctrl;
3443 int ret = 0;
3444
3445 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3446
3447 nctrl.ncmd.u64 = 0;
3448 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
3449 nctrl.ncmd.s.param1 = vid;
3450 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3451 nctrl.wait_time = 100;
3452 nctrl.netpndev = (u64)netdev;
3453 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3454
3455 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3456 if (ret < 0) {
3457 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
3458 ret);
3459 }
3460
3461 return ret;
3462}
3463
3464static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
3465 __be16 proto __attribute__((unused)),
3466 u16 vid)
3467{
3468 struct lio *lio = GET_LIO(netdev);
3469 struct octeon_device *oct = lio->oct_dev;
3470 struct octnic_ctrl_pkt nctrl;
3471 int ret = 0;
3472
3473 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3474
3475 nctrl.ncmd.u64 = 0;
3476 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
3477 nctrl.ncmd.s.param1 = vid;
3478 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3479 nctrl.wait_time = 100;
3480 nctrl.netpndev = (u64)netdev;
3481 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3482
3483 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3484 if (ret < 0) {
3485 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
3486 ret);
3487 }
3488 return ret;
3489}
3490
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07003491/** Sending command to enable/disable RX checksum offload
3492 * @param netdev pointer to network device
3493 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
 3494 * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE/
3495 * OCTNET_CMD_RXCSUM_DISABLE
3496 * @returns SUCCESS or FAILURE
3497 */
Nicholas Mc Guirec41419b2016-08-22 17:52:00 +02003498static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
3499 u8 rx_cmd)
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07003500{
3501 struct lio *lio = GET_LIO(netdev);
3502 struct octeon_device *oct = lio->oct_dev;
3503 struct octnic_ctrl_pkt nctrl;
3504 int ret = 0;
3505
Felix Manlunas0c264582017-04-06 19:22:22 -07003506 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3507
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07003508 nctrl.ncmd.u64 = 0;
3509 nctrl.ncmd.s.cmd = command;
3510 nctrl.ncmd.s.param1 = rx_cmd;
3511 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3512 nctrl.wait_time = 100;
3513 nctrl.netpndev = (u64)netdev;
3514 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3515
3516 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3517 if (ret < 0) {
3518 dev_err(&oct->pci_dev->dev,
3519 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
3520 ret);
3521 }
3522 return ret;
3523}
3524
3525/** Sending command to add/delete VxLAN UDP port to firmware
3526 * @param netdev pointer to network device
3527 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
3528 * @param vxlan_port VxLAN port to be added or deleted
3529 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
3530 * OCTNET_CMD_VXLAN_PORT_DEL
3531 * @returns SUCCESS or FAILURE
3532 */
3533static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
3534 u16 vxlan_port, u8 vxlan_cmd_bit)
3535{
3536 struct lio *lio = GET_LIO(netdev);
3537 struct octeon_device *oct = lio->oct_dev;
3538 struct octnic_ctrl_pkt nctrl;
3539 int ret = 0;
3540
Felix Manlunas0c264582017-04-06 19:22:22 -07003541 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3542
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07003543 nctrl.ncmd.u64 = 0;
3544 nctrl.ncmd.s.cmd = command;
3545 nctrl.ncmd.s.more = vxlan_cmd_bit;
3546 nctrl.ncmd.s.param1 = vxlan_port;
3547 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3548 nctrl.wait_time = 100;
3549 nctrl.netpndev = (u64)netdev;
3550 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3551
3552 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3553 if (ret < 0) {
3554 dev_err(&oct->pci_dev->dev,
3555 "VxLAN port add/delete failed in core (ret:0x%x)\n",
3556 ret);
3557 }
3558 return ret;
3559}
3560
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003561/** \brief Net device fix features
3562 * @param netdev pointer to network device
3563 * @param request features requested
3564 * @returns updated features list
3565 */
3566static netdev_features_t liquidio_fix_features(struct net_device *netdev,
3567 netdev_features_t request)
3568{
3569 struct lio *lio = netdev_priv(netdev);
3570
3571 if ((request & NETIF_F_RXCSUM) &&
3572 !(lio->dev_capability & NETIF_F_RXCSUM))
3573 request &= ~NETIF_F_RXCSUM;
3574
3575 if ((request & NETIF_F_HW_CSUM) &&
3576 !(lio->dev_capability & NETIF_F_HW_CSUM))
3577 request &= ~NETIF_F_HW_CSUM;
3578
3579 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
3580 request &= ~NETIF_F_TSO;
3581
3582 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
3583 request &= ~NETIF_F_TSO6;
3584
3585 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
3586 request &= ~NETIF_F_LRO;
3587
 3588 /* Disable LRO if RXCSUM is off */
3589 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
3590 (lio->dev_capability & NETIF_F_LRO))
3591 request &= ~NETIF_F_LRO;
3592
3593 return request;
3594}
3595
3596/** \brief Net device set features
3597 * @param netdev pointer to network device
3598 * @param features features to enable/disable
3599 */
3600static int liquidio_set_features(struct net_device *netdev,
3601 netdev_features_t features)
3602{
3603 struct lio *lio = netdev_priv(netdev);
3604
 3605 if (!((netdev->features ^ features) & (NETIF_F_LRO | NETIF_F_RXCSUM)))
3606 return 0;
3607
3608 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003609 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3610 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003611 else if (!(features & NETIF_F_LRO) &&
3612 (lio->dev_capability & NETIF_F_LRO))
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003613 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
3614 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003615
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07003616 /* Sending command to firmware to enable/disable RX checksum
3617 * offload settings using ethtool
3618 */
3619 if (!(netdev->features & NETIF_F_RXCSUM) &&
3620 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
3621 (features & NETIF_F_RXCSUM))
3622 liquidio_set_rxcsum_command(netdev,
3623 OCTNET_CMD_TNL_RX_CSUM_CTL,
3624 OCTNET_CMD_RXCSUM_ENABLE);
3625 else if ((netdev->features & NETIF_F_RXCSUM) &&
3626 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
3627 !(features & NETIF_F_RXCSUM))
3628 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3629 OCTNET_CMD_RXCSUM_DISABLE);
3630
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003631 return 0;
3632}
3633
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07003634static void liquidio_add_vxlan_port(struct net_device *netdev,
3635 struct udp_tunnel_info *ti)
3636{
3637 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3638 return;
3639
3640 liquidio_vxlan_port_command(netdev,
3641 OCTNET_CMD_VXLAN_PORT_CONFIG,
3642 htons(ti->port),
3643 OCTNET_CMD_VXLAN_PORT_ADD);
3644}
3645
3646static void liquidio_del_vxlan_port(struct net_device *netdev,
3647 struct udp_tunnel_info *ti)
3648{
3649 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3650 return;
3651
3652 liquidio_vxlan_port_command(netdev,
3653 OCTNET_CMD_VXLAN_PORT_CONFIG,
3654 htons(ti->port),
3655 OCTNET_CMD_VXLAN_PORT_DEL);
3656}
3657
Raghu Vatsavayi86dea552016-11-14 15:54:43 -08003658static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
3659 u8 *mac, bool is_admin_assigned)
3660{
3661 struct lio *lio = GET_LIO(netdev);
3662 struct octeon_device *oct = lio->oct_dev;
3663 struct octnic_ctrl_pkt nctrl;
3664
3665 if (!is_valid_ether_addr(mac))
3666 return -EINVAL;
3667
3668 if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
3669 return -EINVAL;
3670
3671 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3672
3673 nctrl.ncmd.u64 = 0;
3674 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
3675 /* vfidx is 0 based, but vf_num (param1) is 1 based */
3676 nctrl.ncmd.s.param1 = vfidx + 1;
3677 nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
3678 nctrl.ncmd.s.more = 1;
3679 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
Rick Farrington9549c6c2017-03-17 15:43:26 -07003680 nctrl.netpndev = (u64)netdev;
3681 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
Raghu Vatsavayi86dea552016-11-14 15:54:43 -08003682 nctrl.wait_time = LIO_CMD_WAIT_TM;
3683
3684 nctrl.udd[0] = 0;
3685 /* The MAC Address is presented in network byte order. */
3686 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
3687
3688 oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
3689
3690 octnet_send_nic_ctrl_pkt(oct, &nctrl);
3691
3692 return 0;
3693}
3694
3695static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
3696{
3697 struct lio *lio = GET_LIO(netdev);
3698 struct octeon_device *oct = lio->oct_dev;
3699 int retval;
3700
Felix Manlunas0d9a5992017-05-16 11:28:00 -07003701 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3702 return -EINVAL;
3703
Raghu Vatsavayi86dea552016-11-14 15:54:43 -08003704 retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
3705 if (!retval)
3706 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
3707
3708 return retval;
3709}
3710
3711static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
3712 u16 vlan, u8 qos, __be16 vlan_proto)
3713{
3714 struct lio *lio = GET_LIO(netdev);
3715 struct octeon_device *oct = lio->oct_dev;
3716 struct octnic_ctrl_pkt nctrl;
3717 u16 vlantci;
3718
3719 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3720 return -EINVAL;
3721
3722 if (vlan_proto != htons(ETH_P_8021Q))
3723 return -EPROTONOSUPPORT;
3724
3725 if (vlan >= VLAN_N_VID || qos > 7)
3726 return -EINVAL;
3727
3728 if (vlan)
3729 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
3730 else
3731 vlantci = 0;
3732
3733 if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
3734 return 0;
3735
3736 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3737
3738 if (vlan)
3739 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
3740 else
3741 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
3742
3743 nctrl.ncmd.s.param1 = vlantci;
3744 nctrl.ncmd.s.param2 =
3745 vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
3746 nctrl.ncmd.s.more = 0;
3747 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3748 nctrl.cb_fn = 0;
3749 nctrl.wait_time = LIO_CMD_WAIT_TM;
3750
3751 octnet_send_nic_ctrl_pkt(oct, &nctrl);
3752
3753 oct->sriov_info.vf_vlantci[vfidx] = vlantci;
3754
3755 return 0;
3756}
3757
3758static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
3759 struct ifla_vf_info *ivi)
3760{
3761 struct lio *lio = GET_LIO(netdev);
3762 struct octeon_device *oct = lio->oct_dev;
3763 u8 *macaddr;
3764
3765 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3766 return -EINVAL;
3767
3768 ivi->vf = vfidx;
3769 macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
3770 ether_addr_copy(&ivi->mac[0], macaddr);
3771 ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
3772 ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
3773 ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
3774 return 0;
3775}
3776
3777static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3778 int linkstate)
3779{
3780 struct lio *lio = GET_LIO(netdev);
3781 struct octeon_device *oct = lio->oct_dev;
3782 struct octnic_ctrl_pkt nctrl;
3783
3784 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3785 return -EINVAL;
3786
3787 if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3788 return 0;
3789
3790 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3791 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3792 nctrl.ncmd.s.param1 =
3793 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3794 nctrl.ncmd.s.param2 = linkstate;
3795 nctrl.ncmd.s.more = 0;
3796 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3797 nctrl.cb_fn = 0;
3798 nctrl.wait_time = LIO_CMD_WAIT_TM;
3799
3800 octnet_send_nic_ctrl_pkt(oct, &nctrl);
3801
3802 oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3803
3804 return 0;
3805}
3806
Raghu Vatsavayi97a25322016-11-14 15:54:47 -08003807static const struct net_device_ops lionetdevops = {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003808 .ndo_open = liquidio_open,
3809 .ndo_stop = liquidio_stop,
3810 .ndo_start_xmit = liquidio_xmit,
3811 .ndo_get_stats = liquidio_get_stats,
3812 .ndo_set_mac_address = liquidio_set_mac,
3813 .ndo_set_rx_mode = liquidio_set_mcast_list,
3814 .ndo_tx_timeout = liquidio_tx_timeout,
Raghu Vatsavayi63245f22016-06-21 22:53:05 -07003815
3816 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
3817 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003818 .ndo_change_mtu = liquidio_change_mtu,
3819 .ndo_do_ioctl = liquidio_ioctl,
3820 .ndo_fix_features = liquidio_fix_features,
3821 .ndo_set_features = liquidio_set_features,
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07003822 .ndo_udp_tunnel_add = liquidio_add_vxlan_port,
3823 .ndo_udp_tunnel_del = liquidio_del_vxlan_port,
Raghu Vatsavayi86dea552016-11-14 15:54:43 -08003824 .ndo_set_vf_mac = liquidio_set_vf_mac,
3825 .ndo_set_vf_vlan = liquidio_set_vf_vlan,
3826 .ndo_get_vf_config = liquidio_get_vf_config,
3827 .ndo_set_vf_link_state = liquidio_set_vf_link_state,
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003828};
3829
3830/** \brief Entry point for the liquidio module
3831 */
3832static int __init liquidio_init(void)
3833{
3834 int i;
3835 struct handshake *hs;
3836
3837 init_completion(&first_stage);
3838
Raghu Vatsavayi97a25322016-11-14 15:54:47 -08003839 octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003840
3841 if (liquidio_init_pci())
3842 return -EINVAL;
3843
3844 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3845
3846 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3847 hs = &handshake[i];
3848 if (hs->pci_dev) {
3849 wait_for_completion(&hs->init);
3850 if (!hs->init_ok) {
3851 /* init handshake failed */
3852 dev_err(&hs->pci_dev->dev,
3853 "Failed to init device\n");
3854 liquidio_deinit_pci();
3855 return -EIO;
3856 }
3857 }
3858 }
3859
3860 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3861 hs = &handshake[i];
3862 if (hs->pci_dev) {
3863 wait_for_completion_timeout(&hs->started,
3864 msecs_to_jiffies(30000));
3865 if (!hs->started_ok) {
3866 /* starter handshake failed */
3867 dev_err(&hs->pci_dev->dev,
3868 "Firmware failed to start\n");
3869 liquidio_deinit_pci();
3870 return -EIO;
3871 }
3872 }
3873 }
3874
3875 return 0;
3876}
3877
Raghu Vatsavayi5b173cf2015-06-12 18:11:50 -07003878static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003879{
3880 struct octeon_device *oct = (struct octeon_device *)buf;
3881 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003882 int gmxport = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003883 union oct_link_status *ls;
3884 int i;
3885
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003886 if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003887 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3888 recv_pkt->buffer_size[0],
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003889 recv_pkt->rh.r_nic_info.gmxport);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003890 goto nic_info_err;
3891 }
3892
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003893 gmxport = recv_pkt->rh.r_nic_info.gmxport;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003894 ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
3895
3896 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
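	/* Find the interface attached to this GMX port and update its link
	 * state.
	 */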
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003897 for (i = 0; i < oct->ifcount; i++) {
3898 if (oct->props[i].gmxport == gmxport) {
3899 update_link_status(oct->props[i].netdev, ls);
3900 break;
3901 }
3902 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003903
3904nic_info_err:
3905 for (i = 0; i < recv_pkt->buffer_count; i++)
3906 recv_buffer_free(recv_pkt->buffer_ptr[i]);
3907 octeon_free_recv_info(recv_info);
3908 return 0;
3909}
3910
3911/**
3912 * \brief Setup network interfaces
3913 * @param octeon_dev octeon device
3914 *
3915 * Called during init time for each device. It assumes the NIC
3916 * is already up and running. The link information for each
3917 * interface is passed in link_info.
3918 */
3919static int setup_nic_devices(struct octeon_device *octeon_dev)
3920{
3921 struct lio *lio = NULL;
3922 struct net_device *netdev;
3923 u8 mac[6], i, j;
3924 struct octeon_soft_command *sc;
3925 struct liquidio_if_cfg_context *ctx;
3926 struct liquidio_if_cfg_resp *resp;
3927 struct octdev_props *props;
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07003928 int retval, num_iqueues, num_oqueues;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003929 union oct_nic_if_cfg if_cfg;
3930 unsigned int base_queue;
3931 unsigned int gmx_port_id;
Raghu Vatsavayi83101ce2016-08-31 11:03:21 -07003932 u32 resp_size, ctx_size, data_size;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003933 u32 ifidx_or_pfnum;
Raghu Vatsavayi83101ce2016-08-31 11:03:21 -07003934 struct lio_version *vdata;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003935
3936 /* This is to handle link status changes */
3937 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3938 OPCODE_NIC_INFO,
3939 lio_nic_info, octeon_dev);
3940
3941 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3942 * They are handled directly.
3943 */
3944 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3945 free_netbuf);
3946
3947 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3948 free_netsgbuf);
3949
3950 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3951 free_netsgbuf_with_resp);
3952
3953 for (i = 0; i < octeon_dev->ifcount; i++) {
3954 resp_size = sizeof(struct liquidio_if_cfg_resp);
3955 ctx_size = sizeof(struct liquidio_if_cfg_context);
Raghu Vatsavayi83101ce2016-08-31 11:03:21 -07003956 data_size = sizeof(struct lio_version);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003957 sc = (struct octeon_soft_command *)
Raghu Vatsavayi83101ce2016-08-31 11:03:21 -07003958 octeon_alloc_soft_command(octeon_dev, data_size,
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003959 resp_size, ctx_size);
3960 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3961 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
Raghu Vatsavayi83101ce2016-08-31 11:03:21 -07003962 vdata = (struct lio_version *)sc->virtdptr;
3963
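	/* The data portion of the IF_CFG command carries the driver's
	 * LiquidIO base version, in big-endian, for the firmware.
	 */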
3964 *((u64 *)vdata) = 0;
3965 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3966 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3967 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003968
Raghu Vatsavayie86b1ab2016-08-31 11:03:24 -07003969 if (OCTEON_CN23XX_PF(octeon_dev)) {
3970 num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3971 num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3972 base_queue = octeon_dev->sriov_info.pf_srn;
3973
3974 gmx_port_id = octeon_dev->pf_num;
3975 ifidx_or_pfnum = octeon_dev->pf_num;
3976 } else {
3977 num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3978 octeon_get_conf(octeon_dev), i);
3979 num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3980 octeon_get_conf(octeon_dev), i);
3981 base_queue = CFG_GET_BASE_QUE_NIC_IF(
3982 octeon_get_conf(octeon_dev), i);
3983 gmx_port_id = CFG_GET_GMXID_NIC_IF(
3984 octeon_get_conf(octeon_dev), i);
3985 ifidx_or_pfnum = i;
3986 }
Raghu Vatsavayi3dcef2c2016-07-03 13:56:51 -07003987
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003988 dev_dbg(&octeon_dev->pci_dev->dev,
3989 "requesting config for interface %d, iqs %d, oqs %d\n",
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07003990 ifidx_or_pfnum, num_iqueues, num_oqueues);
Raghu Vatsavayia7d5a3d2016-07-03 13:56:48 -07003991 WRITE_ONCE(ctx->cond, 0);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07003992 ctx->octeon_id = lio_get_device_id(octeon_dev);
3993 init_waitqueue_head(&ctx->wc);
3994
3995 if_cfg.u64 = 0;
3996 if_cfg.s.num_iqueues = num_iqueues;
3997 if_cfg.s.num_oqueues = num_oqueues;
3998 if_cfg.s.base_queue = base_queue;
3999 if_cfg.s.gmx_port_id = gmx_port_id;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07004000
4001 sc->iq_no = 0;
4002
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004003 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07004004 OPCODE_NIC_IF_CFG, 0,
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004005 if_cfg.u64, 0);
4006
4007 sc->callback = if_cfg_callback;
4008 sc->callback_arg = sc;
Raghu Vatsavayi55893a62016-07-03 13:56:50 -07004009 sc->wait_time = 3000;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004010
4011 retval = octeon_send_soft_command(octeon_dev, sc);
Raghu Vatsavayiddc173a2016-06-14 16:54:43 -07004012 if (retval == IQ_SEND_FAILED) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004013 dev_err(&octeon_dev->pci_dev->dev,
4014 "iq/oq config failed status: %x\n",
4015 retval);
4016 /* Soft instr is freed by driver in case of failure. */
4017 goto setup_nic_dev_fail;
4018 }
4019
4020 /* Sleep on a wait queue till the cond flag indicates that the
4021 * response arrived or timed-out.
4022 */
Raghu Vatsavayiafdf8412016-09-01 11:16:05 -07004023 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
4024 dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
4025 goto setup_nic_wait_intr;
4026 }
4027
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004028 retval = resp->status;
4029 if (retval) {
4030 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
4031 goto setup_nic_dev_fail;
4032 }
4033
4034 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
4035 (sizeof(struct liquidio_if_cfg_info)) >> 3);
4036
4037 num_iqueues = hweight64(resp->cfg_info.iqmask);
4038 num_oqueues = hweight64(resp->cfg_info.oqmask);
4039
4040 if (!(num_iqueues) || !(num_oqueues)) {
4041 dev_err(&octeon_dev->pci_dev->dev,
4042 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
4043 resp->cfg_info.iqmask,
4044 resp->cfg_info.oqmask);
4045 goto setup_nic_dev_fail;
4046 }
4047 dev_dbg(&octeon_dev->pci_dev->dev,
4048 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
4049 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
4050 num_iqueues, num_oqueues);
4051 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
4052
4053 if (!netdev) {
4054 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
4055 goto setup_nic_dev_fail;
4056 }
4057
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07004058 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004059
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004060 /* Associate the routines that will handle different
4061 * netdev tasks.
4062 */
4063 netdev->netdev_ops = &lionetdevops;
4064
4065 lio = GET_LIO(netdev);
4066
4067 memset(lio, 0, sizeof(struct lio));
4068
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07004069 lio->ifidx = ifidx_or_pfnum;
4070
4071 props = &octeon_dev->props[i];
4072 props->gmxport = resp->cfg_info.linfo.gmxport;
4073 props->netdev = netdev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004074
4075 lio->linfo.num_rxpciq = num_oqueues;
4076 lio->linfo.num_txpciq = num_iqueues;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004077 for (j = 0; j < num_oqueues; j++) {
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07004078 lio->linfo.rxpciq[j].u64 =
4079 resp->cfg_info.linfo.rxpciq[j].u64;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004080 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004081 for (j = 0; j < num_iqueues; j++) {
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07004082 lio->linfo.txpciq[j].u64 =
4083 resp->cfg_info.linfo.txpciq[j].u64;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004084 }
4085 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
4086 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
4087 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
4088
4089 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
4090
Raghu Vatsavayie86b1ab2016-08-31 11:03:24 -07004091 if (OCTEON_CN23XX_PF(octeon_dev) ||
4092 OCTEON_CN6XXX(octeon_dev)) {
4093 lio->dev_capability = NETIF_F_HIGHDMA
4094 | NETIF_F_IP_CSUM
4095 | NETIF_F_IPV6_CSUM
4096 | NETIF_F_SG | NETIF_F_RXCSUM
4097 | NETIF_F_GRO
4098 | NETIF_F_TSO | NETIF_F_TSO6
4099 | NETIF_F_LRO;
4100 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004101 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
4102
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07004103 /* Copy of transmit encapsulation capabilities:
4104 * TSO, TSO6, Checksums for this device
4105 */
4106 lio->enc_dev_capability = NETIF_F_IP_CSUM
4107 | NETIF_F_IPV6_CSUM
4108 | NETIF_F_GSO_UDP_TUNNEL
4109 | NETIF_F_HW_CSUM | NETIF_F_SG
4110 | NETIF_F_RXCSUM
4111 | NETIF_F_TSO | NETIF_F_TSO6
4112 | NETIF_F_LRO;
4113
4114 netdev->hw_enc_features = (lio->enc_dev_capability &
4115 ~NETIF_F_LRO);
4116
4117 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
4118
Raghu Vatsavayi0da0b772016-06-21 22:53:04 -07004119 netdev->vlan_features = lio->dev_capability;
4120 /* Add any unchangeable hw features */
Raghu Vatsavayi63245f22016-06-21 22:53:05 -07004121 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
4122 NETIF_F_HW_VLAN_CTAG_RX |
Raghu Vatsavayi0da0b772016-06-21 22:53:04 -07004123 NETIF_F_HW_VLAN_CTAG_TX;
4124
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07004125 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
4126
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004127 netdev->hw_features = lio->dev_capability;
Raghu Vatsavayi0da0b772016-06-21 22:53:04 -07004128 /* HW_VLAN_RX and HW_VLAN_FILTER are always on */
4129 netdev->hw_features = netdev->hw_features &
4130 ~NETIF_F_HW_VLAN_CTAG_RX;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004131
Jarod Wilson109cc162016-10-17 15:54:13 -04004132 /* MTU range: 68 - 16000 */
4133 netdev->min_mtu = LIO_MIN_MTU_SIZE;
4134 netdev->max_mtu = LIO_MAX_MTU_SIZE;
4135
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004136 /* Point to the properties for octeon device to which this
4137 * interface belongs.
4138 */
4139 lio->oct_dev = octeon_dev;
4140 lio->octprops = props;
4141 lio->netdev = netdev;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004142
4143 dev_dbg(&octeon_dev->pci_dev->dev,
4144 "if%d gmx: %d hw_addr: 0x%llx\n", i,
4145 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
4146
Raghu Vatsavayi86dea552016-11-14 15:54:43 -08004147 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
4148 u8 vfmac[ETH_ALEN];
4149
4150 random_ether_addr(&vfmac[0]);
4151 if (__liquidio_set_vf_mac(netdev, j,
4152 &vfmac[0], false)) {
4153 dev_err(&octeon_dev->pci_dev->dev,
4154 "Error setting VF%d MAC address\n",
4155 j);
4156 goto setup_nic_dev_fail;
4157 }
4158 }
4159
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004160 /* 64-bit swap required on LE machines */
4161 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
4162 for (j = 0; j < 6; j++)
4163 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
4164
4165 /* Copy MAC Address to OS network device structure */
4166
4167 ether_addr_copy(netdev->dev_addr, mac);
4168
Raghu Vatsavayi26236fa2016-06-14 16:54:44 -07004169 /* By default all interfaces on a single Octeon use the same
4170 * tx and rx queues
4171 */
4172 lio->txq = lio->linfo.txpciq[0].s.q_no;
4173 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07004174 if (setup_io_queues(octeon_dev, i)) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004175 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
4176 goto setup_nic_dev_fail;
4177 }
4178
4179 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
4180
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004181 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
4182 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
4183
Raghu Vatsavayifcd2b5e2016-06-14 16:54:45 -07004184 if (setup_glists(octeon_dev, lio, num_iqueues)) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004185 dev_err(&octeon_dev->pci_dev->dev,
4186 "Gather list allocation failed\n");
4187 goto setup_nic_dev_fail;
4188 }
4189
4190 /* Register ethtool support */
4191 liquidio_set_ethtool_ops(netdev);
Raghu Vatsavayi30136392016-09-01 11:16:11 -07004192 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
4193 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
4194 else
4195 octeon_dev->priv_flags = 0x0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004196
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07004197 if (netdev->features & NETIF_F_LRO)
Raghu Vatsavayia2c64b62016-07-03 13:56:55 -07004198 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
4199 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004200
Raghu Vatsavayi63245f22016-06-21 22:53:05 -07004201 liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);
4202
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004203 if ((debug != -1) && (debug & NETIF_MSG_HW))
Raghu Vatsavayi63245f22016-06-21 22:53:05 -07004204 liquidio_set_feature(netdev,
4205 OCTNET_CMD_VERBOSE_ENABLE, 0);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004206
Raghu Vatsavayi7b6b6c92016-09-01 11:16:04 -07004207 if (setup_link_status_change_wq(netdev))
4208 goto setup_nic_dev_fail;
4209
Satanand Burla031d4f12017-03-22 11:31:13 -07004210 if (setup_rx_oom_poll_fn(netdev))
4211 goto setup_nic_dev_fail;
4212
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004213 /* Register the network device with the OS */
4214 if (register_netdev(netdev)) {
4215 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
4216 goto setup_nic_dev_fail;
4217 }
4218
4219 dev_dbg(&octeon_dev->pci_dev->dev,
4220 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
4221 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
4222 netif_carrier_off(netdev);
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07004223 lio->link_changes++;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004224
4225 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
4226
Raghu Vatsavayi01fb2372016-07-03 13:56:47 -07004227 /* Sending command to firmware to enable Rx checksum offload
4228 * by default at the time of setup of Liquidio driver for
4229 * this device
4230 */
4231 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
4232 OCTNET_CMD_RXCSUM_ENABLE);
4233 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
4234 OCTNET_CMD_TXCSUM_ENABLE);
4235
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004236 dev_dbg(&octeon_dev->pci_dev->dev,
4237 "NIC ifidx:%d Setup successful\n", i);
4238
4239 octeon_free_soft_command(octeon_dev, sc);
4240 }
4241
4242 return 0;
4243
4244setup_nic_dev_fail:
4245
4246 octeon_free_soft_command(octeon_dev, sc);
4247
Raghu Vatsavayiafdf8412016-09-01 11:16:05 -07004248setup_nic_wait_intr:
4249
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004250 while (i--) {
4251 dev_err(&octeon_dev->pci_dev->dev,
4252 "NIC ifidx:%d Setup failed\n", i);
4253 liquidio_destroy_nic_device(octeon_dev, i);
4254 }
4255 return -ENODEV;
4256}
4257
Raghu Vatsavayica6139f2016-11-14 15:54:40 -08004258#ifdef CONFIG_PCI_IOV
4259static int octeon_enable_sriov(struct octeon_device *oct)
4260{
4261 unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
4262 struct pci_dev *vfdev;
4263 int err;
4264 u32 u;
4265
4266 if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
4267 err = pci_enable_sriov(oct->pci_dev,
4268 oct->sriov_info.num_vfs_alloced);
4269 if (err) {
4270 dev_err(&oct->pci_dev->dev,
4271 "OCTEON: Failed to enable PCI sriov: %d\n",
4272 err);
4273 oct->sriov_info.num_vfs_alloced = 0;
4274 return err;
4275 }
4276 oct->sriov_info.sriov_enabled = 1;
4277
4278 /* init lookup table that maps DPI ring number to VF pci_dev
4279 * struct pointer
4280 */
4281 u = 0;
4282 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
4283 OCTEON_CN23XX_VF_VID, NULL);
4284 while (vfdev) {
4285 if (vfdev->is_virtfn &&
4286 (vfdev->physfn == oct->pci_dev)) {
4287 oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
4288 vfdev;
4289 u += oct->sriov_info.rings_per_vf;
4290 }
4291 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
4292 OCTEON_CN23XX_VF_VID, vfdev);
4293 }
4294 }
4295
4296 return num_vfs_alloced;
4297}
4298
4299static int lio_pci_sriov_disable(struct octeon_device *oct)
4300{
4301 int u;
4302
4303 if (pci_vfs_assigned(oct->pci_dev)) {
4304 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
4305 return -EPERM;
4306 }
4307
4308 pci_disable_sriov(oct->pci_dev);
4309
4310 u = 0;
4311 while (u < MAX_POSSIBLE_VFS) {
4312 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
4313 u += oct->sriov_info.rings_per_vf;
4314 }
4315
4316 oct->sriov_info.num_vfs_alloced = 0;
4317 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
4318 oct->pf_num);
4319
4320 return 0;
4321}
4322
4323static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
4324{
4325 struct octeon_device *oct = pci_get_drvdata(dev);
4326 int ret = 0;
4327
4328 if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
4329 (oct->sriov_info.sriov_enabled)) {
4330 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
4331 oct->pf_num, num_vfs);
4332 return 0;
4333 }
4334
4335 if (!num_vfs) {
4336 ret = lio_pci_sriov_disable(oct);
4337 } else if (num_vfs > oct->sriov_info.max_vfs) {
4338 dev_err(&oct->pci_dev->dev,
4339 "OCTEON: Max allowed VFs:%d user requested:%d",
4340 oct->sriov_info.max_vfs, num_vfs);
4341 ret = -EPERM;
4342 } else {
4343 oct->sriov_info.num_vfs_alloced = num_vfs;
4344 ret = octeon_enable_sriov(oct);
4345 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
4346 oct->pf_num, num_vfs);
4347 }
4348
4349 return ret;
4350}
4351#endif
4352
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004353/**
4354 * \brief initialize the NIC
4355 * @param oct octeon device
4356 *
4357 * This initialization routine is called once the Octeon device application is
4358 * up and running
4359 */
4360static int liquidio_init_nic_module(struct octeon_device *oct)
4361{
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07004362 int i, retval = 0;
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004363 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
4364
4365 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
4366
4367 /* only default iq and oq were initialized
4368 * initialize the rest as well
4369 */
4370 /* run port_config command for each port */
4371 oct->ifcount = num_nic_ports;
4372
Raghu Vatsavayi30136392016-09-01 11:16:11 -07004373 memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004374
Raghu Vatsavayi0cece6c2016-06-14 16:54:50 -07004375 for (i = 0; i < MAX_OCTEON_LINKS; i++)
4376 oct->props[i].gmxport = -1;
4377
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004378 retval = setup_nic_devices(oct);
4379 if (retval) {
4380 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
4381 goto octnet_init_failure;
4382 }
4383
4384 liquidio_ptp_init(oct);
4385
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004386 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
4387
4388 return retval;
4389
4390octnet_init_failure:
4391
4392 oct->ifcount = 0;
4393
4394 return retval;
4395}
4396
4397/**
4398 * \brief starter callback that invokes the remaining initialization work after
4399 * the NIC is up and running.
 4400 * @param work pointer to the work_struct
4401 */
4402static void nic_starter(struct work_struct *work)
4403{
4404 struct octeon_device *oct;
4405 struct cavium_wk *wk = (struct cavium_wk *)work;
4406
4407 oct = (struct octeon_device *)wk->ctxptr;
4408
4409 if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
4410 return;
4411
4412 /* If the status of the device is CORE_OK, the core
4413 * application has reported its application type. Call
4414 * any registered handlers now and move to the RUNNING
4415 * state.
4416 */
4417 if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
4418 schedule_delayed_work(&oct->nic_poll_work.work,
4419 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4420 return;
4421 }
4422
4423 atomic_set(&oct->status, OCT_DEV_RUNNING);
4424
4425 if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
4426 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
4427
4428 if (liquidio_init_nic_module(oct))
4429 dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
4430 else
4431 handshake[oct->octeon_id].started_ok = 1;
4432 } else {
4433 dev_err(&oct->pci_dev->dev,
4434 "Unexpected application running on NIC (%d). Check firmware.\n",
4435 oct->app_mode);
4436 }
4437
4438 complete(&handshake[oct->octeon_id].started);
4439}
4440
Raghu Vatsavayi86dea552016-11-14 15:54:43 -08004441static int
4442octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
4443{
4444 struct octeon_device *oct = (struct octeon_device *)buf;
4445 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
4446 int i, notice, vf_idx;
Felix Manlunasbb54be52017-04-04 19:26:57 -07004447 bool cores_crashed;
Raghu Vatsavayi86dea552016-11-14 15:54:43 -08004448 u64 *data, vf_num;
4449
4450 notice = recv_pkt->rh.r.ossp;
4451 data = (u64 *)get_rbd(recv_pkt->buffer_ptr[0]);
4452
4453 /* the first 64-bit word of data is the vf_num */
4454 vf_num = data[0];
4455 octeon_swap_8B_data(&vf_num, 1);
4456 vf_idx = (int)vf_num - 1;
4457
Felix Manlunasbb54be52017-04-04 19:26:57 -07004458 cores_crashed = READ_ONCE(oct->cores_crashed);
4459
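	/* Hold a module reference for each VF driver that attaches so the PF
	 * driver cannot be unloaded underneath it; skip the refcounting once
	 * firmware cores have crashed.
	 */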
Raghu Vatsavayi86dea552016-11-14 15:54:43 -08004460 if (notice == VF_DRV_LOADED) {
4461 if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
4462 oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
4463 dev_info(&oct->pci_dev->dev,
4464 "driver for VF%d was loaded\n", vf_idx);
Felix Manlunasbb54be52017-04-04 19:26:57 -07004465 if (!cores_crashed)
4466 try_module_get(THIS_MODULE);
Raghu Vatsavayi86dea552016-11-14 15:54:43 -08004467 }
4468 } else if (notice == VF_DRV_REMOVED) {
4469 if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
4470 oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
4471 dev_info(&oct->pci_dev->dev,
4472 "driver for VF%d was removed\n", vf_idx);
Felix Manlunasbb54be52017-04-04 19:26:57 -07004473 if (!cores_crashed)
4474 module_put(THIS_MODULE);
Raghu Vatsavayi86dea552016-11-14 15:54:43 -08004475 }
4476 } else if (notice == VF_DRV_MACADDR_CHANGED) {
4477 u8 *b = (u8 *)&data[1];
4478
4479 oct->sriov_info.vf_macaddr[vf_idx] = data[1];
4480 dev_info(&oct->pci_dev->dev,
4481 "VF driver changed VF%d's MAC address to %pM\n",
4482 vf_idx, b + 2);
4483 }
4484
4485 for (i = 0; i < recv_pkt->buffer_count; i++)
4486 recv_buffer_free(recv_pkt->buffer_ptr[i]);
4487 octeon_free_recv_info(recv_info);
4488
4489 return 0;
4490}
4491
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004492/**
4493 * \brief Device initialization for each Octeon device that is probed
4494 * @param octeon_dev octeon device
4495 */
4496static int octeon_device_init(struct octeon_device *octeon_dev)
4497{
4498 int j, ret;
Raghu Vatsavayic0eab5b2016-08-31 11:03:29 -07004499 int fw_loaded = 0;
Raghu Vatsavayid3d7e6c2016-06-21 22:53:07 -07004500 char bootcmd[] = "\n";
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004501 struct octeon_device_priv *oct_priv =
4502 (struct octeon_device_priv *)octeon_dev->priv;
4503 atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4504
4505 /* Enable access to the octeon device and make its DMA capability
4506 * known to the OS.
4507 */
4508 if (octeon_pci_os_setup(octeon_dev))
4509 return 1;
4510
Raghu Vatsavayi515e7522016-11-14 15:54:44 -08004511 atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4512
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004513 /* Identify the Octeon type and map the BAR address space. */
4514 if (octeon_chip_specific_setup(octeon_dev)) {
4515 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4516 return 1;
4517 }
4518
4519 atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4520
Rick Farringtone1e3ce62017-05-16 11:14:50 -07004521 /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4522 * since that is what is required for the reference to be removed
4523 * during de-initialization (see 'octeon_destroy_resources').
4524 */
4525 octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4526 PCI_SLOT(octeon_dev->pci_dev->devfn),
4527 PCI_FUNC(octeon_dev->pci_dev->devfn),
4528 true);
4529
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004530 octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4531
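	/* On CN23XX, only soft-reset the device when no firmware is running
	 * yet (and a firmware type is configured); an already-loaded image is
	 * left untouched.
	 */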
Raghu Vatsavayic0eab5b2016-08-31 11:03:29 -07004532 if (OCTEON_CN23XX_PF(octeon_dev)) {
4533 if (!cn23xx_fw_loaded(octeon_dev)) {
4534 fw_loaded = 0;
Felix Manlunas7cc61db2017-03-23 13:26:28 -07004535 if (!fw_type_is_none()) {
4536 /* Do a soft reset of the Octeon device. */
4537 if (octeon_dev->fn_list.soft_reset(octeon_dev))
4538 return 1;
4539 /* things might have changed */
4540 if (!cn23xx_fw_loaded(octeon_dev))
4541 fw_loaded = 0;
4542 else
4543 fw_loaded = 1;
4544 }
Raghu Vatsavayic0eab5b2016-08-31 11:03:29 -07004545 } else {
4546 fw_loaded = 1;
4547 }
4548 } else if (octeon_dev->fn_list.soft_reset(octeon_dev)) {
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004549 return 1;
Raghu Vatsavayic0eab5b2016-08-31 11:03:29 -07004550 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004551
4552 /* Initialize the dispatch mechanism used to push packets arriving on
4553 * Octeon Output queues.
4554 */
4555 if (octeon_init_dispatch_list(octeon_dev))
4556 return 1;
4557
4558 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4559 OPCODE_NIC_CORE_DRV_ACTIVE,
4560 octeon_core_drv_init,
4561 octeon_dev);
4562
Raghu Vatsavayi86dea552016-11-14 15:54:43 -08004563 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4564 OPCODE_NIC_VF_DRV_NOTICE,
4565 octeon_recv_vf_drv_notice, octeon_dev);
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004566 INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4567 octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4568 schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4569 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4570
4571 atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4572
Raghu Vatsavayic865cdf2016-11-28 16:54:36 -08004573 if (octeon_set_io_queues_off(octeon_dev)) {
4574 dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4575 return 1;
4576 }
Raghu Vatsavayif21fb3e2015-06-09 18:15:23 -07004577
Raghu Vatsavayi3451b972016-08-31 11:03:26 -07004578 if (OCTEON_CN23XX_PF(octeon_dev)) {
4579 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4580 if (ret) {
4581 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4582 return ret;
4583 }
4584 }
4585
4586 /* Initialize soft command buffer pool
4587 */
4588 if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4589 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4590 return 1;
4591 }
4592 atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

        /* Setup the data structures that manage this Octeon's Input queues. */
        if (octeon_setup_instr_queues(octeon_dev)) {
                dev_err(&octeon_dev->pci_dev->dev,
                        "instruction queue initialization failed\n");
                return 1;
        }
        atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

        /* Initialize lists to manage the requests of different types that
         * arrive from user & kernel applications for this octeon device.
         */
        if (octeon_setup_response_list(octeon_dev)) {
                dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
                return 1;
        }
        atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);

        if (octeon_setup_output_queues(octeon_dev)) {
                dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
                return 1;
        }

        atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);

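        /* CN23XX PF: set up the PF<->VF mailbox and allocate the per-queue
         * MSI-X interrupt vectors. Older devices only need their remaining
         * device registers programmed at this point.
         */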
        if (OCTEON_CN23XX_PF(octeon_dev)) {
                if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
                        dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
                        return 1;
                }
                atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);

                if (octeon_allocate_ioq_vector(octeon_dev)) {
                        dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
                        return 1;
                }
                atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);

        } else {
                /* The input and output queue registers were set up earlier
                 * (the queues were not enabled). Any additional registers
                 * that need to be programmed should be done now.
                 */
                ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
                if (ret) {
                        dev_err(&octeon_dev->pci_dev->dev,
                                "Failed to configure device registers\n");
                        return ret;
                }
        }

        /* Initialize the tasklet that handles output queue packet processing. */
        dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
        tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
                     (unsigned long)octeon_dev);

        /* Setup the interrupt handler and record the INT SUM register address.
         */
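        /* MSI-X is used on CN23XX devices (one vector per I/O queue); older
         * devices fall back to a single MSI or legacy INTx interrupt.
         */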
        if (octeon_setup_interrupt(octeon_dev))
                return 1;

        /* Enable Octeon device interrupts */
        octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);

        atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);

        /* Enable the input and output queues for this Octeon device */
        ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
        if (ret) {
                dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues\n");
                return ret;
        }

        atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);

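        /* Firmware download is only needed when no usable image is already
         * running: always for pre-CN23XX devices, and for a CN23XX PF when
         * the earlier check found no firmware. Wait for DDR and the
         * bootloader, attach to the board console, then load and start the
         * firmware.
         */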
        if ((!OCTEON_CN23XX_PF(octeon_dev)) || !fw_loaded) {
                dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
                if (!ddr_timeout) {
                        dev_info(&octeon_dev->pci_dev->dev,
                                 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
                }

                schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);

                /* Wait for the octeon to initialize DDR after the soft-reset. */
                while (!ddr_timeout) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (schedule_timeout(HZ / 10)) {
                                /* user probably pressed Control-C */
                                return 1;
                        }
                }
                ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
                if (ret) {
                        dev_err(&octeon_dev->pci_dev->dev,
                                "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
                                ret);
                        return 1;
                }

                if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
                        dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
                        return 1;
                }

                /* Divert uboot to take commands from host instead. */
                ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);

                dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
                ret = octeon_init_consoles(octeon_dev);
                if (ret) {
                        dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
                        return 1;
                }
                ret = octeon_add_console(octeon_dev, 0);
                if (ret) {
                        dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
                        return 1;
                }

                atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);

                dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
                ret = load_firmware(octeon_dev);
                if (ret) {
                        dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
                        return 1;
                }
                /* Set bit 1 of SLI_SCRATCH_1 to indicate that firmware is
                 * loaded.
                 */
                if (OCTEON_CN23XX_PF(octeon_dev))
                        octeon_write_csr64(octeon_dev, CN23XX_SLI_SCRATCH1,
                                           2ULL);
        }

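        /* Report successful device init through the handshake completion,
         * which module init waits on for each probed device.
         */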
        handshake[octeon_dev->octeon_id].init_ok = 1;
        complete(&handshake[octeon_dev->octeon_id].init);

        atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);

        /* Send credit for Octeon output queues. Credits are always sent
         * after the output queue is enabled.
         */
        for (j = 0; j < octeon_dev->num_oqs; j++)
                writel(octeon_dev->droq[j]->max_count,
                       octeon_dev->droq[j]->pkts_credit_reg);

        /* Packets can start arriving on the output queues from this point. */
        return 0;
}

/**
 * \brief Exits the module
 */
static void __exit liquidio_exit(void)
{
        liquidio_deinit_pci();

        pr_info("LiquidIO network module is now unloaded\n");
}

module_init(liquidio_init);
module_exit(liquidio_exit);