/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information.
 **********************************************************************/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/netdevice.h>
#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"

int lio_cn6xxx_soft_reset(struct octeon_device *oct)
{
        octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

        dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n");

        lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);
        octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL);

        lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
        lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);

        /* Make sure that the reset is written before starting the timer. */
        mmiowb();

        /* Wait for 100ms as Octeon resets. */
        mdelay(100);

        if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) {
                dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
                return 1;
        }

        dev_dbg(&oct->pci_dev->dev, "Reset completed\n");
        octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

        return 0;
}

void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
{
        u32 val;

        pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
        if (val & 0x000f0000) {
                dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
                        val & 0x000f0000);
        }

        val |= 0xf; /* Enable Link error reporting */

        dev_dbg(&oct->pci_dev->dev, "Enabling PCI-E error reporting...\n");
        pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
}
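
/* A note on the masks used above, assuming CN6XXX_PCIE_DEVCTL addresses the
 * standard PCIe Device Control / Device Status dword: bits 19:16 of that
 * dword are the Device Status "error detected" flags tested first, and bits
 * 3:0 of Device Control are the four error-reporting enables (correctable,
 * non-fatal, fatal, unsupported request) that the 0xf turns on.
 */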

void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
                               enum octeon_pcie_mps mps)
{
        u32 val;
        u64 r64;

        /* Read config register for MPS */
        pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);

        if (mps == PCIE_MPS_DEFAULT) {
                mps = ((val & (0x7 << 5)) >> 5);
        } else {
                val &= ~(0x7 << 5); /* Turn off any MPS bits */
                val |= (mps << 5);  /* Set MPS */
                pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
        }

        /* Set MPS in DPI_SLI_PRT0_CFG to the same value. */
        r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
        r64 |= (mps << 4);
        lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}
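
/* A sketch of the field encoding, assuming the standard PCIe 128 << mps
 * byte scheme holds here: mps = 0 selects a 128-byte, 1 a 256-byte and 2 a
 * 512-byte Max Payload Size. The same value is mirrored into the MPS field
 * at bit 4 of DPI_SLI_PRT0_CFG above.
 */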

void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
                                enum octeon_pcie_mrrs mrrs)
{
        u32 val;
        u64 r64;

        /* Read config register for MRRS */
        pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);

        if (mrrs == PCIE_MRRS_DEFAULT) {
                mrrs = ((val & (0x7 << 12)) >> 12);
        } else {
                val &= ~(0x7 << 12); /* Turn off any MRRS bits */
                val |= (mrrs << 12); /* Set MRRS */
                pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
        }

        /* Set MRRS in SLI_S2M_PORT0_CTL to the same value. */
        r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port));
        r64 |= mrrs;
        octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64);

        /* Set MRRS in DPI_SLI_PRT0_CFG to the same value. */
        r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
        r64 |= mrrs;
        lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}
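
/* MRRS presumably follows the same 128 << mrrs PCIe encoding, making
 * PCIE_MRRS_512B the value 2 (bits 14:12 = 010b). Note that the two CSR
 * updates above only OR the new value in, so they rely on the MRRS fields
 * in SLI_S2M_PORT0_CTL and DPI_SLI_PRT0_CFG starting out cleared.
 */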

u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct)
{
        /* Bits 29:24 of MIO_RST_BOOT hold the reference-clock multiplier
         * for the SLI clock; with a 50 MHz reference, the result is the
         * coprocessor clock in MHz.
         */
        return ((lio_pci_readq(oct, CN6XXX_MIO_RST_BOOT) >> 24) & 0x3f) * 50;
}
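
/* Worked example: a multiplier of 12 read from MIO_RST_BOOT[29:24] gives
 * 12 * 50 = 600, i.e. a 600 MHz coprocessor/SLI clock.
 */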

u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct,
                            u32 time_intr_in_us)
{
        /* This gives the SLI clock per microsecond */
        u32 oqticks_per_us = lio_cn6xxx_coprocessor_clock(oct);

        /* Core clock per us / oq ticks would be fractional. To avoid that,
         * we use the method below.
         */

        /* This gives the clock cycles per millisecond */
        oqticks_per_us *= 1000;

        /* This gives the oq ticks (1024 core clock cycles) per millisecond */
        oqticks_per_us /= 1024;

        /* time_intr is in microseconds. The next two steps give the oq ticks
         * corresponding to time_intr.
         */
        oqticks_per_us *= time_intr_in_us;
        oqticks_per_us /= 1000;

        return oqticks_per_us;
}
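
/* Worked example, assuming a 600 MHz coprocessor clock and a 100 us
 * interrupt time: 600 cycles/us * 1000 = 600000 cycles/ms, / 1024 = 585
 * OQ ticks/ms, then * 100 / 1000 = 58 OQ ticks returned to the caller.
 */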

void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct)
{
        /* Select Round-Robin Arb, ES, RO, NS for Input Queues */
        octeon_write_csr(oct, CN6XXX_SLI_PKT_INPUT_CONTROL,
                         CN6XXX_INPUT_CTL_MASK);

        /* Instruction Read Size - Max 4 instructions per PCIE Read */
        octeon_write_csr64(oct, CN6XXX_SLI_PKT_INSTR_RD_SIZE,
                           0xFFFFFFFFFFFFFFFFULL);

        /* Select PCIE Port for all Input rings. */
        octeon_write_csr64(oct, CN6XXX_SLI_IN_PCIE_PORT,
                           (oct->pcie_port * 0x5555555555555555ULL));
}

static void lio_cn66xx_setup_pkt_ctl_regs(struct octeon_device *oct)
{
        u64 pktctl;
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

        pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);

        /* 66XX SPECIFIC */
        if (CFG_GET_OQ_MAX_Q(cn6xxx->conf) <= 4)
                /* Disable RING_EN if only up to 4 rings are used. */
                pktctl &= ~(1 << 4);
        else
                pktctl |= (1 << 4);

        if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf))
                pktctl |= 0xF;
        else
                /* Disable per-port backpressure. */
                pktctl &= ~0xF;

        octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
}

void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
{
        u32 time_threshold;
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

        /* Select PCI-E Port for all Output queues */
        octeon_write_csr64(oct, CN6XXX_SLI_PKT_PCIE_PORT64,
                           (oct->pcie_port * 0x5555555555555555ULL));

        if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) {
                octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 32);
        } else {
                /* Set Output queue watermark to 0 to disable backpressure */
                octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
        }

        /* Select Info Ptr for length & data */
        octeon_write_csr(oct, CN6XXX_SLI_PKT_IPTR, 0xFFFFFFFF);

        /* Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
        octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);

        /* Select ES, RO, NS setting from register for Output Queue Packet
         * Address
         */
        octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);

        /* No Relaxed Ordering, No Snoop, 64-bit swap for Output
         * Queue ScatterList
         */
        octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_ROR, 0);
        octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_NS, 0);

        /* ENDIAN_SPECIFIC CHANGES - 0 works for LE. */
#ifdef __BIG_ENDIAN_BITFIELD
        octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64,
                           0x5555555555555555ULL);
#else
        octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, 0ULL);
#endif

        /* No Relaxed Ordering, No Snoop, 64-bit swap for Output Queue Data */
        octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_ROR, 0);
        octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_NS, 0);
        octeon_write_csr64(oct, CN6XXX_SLI_PKT_DATA_OUT_ES64,
                           0x5555555555555555ULL);

        /* Set up interrupt packet and time thresholds */
        octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
                         (u32)CFG_GET_OQ_INTR_PKT(cn6xxx->conf));
        time_threshold =
                lio_cn6xxx_get_oq_ticks(oct,
                                        (u32)CFG_GET_OQ_INTR_TIME(cn6xxx->conf));

        octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
}

static int lio_cn6xxx_setup_device_regs(struct octeon_device *oct)
{
        lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
        lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_512B);
        lio_cn6xxx_enable_error_reporting(oct);

        lio_cn6xxx_setup_global_input_regs(oct);
        lio_cn66xx_setup_pkt_ctl_regs(oct);
        lio_cn6xxx_setup_global_output_regs(oct);

        /* Set the default window-access timeout to 0x200000 so that a read
         * from an invalid register does not hang the host.
         */
        octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);

        return 0;
}

void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
        struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

        /* Disable Packet-by-Packet mode; No Parse Mode or Skip length */
        octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0);

        /* Write the start of the input queue's ring and its size */
        octeon_write_csr64(oct, CN6XXX_SLI_IQ_BASE_ADDR64(iq_no),
                           iq->base_addr_dma);
        octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);

        /* Remember the doorbell & instruction count register addresses
         * for this queue.
         */
        iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);
        iq->inst_cnt_reg = oct->mmio[0].hw_addr +
                           CN6XXX_SLI_IQ_INSTR_COUNT(iq_no);
        dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
                iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

        /* Store the current instruction counter (used in flush_iq
         * calculation).
         */
        iq->reset_instr_cnt = readl(iq->inst_cnt_reg);
}

static void lio_cn66xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
        lio_cn6xxx_setup_iq_regs(oct, iq_no);

        /* Backpressure for this queue - WMARK set to all F's. This effectively
         * disables the backpressure mechanism.
         */
        octeon_write_csr64(oct, CN66XX_SLI_IQ_BP64(iq_no),
                           (0xFFFFFFFFULL << 32));
}

void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
{
        u32 intr;
        struct octeon_droq *droq = oct->droq[oq_no];

        octeon_write_csr64(oct, CN6XXX_SLI_OQ_BASE_ADDR64(oq_no),
                           droq->desc_ring_dma);
        octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);

        octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
                         (droq->buffer_size | (OCT_RH_SIZE << 16)));

        /* Get the mapped address of the pkts_sent and pkts_credit regs */
        droq->pkts_sent_reg =
                oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_SENT(oq_no);
        droq->pkts_credit_reg =
                oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_CREDIT(oq_no);

        /* Enable this output queue to generate Packet Timer interrupts */
        intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
        intr |= (1 << oq_no);
        octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, intr);

        /* Enable this output queue to generate Packet Count interrupts */
        intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
        intr |= (1 << oq_no);
        octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
}

void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
{
        u32 mask;

        mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE);
        mask |= oct->io_qmask.iq64B;
        octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE, mask);

        mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
        mask |= oct->io_qmask.iq;
        octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

        mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
        mask |= oct->io_qmask.oq;
        octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);
}

void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
{
        int i;
        u32 mask, loop = HZ;
        u32 d32;

        /* Reset the Enable bits for Input Queues. */
        mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
        mask ^= oct->io_qmask.iq;
        octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

        /* Wait until hardware indicates that the queues are in reset. */
        mask = (u32)oct->io_qmask.iq;
        d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
        while (((d32 & mask) != mask) && loop--) {
                d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
                schedule_timeout_uninterruptible(1);
        }

        /* Reset the doorbell register for each Input queue. */
        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                if (!(oct->io_qmask.iq & (1ULL << i)))
                        continue;
                octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
                d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
        }

        /* Reset the Enable bits for Output Queues. */
        mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
        mask ^= oct->io_qmask.oq;
        octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);

        /* Wait until hardware indicates that the queues are in reset. */
        loop = HZ;
        mask = (u32)oct->io_qmask.oq;
        d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
        while (((d32 & mask) != mask) && loop--) {
                d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
                schedule_timeout_uninterruptible(1);
        }

        /* Reset the doorbell register for each Output queue. */
        for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                if (!(oct->io_qmask.oq & (1ULL << i)))
                        continue;
                octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
                d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));

                d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i));
                octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i), d32);
        }

        d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
        if (d32)
                octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, d32);

        d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
        if (d32)
                octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
}

void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
{
        int i;

        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                if (!(oct->io_qmask.iq & (1ULL << i)))
                        continue;
                oct->fn_list.setup_iq_regs(oct, i);
        }

        for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                if (!(oct->io_qmask.oq & (1ULL << i)))
                        continue;
                oct->fn_list.setup_oq_regs(oct, i);
        }

        oct->fn_list.setup_device_regs(oct);

        oct->fn_list.enable_interrupt(oct->chip);

        oct->fn_list.enable_io_queues(oct);

        for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                if (!(oct->io_qmask.oq & (1ULL << i)))
                        continue;
                writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg);
        }
}

void
lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
                          u64 core_addr,
                          u32 idx,
                          int valid)
{
        u64 bar1;

        if (valid == 0) {
                bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
                lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL),
                               CN6XXX_BAR1_REG(idx, oct->pcie_port));
                bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
                return;
        }

        /* Bits 17:4 of PCI_BAR1_INDEXx hold bits 35:22 of the core
         * address.
         */
        lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
                       CN6XXX_BAR1_REG(idx, oct->pcie_port));

        bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}
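
/* Since only bits 35:22 of the core address are kept, each BAR1 index maps
 * an aligned 4 MB (1 << 22 byte) window of the Octeon core address space.
 * Worked example: core_addr 0x11C00000 has bits 35:22 equal to 0x47, so
 * (0x47 << 4) | PCI_BAR1_MASK is the value written to the index register.
 */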

void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct,
                               u32 idx,
                               u32 mask)
{
        lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}

u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
{
        return (u32)lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}

u32
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
{
        u32 new_idx = readl(iq->inst_cnt_reg);

        /* The instr cnt reg is a 32-bit counter that can roll over; its
         * value at init time was noted in reset_instr_cnt.
         */
        if (iq->reset_instr_cnt < new_idx)
                new_idx -= iq->reset_instr_cnt;
        else
                new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;

        /* Reducing the count modulo the IQ size gives the new ring index. */
        new_idx %= iq->max_count;

        return new_idx;
}
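
/* Roll-over example: with reset_instr_cnt = 0xfffffff0 and a hardware count
 * of 0x10, the else branch yields 0x10 + (0xffffffff - 0xfffffff0) + 1 =
 * 0x20, i.e. 32 instructions issued since init, which is then reduced
 * modulo iq->max_count to a ring index.
 */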

void lio_cn6xxx_enable_interrupt(void *chip)
{
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
        u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;

        /* Enable Interrupt */
        writeq(mask, cn6xxx->intr_enb_reg64);
}

void lio_cn6xxx_disable_interrupt(void *chip)
{
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;

        /* Disable Interrupts */
        writeq(0, cn6xxx->intr_enb_reg64);

        /* make sure interrupts are really disabled */
        mmiowb();
}

static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
{
        /* CN63xx Pass2 and newer parts implement the SLI_MAC_NUMBER
         * register, which is used to determine the PCIE port number.
         */
        oct->pcie_port = octeon_read_csr(oct, CN6XXX_SLI_MAC_NUMBER) & 0xff;

        dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
}

void
lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
{
        dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
                CVM_CAST64(intr64));
}

int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
{
        struct octeon_droq *droq;
        int oq_no;
        u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb;
        u32 droq_cnt_enb, droq_cnt_mask;

        droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
        droq_cnt_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
        droq_mask = droq_cnt_mask & droq_cnt_enb;

        droq_time_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
        droq_int_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
        droq_mask |= (droq_time_mask & droq_int_enb);

        droq_mask &= oct->io_qmask.oq;

        oct->droq_intr = 0;

        for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) {
                if (!(droq_mask & (1ULL << oq_no)))
                        continue;

                droq = oct->droq[oq_no];
                pkt_count = octeon_droq_check_hw_for_pkts(oct, droq);
                if (pkt_count) {
                        oct->droq_intr |= (1ULL << oq_no);
                        if (droq->ops.poll_mode) {
                                u32 value;
                                u32 reg;

                                struct octeon_cn6xxx *cn6xxx =
                                        (struct octeon_cn6xxx *)oct->chip;

                                /* disable interrupts for this droq */
                                spin_lock(&cn6xxx->lock_for_droq_int_enb_reg);
                                reg = CN6XXX_SLI_PKT_TIME_INT_ENB;
                                value = octeon_read_csr(oct, reg);
                                value &= ~(1 << oq_no);
                                octeon_write_csr(oct, reg, value);
                                reg = CN6XXX_SLI_PKT_CNT_INT_ENB;
                                value = octeon_read_csr(oct, reg);
                                value &= ~(1 << oq_no);
                                octeon_write_csr(oct, reg, value);

                                /* Ensure that the enable register is
                                 * written.
                                 */
                                mmiowb();

                                spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
                        }
                }
        }

        droq_time_mask &= oct->io_qmask.oq;
        droq_cnt_mask &= oct->io_qmask.oq;

        /* Reset the PKT_CNT/TIME_INT registers. */
        if (droq_time_mask)
                octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, droq_time_mask);

        if (droq_cnt_mask) /* reset PKT_CNT register: 66xx */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, droq_cnt_mask);

        return 0;
}

irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev)
{
        struct octeon_device *oct = (struct octeon_device *)dev;
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
        u64 intr64;

        intr64 = readq(cn6xxx->intr_sum_reg64);

        /* Proceed only if our device has interrupted. An all-f's value
         * means the PCI read itself failed (which can happen if the
         * interrupt was triggered by an error), so treat that as no
         * interrupt as well.
         */
        if (!intr64 || (intr64 == 0xFFFFFFFFFFFFFFFFULL))
                return IRQ_NONE;

        oct->int_status = 0;

        if (intr64 & CN6XXX_INTR_ERR)
                lio_cn6xxx_process_pcie_error_intr(oct, intr64);

        if (intr64 & CN6XXX_INTR_PKT_DATA) {
                lio_cn6xxx_process_droq_intr_regs(oct);
                oct->int_status |= OCT_DEV_INTR_PKT_DATA;
        }

        if (intr64 & CN6XXX_INTR_DMA0_FORCE)
                oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;

        if (intr64 & CN6XXX_INTR_DMA1_FORCE)
                oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;

        /* Clear the current interrupts */
        writeq(intr64, cn6xxx->intr_sum_reg64);

        return IRQ_HANDLED;
}

void lio_cn6xxx_setup_reg_address(struct octeon_device *oct,
                                  void *chip,
                                  struct octeon_reg_list *reg_list)
{
        u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;

        reg_list->pci_win_wr_addr_hi =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_HI);
        reg_list->pci_win_wr_addr_lo =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_LO);
        reg_list->pci_win_wr_addr =
                (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR64);

        reg_list->pci_win_rd_addr_hi =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_HI);
        reg_list->pci_win_rd_addr_lo =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_LO);
        reg_list->pci_win_rd_addr =
                (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR64);

        reg_list->pci_win_wr_data_hi =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_HI);
        reg_list->pci_win_wr_data_lo =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_LO);
        reg_list->pci_win_wr_data =
                (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA64);

        reg_list->pci_win_rd_data_hi =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_HI);
        reg_list->pci_win_rd_data_lo =
                (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_LO);
        reg_list->pci_win_rd_data =
                (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA64);

        lio_cn6xxx_get_pcie_qlmport(oct);

        cn6xxx->intr_sum_reg64 = bar0_pciaddr + CN6XXX_SLI_INT_SUM64;
        cn6xxx->intr_mask64 = CN6XXX_INTR_MASK;
        cn6xxx->intr_enb_reg64 =
                bar0_pciaddr + CN6XXX_SLI_INT_ENB64(oct->pcie_port);
}

int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
{
        struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

        if (octeon_map_pci_barx(oct, 0, 0))
                return 1;

        if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
                dev_err(&oct->pci_dev->dev, "%s CN66XX BAR1 map failed\n",
                        __func__);
                octeon_unmap_pci_barx(oct, 0);
                return 1;
        }

        spin_lock_init(&cn6xxx->lock_for_droq_int_enb_reg);

        oct->fn_list.setup_iq_regs = lio_cn66xx_setup_iq_regs;
        oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;

        oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
        oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
        oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs;
        oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;

        oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
        oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
        oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;

        oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
        oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
        oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;

        oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
        oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;

        lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);

        cn6xxx->conf = (struct octeon_config *)
                       oct_get_config_info(oct, LIO_210SV);
        if (!cn6xxx->conf) {
                dev_err(&oct->pci_dev->dev, "%s No Config found for CN66XX\n",
                        __func__);
                octeon_unmap_pci_barx(oct, 0);
                octeon_unmap_pci_barx(oct, 1);
                return 1;
        }

        oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);

        return 0;
}

int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
                                    struct octeon_config *conf6xxx)
{
        if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) {
                dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
                        __func__, CFG_GET_IQ_MAX_Q(conf6xxx),
                        CN6XXX_MAX_INPUT_QUEUES);
                return 1;
        }

        if (CFG_GET_OQ_MAX_Q(conf6xxx) > CN6XXX_MAX_OUTPUT_QUEUES) {
                dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
                        __func__, CFG_GET_OQ_MAX_Q(conf6xxx),
                        CN6XXX_MAX_OUTPUT_QUEUES);
                return 1;
        }

        if (CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_32BYTE_INSTR &&
            CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_64BYTE_INSTR) {
                dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
                        __func__);
                return 1;
        }

        if (!(CFG_GET_OQ_INFO_PTR(conf6xxx)) ||
            !(CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx))) {
                dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
                        __func__);
                return 1;
        }

        if (!(CFG_GET_OQ_INTR_TIME(conf6xxx))) {
                dev_err(&oct->pci_dev->dev, "%s: No Time Interrupt for OQ\n",
                        __func__);
                return 1;
        }

        return 0;
}