/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"

#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)	\
	(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)

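/* Result of posting one command: an IQ_SEND_* status code and the ring
 * index at which the command was written (-1 on failure).
 */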
struct iq_post_status {
	int status;
	int index;
};

static void check_db_timeout(struct work_struct *work);
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);

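/* Per-device table of "free" callbacks, indexed by request type; invoked by
 * lio_process_iq_request_list() to release a buffer once Octeon has fetched
 * the corresponding instruction.
 */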
static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);

static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
{
	struct octeon_instr_queue *iq =
	    (struct octeon_instr_queue *)oct->instr_queue[iq_no];
	return iq->iqcmd_64B;
}

#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))

/* Define this to return the request status compatible with old code */
/*#define OCTEON_USE_OLD_REQ_STATUS*/

/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    union oct_txpciq txpciq,
			    u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 iq_no = (u32)txpciq.s.q_no;
	u32 q_size;
	struct cavium_wq *db_wq;
	int orig_node = dev_to_node(&oct->pci_dev->dev);
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
	else if (OCTEON_CN23XX_PF(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn23xx_pf, conf)));
	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

	if (num_descs & (num_descs - 1)) {
		dev_err(&oct->pci_dev->dev,
			"Number of descriptors for instr queue %d not in power of 2.\n",
			iq_no);
		return 1;
	}

	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];

	iq->oct_dev = oct;

	set_dev_node(&oct->pci_dev->dev, numa_node);
	iq->base_addr = lio_dma_alloc(oct, q_size,
				      (dma_addr_t *)&iq->base_addr_dma);
	set_dev_node(&oct->pci_dev->dev, orig_node);
	if (!iq->base_addr)
		iq->base_addr = lio_dma_alloc(oct, q_size,
					      (dma_addr_t *)&iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to Octeon
	 * but have yet to be fetched by Octeon.
	 */
	iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
					numa_node);
	if (!iq->request_list)
		iq->request_list = vmalloc(sizeof(*iq->request_list) *
					   num_descs);
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
			iq_no);
		return 1;
	}

	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);

	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	iq->txpciq.u64 = txpciq.u64;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);

	/* Initialize the spinlock for this instruction queue */
	spin_lock_init(&iq->lock);
	spin_lock_init(&iq->post_lock);

	spin_lock_init(&iq->iq_flush_running_lock);

	oct->io_qmask.iq |= (1ULL << iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
						     WQ_MEM_RECLAIM, 0);
	if (!oct->check_db_wq[iq_no].wq) {
		vfree(iq->request_list);
		iq->request_list = NULL;
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
			iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}

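/* Tear down an instruction queue: stop its doorbell-check work, free the
 * request list and release the descriptor ring. Returns 0 on success, 1 if
 * the ring was never allocated.
 */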
int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
	u64 desc_size = 0, q_size;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	if (OCTEON_CN6XXX(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));
	else if (OCTEON_CN23XX_PF(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn23xx_pf, conf));

	vfree(iq->request_list);

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free(oct, (u32)q_size, iq->base_addr,
			     iq->base_addr_dma);
		return 0;
	}
	return 1;
}

/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
		    int ifidx,
		    int q_index,
		    union oct_txpciq txpciq,
		    u32 num_descs,
		    void *app_ctx)
{
	u32 iq_no = (u32)txpciq.s.q_no;
	int numa_node = cpu_to_node(iq_no % num_online_cpus());

	if (oct->instr_queue[iq_no]) {
		dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
			iq_no);
		oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
		oct->instr_queue[iq_no]->app_ctx = app_ctx;
		return 0;
	}
	oct->instr_queue[iq_no] =
	    vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
	if (!oct->instr_queue[iq_no])
		oct->instr_queue[iq_no] =
		    vmalloc(sizeof(struct octeon_instr_queue));
	if (!oct->instr_queue[iq_no])
		return 1;

	memset(oct->instr_queue[iq_no], 0,
	       sizeof(struct octeon_instr_queue));

	oct->instr_queue[iq_no]->q_index = q_index;
	oct->instr_queue[iq_no]->app_ctx = app_ctx;
	oct->instr_queue[iq_no]->ifidx = ifidx;

	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
		vfree(oct->instr_queue[iq_no]);
		oct->instr_queue[iq_no] = NULL;
		return 1;
	}

	oct->num_iqs++;
	oct->fn_list.enable_io_queues(oct);
	return 0;
}

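/* Poll all active instruction queues until Octeon has fetched every pending
 * instruction or the retry budget runs out. Returns the number of
 * instructions still pending.
 */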
int lio_wait_for_instr_fetch(struct octeon_device *oct)
{
	int i, retry = 1000, pending, instr_cnt = 0;

	do {
		instr_cnt = 0;

		/*for (i = 0; i < oct->num_iqs; i++) {*/
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & (1ULL << i)))
				continue;
			pending =
			    atomic_read(&oct->instr_queue[i]->instr_pending);
			if (pending)
				__check_db_timeout(oct, i);
			instr_cnt += pending;
		}

		if (instr_cnt == 0)
			break;

		schedule_timeout_uninterruptible(1);

	} while (retry-- && instr_cnt);

	return instr_cnt;
}

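/* Write the accumulated fill count to the doorbell register so Octeon starts
 * fetching the newly posted commands; only done while the device is running.
 */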
static inline void
ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
{
	if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
		writel(iq->fill_cnt, iq->doorbell_reg);
		/* make sure doorbell write goes through */
		mmiowb();
		iq->fill_cnt = 0;
		iq->last_db_time = jiffies;
		return;
	}
}

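/* Copy a 32B or 64B command into the ring slot addressed by
 * host_write_index.
 */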
static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
				      u8 *cmd)
{
	u8 *iqptr, cmdsize;

	cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
	iqptr = iq->base_addr + (cmdsize * iq->host_write_index);

	memcpy(iqptr, cmd, cmdsize);
}

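/* Reserve the next ring slot and copy the command into it. Fails when the
 * queue is full; returns IQ_SEND_STOP when this post leaves the queue
 * effectively full so the caller stops posting.
 */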
static inline struct iq_post_status
__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
	struct iq_post_status st;

	st.status = IQ_SEND_OK;

	/* This ensures that the read index does not wrap around to the same
	 * position if the queue gets full before Octeon can fetch any
	 * instruction.
	 */
	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
		st.status = IQ_SEND_FAILED;
		st.index = -1;
		return st;
	}

	if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
		st.status = IQ_SEND_STOP;

	__copy_cmd_into_iq(iq, cmd);

	/* "index" is returned, host_write_index is modified. */
	st.index = iq->host_write_index;
	INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
	iq->fill_cnt++;

	/* Flush the command into memory. We need to be sure the data is in
	 * memory before indicating that the instruction is pending.
	 */
	wmb();

	atomic_inc(&iq->instr_pending);

	return st;
}

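/* Register the callback used to release buffers of the given request type
 * once Octeon has fetched them.
 */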
int
octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
				void (*fn)(void *))
{
	if (reqtype > REQTYPE_LAST) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
			__func__, reqtype);
		return -EINVAL;
	}

	reqtype_free_fn[oct->octeon_id][reqtype] = fn;

	return 0;
}

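/* Remember the buffer and request type posted at ring index 'idx' so the
 * completion path can release them later.
 */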
static inline void
__add_to_request_list(struct octeon_instr_queue *iq,
		      int idx, void *buf, int reqtype)
{
	iq->request_list[idx].buf = buf;
	iq->request_list[idx].reqtype = reqtype;
}

/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
			    struct octeon_instr_queue *iq, u32 napi_budget)
{
	int reqtype;
	void *buf;
	u32 old = iq->flush_index;
	u32 inst_count = 0;
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct octeon_soft_command *sc;
	struct octeon_instr_irh *irh;
	unsigned long flags;

	while (old != iq->octeon_read_index) {
		reqtype = iq->request_list[old].reqtype;
		buf = iq->request_list[old].buf;

		if (reqtype == REQTYPE_NONE)
			goto skip_this;

		octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
						     &bytes_compl);

		switch (reqtype) {
		case REQTYPE_NORESP_NET:
		case REQTYPE_NORESP_NET_SG:
		case REQTYPE_RESP_NET_SG:
			reqtype_free_fn[oct->octeon_id][reqtype](buf);
			break;
		case REQTYPE_RESP_NET:
		case REQTYPE_SOFT_COMMAND:
			sc = buf;

			if (OCTEON_CN23XX_PF(oct))
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd3.irh;
			else
				irh = (struct octeon_instr_irh *)
					&sc->cmd.cmd2.irh;
			if (irh->rflag) {
				/* We're expecting a response from Octeon.
				 * It's up to lio_process_ordered_list() to
				 * process sc. Add sc to the ordered soft
				 * command response list because we expect
				 * a response from Octeon.
				 */
				spin_lock_irqsave
					(&oct->response_list
					 [OCTEON_ORDERED_SC_LIST].lock,
					 flags);
				atomic_inc(&oct->response_list
					   [OCTEON_ORDERED_SC_LIST].
					   pending_req_count);
				list_add_tail(&sc->node, &oct->response_list
					      [OCTEON_ORDERED_SC_LIST].head);
				spin_unlock_irqrestore
					(&oct->response_list
					 [OCTEON_ORDERED_SC_LIST].lock,
					 flags);
			} else {
				if (sc->callback) {
					/* This callback must not sleep */
					sc->callback(oct, OCTEON_REQUEST_DONE,
						     sc->callback_arg);
				}
			}
			break;
		default:
			dev_err(&oct->pci_dev->dev,
				"%s Unknown reqtype: %d buf: %p at idx %d\n",
				__func__, reqtype, buf, old);
		}

		iq->request_list[old].buf = NULL;
		iq->request_list[old].reqtype = 0;

skip_this:
		inst_count++;
		INCR_INDEX_BY1(old, iq->max_count);

		if ((napi_budget) && (inst_count >= napi_budget))
			break;
	}
	if (bytes_compl)
		octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
						   bytes_compl);
	iq->flush_index = old;

	return inst_count;
}

/* Can only be called from process context */
int
octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
		u32 pending_thresh, u32 napi_budget)
{
	u32 inst_processed = 0;
	u32 tot_inst_processed = 0;
	int tx_done = 1;

	if (!spin_trylock(&iq->iq_flush_running_lock))
		return tx_done;

	spin_lock_bh(&iq->lock);

	iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);

	if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
		do {
			/* Process any outstanding IQ packets. */
			if (iq->flush_index == iq->octeon_read_index)
				break;

			if (napi_budget)
				inst_processed = lio_process_iq_request_list
					(oct, iq,
					 napi_budget - tot_inst_processed);
			else
				inst_processed =
					lio_process_iq_request_list(oct, iq, 0);

			if (inst_processed) {
				atomic_sub(inst_processed, &iq->instr_pending);
				iq->stats.instr_processed += inst_processed;
			}

			tot_inst_processed += inst_processed;
			inst_processed = 0;

		} while (tot_inst_processed < napi_budget);

		if (napi_budget && (tot_inst_processed >= napi_budget))
			tx_done = 0;
	}

	iq->last_db_time = jiffies;

	spin_unlock_bh(&iq->lock);

	spin_unlock(&iq->iq_flush_running_lock);

	return tx_done;
}

/* Process instruction queue after timeout.
 * This routine gets called from a workqueue or when removing the module.
 */
static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
	struct octeon_instr_queue *iq;
	u64 next_time;

	if (!oct)
		return;

	iq = oct->instr_queue[iq_no];
	if (!iq)
		return;

	/* Return immediately if there is no pending work */
	if (!atomic_read(&iq->instr_pending))
		return;
	/* If jiffies - last_db_time < db_timeout do nothing */
	next_time = iq->last_db_time + iq->db_timeout;
	if (!time_after(jiffies, (unsigned long)next_time))
		return;
	iq->last_db_time = jiffies;

	/* Flush the instruction queue */
	octeon_flush_iq(oct, iq, 1, 0);

	lio_enable_irq(NULL, iq);
}

/* Called at regular intervals from the per-queue delayed workqueue to check
 * the instruction queue for commands to be posted and for commands that were
 * fetched by Octeon.
 */
static void check_db_timeout(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
	u64 iq_no = wk->ctxul;
	struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
	u32 delay = 10;

	__check_db_timeout(oct, iq_no);
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}

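/* Post one command to an instruction queue, record the associated buffer for
 * completion handling and, if requested, ring the doorbell right away.
 * Returns the IQ_SEND_* status of the post attempt.
 */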
int
octeon_send_command(struct octeon_device *oct, u32 iq_no,
		    u32 force_db, void *cmd, void *buf,
		    u32 datasize, u32 reqtype)
{
	struct iq_post_status st;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	/* Take the lock to prevent other tasks and the tx interrupt handler
	 * from running while we post.
	 */
	spin_lock_bh(&iq->post_lock);

	st = __post_command2(iq, cmd);

	if (st.status != IQ_SEND_FAILED) {
		octeon_report_sent_bytes_to_bql(buf, reqtype);
		__add_to_request_list(iq, st.index, buf, reqtype);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);

		if (force_db)
			ring_doorbell(oct, iq);
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
	}

	spin_unlock_bh(&iq->post_lock);

	/* This is only done here to expedite packets being flushed
	 * for cases where there are no IQ completion interrupts.
	 */
	/*if (iq->do_auto_flush)*/
	/*	octeon_flush_iq(oct, iq, 2, 0);*/

	return st.status;
}

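/* Fill in the header, IRH and response-descriptor fields of a soft command.
 * The CN23XX PF uses the cmd3 format with a PKI IH3 header; other chips use
 * the cmd2 format with an IH2 header.
 */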
void
octeon_prepare_soft_command(struct octeon_device *oct,
			    struct octeon_soft_command *sc,
			    u8 opcode,
			    u8 subcode,
			    u32 irh_ossp,
			    u64 ossp0,
			    u64 ossp1)
{
	struct octeon_config *oct_cfg;
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_pki_ih3 *pki_ih3;
	struct octeon_instr_irh *irh;
	struct octeon_instr_rdp *rdp;

	WARN_ON(opcode > 15);
	WARN_ON(subcode > 127);

	oct_cfg = octeon_get_conf(oct);

	if (OCTEON_CN23XX_PF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;

		ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;

		pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;

		pki_ih3->w = 1;
		pki_ih3->raw = 1;
		pki_ih3->utag = 1;
		pki_ih3->uqpg = oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
		pki_ih3->utt = 1;
		pki_ih3->tag = LIO_CONTROL;
		pki_ih3->tagtype = ATOMIC_TAG;
		pki_ih3->qpg = oct->instr_queue[sc->iq_no]->txpciq.s.qpg;
		pki_ih3->pm = 0x7;
		pki_ih3->sl = 8;

		if (sc->datasize)
			ih3->dlengsz = sc->datasize;

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd3.ossp[0] = ossp0;
		sc->cmd.cmd3.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/* pki_ih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 48 bytes */
			ih3->fsz = LIO_SOFTCMDRESP_IH3;
		} else {
			irh->rflag = 0;
			/* pki_ih3 + irh + ossp[0] + ossp[1] = 32 bytes */
			ih3->fsz = LIO_PCICMD_O3;
		}

	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		ih2->tagtype = ATOMIC_TAG;
		ih2->tag = LIO_CONTROL;
		ih2->raw = 1;
		ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);

		if (sc->datasize) {
			ih2->dlengsz = sc->datasize;
			ih2->rs = 1;
		}

		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		irh->opcode = opcode;
		irh->subcode = subcode;

		/* opcode/subcode specific parameters (ossp) */
		irh->ossp = irh_ossp;
		sc->cmd.cmd2.ossp[0] = ossp0;
		sc->cmd.cmd2.ossp[1] = ossp1;

		if (sc->rdatasize) {
			rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
			rdp->pcie_port = oct->pcie_port;
			rdp->rlen = sc->rdatasize;

			irh->rflag = 1;
			/* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
			ih2->fsz = LIO_SOFTCMDRESP_IH2;
		} else {
			irh->rflag = 0;
			/* irh + ossp[0] + ossp[1] = 24 bytes */
			ih2->fsz = LIO_PCICMD_O2;
		}
	}
}

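/* Hook up the DMA data and response pointers of a prepared soft command,
 * initialize its completion word and timeout, then post it to its
 * instruction queue with the doorbell rung.
 */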
int octeon_send_soft_command(struct octeon_device *oct,
			     struct octeon_soft_command *sc)
{
	struct octeon_instr_ih2 *ih2;
	struct octeon_instr_ih3 *ih3;
	struct octeon_instr_irh *irh;
	u32 len;

	if (OCTEON_CN23XX_PF(oct)) {
		ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
		if (ih3->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd3.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd3.rptr = sc->dmarptr;
		}
		len = (u32)ih3->dlengsz;
	} else {
		ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
		if (ih2->dlengsz) {
			WARN_ON(!sc->dmadptr);
			sc->cmd.cmd2.dptr = sc->dmadptr;
		}
		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
		if (irh->rflag) {
			WARN_ON(!sc->dmarptr);
			WARN_ON(!sc->status_word);
			*sc->status_word = COMPLETION_WORD_INIT;
			sc->cmd.cmd2.rptr = sc->dmarptr;
		}
		len = (u32)ih2->dlengsz;
	}

	if (sc->wait_time)
		sc->timeout = jiffies + sc->wait_time;

	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
				    len, REQTYPE_SOFT_COMMAND));
}

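/* Pre-allocate MAX_SOFT_COMMAND_BUFFERS DMA buffers and place them on the
 * device's soft-command free pool. Returns 0 on success, 1 on allocation
 * failure (after releasing whatever was already allocated).
 */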
int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
{
	int i;
	u64 dma_addr;
	struct octeon_soft_command *sc;

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
	spin_lock_init(&oct->sc_buf_pool.lock);
	atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);

	for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
		sc = (struct octeon_soft_command *)
			lio_dma_alloc(oct,
				      SOFT_COMMAND_BUFFER_SIZE,
				      (dma_addr_t *)&dma_addr);
		if (!sc) {
			octeon_free_sc_buffer_pool(oct);
			return 1;
		}

		sc->dma_addr = dma_addr;
		sc->size = SOFT_COMMAND_BUFFER_SIZE;

		list_add_tail(&sc->node, &oct->sc_buf_pool.head);
	}

	return 0;
}

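/* Return every buffer in the soft-command pool to the DMA allocator and
 * leave the pool empty.
 */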
int octeon_free_sc_buffer_pool(struct octeon_device *oct)
{
	struct list_head *tmp, *tmp2;
	struct octeon_soft_command *sc;

	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
		list_del(tmp);

		sc = (struct octeon_soft_command *)tmp;

		lio_dma_free(oct, sc->size, sc, sc->dma_addr);
	}

	INIT_LIST_HEAD(&oct->sc_buf_pool.head);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	return 0;
}

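/* Take one buffer from the soft-command pool and carve it into context, data
 * and response-data regions, with data and rdata starting on 128-byte
 * boundaries. Returns NULL if the pool is empty.
 */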
struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
						      u32 datasize,
						      u32 rdatasize,
						      u32 ctxsize)
{
	u64 dma_addr;
	u32 size;
	u32 offset = sizeof(struct octeon_soft_command);
	struct octeon_soft_command *sc = NULL;
	struct list_head *tmp;

	WARN_ON((offset + datasize + rdatasize + ctxsize) >
		SOFT_COMMAND_BUFFER_SIZE);

	spin_lock_bh(&oct->sc_buf_pool.lock);

	if (list_empty(&oct->sc_buf_pool.head)) {
		spin_unlock_bh(&oct->sc_buf_pool.lock);
		return NULL;
	}

	list_for_each(tmp, &oct->sc_buf_pool.head)
		break;

	list_del(tmp);

	atomic_inc(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);

	sc = (struct octeon_soft_command *)tmp;

	dma_addr = sc->dma_addr;
	size = sc->size;

	memset(sc, 0, sc->size);

	sc->dma_addr = dma_addr;
	sc->size = size;

	if (ctxsize) {
		sc->ctxptr = (u8 *)sc + offset;
		sc->ctxsize = ctxsize;
	}

	/* Start data at 128 byte boundary */
	offset = (offset + ctxsize + 127) & 0xffffff80;

	if (datasize) {
		sc->virtdptr = (u8 *)sc + offset;
		sc->dmadptr = dma_addr + offset;
		sc->datasize = datasize;
	}

	/* Start rdata at 128 byte boundary */
	offset = (offset + datasize + 127) & 0xffffff80;

	if (rdatasize) {
		WARN_ON(rdatasize < 16);
		sc->virtrptr = (u8 *)sc + offset;
		sc->dmarptr = dma_addr + offset;
		sc->rdatasize = rdatasize;
		sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
	}

	return sc;
}

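/* Return a soft-command buffer to the pool. */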
void octeon_free_soft_command(struct octeon_device *oct,
			      struct octeon_soft_command *sc)
{
	spin_lock_bh(&oct->sc_buf_pool.lock);

	list_add_tail(&sc->node, &oct->sc_buf_pool.head);

	atomic_dec(&oct->sc_buf_pool.alloc_buf_count);

	spin_unlock_bh(&oct->sc_buf_pool.lock);
}