/* Copyright 2009 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "qman_test.h"

#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/kthread.h>

/*
 * Algorithm:
 *
 * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
 * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
 * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
 * shuttle a "hot potato" frame around them such that every forwarding action
 * moves it from one cpu to another. (The use of more than one handler per cpu
 * is to allow enough handlers/FQs to truly test the significance of caching -
 * i.e. when cache expiries are occurring.)
 *
 * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and
 * every word of the frame data will undergo a transformation step on each
 * forwarding action. To achieve this, each handler will be assigned a 32-bit
 * "mixer", produced using a 32-bit LFSR. When a frame is received by a
 * handler, the mixer of the expected sender is XOR'd into all words of the
 * entire frame, which is then validated against the original values. Then,
 * before forwarding, the entire frame is XOR'd with the mixer of the current
 * handler. Apart from validating that the frame is taking the expected path,
 * this also provides some quasi-realistic overheads to each forwarding
 * action - dereferencing *all* the frame data, computation, and conditional
 * branching. There is a "special" handler designated to act as the instigator
 * of the test by creating and enqueuing the "hot potato" frame, and to
 * determine when the test has completed by counting HP_LOOPS iterations.
 *
 * Init phases:
 *
 * 1. prepare each cpu's 'hp_cpu' struct using on_all_cpus() (on_each_cpu()
 *    would run the callback from irq context, see below) and link them into
 *    'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
 *    handlers and link-list them (but do no other handler setup).
 *
 * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
 *    hp_cpu's 'iterator' to point to its first handler. With each loop,
 *    allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
 *    and advance the iterator for the next loop. This includes a final fixup,
 *    which connects the last handler to the first (and which is why phase 2
 *    and 3 are separate).
 *
 * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
 *    hp_cpu's 'iterator' to point to its first handler. With each loop,
 *    initialise FQ objects and advance the iterator for the next loop.
 *    Moreover, do this initialisation on the cpu it applies to so that Rx FQ
 *    initialisation targets the correct cpu.
 */
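
/*
 * For illustration, assuming two online cpus (listed in order cpu0, cpu1)
 * and HP_PER_CPU == 2, init_phase2() below chains the handlers into one
 * ring, where each arrow is an enqueue onto the next handler's Rx FQ:
 *
 *   cpu0/handler0 -> cpu1/handler0 -> cpu0/handler1 -> cpu1/handler1
 *         ^                                                 |
 *         +-------------------------------------------------+
 *
 * For a frame word W, a hop from handler A to handler B enqueues
 * (W ^ A->tx_mixer), and B recovers W by XOR'ing in its own rx_mixer, since
 * B->rx_mixer == A->tx_mixer by construction.
 */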

/*
 * helper to run something on all cpus (can't use on_each_cpu(), as that
 * invokes the fn from irq context, which is too restrictive).
 */
struct bstrap {
	int (*fn)(void);
	atomic_t started;
};
static int bstrap_fn(void *bs)
{
	struct bstrap *bstrap = bs;
	int err;

	atomic_inc(&bstrap->started);
	err = bstrap->fn();
	if (err)
		return err;
	while (!kthread_should_stop())
		msleep(20);
	return 0;
}
static int on_all_cpus(int (*fn)(void))
{
	int cpu;

	for_each_cpu(cpu, cpu_online_mask) {
		struct bstrap bstrap = {
			.fn = fn,
			.started = ATOMIC_INIT(0)
		};
		struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
			"hotpotato%d", cpu);
		int ret;

		if (IS_ERR(k))
			return PTR_ERR(k);
		kthread_bind(k, cpu);
		wake_up_process(k);
		/*
		 * If we call kthread_stop() before the "wake up" has had an
		 * effect, then the thread may exit with -EINTR without ever
		 * running the function. So poll until it's started before
		 * requesting it to stop.
		 */
		while (!atomic_read(&bstrap.started))
			msleep(20);
		ret = kthread_stop(k);
		if (ret)
			return ret;
	}
	return 0;
}

struct hp_handler {

	/* The following data is stashed when 'rx' is dequeued; */
	/* -------------- */
	/* The Rx FQ, dequeues of which will stash the entire hp_handler */
	struct qman_fq rx;
	/* The Tx FQ we should forward to */
	struct qman_fq tx;
	/* The value we XOR post-dequeue, prior to validating */
	u32 rx_mixer;
	/* The value we XOR pre-enqueue, after validating */
	u32 tx_mixer;
	/* what the hotpotato address should be on dequeue */
	dma_addr_t addr;
	u32 *frame_ptr;

	/* The following data isn't (necessarily) stashed on dequeue; */
	/* -------------- */
	u32 fqid_rx, fqid_tx;
	/* list node for linking us into 'hp_cpu' */
	struct list_head node;
	/* Just to check ... */
	unsigned int processor_id;
} ____cacheline_aligned;
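
/*
 * Layout note: dequeue stashing copies the leading cachelines of this struct
 * (up to the 'fqid_rx' offset, see STASH_CTX_CL below) into the dequeueing
 * cpu's cache, which is why the per-dequeue hot fields are grouped first and
 * why the struct is ____cacheline_aligned.
 */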

struct hp_cpu {
	/* identify the cpu we run on; */
	unsigned int processor_id;
	/* root node for the per-cpu list of handlers */
	struct list_head handlers;
	/* list node for linking us into 'hp_cpu_list' */
	struct list_head node;
	/*
	 * when repeatedly scanning 'hp_cpu_list', each time linking the n'th
	 * handlers together, this is used as per-cpu iterator state
	 */
	struct hp_handler *iterator;
};

/* Each cpu has one of these */
static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);

/* links together the hp_cpu structs, in first-come first-serve order. */
static LIST_HEAD(hp_cpu_list);
static DEFINE_SPINLOCK(hp_lock);

static unsigned int hp_cpu_list_length;

/* the "special" handler, that starts and terminates the test. */
static struct hp_handler *special_handler;
static int loop_counter;

/* handlers are allocated out of this, so they're properly aligned. */
static struct kmem_cache *hp_handler_slab;

/* this is the frame data */
static void *__frame_ptr;
static u32 *frame_ptr;
static dma_addr_t frame_dma;

/* needed for dma_map*() */
static const struct qm_portal_config *pcfg;

/* the main function waits on this */
static DECLARE_WAIT_QUEUE_HEAD(queue);

#define HP_PER_CPU	2
#define HP_LOOPS	8
/* 320 bytes (80 words), like a small ethernet frame, spanning several cachelines */
#define HP_NUM_WORDS	80
/* First word of the LFSR-based frame data */
#define HP_FIRST_WORD	0xabbaf00d

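/*
 * 32-bit Galois LFSR, shifting right; when the low bit is set, -(prev & 1u)
 * is all-ones and the feedback mask 0xd0000001 is XOR'd in. E.g. the first
 * step from HP_FIRST_WORD is (0xabbaf00d >> 1) ^ 0xd0000001 == 0x85dd7807.
 */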
static inline u32 do_lfsr(u32 prev)
{
	return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
}

static int allocate_frame_data(void)
{
	u32 lfsr = HP_FIRST_WORD;
	int loop;

	if (!qman_dma_portal) {
		pr_crit("portal not available\n");
		return -EIO;
	}

	pcfg = qman_get_qm_portal_config(qman_dma_portal);

	/* over-allocate so the 64-byte alignment below cannot overrun */
	__frame_ptr = kmalloc(4 * HP_NUM_WORDS + 64, GFP_KERNEL);
	if (!__frame_ptr)
		return -ENOMEM;

	frame_ptr = PTR_ALIGN(__frame_ptr, 64);
	for (loop = 0; loop < HP_NUM_WORDS; loop++) {
		frame_ptr[loop] = lfsr;
		lfsr = do_lfsr(lfsr);
	}

	frame_dma = dma_map_single(pcfg->dev, frame_ptr, 4 * HP_NUM_WORDS,
				   DMA_BIDIRECTIONAL);
	if (dma_mapping_error(pcfg->dev, frame_dma)) {
		pr_crit("dma mapping failure\n");
		kfree(__frame_ptr);
		return -EIO;
	}

	return 0;
}

static void deallocate_frame_data(void)
{
	dma_unmap_single(pcfg->dev, frame_dma, 4 * HP_NUM_WORDS,
			 DMA_BIDIRECTIONAL);
	kfree(__frame_ptr);
}

static inline int process_frame_data(struct hp_handler *handler,
				     const struct qm_fd *fd)
{
	u32 *p = handler->frame_ptr;
	u32 lfsr = HP_FIRST_WORD;
	int loop;

	if (qm_fd_addr_get64(fd) != handler->addr) {
		pr_crit("bad frame address, [%llX != %llX]\n",
			(unsigned long long)qm_fd_addr_get64(fd),
			(unsigned long long)handler->addr);
		return -EIO;
	}
	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
		*p ^= handler->rx_mixer;
		if (*p != lfsr) {
			pr_crit("corrupt frame data\n");
			return -EIO;
		}
		*p ^= handler->tx_mixer;
		lfsr = do_lfsr(lfsr);
	}
	return 0;
}

static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
					    struct qman_fq *fq,
					    const struct qm_dqrr_entry *dqrr)
{
	struct hp_handler *handler = (struct hp_handler *)fq;

	if (process_frame_data(handler, &dqrr->fd)) {
		WARN_ON(1);
		goto skip;
	}
	if (qman_enqueue(&handler->tx, &dqrr->fd)) {
		pr_crit("qman_enqueue() failed\n");
		WARN_ON(1);
	}
skip:
	return qman_cb_dqrr_consume;
}

static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
					     struct qman_fq *fq,
					     const struct qm_dqrr_entry *dqrr)
{
	struct hp_handler *handler = (struct hp_handler *)fq;

	process_frame_data(handler, &dqrr->fd);
	if (++loop_counter < HP_LOOPS) {
		if (qman_enqueue(&handler->tx, &dqrr->fd)) {
			pr_crit("qman_enqueue() failed\n");
			WARN_ON(1);
			goto skip;
		}
	} else {
		pr_info("Received final (%dth) frame\n", loop_counter);
		wake_up(&queue);
	}
skip:
	return qman_cb_dqrr_consume;
}

static int create_per_cpu_handlers(void)
{
	struct hp_handler *handler;
	int loop;
	struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);

	hp_cpu->processor_id = smp_processor_id();
	spin_lock(&hp_lock);
	list_add_tail(&hp_cpu->node, &hp_cpu_list);
	hp_cpu_list_length++;
	spin_unlock(&hp_lock);
	INIT_LIST_HEAD(&hp_cpu->handlers);
	for (loop = 0; loop < HP_PER_CPU; loop++) {
		handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
		if (!handler) {
			pr_crit("kmem_cache_alloc() failed\n");
			WARN_ON(1);
			return -ENOMEM;
		}
		handler->processor_id = hp_cpu->processor_id;
		handler->addr = frame_dma;
		handler->frame_ptr = frame_ptr;
		list_add_tail(&handler->node, &hp_cpu->handlers);
	}
	return 0;
}

static int destroy_per_cpu_handlers(void)
{
	struct list_head *loop, *tmp;
	struct hp_cpu *hp_cpu = this_cpu_ptr(&hp_cpus);

	spin_lock(&hp_lock);
	list_del(&hp_cpu->node);
	spin_unlock(&hp_lock);
	list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
		u32 flags = 0;
		struct hp_handler *handler = list_entry(loop, struct hp_handler,
							node);
		if (qman_retire_fq(&handler->rx, &flags) ||
		    (flags & QMAN_FQ_STATE_BLOCKOOS)) {
			pr_crit("qman_retire_fq(rx) failed, flags: %x\n",
				flags);
			WARN_ON(1);
			return -EIO;
		}
		if (qman_oos_fq(&handler->rx)) {
			pr_crit("qman_oos_fq(rx) failed\n");
			WARN_ON(1);
			return -EIO;
		}
		qman_destroy_fq(&handler->rx);
		qman_destroy_fq(&handler->tx);
		qman_release_fqid(handler->fqid_rx);
		list_del(&handler->node);
		kmem_cache_free(hp_handler_slab, handler);
	}
	return 0;
}

static inline u8 num_cachelines(u32 offset)
{
	u8 res = (offset + (L1_CACHE_BYTES - 1)) / L1_CACHE_BYTES;

	if (res > 3)
		return 3;
	return res;
}
#define STASH_DATA_CL \
	num_cachelines(HP_NUM_WORDS * 4)
#define STASH_CTX_CL \
	num_cachelines(offsetof(struct hp_handler, fqid_rx))
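
/*
 * Worked example, assuming L1_CACHE_BYTES == 64: the frame data is
 * HP_NUM_WORDS * 4 == 320 bytes, so num_cachelines() computes 5 and clamps
 * it to 3, the most that can be stashed; STASH_CTX_CL covers the hp_handler
 * fields that precede 'fqid_rx'.
 */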

static int init_handler(void *h)
{
	struct qm_mcc_initfq opts;
	struct hp_handler *handler = h;
	int err;

	if (handler->processor_id != smp_processor_id()) {
		err = -EIO;
		goto failed;
	}
	/* Set up rx */
	memset(&handler->rx, 0, sizeof(handler->rx));
	if (handler == special_handler)
		handler->rx.cb.dqrr = special_dqrr;
	else
		handler->rx.cb.dqrr = normal_dqrr;
	err = qman_create_fq(handler->fqid_rx, 0, &handler->rx);
	if (err) {
		pr_crit("qman_create_fq(rx) failed\n");
		goto failed;
	}
	memset(&opts, 0, sizeof(opts));
	opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL |
				   QM_INITFQ_WE_CONTEXTA);
	opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING);
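	/*
	 * Stash no annotation, STASH_DATA_CL cachelines of frame data and
	 * STASH_CTX_CL cachelines of FQ context (the hp_handler itself).
	 */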
	qm_fqd_set_stashing(&opts.fqd, 0, STASH_DATA_CL, STASH_CTX_CL);
	err = qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
			   QMAN_INITFQ_FLAG_LOCAL, &opts);
	if (err) {
		pr_crit("qman_init_fq(rx) failed\n");
		goto failed;
	}
	/* Set up tx */
	memset(&handler->tx, 0, sizeof(handler->tx));
	err = qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
			     &handler->tx);
	if (err) {
		pr_crit("qman_create_fq(tx) failed\n");
		goto failed;
	}

	return 0;
failed:
	return err;
}

static void init_handler_cb(void *h)
{
	if (init_handler(h))
		WARN_ON(1);
}

static int init_phase2(void)
{
	int loop;
	u32 fqid = 0;
	u32 lfsr = 0xdeadbeef;
	struct hp_cpu *hp_cpu;
	struct hp_handler *handler;

	for (loop = 0; loop < HP_PER_CPU; loop++) {
		list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
			int err;

			if (!loop)
				hp_cpu->iterator = list_first_entry(
						&hp_cpu->handlers,
						struct hp_handler, node);
			else
				hp_cpu->iterator = list_entry(
						hp_cpu->iterator->node.next,
						struct hp_handler, node);
			/* Rx FQID is the previous handler's Tx FQID */
			hp_cpu->iterator->fqid_rx = fqid;
			/* Allocate new FQID for Tx */
			err = qman_alloc_fqid(&fqid);
			if (err) {
				pr_crit("qman_alloc_fqid() failed\n");
				return err;
			}
			hp_cpu->iterator->fqid_tx = fqid;
			/* Rx mixer is the previous handler's Tx mixer */
			hp_cpu->iterator->rx_mixer = lfsr;
			/* Get new mixer for Tx */
			lfsr = do_lfsr(lfsr);
			hp_cpu->iterator->tx_mixer = lfsr;
		}
	}
	/* Fix up the first handler (fqid_rx==0, rx_mixer==0xdeadbeef) */
	hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
	handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
	if (handler->fqid_rx != 0 || handler->rx_mixer != 0xdeadbeef)
		return -EIO;
	handler->fqid_rx = fqid;
	handler->rx_mixer = lfsr;
	/* and tag it as our "special" handler */
	special_handler = handler;
	return 0;
}

static int init_phase3(void)
{
	int loop, err;
	struct hp_cpu *hp_cpu;

	for (loop = 0; loop < HP_PER_CPU; loop++) {
		list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
			if (!loop)
				hp_cpu->iterator = list_first_entry(
						&hp_cpu->handlers,
						struct hp_handler, node);
			else
				hp_cpu->iterator = list_entry(
						hp_cpu->iterator->node.next,
						struct hp_handler, node);
			preempt_disable();
			if (hp_cpu->processor_id == smp_processor_id()) {
				err = init_handler(hp_cpu->iterator);
				if (err) {
					preempt_enable();
					return err;
				}
			} else {
				smp_call_function_single(hp_cpu->processor_id,
					init_handler_cb, hp_cpu->iterator, 1);
			}
			preempt_enable();
		}
	}
	return 0;
}

static int send_first_frame(void *ignore)
{
	u32 *p = special_handler->frame_ptr;
	u32 lfsr = HP_FIRST_WORD;
	int loop, err;
	struct qm_fd fd;

	if (special_handler->processor_id != smp_processor_id()) {
		err = -EIO;
		goto failed;
	}
	memset(&fd, 0, sizeof(fd));
	qm_fd_addr_set64(&fd, special_handler->addr);
	qm_fd_set_contig_big(&fd, HP_NUM_WORDS * 4);
	for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
		if (*p != lfsr) {
			err = -EIO;
			pr_crit("corrupt frame data\n");
			goto failed;
		}
		*p ^= special_handler->tx_mixer;
		lfsr = do_lfsr(lfsr);
	}
	pr_info("Sending first frame\n");
	err = qman_enqueue(&special_handler->tx, &fd);
	if (err) {
		pr_crit("qman_enqueue() failed\n");
		goto failed;
	}

	return 0;
failed:
	return err;
}

static void send_first_frame_cb(void *ignore)
{
	if (send_first_frame(NULL))
		WARN_ON(1);
}

int qman_test_stash(void)
{
	int err;

	if (cpumask_weight(cpu_online_mask) < 2) {
		pr_info("%s(): skip - only 1 CPU\n", __func__);
		return 0;
	}

	pr_info("%s(): Starting\n", __func__);

	hp_cpu_list_length = 0;
	loop_counter = 0;
	hp_handler_slab = kmem_cache_create("hp_handler_slab",
			sizeof(struct hp_handler), L1_CACHE_BYTES,
			SLAB_HWCACHE_ALIGN, NULL);
	if (!hp_handler_slab) {
		err = -ENOMEM;
		pr_crit("kmem_cache_create() failed\n");
		goto failed;
	}

	err = allocate_frame_data();
	if (err)
		goto failed;

	/* Init phase 1 */
	pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
	if (on_all_cpus(create_per_cpu_handlers)) {
		err = -EIO;
		pr_crit("on_all_cpus() failed\n");
		goto failed;
	}
	pr_info("Number of cpus: %d, total of %d handlers\n",
		hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);

	err = init_phase2();
	if (err)
		goto failed;

	err = init_phase3();
	if (err)
		goto failed;

	preempt_disable();
	if (special_handler->processor_id == smp_processor_id()) {
		err = send_first_frame(NULL);
		if (err) {
			preempt_enable();
			goto failed;
		}
	} else {
		smp_call_function_single(special_handler->processor_id,
					 send_first_frame_cb, NULL, 1);
	}
	preempt_enable();

	wait_event(queue, loop_counter == HP_LOOPS);
	deallocate_frame_data();
	if (on_all_cpus(destroy_per_cpu_handlers)) {
		err = -EIO;
		pr_crit("on_all_cpus() failed\n");
		goto failed;
	}
	kmem_cache_destroy(hp_handler_slab);
	pr_info("%s(): Finished\n", __func__);

	return 0;
failed:
	WARN_ON(1);
	return err;
}