/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Perf Linux driver
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/ntb.h>
#include <linux/mutex.h>

#define DRIVER_NAME		"ntb_perf"
#define DRIVER_DESCRIPTION	"PCIe NTB Performance Measurement Tool"

#define DRIVER_LICENSE		"Dual BSD/GPL"
#define DRIVER_VERSION		"1.0"
#define DRIVER_AUTHOR		"Dave Jiang <dave.jiang@intel.com>"

#define PERF_LINK_DOWN_TIMEOUT	10
#define PERF_VERSION		0xffff0001
#define MAX_THREADS		32
#define MAX_TEST_SIZE		SZ_1M
#define MAX_SRCS		32
#define DMA_OUT_RESOURCE_TO	msecs_to_jiffies(50)
#define DMA_RETRIES		20
#define SZ_4G			(1ULL << 32)
#define MAX_SEG_ORDER		20 /* no larger than 1M for kmalloc buffer */
#define PIDX			NTB_DEF_PEER_IDX

MODULE_LICENSE(DRIVER_LICENSE);
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);

static struct dentry *perf_debugfs_dir;

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");

static unsigned int seg_order = 19; /* 512K */
module_param(seg_order, uint, 0644);
MODULE_PARM_DESC(seg_order, "size order [2^n] of buffer segment for testing");

static unsigned int run_order = 32; /* 4G */
module_param(run_order, uint, 0644);
MODULE_PARM_DESC(run_order, "size order [2^n] of total data to transfer");

static bool use_dma; /* default to 0 */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to measure performance");

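/*
 * Inbound memory window state: the peer BAR mapping, hardware alignment
 * constraints, and the local coherent buffer backing the translation.
 */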
struct perf_mw {
	phys_addr_t	phys_addr;
	resource_size_t	phys_size;
	resource_size_t	xlat_align;
	resource_size_t	xlat_align_size;
	void __iomem	*vbase;
	size_t		xlat_size;
	size_t		buf_size;
	void		*virt_addr;
	dma_addr_t	dma_addr;
};

struct perf_ctx;

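/*
 * Per-thread test context: the worker task, an optional DMA channel,
 * pre-allocated source buffers, and the results of the last run.
 */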
struct pthr_ctx {
	struct task_struct	*thread;
	struct perf_ctx		*perf;
	atomic_t		dma_sync;
	struct dma_chan		*dma_chan;
	int			dma_prep_err;
	int			src_idx;
	void			*srcs[MAX_SRCS];
	wait_queue_head_t	*wq;
	int			status;
	u64			copied;
	u64			diff_us;
};

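/* Per-device context shared by the link handlers, debugfs files and threads */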
struct perf_ctx {
	struct ntb_dev		*ntb;
	spinlock_t		db_lock;
	struct perf_mw		mw;
	bool			link_is_up;
	struct delayed_work	link_work;
	wait_queue_head_t	link_wq;
	struct dentry		*debugfs_node_dir;
	struct dentry		*debugfs_run;
	struct dentry		*debugfs_threads;
	u8			perf_threads;
	/* mutex ensures only one set of threads run at once */
	struct mutex		run_mutex;
	struct pthr_ctx		pthr_ctx[MAX_THREADS];
	atomic_t		tsync;
	atomic_t		tdone;
};

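/* Scratchpad layout used to exchange version and window size with the peer */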
enum {
	VERSION = 0,
	MW_SZ_HIGH,
	MW_SZ_LOW,
	MAX_SPAD
};

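/*
 * Link event callback: schedule the setup work on link up; otherwise
 * mark the link down and cancel any setup still in flight.
 */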
static void perf_link_event(void *ctx)
{
	struct perf_ctx *perf = ctx;

	if (ntb_link_is_up(perf->ntb, NULL, NULL) == 1) {
		schedule_delayed_work(&perf->link_work, 2*HZ);
	} else {
		dev_dbg(&perf->ntb->pdev->dev, "link down\n");

		if (!perf->link_is_up)
			cancel_delayed_work_sync(&perf->link_work);

		perf->link_is_up = false;
	}
}

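/* Doorbell callback: not used for measurement, just logs the event */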
static void perf_db_event(void *ctx, int vec)
{
	struct perf_ctx *perf = ctx;
	u64 db_bits, db_mask;

	db_mask = ntb_db_vector_mask(perf->ntb, vec);
	db_bits = ntb_db_read(perf->ntb);

	dev_dbg(&perf->ntb->dev, "doorbell vec %d mask %#llx bits %#llx\n",
		vec, db_mask, db_bits);
}

static const struct ntb_ctx_ops perf_ops = {
	.link_event = perf_link_event,
	.db_event = perf_db_event,
};

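/* DMA completion callback: one fewer copy outstanding on this thread */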
static void perf_copy_callback(void *data)
{
	struct pthr_ctx *pctx = data;

	atomic_dec(&pctx->dma_sync);
}

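/*
 * Copy one segment into the peer's memory window, either synchronously
 * with memcpy_toio() or by queueing a DMA memcpy descriptor.  Returns
 * the bytes handed off, 0 if the DMA copy was dropped, or a negative
 * errno.
 */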
static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
			 char *src, size_t size)
{
	struct perf_ctx *perf = pctx->perf;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = pctx->dma_chan;
	struct dma_device *device;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	size_t src_off, dst_off;
	struct perf_mw *mw = &perf->mw;
	void __iomem *vbase;
	void __iomem *dst_vaddr;
	dma_addr_t dst_phys;
	int retries = 0;

	if (!use_dma) {
		memcpy_toio(dst, src, size);
		return size;
	}

	if (!chan) {
		dev_err(&perf->ntb->dev, "DMA engine does not exist\n");
		return -EINVAL;
	}

	device = chan->device;
	src_off = (uintptr_t)src & ~PAGE_MASK;
	dst_off = (uintptr_t __force)dst & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, src_off, dst_off, size))
		return -ENODEV;

	vbase = mw->vbase;
	dst_vaddr = dst;
	dst_phys = mw->phys_addr + (dst_vaddr - vbase);

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->len = size;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(src),
				      src_off, size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	do {
		txd = device->device_prep_dma_memcpy(chan, dst_phys,
						     unmap->addr[0],
						     size, DMA_PREP_INTERRUPT);
		if (!txd) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(DMA_OUT_RESOURCE_TO);
		}
	} while (!txd && (++retries < DMA_RETRIES));

	if (!txd) {
		pctx->dma_prep_err++;
		goto err_get_unmap;
	}

	txd->callback = perf_copy_callback;
	txd->callback_param = pctx;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	atomic_inc(&pctx->dma_sync);
	dma_async_issue_pending(chan);

	return size;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
	return 0;
}

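/*
 * Push 'total' bytes through the window in 'buf_size' chunks, wrapping
 * back to the window base every 'win_size' bytes, and record the copied
 * byte count and elapsed time in the thread context.
 */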
static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
			  u64 buf_size, u64 win_size, u64 total)
{
	int chunks, total_chunks, i;
	int copied_chunks = 0;
	u64 copied = 0, result;
	char __iomem *tmp = dst;
	u64 perf, diff_us;
	ktime_t kstart, kstop, kdiff;
	unsigned long last_sleep = jiffies;

	chunks = div64_u64(win_size, buf_size);
	total_chunks = div64_u64(total, buf_size);
	kstart = ktime_get();

	for (i = 0; i < total_chunks; i++) {
		result = perf_copy(pctx, tmp, src, buf_size);
		copied += result;
		copied_chunks++;
		if (copied_chunks == chunks) {
			tmp = dst;
			copied_chunks = 0;
		} else
			tmp += buf_size;

		/* Probably should schedule every 5s to prevent soft hang. */
		if (unlikely((jiffies - last_sleep) > 5 * HZ)) {
			last_sleep = jiffies;
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);
		}

		if (unlikely(kthread_should_stop()))
			break;
	}

	if (use_dma) {
		pr_debug("%s: All DMA descriptors submitted\n", current->comm);
		while (atomic_read(&pctx->dma_sync) != 0) {
			if (kthread_should_stop())
				break;
			msleep(20);
		}
	}

	kstop = ktime_get();
	kdiff = ktime_sub(kstop, kstart);
	diff_us = ktime_to_us(kdiff);

	pr_debug("%s: copied %llu bytes\n", current->comm, copied);

	pr_debug("%s: lasted %llu usecs\n", current->comm, diff_us);

	perf = div64_u64(copied, diff_us);

	pr_debug("%s: MBytes/s: %llu\n", current->comm, perf);

	pctx->copied = copied;
	pctx->diff_us = diff_us;

	return 0;
}

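/* Pick a DMA channel on the same NUMA node as the NTB device */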
static bool perf_dma_filter_fn(struct dma_chan *chan, void *node)
{
	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}

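/*
 * Test thread body: optionally grab a DMA channel, allocate source
 * buffers, spin at the tsync barrier until every sibling is ready, run
 * the transfer, then park until debugfs_run_write() reaps the thread.
 */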
static int ntb_perf_thread(void *data)
{
	struct pthr_ctx *pctx = data;
	struct perf_ctx *perf = pctx->perf;
	struct pci_dev *pdev = perf->ntb->pdev;
	struct perf_mw *mw = &perf->mw;
	char __iomem *dst;
	u64 win_size, buf_size, total;
	void *src;
	int rc, node, i;
	struct dma_chan *dma_chan = NULL;

	pr_debug("kthread %s starting...\n", current->comm);

	node = dev_to_node(&pdev->dev);

	if (use_dma && !pctx->dma_chan) {
		dma_cap_mask_t dma_mask;

		dma_cap_zero(dma_mask);
		dma_cap_set(DMA_MEMCPY, dma_mask);
		dma_chan = dma_request_channel(dma_mask, perf_dma_filter_fn,
					       (void *)(unsigned long)node);
		if (!dma_chan) {
			pr_warn("%s: cannot acquire DMA channel, quitting\n",
				current->comm);
			return -ENODEV;
		}
		pctx->dma_chan = dma_chan;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		pctx->srcs[i] = kmalloc_node(MAX_TEST_SIZE, GFP_KERNEL, node);
		if (!pctx->srcs[i]) {
			rc = -ENOMEM;
			goto err;
		}
	}

	win_size = mw->phys_size;
	buf_size = 1ULL << seg_order;
	total = 1ULL << run_order;

	if (buf_size > MAX_TEST_SIZE)
		buf_size = MAX_TEST_SIZE;

	dst = (char __iomem *)mw->vbase;

	atomic_inc(&perf->tsync);
	while (atomic_read(&perf->tsync) != perf->perf_threads)
		schedule();

	src = pctx->srcs[pctx->src_idx];
	pctx->src_idx = (pctx->src_idx + 1) & (MAX_SRCS - 1);

	rc = perf_move_data(pctx, dst, src, buf_size, win_size, total);

	atomic_dec(&perf->tsync);

	if (rc < 0) {
		pr_err("%s: failed\n", current->comm);
		rc = -ENXIO;
		goto err;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	atomic_inc(&perf->tdone);
	wake_up(pctx->wq);
	rc = 0;
	goto done;

err:
	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	if (dma_chan) {
		dma_release_channel(dma_chan);
		pctx->dma_chan = NULL;
	}

done:
	/* Wait until we are told to stop */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	return rc;
}

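/* Clear the inbound window translation and release the backing buffer */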
static void perf_free_mw(struct perf_ctx *perf)
{
	struct perf_mw *mw = &perf->mw;
	struct pci_dev *pdev = perf->ntb->pdev;

	if (!mw->virt_addr)
		return;

	ntb_mw_clear_trans(perf->ntb, PIDX, 0);
	dma_free_coherent(&pdev->dev, mw->buf_size,
			  mw->virt_addr, mw->dma_addr);
	mw->xlat_size = 0;
	mw->buf_size = 0;
	mw->virt_addr = NULL;
}

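/*
 * Size the inbound window: allocate a coherent buffer rounded up to the
 * hardware alignment constraints and program it as the translation for
 * window 0.
 */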
static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
{
	struct perf_mw *mw = &perf->mw;
	size_t xlat_size, buf_size;
	int rc;

	if (!size)
		return -EINVAL;

	xlat_size = round_up(size, mw->xlat_align_size);
	buf_size = round_up(size, mw->xlat_align);

	if (mw->xlat_size == xlat_size)
		return 0;

	if (mw->buf_size)
		perf_free_mw(perf);

	mw->xlat_size = xlat_size;
	mw->buf_size = buf_size;

	mw->virt_addr = dma_alloc_coherent(&perf->ntb->pdev->dev, buf_size,
					   &mw->dma_addr, GFP_KERNEL);
	if (!mw->virt_addr) {
		mw->xlat_size = 0;
		mw->buf_size = 0;
		return -ENOMEM;
	}

	rc = ntb_mw_set_trans(perf->ntb, PIDX, 0, mw->dma_addr, mw->xlat_size);
	if (rc) {
		dev_err(&perf->ntb->dev, "Unable to set mw0 translation\n");
		perf_free_mw(perf);
		return -EIO;
	}

	return 0;
}

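/*
 * Deferred link-up work: publish the local window size and protocol
 * version through the peer's scratchpads, read back what the peer
 * published, and size the inbound window to match.  Reschedules itself
 * until the peer has written a matching version.
 */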
static void perf_link_work(struct work_struct *work)
{
	struct perf_ctx *perf =
		container_of(work, struct perf_ctx, link_work.work);
	struct ntb_dev *ndev = perf->ntb;
	struct pci_dev *pdev = ndev->pdev;
	u32 val;
	u64 size;
	int rc;

	dev_dbg(&perf->ntb->pdev->dev, "%s called\n", __func__);

	size = perf->mw.phys_size;

	if (max_mw_size && size > max_mw_size)
		size = max_mw_size;

	ntb_peer_spad_write(ndev, PIDX, MW_SZ_HIGH, upper_32_bits(size));
	ntb_peer_spad_write(ndev, PIDX, MW_SZ_LOW, lower_32_bits(size));
	ntb_peer_spad_write(ndev, PIDX, VERSION, PERF_VERSION);

	/* now read what peer wrote */
	val = ntb_spad_read(ndev, VERSION);
	if (val != PERF_VERSION) {
		dev_dbg(&pdev->dev, "Remote version = %#x\n", val);
		goto out;
	}

	val = ntb_spad_read(ndev, MW_SZ_HIGH);
	size = (u64)val << 32;

	val = ntb_spad_read(ndev, MW_SZ_LOW);
	size |= val;

	dev_dbg(&pdev->dev, "Remote MW size = %#llx\n", size);

	rc = perf_set_mw(perf, size);
	if (rc)
		goto out1;

	perf->link_is_up = true;
	wake_up(&perf->link_wq);

	return;

out1:
	perf_free_mw(perf);

out:
	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&perf->link_work,
				      msecs_to_jiffies(PERF_LINK_DOWN_TIMEOUT));
}

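/* Query alignment and geometry of peer memory window 0 and ioremap it */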
static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf)
{
	struct perf_mw *mw;
	int rc;

	mw = &perf->mw;

	rc = ntb_mw_get_align(ntb, PIDX, 0, &mw->xlat_align,
			      &mw->xlat_align_size, NULL);
	if (rc)
		return rc;

	rc = ntb_peer_mw_get_addr(ntb, 0, &mw->phys_addr, &mw->phys_size);
	if (rc)
		return rc;

	perf->mw.vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
	if (!mw->vbase)
		return -ENOMEM;

	return 0;
}

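/*
 * debugfs "run" read handler: reports "running" while a test holds the
 * run_mutex, otherwise prints per-thread status and throughput from the
 * last completed run.
 */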
static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct perf_ctx *perf = filp->private_data;
	char *buf;
	ssize_t ret, out_off = 0;
	struct pthr_ctx *pctx;
	int i;
	u64 rate;

	if (!perf)
		return 0;

	buf = kmalloc(1024, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (mutex_is_locked(&perf->run_mutex)) {
		out_off = scnprintf(buf, 64, "running\n");
		goto read_from_buf;
	}

	for (i = 0; i < MAX_THREADS; i++) {
		pctx = &perf->pthr_ctx[i];

		if (pctx->status == -ENODATA)
			break;

		if (pctx->status) {
			out_off += scnprintf(buf + out_off, 1024 - out_off,
					     "%d: error %d\n", i,
					     pctx->status);
			continue;
		}

		rate = div64_u64(pctx->copied, pctx->diff_us);
		out_off += scnprintf(buf + out_off, 1024 - out_off,
				     "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
				     i, pctx->copied, pctx->diff_us, rate);
	}

read_from_buf:
	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_off);
	kfree(buf);

	return ret;
}

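/* Stop every worker thread and record its exit status */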
static void threads_cleanup(struct perf_ctx *perf)
{
	struct pthr_ctx *pctx;
	int i;

	for (i = 0; i < MAX_THREADS; i++) {
		pctx = &perf->pthr_ctx[i];
		if (pctx->thread) {
			pctx->status = kthread_stop(pctx->thread);
			pctx->thread = NULL;
		}
	}
}

static void perf_clear_thread_status(struct perf_ctx *perf)
{
	int i;

	for (i = 0; i < MAX_THREADS; i++)
		perf->pthr_ctx[i].status = -ENODATA;
}

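/*
 * debugfs "run" write handler: waits for the link, clamps the thread
 * count and transfer orders to sane values, launches one kthread per
 * requested thread, then waits for completion and reaps them.  The
 * run_mutex guarantees a single active run.
 */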
static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct perf_ctx *perf = filp->private_data;
	int node, i;
	DECLARE_WAIT_QUEUE_HEAD(wq);

	if (wait_event_interruptible(perf->link_wq, perf->link_is_up))
		return -ENOLINK;

	if (perf->perf_threads == 0)
		return -EINVAL;

	if (!mutex_trylock(&perf->run_mutex))
		return -EBUSY;

	perf_clear_thread_status(perf);

	if (perf->perf_threads > MAX_THREADS) {
		perf->perf_threads = MAX_THREADS;
		pr_info("Reset total threads to: %u\n", MAX_THREADS);
	}

	/* no greater than 1M */
	if (seg_order > MAX_SEG_ORDER) {
		seg_order = MAX_SEG_ORDER;
		pr_info("Fix seg_order to %u\n", seg_order);
	}

	if (run_order < seg_order) {
		run_order = seg_order;
		pr_info("Fix run_order to %u\n", run_order);
	}

	node = dev_to_node(&perf->ntb->pdev->dev);
	atomic_set(&perf->tdone, 0);

	/* launch kernel thread */
	for (i = 0; i < perf->perf_threads; i++) {
		struct pthr_ctx *pctx;

		pctx = &perf->pthr_ctx[i];
		atomic_set(&pctx->dma_sync, 0);
		pctx->perf = perf;
		pctx->wq = &wq;
		pctx->thread =
			kthread_create_on_node(ntb_perf_thread,
					       (void *)pctx,
					       node, "ntb_perf %d", i);
		if (IS_ERR(pctx->thread)) {
			pctx->thread = NULL;
			goto err;
		} else {
			wake_up_process(pctx->thread);
		}
	}

	wait_event_interruptible(wq,
		atomic_read(&perf->tdone) == perf->perf_threads);

	threads_cleanup(perf);
	mutex_unlock(&perf->run_mutex);
	return count;

err:
	threads_cleanup(perf);
	mutex_unlock(&perf->run_mutex);
	return -ENXIO;
}

static const struct file_operations ntb_perf_debugfs_run = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_run_read,
	.write = debugfs_run_write,
};

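/*
 * Example usage from userspace, assuming debugfs is mounted at the
 * conventional /sys/kernel/debug (the <pci-id> directory name comes
 * from pci_name() below):
 *
 *	echo 8 > /sys/kernel/debug/ntb_perf/<pci-id>/threads
 *	echo 1 > /sys/kernel/debug/ntb_perf/<pci-id>/run
 *	cat /sys/kernel/debug/ntb_perf/<pci-id>/run
 */

/* Create the per-device debugfs directory and its "run"/"threads" files */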
static int perf_debugfs_setup(struct perf_ctx *perf)
{
	struct pci_dev *pdev = perf->ntb->pdev;

	if (!debugfs_initialized())
		return -ENODEV;

	if (!perf_debugfs_dir) {
		perf_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
		if (!perf_debugfs_dir)
			return -ENODEV;
	}

	perf->debugfs_node_dir = debugfs_create_dir(pci_name(pdev),
						    perf_debugfs_dir);
	if (!perf->debugfs_node_dir)
		return -ENODEV;

	perf->debugfs_run = debugfs_create_file("run", S_IRUSR | S_IWUSR,
						perf->debugfs_node_dir, perf,
						&ntb_perf_debugfs_run);
	if (!perf->debugfs_run)
		return -ENODEV;

	perf->debugfs_threads = debugfs_create_u8("threads", S_IRUSR | S_IWUSR,
						  perf->debugfs_node_dir,
						  &perf->perf_threads);
	if (!perf->debugfs_threads)
		return -ENODEV;

	return 0;
}

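/*
 * Client probe: verify the device exposes enough scratchpads and an
 * inbound-translation MW API, map the window, register the context and
 * bring the link up.
 */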
static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct pci_dev *pdev = ntb->pdev;
	struct perf_ctx *perf;
	int node;
	int rc = 0;

	if (ntb_spad_count(ntb) < MAX_SPAD) {
		dev_err(&ntb->dev, "Not enough scratch pad registers for %s",
			DRIVER_NAME);
		return -EIO;
	}

	if (!ntb->ops->mw_set_trans) {
		dev_err(&ntb->dev, "Need inbound MW based NTB API\n");
		return -EINVAL;
	}

	if (ntb_peer_port_count(ntb) != NTB_DEF_PEER_CNT)
		dev_warn(&ntb->dev, "Multi-port NTB devices unsupported\n");

	node = dev_to_node(&pdev->dev);

	perf = kzalloc_node(sizeof(*perf), GFP_KERNEL, node);
	if (!perf) {
		rc = -ENOMEM;
		goto err_perf;
	}

	perf->ntb = ntb;
	perf->perf_threads = 1;
	atomic_set(&perf->tsync, 0);
	mutex_init(&perf->run_mutex);
	spin_lock_init(&perf->db_lock);
	perf_setup_mw(ntb, perf);
	init_waitqueue_head(&perf->link_wq);
	INIT_DELAYED_WORK(&perf->link_work, perf_link_work);

	rc = ntb_set_ctx(ntb, perf, &perf_ops);
	if (rc)
		goto err_ctx;

	perf->link_is_up = false;
	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ntb);

	rc = perf_debugfs_setup(perf);
	if (rc)
		goto err_ctx;

	perf_clear_thread_status(perf);

	return 0;

err_ctx:
	cancel_delayed_work_sync(&perf->link_work);
	kfree(perf);
err_perf:
	return rc;
}

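/* Client remove: block new runs, tear down the context and free everything */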
static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct perf_ctx *perf = ntb->ctx;
	int i;

	dev_dbg(&perf->ntb->dev, "%s called\n", __func__);

	mutex_lock(&perf->run_mutex);

	cancel_delayed_work_sync(&perf->link_work);

	ntb_clear_ctx(ntb);
	ntb_link_disable(ntb);

	debugfs_remove_recursive(perf_debugfs_dir);
	perf_debugfs_dir = NULL;

	if (use_dma) {
		for (i = 0; i < MAX_THREADS; i++) {
			struct pthr_ctx *pctx = &perf->pthr_ctx[i];

			if (pctx->dma_chan)
				dma_release_channel(pctx->dma_chan);
		}
	}

	kfree(perf);
}

static struct ntb_client perf_client = {
	.ops = {
		.probe = perf_probe,
		.remove = perf_remove,
	},
};
module_ntb_client(perf_client);