/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Perf Linux driver
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/sizes.h>
#include <linux/ntb.h>

#define DRIVER_NAME		"ntb_perf"
#define DRIVER_DESCRIPTION	"PCIe NTB Performance Measurement Tool"

#define DRIVER_LICENSE		"Dual BSD/GPL"
#define DRIVER_VERSION		"1.0"
#define DRIVER_AUTHOR		"Dave Jiang <dave.jiang@intel.com>"

#define PERF_LINK_DOWN_TIMEOUT	10
#define PERF_VERSION		0xffff0001
#define MAX_THREADS		32
#define MAX_TEST_SIZE		SZ_1M
#define MAX_SRCS		32
#define DMA_OUT_RESOURCE_TO	50
#define DMA_RETRIES		20
#define SZ_4G			(1ULL << 32)
#define MAX_SEG_ORDER		20 /* no larger than 1M for kmalloc buffer */

MODULE_LICENSE(DRIVER_LICENSE);
MODULE_VERSION(DRIVER_VERSION);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);

static struct dentry *perf_debugfs_dir;

static unsigned int seg_order = 19; /* 512K */
module_param(seg_order, uint, 0644);
MODULE_PARM_DESC(seg_order, "size order [2^n] of buffer segment for testing");

static unsigned int run_order = 32; /* 4G */
module_param(run_order, uint, 0644);
MODULE_PARM_DESC(run_order, "size order [2^n] of total data to transfer");

static bool use_dma; /* default to false */
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use a DMA engine instead of the CPU to move data");

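/*
 * Typical usage (assuming debugfs is mounted at /sys/kernel/debug; the
 * per-device directory name is the PCI device name):
 *
 *   echo 8 > /sys/kernel/debug/ntb_perf/<pci-device>/threads
 *   echo 1 > /sys/kernel/debug/ntb_perf/<pci-device>/run
 *   dmesg | tail        # per-thread throughput is printed via pr_info()
 *
 * Writing "run" again while a test is active stops the worker threads.
 */
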
struct perf_mw {
	phys_addr_t phys_addr;
	resource_size_t phys_size;
	resource_size_t xlat_align;
	resource_size_t xlat_align_size;
	void __iomem *vbase;
	size_t xlat_size;
	size_t buf_size;
	void *virt_addr;
	dma_addr_t dma_addr;
};

struct perf_ctx;

struct pthr_ctx {
	struct task_struct *thread;
	struct perf_ctx *perf;
	atomic_t dma_sync;
	struct dma_chan *dma_chan;
	int dma_prep_err;
	int src_idx;
	void *srcs[MAX_SRCS];
};

struct perf_ctx {
	struct ntb_dev *ntb;
	spinlock_t db_lock;
	struct perf_mw mw;
	bool link_is_up;
	struct work_struct link_cleanup;
	struct delayed_work link_work;
	struct dentry *debugfs_node_dir;
	struct dentry *debugfs_run;
	struct dentry *debugfs_threads;
	u8 perf_threads;
	bool run;
	struct pthr_ctx pthr_ctx[MAX_THREADS];
	atomic_t tsync;
};

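/*
 * Scratchpad register layout used for the peer handshake: each side
 * publishes its driver version and memory window size; SPAD_MSG and
 * SPAD_ACK are spare registers for messaging.
 */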
enum {
	VERSION = 0,
	MW_SZ_HIGH,
	MW_SZ_LOW,
	SPAD_MSG,
	SPAD_ACK,
	MAX_SPAD
};

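/*
 * Link-state callback: when the NTB link comes up, kick off the delayed
 * handshake work; when it goes down, schedule cleanup.
 */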
static void perf_link_event(void *ctx)
{
	struct perf_ctx *perf = ctx;

	if (ntb_link_is_up(perf->ntb, NULL, NULL) == 1)
		schedule_delayed_work(&perf->link_work, 2*HZ);
	else
		schedule_work(&perf->link_cleanup);
}

static void perf_db_event(void *ctx, int vec)
{
	struct perf_ctx *perf = ctx;
	u64 db_bits, db_mask;

	db_mask = ntb_db_vector_mask(perf->ntb, vec);
	db_bits = ntb_db_read(perf->ntb);

	dev_dbg(&perf->ntb->dev, "doorbell vec %d mask %#llx bits %#llx\n",
		vec, db_mask, db_bits);
}

static const struct ntb_ctx_ops perf_ops = {
	.link_event = perf_link_event,
	.db_event = perf_db_event,
};

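/* DMA completion callback: account one finished descriptor. */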
static void perf_copy_callback(void *data)
{
	struct pthr_ctx *pctx = data;

	atomic_dec(&pctx->dma_sync);
}

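/*
 * Copy one buffer segment from local memory (src) into the peer's memory
 * window (dst).  Without use_dma this is a synchronous memcpy_toio(); with
 * use_dma a memcpy descriptor is queued on the channel and completions are
 * counted asynchronously via pctx->dma_sync.  Returns the number of bytes
 * queued, 0 if no descriptor could be obtained, or a negative errno.
 */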
static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
			 char *src, size_t size)
{
	struct perf_ctx *perf = pctx->perf;
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = pctx->dma_chan;
	struct dma_device *device;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	size_t src_off, dst_off;
	struct perf_mw *mw = &perf->mw;
	void __iomem *vbase;
	void __iomem *dst_vaddr;
	dma_addr_t dst_phys;
	int retries = 0;

	if (!use_dma) {
		memcpy_toio(dst, src, size);
		return size;
	}

	if (!chan) {
		dev_err(&perf->ntb->dev, "DMA engine does not exist\n");
		return -EINVAL;
	}

	device = chan->device;
	src_off = (uintptr_t)src & ~PAGE_MASK;
	dst_off = (uintptr_t __force)dst & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, src_off, dst_off, size))
		return -ENODEV;

	vbase = mw->vbase;
	dst_vaddr = dst;
	dst_phys = mw->phys_addr + (dst_vaddr - vbase);

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->len = size;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(src),
				      src_off, size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	do {
		txd = device->device_prep_dma_memcpy(chan, dst_phys,
						     unmap->addr[0],
						     size, DMA_PREP_INTERRUPT);
		if (!txd) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(DMA_OUT_RESOURCE_TO);
		}
	} while (!txd && (++retries < DMA_RETRIES));

	if (!txd) {
		pctx->dma_prep_err++;
		goto err_get_unmap;
	}

	txd->callback = perf_copy_callback;
	txd->callback_param = pctx;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	atomic_inc(&pctx->dma_sync);
	dma_async_issue_pending(chan);

	return size;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
	return 0;
}

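/*
 * Stream `total` bytes through the memory window in `buf_size` chunks,
 * wrapping back to the start of the window every `win_size` bytes, then
 * report the measured throughput in MBytes/s via pr_info().
 */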
static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
			  u64 buf_size, u64 win_size, u64 total)
{
	int chunks, total_chunks, i;
	int copied_chunks = 0;
	u64 copied = 0, result;
	char __iomem *tmp = dst;
	u64 perf, diff_us;
	ktime_t kstart, kstop, kdiff;

	chunks = div64_u64(win_size, buf_size);
	total_chunks = div64_u64(total, buf_size);
	kstart = ktime_get();

	for (i = 0; i < total_chunks; i++) {
		result = perf_copy(pctx, tmp, src, buf_size);
		copied += result;
		copied_chunks++;
		if (copied_chunks == chunks) {
			tmp = dst;
			copied_chunks = 0;
		} else {
			tmp += buf_size;
		}

		/* Yield every 4GB on CPU copies to avoid a soft lockup. */
		if (((copied % SZ_4G) == 0) && !use_dma) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);
		}
	}

	if (use_dma) {
		pr_info("%s: All DMA descriptors submitted\n", current->comm);
		while (atomic_read(&pctx->dma_sync) != 0)
			msleep(20);
	}

	kstop = ktime_get();
	kdiff = ktime_sub(kstop, kstart);
	diff_us = ktime_to_us(kdiff);

	pr_info("%s: copied %llu bytes\n", current->comm, copied);
	pr_info("%s: lasted %llu usecs\n", current->comm, diff_us);

	perf = div64_u64(copied, diff_us);

	pr_info("%s: MBytes/s: %llu\n", current->comm, perf);

	return 0;
}

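/* Filter for dma_request_channel(): match channels on the given NUMA node. */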
static bool perf_dma_filter_fn(struct dma_chan *chan, void *node)
{
	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}

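/*
 * Per-thread worker: optionally acquires a DMA channel, allocates a pool
 * of source buffers on the local NUMA node, waits for all sibling threads
 * to reach the barrier on perf->tsync, then runs one timed transfer.
 */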
static int ntb_perf_thread(void *data)
{
	struct pthr_ctx *pctx = data;
	struct perf_ctx *perf = pctx->perf;
	struct pci_dev *pdev = perf->ntb->pdev;
	struct perf_mw *mw = &perf->mw;
	char __iomem *dst;
	u64 win_size, buf_size, total;
	void *src;
	int rc, node, i;
	struct dma_chan *dma_chan = NULL;

	pr_info("kthread %s starting...\n", current->comm);

	node = dev_to_node(&pdev->dev);

	if (use_dma && !pctx->dma_chan) {
		dma_cap_mask_t dma_mask;

		dma_cap_zero(dma_mask);
		dma_cap_set(DMA_MEMCPY, dma_mask);
		dma_chan = dma_request_channel(dma_mask, perf_dma_filter_fn,
					       (void *)(unsigned long)node);
		if (!dma_chan) {
			pr_warn("%s: cannot acquire DMA channel, quitting\n",
				current->comm);
			return -ENODEV;
		}
		pctx->dma_chan = dma_chan;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		pctx->srcs[i] = kmalloc_node(MAX_TEST_SIZE, GFP_KERNEL, node);
		if (!pctx->srcs[i]) {
			rc = -ENOMEM;
			goto err;
		}
	}

	win_size = mw->phys_size;
	buf_size = 1ULL << seg_order;
	total = 1ULL << run_order;

	if (buf_size > MAX_TEST_SIZE)
		buf_size = MAX_TEST_SIZE;

	dst = (char __iomem *)mw->vbase;

	atomic_inc(&perf->tsync);
	while (atomic_read(&perf->tsync) != perf->perf_threads)
		schedule();

	src = pctx->srcs[pctx->src_idx];
	pctx->src_idx = (pctx->src_idx + 1) & (MAX_SRCS - 1);

	rc = perf_move_data(pctx, dst, src, buf_size, win_size, total);

	atomic_dec(&perf->tsync);

	if (rc < 0) {
		pr_err("%s: failed\n", current->comm);
		rc = -ENXIO;
		goto err;
	}

	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	return 0;

err:
	for (i = 0; i < MAX_SRCS; i++) {
		kfree(pctx->srcs[i]);
		pctx->srcs[i] = NULL;
	}

	if (dma_chan) {
		dma_release_channel(dma_chan);
		pctx->dma_chan = NULL;
	}

	return rc;
}

static void perf_free_mw(struct perf_ctx *perf)
{
	struct perf_mw *mw = &perf->mw;
	struct pci_dev *pdev = perf->ntb->pdev;

	if (!mw->virt_addr)
		return;

	ntb_mw_clear_trans(perf->ntb, 0);
	dma_free_coherent(&pdev->dev, mw->buf_size,
			  mw->virt_addr, mw->dma_addr);
	mw->xlat_size = 0;
	mw->buf_size = 0;
	mw->virt_addr = NULL;
}

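/*
 * Allocate a DMA-coherent buffer sized for the peer's memory window and
 * program it as the inbound translation for memory window 0.
 */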
static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
{
	struct perf_mw *mw = &perf->mw;
	size_t xlat_size, buf_size;
	int rc;

	if (!size)
		return -EINVAL;

	xlat_size = round_up(size, mw->xlat_align_size);
	buf_size = round_up(size, mw->xlat_align);

	if (mw->xlat_size == xlat_size)
		return 0;

	if (mw->buf_size)
		perf_free_mw(perf);

	mw->xlat_size = xlat_size;
	mw->buf_size = buf_size;

	mw->virt_addr = dma_alloc_coherent(&perf->ntb->pdev->dev, buf_size,
					   &mw->dma_addr, GFP_KERNEL);
	if (!mw->virt_addr) {
		mw->xlat_size = 0;
		mw->buf_size = 0;
		return -ENOMEM;
	}

	rc = ntb_mw_set_trans(perf->ntb, 0, mw->dma_addr, mw->xlat_size);
	if (rc) {
		dev_err(&perf->ntb->dev, "Unable to set mw0 translation\n");
		perf_free_mw(perf);
		return -EIO;
	}

	return 0;
}

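/*
 * Link-up handshake: publish our version and memory window size to the
 * peer's scratchpads, read back what the peer published, and size our
 * buffer to match.  Retries while the link stays up but the peer has not
 * yet written a matching version.
 */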
static void perf_link_work(struct work_struct *work)
{
	struct perf_ctx *perf =
		container_of(work, struct perf_ctx, link_work.work);
	struct ntb_dev *ndev = perf->ntb;
	struct pci_dev *pdev = ndev->pdev;
	u32 val;
	u64 size;
	int rc;

	dev_dbg(&perf->ntb->pdev->dev, "%s called\n", __func__);

	size = perf->mw.phys_size;
	ntb_peer_spad_write(ndev, MW_SZ_HIGH, upper_32_bits(size));
	ntb_peer_spad_write(ndev, MW_SZ_LOW, lower_32_bits(size));
	ntb_peer_spad_write(ndev, VERSION, PERF_VERSION);

	/* now read what peer wrote */
	val = ntb_spad_read(ndev, VERSION);
	if (val != PERF_VERSION) {
		dev_dbg(&pdev->dev, "Remote version = %#x\n", val);
		goto out;
	}

	val = ntb_spad_read(ndev, MW_SZ_HIGH);
	size = (u64)val << 32;

	val = ntb_spad_read(ndev, MW_SZ_LOW);
	size |= val;

	dev_dbg(&pdev->dev, "Remote MW size = %#llx\n", size);

	rc = perf_set_mw(perf, size);
	if (rc)
		goto out1;

	perf->link_is_up = true;

	return;

out1:
	perf_free_mw(perf);

out:
	if (ntb_link_is_up(ndev, NULL, NULL) == 1)
		schedule_delayed_work(&perf->link_work,
				      msecs_to_jiffies(PERF_LINK_DOWN_TIMEOUT));
}

static void perf_link_cleanup(struct work_struct *work)
{
	struct perf_ctx *perf = container_of(work,
					     struct perf_ctx,
					     link_cleanup);

	dev_dbg(&perf->ntb->pdev->dev, "%s called\n", __func__);

	if (!perf->link_is_up)
		cancel_delayed_work_sync(&perf->link_work);
}

static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf)
{
	struct perf_mw *mw;
	int rc;

	mw = &perf->mw;

	rc = ntb_mw_get_range(ntb, 0, &mw->phys_addr, &mw->phys_size,
			      &mw->xlat_align, &mw->xlat_align_size);
	if (rc)
		return rc;

	perf->mw.vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
	if (!mw->vbase)
		return -ENOMEM;

	return 0;
}

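/* debugfs "run" read: report whether a test is currently running. */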
static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct perf_ctx *perf = filp->private_data;
	char *buf;
	ssize_t ret, out_offset;

	if (!perf)
		return 0;

	buf = kmalloc(64, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	out_offset = snprintf(buf, 64, "%d\n", perf->run);
	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);

	return ret;
}

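/*
 * debugfs "run" write: toggle the test.  If threads are running they are
 * stopped; otherwise the parameters are clamped to sane values and one
 * kthread per requested thread is launched on the device's NUMA node.
 */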
static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
				 size_t count, loff_t *offp)
{
	struct perf_ctx *perf = filp->private_data;
	int node, i;

	if (!perf->link_is_up)
		return 0;

	if (perf->perf_threads == 0)
		return 0;

	if (atomic_read(&perf->tsync) == 0)
		perf->run = false;

	if (perf->run) {
		/* let's stop the threads */
		perf->run = false;
		for (i = 0; i < MAX_THREADS; i++) {
			if (perf->pthr_ctx[i].thread) {
				kthread_stop(perf->pthr_ctx[i].thread);
				perf->pthr_ctx[i].thread = NULL;
			} else {
				break;
			}
		}
	} else {
		perf->run = true;

		if (perf->perf_threads > MAX_THREADS) {
			perf->perf_threads = MAX_THREADS;
			pr_info("Reset total threads to: %u\n", MAX_THREADS);
		}

		/* no greater than 1M */
		if (seg_order > MAX_SEG_ORDER) {
			seg_order = MAX_SEG_ORDER;
			pr_info("Fix seg_order to %u\n", seg_order);
		}

		if (run_order < seg_order) {
			run_order = seg_order;
			pr_info("Fix run_order to %u\n", run_order);
		}

		node = dev_to_node(&perf->ntb->pdev->dev);
		/* launch kernel thread */
		for (i = 0; i < perf->perf_threads; i++) {
			struct pthr_ctx *pctx;

			pctx = &perf->pthr_ctx[i];
			atomic_set(&pctx->dma_sync, 0);
			pctx->perf = perf;
			pctx->thread =
				kthread_create_on_node(ntb_perf_thread,
						       (void *)pctx,
						       node, "ntb_perf %d", i);
			if (IS_ERR(pctx->thread)) {
				/* kthread_create_on_node() returns an
				 * ERR_PTR on failure, never NULL.
				 */
				pctx->thread = NULL;
				perf->run = false;
				/* stop any threads already started */
				for (i = 0; i < MAX_THREADS; i++) {
					if (perf->pthr_ctx[i].thread) {
						kthread_stop(perf->pthr_ctx[i].thread);
						perf->pthr_ctx[i].thread = NULL;
					}
				}
			} else {
				wake_up_process(pctx->thread);
			}

			if (!perf->run)
				return -ENXIO;
		}
	}

	return count;
}

static const struct file_operations ntb_perf_debugfs_run = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = debugfs_run_read,
	.write = debugfs_run_write,
};

static int perf_debugfs_setup(struct perf_ctx *perf)
{
	struct pci_dev *pdev = perf->ntb->pdev;

	if (!debugfs_initialized())
		return -ENODEV;

	if (!perf_debugfs_dir) {
		perf_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
		if (!perf_debugfs_dir)
			return -ENODEV;
	}

	perf->debugfs_node_dir = debugfs_create_dir(pci_name(pdev),
						    perf_debugfs_dir);
	if (!perf->debugfs_node_dir)
		return -ENODEV;

	perf->debugfs_run = debugfs_create_file("run", S_IRUSR | S_IWUSR,
						perf->debugfs_node_dir, perf,
						&ntb_perf_debugfs_run);
	if (!perf->debugfs_run)
		return -ENODEV;

	perf->debugfs_threads = debugfs_create_u8("threads", S_IRUSR | S_IWUSR,
						  perf->debugfs_node_dir,
						  &perf->perf_threads);
	if (!perf->debugfs_threads)
		return -ENODEV;

	return 0;
}

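/*
 * Device probe: allocate the per-device context on the local NUMA node,
 * map memory window 0, register the NTB callbacks, bring the link up and
 * expose the debugfs controls.
 */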
static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct pci_dev *pdev = ntb->pdev;
	struct perf_ctx *perf;
	int node;
	int rc = 0;

	node = dev_to_node(&pdev->dev);

	perf = kzalloc_node(sizeof(*perf), GFP_KERNEL, node);
	if (!perf) {
		rc = -ENOMEM;
		goto err_perf;
	}

	perf->ntb = ntb;
	perf->perf_threads = 1;
	atomic_set(&perf->tsync, 0);
	perf->run = false;
	spin_lock_init(&perf->db_lock);
	INIT_DELAYED_WORK(&perf->link_work, perf_link_work);
	INIT_WORK(&perf->link_cleanup, perf_link_cleanup);

	rc = perf_setup_mw(ntb, perf);
	if (rc)
		goto err_ctx;

	rc = ntb_set_ctx(ntb, perf, &perf_ops);
	if (rc)
		goto err_ctx;

	perf->link_is_up = false;
	ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
	ntb_link_event(ntb);

	rc = perf_debugfs_setup(perf);
	if (rc)
		goto err_ctx;

	return 0;

err_ctx:
	cancel_delayed_work_sync(&perf->link_work);
	cancel_work_sync(&perf->link_cleanup);
	kfree(perf);
err_perf:
	return rc;
}

static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb)
{
	struct perf_ctx *perf = ntb->ctx;
	int i;

	dev_dbg(&perf->ntb->dev, "%s called\n", __func__);

	cancel_delayed_work_sync(&perf->link_work);
	cancel_work_sync(&perf->link_cleanup);

	ntb_clear_ctx(ntb);
	ntb_link_disable(ntb);

	debugfs_remove_recursive(perf_debugfs_dir);
	perf_debugfs_dir = NULL;

	if (use_dma) {
		for (i = 0; i < MAX_THREADS; i++) {
			struct pthr_ctx *pctx = &perf->pthr_ctx[i];

			if (pctx->dma_chan)
				dma_release_channel(pctx->dma_chan);
		}
	}

	perf_free_mw(perf);
	if (perf->mw.vbase)
		iounmap(perf->mw.vbase);

	kfree(perf);
}

static struct ntb_client perf_client = {
	.ops = {
		.probe = perf_probe,
		.remove = perf_remove,
	},
};
module_ntb_client(perf_client);