/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Transport Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "linux/ntb.h"
#include "linux/ntb_transport.h"

#define NTB_TRANSPORT_VERSION   4
#define NTB_TRANSPORT_VER       "4"
#define NTB_TRANSPORT_NAME      "ntb_transport"
#define NTB_TRANSPORT_DESC      "Software Queue-Pair Transport over NTB"

MODULE_DESCRIPTION(NTB_TRANSPORT_DESC);
MODULE_VERSION(NTB_TRANSPORT_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

static unsigned long max_mw_size;
module_param(max_mw_size, ulong, 0644);
MODULE_PARM_DESC(max_mw_size, "Limit size of large memory windows");
static unsigned int transport_mtu = 0x10000;
module_param(transport_mtu, uint, 0644);
MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");

static unsigned char max_num_clients;
module_param(max_num_clients, byte, 0644);
MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");

static unsigned int copy_bytes = 1024;
module_param(copy_bytes, uint, 0644);
MODULE_PARM_DESC(copy_bytes, "Threshold under which NTB will use the CPU to copy instead of DMA");

static bool use_dma;
module_param(use_dma, bool, 0644);
MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy");

static struct dentry *nt_debugfs_dir;

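/*
 * One descriptor per in-flight packet.  An entry cycles through the
 * queue pair's free/pending/post lists as buffers are enqueued, filled,
 * and completed back to the client.
 */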
struct ntb_queue_entry {
        /* ntb_queue list reference */
        struct list_head entry;
        /* pointers to data to be transferred */
        void *cb_data;
        void *buf;
        unsigned int len;
        unsigned int flags;
        int retries;
        int errors;
        unsigned int tx_index;

        struct ntb_transport_qp *qp;
        union {
                struct ntb_payload_header __iomem *tx_hdr;
                struct ntb_payload_header *rx_hdr;
        };
        unsigned int index;
};

struct ntb_rx_info {
        unsigned int entry;
};

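/*
 * A queue pair: the transmit ring lives in the peer's memory window
 * (tx_mw), the receive ring in local memory (rx_buff).  Consumed ring
 * slots are reported back through the ntb_rx_info structure placed at
 * the end of each ring.
 */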
struct ntb_transport_qp {
        struct ntb_transport_ctx *transport;
        struct ntb_dev *ndev;
        void *cb_data;
        struct dma_chan *tx_dma_chan;
        struct dma_chan *rx_dma_chan;

        bool client_ready;
        bool link_is_up;
        bool active;

        u8 qp_num;      /* Only 64 QP's are allowed.  0-63 */
        u64 qp_bit;

        struct ntb_rx_info __iomem *rx_info;
        struct ntb_rx_info *remote_rx_info;

        void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
                           void *data, int len);
        struct list_head tx_free_q;
        spinlock_t ntb_tx_free_q_lock;
        void __iomem *tx_mw;
        dma_addr_t tx_mw_phys;
        unsigned int tx_index;
        unsigned int tx_max_entry;
        unsigned int tx_max_frame;

        void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
                           void *data, int len);
        struct list_head rx_post_q;
        struct list_head rx_pend_q;
        struct list_head rx_free_q;
        /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
        spinlock_t ntb_rx_q_lock;
        void *rx_buff;
        unsigned int rx_index;
        unsigned int rx_max_entry;
        unsigned int rx_max_frame;
        unsigned int rx_alloc_entry;
        dma_cookie_t last_cookie;
        struct tasklet_struct rxc_db_work;

        void (*event_handler)(void *data, int status);
        struct delayed_work link_work;
        struct work_struct link_cleanup;

        struct dentry *debugfs_dir;
        struct dentry *debugfs_stats;

        /* Stats */
        u64 rx_bytes;
        u64 rx_pkts;
        u64 rx_ring_empty;
        u64 rx_err_no_buf;
        u64 rx_err_oflow;
        u64 rx_err_ver;
        u64 rx_memcpy;
        u64 rx_async;
        u64 dma_rx_prep_err;
        u64 tx_bytes;
        u64 tx_pkts;
        u64 tx_ring_full;
        u64 tx_err_no_buf;
        u64 tx_memcpy;
        u64 tx_async;
        u64 dma_tx_prep_err;
};

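/*
 * Local view of one memory window: the peer-visible BAR mapping
 * (phys_addr/vbase) and the DMA-coherent buffer (virt_addr/dma_addr)
 * that inbound writes are translated onto.
 */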
struct ntb_transport_mw {
        phys_addr_t phys_addr;
        resource_size_t phys_size;
        resource_size_t xlat_align;
        resource_size_t xlat_align_size;
        void __iomem *vbase;
        size_t xlat_size;
        size_t buff_size;
        void *virt_addr;
        dma_addr_t dma_addr;
};

struct ntb_transport_client_dev {
        struct list_head entry;
        struct ntb_transport_ctx *nt;
        struct device dev;
};

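/*
 * Per-device transport context: the memory-window and queue-pair
 * vectors plus the link state shared by every queue pair on this NTB
 * device.
 */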
struct ntb_transport_ctx {
        struct list_head entry;
        struct list_head client_devs;

        struct ntb_dev *ndev;

        struct ntb_transport_mw *mw_vec;
        struct ntb_transport_qp *qp_vec;
        unsigned int mw_count;
        unsigned int qp_count;
        u64 qp_bitmap;
        u64 qp_bitmap_free;

        bool link_is_up;
        struct delayed_work link_work;
        struct work_struct link_cleanup;

        struct dentry *debugfs_node_dir;
};

enum {
        DESC_DONE_FLAG = BIT(0),
        LINK_DOWN_FLAG = BIT(1),
};

struct ntb_payload_header {
        unsigned int ver;
        unsigned int len;
        unsigned int flags;
};

enum {
        VERSION = 0,
        QP_LINKS,
        NUM_QPS,
        NUM_MWS,
        MW0_SZ_HIGH,
        MW0_SZ_LOW,
        MW1_SZ_HIGH,
        MW1_SZ_LOW,
        MAX_SPAD,
};

#define dev_client_dev(__dev) \
        container_of((__dev), struct ntb_transport_client_dev, dev)

#define drv_client(__drv) \
        container_of((__drv), struct ntb_transport_client, driver)

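/* Queue pairs are assigned to memory windows round-robin */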
#define QP_TO_MW(nt, qp)        ((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES  100
#define NTB_LINK_DOWN_TIMEOUT   10
#define DMA_RETRIES             20
#define DMA_OUT_RESOURCE_TO     50

static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
static struct ntb_client ntb_transport_client;
static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
                               struct ntb_queue_entry *entry);
static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset);

static int ntb_transport_bus_match(struct device *dev,
                                   struct device_driver *drv)
{
        return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
}

static int ntb_transport_bus_probe(struct device *dev)
{
        const struct ntb_transport_client *client;
        int rc = -EINVAL;

        get_device(dev);

        client = drv_client(dev->driver);
        rc = client->probe(dev);
        if (rc)
                put_device(dev);

        return rc;
}

static int ntb_transport_bus_remove(struct device *dev)
{
        const struct ntb_transport_client *client;

        client = drv_client(dev->driver);
        client->remove(dev);

        put_device(dev);

        return 0;
}

static struct bus_type ntb_transport_bus = {
        .name = "ntb_transport",
        .match = ntb_transport_bus_match,
        .probe = ntb_transport_bus_probe,
        .remove = ntb_transport_bus_remove,
};

static LIST_HEAD(ntb_transport_list);

static int ntb_bus_init(struct ntb_transport_ctx *nt)
{
        list_add_tail(&nt->entry, &ntb_transport_list);
        return 0;
}

static void ntb_bus_remove(struct ntb_transport_ctx *nt)
{
        struct ntb_transport_client_dev *client_dev, *cd;

        list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
                dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
                        dev_name(&client_dev->dev));
                list_del(&client_dev->entry);
                device_unregister(&client_dev->dev);
        }

        list_del(&nt->entry);
}

static void ntb_transport_client_release(struct device *dev)
{
        struct ntb_transport_client_dev *client_dev;

        client_dev = dev_client_dev(dev);
        kfree(client_dev);
}

/**
 * ntb_transport_unregister_client_dev - Unregister NTB client device
 * @device_name: Name of NTB client device
 *
 * Unregister an NTB client device with the NTB transport layer
 */
void ntb_transport_unregister_client_dev(char *device_name)
{
        struct ntb_transport_client_dev *client, *cd;
        struct ntb_transport_ctx *nt;

        list_for_each_entry(nt, &ntb_transport_list, entry)
                list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
                        if (!strncmp(dev_name(&client->dev), device_name,
                                     strlen(device_name))) {
                                list_del(&client->entry);
                                device_unregister(&client->dev);
                        }
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client_dev);

/**
 * ntb_transport_register_client_dev - Register NTB client device
 * @device_name: Name of NTB client device
 *
 * Register an NTB client device with the NTB transport layer
 */
int ntb_transport_register_client_dev(char *device_name)
{
        struct ntb_transport_client_dev *client_dev;
        struct ntb_transport_ctx *nt;
        int node;
        int rc, i = 0;

        if (list_empty(&ntb_transport_list))
                return -ENODEV;

        list_for_each_entry(nt, &ntb_transport_list, entry) {
                struct device *dev;

                node = dev_to_node(&nt->ndev->dev);

                client_dev = kzalloc_node(sizeof(*client_dev),
                                          GFP_KERNEL, node);
                if (!client_dev) {
                        rc = -ENOMEM;
                        goto err;
                }

                dev = &client_dev->dev;

                /* setup and register client devices */
                dev_set_name(dev, "%s%d", device_name, i);
                dev->bus = &ntb_transport_bus;
                dev->release = ntb_transport_client_release;
                dev->parent = &nt->ndev->dev;

                rc = device_register(dev);
                if (rc) {
                        kfree(client_dev);
                        goto err;
                }

                list_add_tail(&client_dev->entry, &nt->client_devs);
                i++;
        }

        return 0;

err:
        ntb_transport_unregister_client_dev(device_name);

        return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client_dev);

/**
 * ntb_transport_register_client - Register NTB client driver
 * @drv: NTB client driver to be registered
 *
 * Register an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_register_client(struct ntb_transport_client *drv)
{
        drv->driver.bus = &ntb_transport_bus;

        if (list_empty(&ntb_transport_list))
                return -ENODEV;

        return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_register_client);

/**
 * ntb_transport_unregister_client - Unregister NTB client driver
 * @drv: NTB client driver to be unregistered
 *
 * Unregister an NTB client driver with the NTB transport layer
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
void ntb_transport_unregister_client(struct ntb_transport_client *drv)
{
        driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(ntb_transport_unregister_client);

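/* Dump the per-qp counters and ring state to the debugfs "stats" file */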
static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
                            loff_t *offp)
{
        struct ntb_transport_qp *qp;
        char *buf;
        ssize_t ret, out_offset, out_count;

        qp = filp->private_data;

        if (!qp || !qp->link_is_up)
                return 0;

        out_count = 1000;

        buf = kmalloc(out_count, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        out_offset = 0;
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "\nNTB QP stats:\n\n");
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_bytes - \t%llu\n", qp->rx_bytes);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_pkts - \t%llu\n", qp->rx_pkts);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_memcpy - \t%llu\n", qp->rx_memcpy);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_async - \t%llu\n", qp->rx_async);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_ring_empty - %llu\n", qp->rx_ring_empty);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_err_ver - \t%llu\n", qp->rx_err_ver);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_buff - \t0x%p\n", qp->rx_buff);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_index - \t%u\n", qp->rx_index);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_max_entry - \t%u\n", qp->rx_max_entry);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "rx_alloc_entry - \t%u\n\n", qp->rx_alloc_entry);

        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_bytes - \t%llu\n", qp->tx_bytes);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_pkts - \t%llu\n", qp->tx_pkts);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_memcpy - \t%llu\n", qp->tx_memcpy);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_async - \t%llu\n", qp->tx_async);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_ring_full - \t%llu\n", qp->tx_ring_full);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_err_no_buf - %llu\n", qp->tx_err_no_buf);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_mw - \t0x%p\n", qp->tx_mw);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_index (H) - \t%u\n", qp->tx_index);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "RRI (T) - \t%u\n",
                               qp->remote_rx_info->entry);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "tx_max_entry - \t%u\n", qp->tx_max_entry);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "free tx - \t%u\n",
                               ntb_transport_tx_free_entry(qp));
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "DMA tx prep err - \t%llu\n",
                               qp->dma_tx_prep_err);
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "DMA rx prep err - \t%llu\n",
                               qp->dma_rx_prep_err);

        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "\n");
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "Using TX DMA - \t%s\n",
                               qp->tx_dma_chan ? "Yes" : "No");
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "Using RX DMA - \t%s\n",
                               qp->rx_dma_chan ? "Yes" : "No");
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "QP Link - \t%s\n",
                               qp->link_is_up ? "Up" : "Down");
        out_offset += snprintf(buf + out_offset, out_count - out_offset,
                               "\n");

        if (out_offset > out_count)
                out_offset = out_count;

        ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
        kfree(buf);
        return ret;
}

static const struct file_operations ntb_qp_debugfs_stats = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = debugfs_read,
};

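/*
 * Spinlock-protected helpers for adding, removing, and moving
 * ntb_queue_entry descriptors between the per-qp lists.
 */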
static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
                         struct list_head *list)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_add_tail(entry, list);
        spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
                                           struct list_head *list)
{
        struct ntb_queue_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        if (list_empty(list)) {
                entry = NULL;
                goto out;
        }
        entry = list_first_entry(list, struct ntb_queue_entry, entry);
        list_del(&entry->entry);

out:
        spin_unlock_irqrestore(lock, flags);

        return entry;
}

static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
                                           struct list_head *list,
                                           struct list_head *to_list)
{
        struct ntb_queue_entry *entry;
        unsigned long flags;

        spin_lock_irqsave(lock, flags);

        if (list_empty(list)) {
                entry = NULL;
        } else {
                entry = list_first_entry(list, struct ntb_queue_entry, entry);
                list_move_tail(&entry->entry, to_list);
        }

        spin_unlock_irqrestore(lock, flags);

        return entry;
}

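/*
 * Carve this queue pair's receive ring out of its memory window, sized
 * so that at least two frames plus the trailing ntb_rx_info fit.
 * Called on every link-up, since the peer may advertise a new window
 * size.
 */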
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
                                     unsigned int qp_num)
{
        struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
        struct ntb_transport_mw *mw;
        struct ntb_dev *ndev = nt->ndev;
        struct ntb_queue_entry *entry;
        unsigned int rx_size, num_qps_mw;
        unsigned int mw_num, mw_count, qp_count;
        unsigned int i;
        int node;

        mw_count = nt->mw_count;
        qp_count = nt->qp_count;

        mw_num = QP_TO_MW(nt, qp_num);
        mw = &nt->mw_vec[mw_num];

        if (!mw->virt_addr)
                return -ENOMEM;

        if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
                num_qps_mw = qp_count / mw_count + 1;
        else
                num_qps_mw = qp_count / mw_count;

        rx_size = (unsigned int)mw->xlat_size / num_qps_mw;
        qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
        rx_size -= sizeof(struct ntb_rx_info);

        qp->remote_rx_info = qp->rx_buff + rx_size;

        /* Due to housekeeping, there must be at least 2 buffs */
        qp->rx_max_frame = min(transport_mtu, rx_size / 2);
        qp->rx_max_entry = rx_size / qp->rx_max_frame;
        qp->rx_index = 0;

        /*
         * Checking to see if we have more entries than the default.
         * We should add additional entries if that is the case so we
         * can be in sync with the transport frames.
         */
        node = dev_to_node(&ndev->dev);
        for (i = qp->rx_alloc_entry; i < qp->rx_max_entry; i++) {
                entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
                if (!entry)
                        return -ENOMEM;

                entry->qp = qp;
                ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
                             &qp->rx_free_q);
                qp->rx_alloc_entry++;
        }

        qp->remote_rx_info->entry = qp->rx_max_entry - 1;

        /* setup the hdr offsets with 0's */
        for (i = 0; i < qp->rx_max_entry; i++) {
                void *offset = (qp->rx_buff + qp->rx_max_frame * (i + 1) -
                                sizeof(struct ntb_payload_header));
                memset(offset, 0, sizeof(struct ntb_payload_header));
        }

        qp->rx_pkts = 0;
        qp->tx_pkts = 0;
        qp->tx_index = 0;

        return 0;
}

static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
        struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
        struct pci_dev *pdev = nt->ndev->pdev;

        if (!mw->virt_addr)
                return;

        ntb_mw_clear_trans(nt->ndev, num_mw);
        dma_free_coherent(&pdev->dev, mw->buff_size,
                          mw->virt_addr, mw->dma_addr);
        mw->xlat_size = 0;
        mw->buff_size = 0;
        mw->virt_addr = NULL;
}

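/*
 * Allocate the local buffer backing a memory window and program the NTB
 * translation so that peer writes to the window land in it.  The buffer
 * must satisfy the hardware's size and address alignment, hence the
 * round_up() and IS_ALIGNED() checks.
 */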
static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
                      resource_size_t size)
{
        struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
        struct pci_dev *pdev = nt->ndev->pdev;
        size_t xlat_size, buff_size;
        int rc;

        if (!size)
                return -EINVAL;

        xlat_size = round_up(size, mw->xlat_align_size);
        buff_size = round_up(size, mw->xlat_align);

        /* No need to re-setup */
        if (mw->xlat_size == xlat_size)
                return 0;

        if (mw->buff_size)
                ntb_free_mw(nt, num_mw);

        /* Alloc memory for receiving data.  Must be aligned */
        mw->xlat_size = xlat_size;
        mw->buff_size = buff_size;

        mw->virt_addr = dma_alloc_coherent(&pdev->dev, buff_size,
                                           &mw->dma_addr, GFP_KERNEL);
        if (!mw->virt_addr) {
                mw->xlat_size = 0;
                mw->buff_size = 0;
                dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
                        buff_size);
                return -ENOMEM;
        }

        /*
         * we must ensure that the memory address allocated is BAR size
         * aligned in order for the XLAT register to take the value. This
         * is a requirement of the hardware. It is recommended to setup CMA
         * for BAR sizes equal or greater than 4MB.
         */
        if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) {
                dev_err(&pdev->dev, "DMA memory %pad is not aligned\n",
                        &mw->dma_addr);
                ntb_free_mw(nt, num_mw);
                return -ENOMEM;
        }

        /* Notify HW the memory location of the receive buffer */
        rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size);
        if (rc) {
                dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw);
                ntb_free_mw(nt, num_mw);
                return -EIO;
        }

        return 0;
}

static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
        qp->link_is_up = false;
        qp->active = false;

        qp->tx_index = 0;
        qp->rx_index = 0;
        qp->rx_bytes = 0;
        qp->rx_pkts = 0;
        qp->rx_ring_empty = 0;
        qp->rx_err_no_buf = 0;
        qp->rx_err_oflow = 0;
        qp->rx_err_ver = 0;
        qp->rx_memcpy = 0;
        qp->rx_async = 0;
        qp->tx_bytes = 0;
        qp->tx_pkts = 0;
        qp->tx_ring_full = 0;
        qp->tx_err_no_buf = 0;
        qp->tx_memcpy = 0;
        qp->tx_async = 0;
        qp->dma_tx_prep_err = 0;
        qp->dma_rx_prep_err = 0;
}

static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
        struct ntb_transport_ctx *nt = qp->transport;
        struct pci_dev *pdev = nt->ndev->pdev;

        dev_info(&pdev->dev, "qp %d: Link Cleanup\n", qp->qp_num);

        cancel_delayed_work_sync(&qp->link_work);
        ntb_qp_link_down_reset(qp);

        if (qp->event_handler)
                qp->event_handler(qp->cb_data, qp->link_is_up);
}

static void ntb_qp_link_cleanup_work(struct work_struct *work)
{
        struct ntb_transport_qp *qp = container_of(work,
                                                   struct ntb_transport_qp,
                                                   link_cleanup);
        struct ntb_transport_ctx *nt = qp->transport;

        ntb_qp_link_cleanup(qp);

        if (nt->link_is_up)
                schedule_delayed_work(&qp->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

static void ntb_qp_link_down(struct ntb_transport_qp *qp)
{
        schedule_work(&qp->link_cleanup);
}

static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
        struct ntb_transport_qp *qp;
        u64 qp_bitmap_alloc;
        int i;

        qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

        /* Pass along the info to any clients */
        for (i = 0; i < nt->qp_count; i++)
                if (qp_bitmap_alloc & BIT_ULL(i)) {
                        qp = &nt->qp_vec[i];
                        ntb_qp_link_cleanup(qp);
                        cancel_work_sync(&qp->link_cleanup);
                        cancel_delayed_work_sync(&qp->link_work);
                }

        if (!nt->link_is_up)
                cancel_delayed_work_sync(&nt->link_work);

        /* The scratchpad registers keep the values if the remote side
         * goes down, blast them now to give them a sane value the next
         * time they are accessed
         */
        for (i = 0; i < MAX_SPAD; i++)
                ntb_spad_write(nt->ndev, i, 0);
}

static void ntb_transport_link_cleanup_work(struct work_struct *work)
{
        struct ntb_transport_ctx *nt =
                container_of(work, struct ntb_transport_ctx, link_cleanup);

        ntb_transport_link_cleanup(nt);
}

static void ntb_transport_event_callback(void *data)
{
        struct ntb_transport_ctx *nt = data;

        if (ntb_link_is_up(nt->ndev, NULL, NULL) == 1)
                schedule_delayed_work(&nt->link_work, 0);
        else
                schedule_work(&nt->link_cleanup);
}

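/*
 * Link-up handshake: publish the local memory window sizes, queue-pair
 * count, and transport version to the peer's scratchpads, then read
 * back the peer's values.  On any mismatch, retry for as long as the
 * hardware link stays up.
 */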
static void ntb_transport_link_work(struct work_struct *work)
{
        struct ntb_transport_ctx *nt =
                container_of(work, struct ntb_transport_ctx, link_work.work);
        struct ntb_dev *ndev = nt->ndev;
        struct pci_dev *pdev = ndev->pdev;
        resource_size_t size;
        u32 val;
        int rc = 0, i, spad;

        /* send the local info, in the opposite order of the way we read it */
        for (i = 0; i < nt->mw_count; i++) {
                size = nt->mw_vec[i].phys_size;

                if (max_mw_size && size > max_mw_size)
                        size = max_mw_size;

                spad = MW0_SZ_HIGH + (i * 2);
                ntb_peer_spad_write(ndev, spad, upper_32_bits(size));

                spad = MW0_SZ_LOW + (i * 2);
                ntb_peer_spad_write(ndev, spad, lower_32_bits(size));
        }

        ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count);

        ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count);

        ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION);

        /* Query the remote side for its info */
        val = ntb_spad_read(ndev, VERSION);
        dev_dbg(&pdev->dev, "Remote version = %d\n", val);
        if (val != NTB_TRANSPORT_VERSION)
                goto out;

        val = ntb_spad_read(ndev, NUM_QPS);
        dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
        if (val != nt->qp_count)
                goto out;

        val = ntb_spad_read(ndev, NUM_MWS);
        dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
        if (val != nt->mw_count)
                goto out;

        for (i = 0; i < nt->mw_count; i++) {
                u64 val64;

                val = ntb_spad_read(ndev, MW0_SZ_HIGH + (i * 2));
                val64 = (u64)val << 32;

                val = ntb_spad_read(ndev, MW0_SZ_LOW + (i * 2));
                val64 |= val;

                dev_dbg(&pdev->dev, "Remote MW%d size = %#llx\n", i, val64);

                rc = ntb_set_mw(nt, i, val64);
                if (rc)
                        goto out1;
        }

        nt->link_is_up = true;

        for (i = 0; i < nt->qp_count; i++) {
                struct ntb_transport_qp *qp = &nt->qp_vec[i];

                ntb_transport_setup_qp_mw(nt, i);

                if (qp->client_ready)
                        schedule_delayed_work(&qp->link_work, 0);
        }

        return;

out1:
        for (i = 0; i < nt->mw_count; i++)
                ntb_free_mw(nt, i);

        /* if there's an actual failure, we should just bail */
        if (rc < 0) {
                ntb_link_disable(ndev);
                return;
        }

out:
        if (ntb_link_is_up(ndev, NULL, NULL) == 1)
                schedule_delayed_work(&nt->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

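/*
 * Per-qp handshake: set our ready bit in the QP_LINKS scratchpad and
 * check whether the peer has already set the matching bit for this qp.
 */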
static void ntb_qp_link_work(struct work_struct *work)
{
        struct ntb_transport_qp *qp = container_of(work,
                                                   struct ntb_transport_qp,
                                                   link_work.work);
        struct pci_dev *pdev = qp->ndev->pdev;
        struct ntb_transport_ctx *nt = qp->transport;
        int val;

        WARN_ON(!nt->link_is_up);

        val = ntb_spad_read(nt->ndev, QP_LINKS);

        ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num));

        /* query remote spad for qp ready bits */
        ntb_peer_spad_read(nt->ndev, QP_LINKS);
        dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val);

        /* See if the remote side is up */
        if (val & BIT(qp->qp_num)) {
                dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
                qp->link_is_up = true;
                qp->active = true;

                if (qp->event_handler)
                        qp->event_handler(qp->cb_data, qp->link_is_up);

                if (qp->active)
                        tasklet_schedule(&qp->rxc_db_work);
        } else if (nt->link_is_up)
                schedule_delayed_work(&qp->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
}

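/*
 * One-time queue-pair setup at probe time: carve the transmit ring out
 * of the peer-visible memory window, create the debugfs stats file, and
 * initialize the qp's locks, lists, and work items.
 */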
static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
                                    unsigned int qp_num)
{
        struct ntb_transport_qp *qp;
        phys_addr_t mw_base;
        resource_size_t mw_size;
        unsigned int num_qps_mw, tx_size;
        unsigned int mw_num, mw_count, qp_count;
        u64 qp_offset;

        mw_count = nt->mw_count;
        qp_count = nt->qp_count;

        mw_num = QP_TO_MW(nt, qp_num);

        qp = &nt->qp_vec[qp_num];
        qp->qp_num = qp_num;
        qp->transport = nt;
        qp->ndev = nt->ndev;
        qp->client_ready = false;
        qp->event_handler = NULL;
        ntb_qp_link_down_reset(qp);

        if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
                num_qps_mw = qp_count / mw_count + 1;
        else
                num_qps_mw = qp_count / mw_count;

        mw_base = nt->mw_vec[mw_num].phys_addr;
        mw_size = nt->mw_vec[mw_num].phys_size;

        tx_size = (unsigned int)mw_size / num_qps_mw;
        qp_offset = tx_size * (qp_num / mw_count);

        qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
        if (!qp->tx_mw)
                return -EINVAL;

        qp->tx_mw_phys = mw_base + qp_offset;
        if (!qp->tx_mw_phys)
                return -EINVAL;

        tx_size -= sizeof(struct ntb_rx_info);
        qp->rx_info = qp->tx_mw + tx_size;

        /* Due to housekeeping, there must be at least 2 buffs */
        qp->tx_max_frame = min(transport_mtu, tx_size / 2);
        qp->tx_max_entry = tx_size / qp->tx_max_frame;

        if (nt->debugfs_node_dir) {
                char debugfs_name[4];

                snprintf(debugfs_name, 4, "qp%d", qp_num);
                qp->debugfs_dir = debugfs_create_dir(debugfs_name,
                                                     nt->debugfs_node_dir);

                qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
                                                        qp->debugfs_dir, qp,
                                                        &ntb_qp_debugfs_stats);
        } else {
                qp->debugfs_dir = NULL;
                qp->debugfs_stats = NULL;
        }

        INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
        INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);

        spin_lock_init(&qp->ntb_rx_q_lock);
        spin_lock_init(&qp->ntb_tx_free_q_lock);

        INIT_LIST_HEAD(&qp->rx_post_q);
        INIT_LIST_HEAD(&qp->rx_pend_q);
        INIT_LIST_HEAD(&qp->rx_free_q);
        INIT_LIST_HEAD(&qp->tx_free_q);

        tasklet_init(&qp->rxc_db_work, ntb_transport_rxc_db,
                     (unsigned long)qp);

        return 0;
}

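/*
 * Device probe: map the memory windows, size the queue-pair set from
 * the doorbell mask (optionally capped by max_num_clients), register
 * the transport context with the NTB core, and enable the link.
 */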
static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
{
        struct ntb_transport_ctx *nt;
        struct ntb_transport_mw *mw;
        unsigned int mw_count, qp_count;
        u64 qp_bitmap;
        int node;
        int rc, i;

        mw_count = ntb_mw_count(ndev);
        if (ntb_spad_count(ndev) < (NUM_MWS + 1 + mw_count * 2)) {
                dev_err(&ndev->dev, "Not enough scratch pad registers for %s",
                        NTB_TRANSPORT_NAME);
                return -EIO;
        }

        if (ntb_db_is_unsafe(ndev))
                dev_dbg(&ndev->dev,
                        "doorbell is unsafe, proceed anyway...\n");
        if (ntb_spad_is_unsafe(ndev))
                dev_dbg(&ndev->dev,
                        "scratchpad is unsafe, proceed anyway...\n");

        node = dev_to_node(&ndev->dev);

        nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node);
        if (!nt)
                return -ENOMEM;

        nt->ndev = ndev;

        nt->mw_count = mw_count;

        nt->mw_vec = kzalloc_node(mw_count * sizeof(*nt->mw_vec),
                                  GFP_KERNEL, node);
        if (!nt->mw_vec) {
                rc = -ENOMEM;
                goto err;
        }

        for (i = 0; i < mw_count; i++) {
                mw = &nt->mw_vec[i];

                rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size,
                                      &mw->xlat_align, &mw->xlat_align_size);
                if (rc)
                        goto err1;

                mw->vbase = ioremap_wc(mw->phys_addr, mw->phys_size);
                if (!mw->vbase) {
                        rc = -ENOMEM;
                        goto err1;
                }

                mw->buff_size = 0;
                mw->xlat_size = 0;
                mw->virt_addr = NULL;
                mw->dma_addr = 0;
        }

        qp_bitmap = ntb_db_valid_mask(ndev);

        qp_count = ilog2(qp_bitmap);
        if (max_num_clients && max_num_clients < qp_count)
                qp_count = max_num_clients;
        else if (mw_count < qp_count)
                qp_count = mw_count;

        qp_bitmap &= BIT_ULL(qp_count) - 1;

        nt->qp_count = qp_count;
        nt->qp_bitmap = qp_bitmap;
        nt->qp_bitmap_free = qp_bitmap;

        nt->qp_vec = kzalloc_node(qp_count * sizeof(*nt->qp_vec),
                                  GFP_KERNEL, node);
        if (!nt->qp_vec) {
                rc = -ENOMEM;
                goto err1;
        }

        if (nt_debugfs_dir) {
                nt->debugfs_node_dir =
                        debugfs_create_dir(pci_name(ndev->pdev),
                                           nt_debugfs_dir);
        }

        for (i = 0; i < qp_count; i++) {
                rc = ntb_transport_init_queue(nt, i);
                if (rc)
                        goto err2;
        }

        INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
        INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup_work);

        rc = ntb_set_ctx(ndev, nt, &ntb_transport_ops);
        if (rc)
                goto err2;

        INIT_LIST_HEAD(&nt->client_devs);
        rc = ntb_bus_init(nt);
        if (rc)
                goto err3;

        nt->link_is_up = false;
        ntb_link_enable(ndev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
        ntb_link_event(ndev);

        return 0;

err3:
        ntb_clear_ctx(ndev);
err2:
        kfree(nt->qp_vec);
err1:
        while (i--) {
                mw = &nt->mw_vec[i];
                iounmap(mw->vbase);
        }
        kfree(nt->mw_vec);
err:
        kfree(nt);
        return rc;
}

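/* Device removal: undo ntb_transport_probe in roughly reverse order */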
static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
{
        struct ntb_transport_ctx *nt = ndev->ctx;
        struct ntb_transport_qp *qp;
        u64 qp_bitmap_alloc;
        int i;

        ntb_transport_link_cleanup(nt);
        cancel_work_sync(&nt->link_cleanup);
        cancel_delayed_work_sync(&nt->link_work);

        qp_bitmap_alloc = nt->qp_bitmap & ~nt->qp_bitmap_free;

        /* verify that all the qp's are freed */
        for (i = 0; i < nt->qp_count; i++) {
                qp = &nt->qp_vec[i];
                if (qp_bitmap_alloc & BIT_ULL(i))
                        ntb_transport_free_queue(qp);
                debugfs_remove_recursive(qp->debugfs_dir);
        }

        ntb_link_disable(ndev);
        ntb_clear_ctx(ndev);

        ntb_bus_remove(nt);

        for (i = nt->mw_count; i--; ) {
                ntb_free_mw(nt, i);
                iounmap(nt->mw_vec[i].vbase);
        }

        kfree(nt->qp_vec);
        kfree(nt->mw_vec);
        kfree(nt);
}

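/*
 * Complete received descriptors from rx_post_q in order: return each
 * slot to the peer by updating rx_info->entry, then deliver the data to
 * the client's rx_handler outside the lock.
 */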
Allen Hubbeda2e5ae2015-07-13 08:07:08 -04001222static void ntb_complete_rxc(struct ntb_transport_qp *qp)
1223{
1224 struct ntb_queue_entry *entry;
1225 void *cb_data;
1226 unsigned int len;
1227 unsigned long irqflags;
1228
1229 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
1230
1231 while (!list_empty(&qp->rx_post_q)) {
1232 entry = list_first_entry(&qp->rx_post_q,
1233 struct ntb_queue_entry, entry);
1234 if (!(entry->flags & DESC_DONE_FLAG))
1235 break;
1236
1237 entry->rx_hdr->flags = 0;
1238 iowrite32(entry->index, &qp->rx_info->entry);
1239
1240 cb_data = entry->cb_data;
1241 len = entry->len;
1242
1243 list_move_tail(&entry->entry, &qp->rx_free_q);
1244
1245 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
1246
1247 if (qp->rx_handler && qp->client_ready)
1248 qp->rx_handler(qp, qp->cb_data, cb_data, len);
1249
1250 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
1251 }
1252
1253 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
1254}

static void ntb_rx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;

	entry->flags |= DESC_DONE_FLAG;

	ntb_complete_rxc(entry->qp);
}

static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
{
	void *buf = entry->buf;
	size_t len = entry->len;

	memcpy(buf, offset, len);

	/* Ensure that the data is fully copied out before clearing the flag */
	wmb();

	ntb_rx_copy_callback(entry);
}

static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
{
	struct dma_async_tx_descriptor *txd;
	struct ntb_transport_qp *qp = entry->qp;
	struct dma_chan *chan = qp->rx_dma_chan;
	struct dma_device *device;
	size_t pay_off, buff_off, len;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	void *buf = entry->buf;
	int retries = 0;

	len = entry->len;

	if (!chan)
		goto err;

	if (len < copy_bytes)
		goto err;

	device = chan->device;
	pay_off = (size_t)offset & ~PAGE_MASK;
	buff_off = (size_t)buf & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, pay_off, buff_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(offset),
				      pay_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	unmap->addr[1] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[1]))
		goto err_get_unmap;

	unmap->from_cnt = 1;

	for (retries = 0; retries < DMA_RETRIES; retries++) {
		txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
						     unmap->addr[0], len,
						     DMA_PREP_INTERRUPT);
		if (txd)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(DMA_OUT_RESOURCE_TO);
	}

	if (!txd) {
		qp->dma_rx_prep_err++;
		goto err_get_unmap;
	}

	txd->callback = ntb_rx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	qp->last_cookie = cookie;

	qp->rx_async++;

	return;

err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	ntb_memcpy_rx(entry, offset);
	qp->rx_memcpy++;
}

static int ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	void *offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);

	dev_dbg(&qp->ndev->pdev->dev, "qp %d: RX ver %u len %d flags %x\n",
		qp->qp_num, hdr->ver, hdr->len, hdr->flags);

	if (!(hdr->flags & DESC_DONE_FLAG)) {
		dev_dbg(&qp->ndev->pdev->dev, "done flag not set\n");
		qp->rx_ring_empty++;
		return -EAGAIN;
	}

	if (hdr->flags & LINK_DOWN_FLAG) {
		dev_dbg(&qp->ndev->pdev->dev, "link down flag set\n");
		ntb_qp_link_down(qp);
		hdr->flags = 0;
		return -EAGAIN;
	}

	if (hdr->ver != (u32)qp->rx_pkts) {
		dev_dbg(&qp->ndev->pdev->dev,
			"version mismatch, expected %llu - got %u\n",
			qp->rx_pkts, hdr->ver);
		qp->rx_err_ver++;
		return -EIO;
	}

	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
	if (!entry) {
		dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
		qp->rx_err_no_buf++;
		return -EAGAIN;
	}

	entry->rx_hdr = hdr;
	entry->index = qp->rx_index;

	if (hdr->len > entry->len) {
		dev_dbg(&qp->ndev->pdev->dev,
			"receive buffer overflow! Wanted %d got %d\n",
			hdr->len, entry->len);
		qp->rx_err_oflow++;

		entry->len = -EIO;
		entry->flags |= DESC_DONE_FLAG;

		ntb_complete_rxc(qp);
	} else {
		dev_dbg(&qp->ndev->pdev->dev,
			"RX OK index %u ver %u size %d into buf size %d\n",
			qp->rx_index, hdr->ver, hdr->len, entry->len);

		qp->rx_bytes += hdr->len;
		qp->rx_pkts++;

		entry->len = hdr->len;

		ntb_async_rx(entry, offset);
	}

	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;

	return 0;
}

static void ntb_transport_rxc_db(unsigned long data)
{
	struct ntb_transport_qp *qp = (void *)data;
	int rc, i;

	dev_dbg(&qp->ndev->pdev->dev, "%s: doorbell %d received\n",
		__func__, qp->qp_num);

	/* Limit the number of packets processed in a single interrupt to
	 * provide fairness to others
	 */
	for (i = 0; i < qp->rx_max_entry; i++) {
		rc = ntb_process_rxc(qp);
		if (rc)
			break;
	}

	if (i && qp->rx_dma_chan)
		dma_async_issue_pending(qp->rx_dma_chan);

	if (i == qp->rx_max_entry) {
		/* there is more work to do */
		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	} else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
		/* the doorbell bit is set: clear it */
		ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
		/* ntb_db_read ensures ntb_db_clear write is committed */
		ntb_db_read(qp->ndev);

		/* an interrupt may have arrived between finishing
		 * ntb_process_rxc and clearing the doorbell bit:
		 * there might be some more work to do.
		 */
		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);
	}
}

static void ntb_tx_copy_callback(void *data,
				 const struct dmaengine_result *res)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header __iomem *hdr = entry->tx_hdr;

	/* we need to check DMA results if we are using DMA */
	if (res) {
		enum dmaengine_tx_result dma_err = res->result;

		switch (dma_err) {
		case DMA_TRANS_READ_FAILED:
		case DMA_TRANS_WRITE_FAILED:
			entry->errors++;
			/* fall through */
		case DMA_TRANS_ABORTED:
		{
			void __iomem *offset =
				qp->tx_mw + qp->tx_max_frame *
				entry->tx_index;

			/* resubmit via CPU */
			ntb_memcpy_tx(entry, offset);
			qp->tx_memcpy++;
			return;
		}

		case DMA_TRANS_NOERROR:
		default:
			break;
		}
	}

	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);

	ntb_peer_db_set(qp->ndev, BIT_ULL(qp->qp_num));

	/* The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
				       entry->len);
	}

	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
}

static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset)
{
#ifdef ARCH_HAS_NOCACHE_UACCESS
	/*
	 * Using non-temporal mov to improve performance on non-cached
	 * writes, even though we aren't actually copying from user space.
	 */
	__copy_from_user_inatomic_nocache(offset, entry->buf, entry->len);
#else
	memcpy_toio(offset, entry->buf, entry->len);
#endif

	/* Ensure that the data is fully copied out before setting the flags */
	wmb();

	ntb_tx_copy_callback(entry, NULL);
}

static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
			       struct ntb_queue_entry *entry)
{
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan = qp->tx_dma_chan;
	struct dma_device *device;
	size_t len = entry->len;
	void *buf = entry->buf;
	size_t dest_off, buff_off;
	struct dmaengine_unmap_data *unmap;
	dma_addr_t dest;
	dma_cookie_t cookie;
	int retries = 0;

	device = chan->device;
	dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
	buff_off = (size_t)buf & ~PAGE_MASK;
	dest_off = (size_t)dest & ~PAGE_MASK;

	if (!is_dma_copy_aligned(device, buff_off, dest_off, len))
		goto err;

	unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
	if (!unmap)
		goto err;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(device->dev, virt_to_page(buf),
				      buff_off, len, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev, unmap->addr[0]))
		goto err_get_unmap;

	unmap->to_cnt = 1;

	for (retries = 0; retries < DMA_RETRIES; retries++) {
		txd = device->device_prep_dma_memcpy(chan, dest,
						     unmap->addr[0], len,
						     DMA_PREP_INTERRUPT);
		if (txd)
			break;

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(DMA_OUT_RESOURCE_TO);
	}

	if (!txd) {
		qp->dma_tx_prep_err++;
		goto err_get_unmap;
	}

	txd->callback_result = ntb_tx_copy_callback;
	txd->callback_param = entry;
	dma_set_unmap(txd, unmap);

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		goto err_set_unmap;

	dmaengine_unmap_put(unmap);

	dma_async_issue_pending(chan);

	return 0;
err_set_unmap:
	dmaengine_unmap_put(unmap);
err_get_unmap:
	dmaengine_unmap_put(unmap);
err:
	return -ENXIO;
}

static void ntb_async_tx(struct ntb_transport_qp *qp,
			 struct ntb_queue_entry *entry)
{
	struct ntb_payload_header __iomem *hdr;
	struct dma_chan *chan = qp->tx_dma_chan;
	void __iomem *offset;
	int res;

	entry->tx_index = qp->tx_index;
	offset = qp->tx_mw + qp->tx_max_frame * entry->tx_index;
	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
	entry->tx_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32((u32)qp->tx_pkts, &hdr->ver);

	if (!chan)
		goto err;

	if (entry->len < copy_bytes)
		goto err;

	res = ntb_async_tx_submit(qp, entry);
	if (res < 0)
		goto err;

	if (!entry->retries)
		qp->tx_async++;

	return;

err:
	ntb_memcpy_tx(entry, offset);
	qp->tx_memcpy++;
}

static int ntb_process_tx(struct ntb_transport_qp *qp,
			  struct ntb_queue_entry *entry)
{
	if (qp->tx_index == qp->remote_rx_info->entry) {
		qp->tx_ring_full++;
		return -EAGAIN;
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);

		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
		return 0;
	}

	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return 0;
}

static void ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev = qp->ndev->pdev;
	struct ntb_queue_entry *entry;
	int i, rc;

	if (!qp->link_is_up)
		return;

	dev_info(&pdev->dev, "qp %d: Send Link Down\n", qp->qp_num);

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry)
			break;
		msleep(100);
	}

	if (!entry)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = LINK_DOWN_FLAG;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
			qp->qp_num);

	ntb_qp_link_down_reset(qp);
}

static bool ntb_dma_filter_fn(struct dma_chan *chan, void *node)
{
	return dev_to_node(&chan->dev->device) == (int)(unsigned long)node;
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @data: pointer for callback data
 * @client_dev: &struct device pointer
 * @handlers: pointer to the various ntb queue (callback) handlers
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct device *client_dev,
			   const struct ntb_queue_handlers *handlers)
{
	struct ntb_dev *ndev;
	struct pci_dev *pdev;
	struct ntb_transport_ctx *nt;
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	u64 qp_bit;
	unsigned int free_queue;
	dma_cap_mask_t dma_mask;
	int node;
	int i;

	ndev = dev_ntb(client_dev->parent);
	pdev = ndev->pdev;
	nt = ndev->ctx;

	node = dev_to_node(&ndev->dev);

	free_queue = ffs(nt->qp_bitmap);
	if (!free_queue)
		goto err;

	/* decrement free_queue to make it zero based */
	free_queue--;

	qp = &nt->qp_vec[free_queue];
	qp_bit = BIT_ULL(qp->qp_num);

	nt->qp_bitmap_free &= ~qp_bit;

	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	dma_cap_zero(dma_mask);
	dma_cap_set(DMA_MEMCPY, dma_mask);

	if (use_dma) {
		qp->tx_dma_chan =
			dma_request_channel(dma_mask, ntb_dma_filter_fn,
					    (void *)(unsigned long)node);
		if (!qp->tx_dma_chan)
			dev_info(&pdev->dev, "Unable to allocate TX DMA channel\n");

		qp->rx_dma_chan =
			dma_request_channel(dma_mask, ntb_dma_filter_fn,
					    (void *)(unsigned long)node);
		if (!qp->rx_dma_chan)
			dev_info(&pdev->dev, "Unable to allocate RX DMA channel\n");
	} else {
		qp->tx_dma_chan = NULL;
		qp->rx_dma_chan = NULL;
	}

	dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
		qp->tx_dma_chan ? "DMA" : "CPU");

	dev_dbg(&pdev->dev, "Using %s memcpy for RX\n",
		qp->rx_dma_chan ? "DMA" : "CPU");

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
		if (!entry)
			goto err1;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
			     &qp->rx_free_q);
	}
	qp->rx_alloc_entry = NTB_QP_DEF_NUM_ENTRIES;

	for (i = 0; i < qp->tx_max_entry; i++) {
		entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node);
		if (!entry)
			goto err2;

		entry->qp = qp;
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);
	}

	ntb_db_clear(qp->ndev, qp_bit);
	ntb_db_clear_mask(qp->ndev, qp_bit);

	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);

	return qp;

err2:
	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);
err1:
	qp->rx_alloc_entry = 0;
	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
		kfree(entry);
	if (qp->tx_dma_chan)
		dma_release_channel(qp->tx_dma_chan);
	if (qp->rx_dma_chan)
		dma_release_channel(qp->rx_dma_chan);
	nt->qp_bitmap_free |= qp_bit;
err:
	return NULL;
}
EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
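
/*
 * Usage sketch (illustrative only, not part of this driver): a client
 * typically creates its queue from its ntb_client probe callback.  The
 * handler names and the my_ctx pointer below are hypothetical:
 *
 *	static const struct ntb_queue_handlers my_handlers = {
 *		.rx_handler	= my_rx_handler,
 *		.tx_handler	= my_tx_handler,
 *		.event_handler	= my_event_handler,
 *	};
 *
 *	qp = ntb_transport_create_queue(my_ctx, client_dev, &my_handlers);
 *	if (!qp)
 *		return -EIO;
 *	ntb_transport_link_up(qp);
 */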

/**
 * ntb_transport_free_queue - Frees NTB transport queue
 * @qp: NTB queue to be freed
 *
 * Frees NTB transport queue
 */
void ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct pci_dev *pdev;
	struct ntb_queue_entry *entry;
	u64 qp_bit;

	if (!qp)
		return;

	pdev = qp->ndev->pdev;

	qp->active = false;

	if (qp->tx_dma_chan) {
		struct dma_chan *chan = qp->tx_dma_chan;
		/* Setting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->tx_dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
	}

	if (qp->rx_dma_chan) {
		struct dma_chan *chan = qp->rx_dma_chan;
		/* Setting the dma_chan to NULL will force any new traffic to be
		 * processed by the CPU instead of the DMA engine
		 */
		qp->rx_dma_chan = NULL;

		/* Try to be nice and wait for any queued DMA engine
		 * transactions to process before smashing it with a rock
		 */
		dma_sync_wait(chan, qp->last_cookie);
		dmaengine_terminate_all(chan);
		dma_release_channel(chan);
	}

	qp_bit = BIT_ULL(qp->qp_num);

	ntb_db_set_mask(qp->ndev, qp_bit);
	tasklet_kill(&qp->rxc_db_work);

	cancel_delayed_work_sync(&qp->link_work);

	qp->cb_data = NULL;
	qp->rx_handler = NULL;
	qp->tx_handler = NULL;
	qp->event_handler = NULL;

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
		kfree(entry);

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
		dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
		dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
		kfree(entry);
	}

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		kfree(entry);

	qp->transport->qp_bitmap_free |= qp_bit;

	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
}
EXPORT_SYMBOL_GPL(ntb_transport_free_queue);

/**
 * ntb_transport_rx_remove - Dequeues enqueued rx packet
 * @qp: NTB transport layer queue the entry is to be dequeued from
 * @len: pointer to variable to write enqueued buffers length
 *
 * Dequeues unused buffers from receive queue.  Should only be used during
 * shutdown of qp.
 *
 * RETURNS: the per-buffer cb pointer supplied at enqueue time, or NULL on
 * error.
 */
void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
{
	struct ntb_queue_entry *entry;
	void *buf;

	if (!qp || qp->client_ready)
		return NULL;

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
	if (!entry)
		return NULL;

	buf = entry->cb_data;
	*len = entry->len;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);

	return buf;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);

/**
 * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that incoming packets will be copied into
 * @len: length of the data buffer
 *
 * Enqueue a new receive buffer onto the transport queue, into which an NTB
 * payload can be received.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;

	if (!qp)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
	if (!entry)
		return -ENOMEM;

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);

	if (qp->active)
		tasklet_schedule(&qp->rxc_db_work);

	return 0;
}
EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
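
/*
 * Usage sketch (illustrative; the buffer management is hypothetical):
 * receive buffers are normally pre-posted in a batch so that rx_pend_q is
 * never empty while traffic flows, and each buffer is re-posted from the
 * client's rx_handler once its payload has been consumed:
 *
 *	for (i = 0; i < num_bufs; i++) {
 *		buf = kmalloc(buf_len, GFP_KERNEL);
 *		if (!buf)
 *			break;
 *		rc = ntb_transport_rx_enqueue(qp, buf, buf, buf_len);
 *		if (rc) {
 *			kfree(buf);
 *			break;
 *		}
 *	}
 */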

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
			     unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp || !qp->link_is_up || !len)
		return -EINVAL;

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (!entry) {
		qp->tx_err_no_buf++;
		return -EBUSY;
	}

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;
	entry->errors = 0;
	entry->retries = 0;
	entry->tx_index = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc)
		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
			     &qp->tx_free_q);

	return rc;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
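
/*
 * Usage sketch (illustrative): the caller provides its own serialization
 * around tx_enqueue and bounds the payload by ntb_transport_max_size().
 * -EBUSY means no free tx entry was available and -EAGAIN means the remote
 * rx ring is full; either way the buffer should be retried after the
 * tx_handler has reclaimed entries.  The helper below is hypothetical:
 *
 *	if (len > ntb_transport_max_size(qp))
 *		return -EINVAL;
 *
 *	rc = ntb_transport_tx_enqueue(qp, my_cb, buf, len);
 *	if (rc == -EBUSY || rc == -EAGAIN)
 *		stop_submitting_until_tx_handler_runs();
 */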

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	if (!qp)
		return;

	qp->client_ready = true;

	if (qp->transport->link_is_up)
		schedule_delayed_work(&qp->link_work, 0);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_up);

/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * the specified transport queue.  It is the client's responsibility to ensure
 * all entries on the queue are purged or otherwise handled appropriately.
 */
void ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	int val;

	if (!qp)
		return;

	qp->client_ready = false;

	val = ntb_spad_read(qp->ndev, QP_LINKS);

	ntb_peer_spad_write(qp->ndev, QP_LINKS,
			    val & ~BIT(qp->qp_num));

	if (qp->link_is_up)
		ntb_send_link_down(qp);
	else
		cancel_delayed_work_sync(&qp->link_work);
}
EXPORT_SYMBOL_GPL(ntb_transport_link_down);

/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool ntb_transport_link_query(struct ntb_transport_qp *qp)
{
	if (!qp)
		return false;

	return qp->link_is_up;
}
EXPORT_SYMBOL_GPL(ntb_transport_link_query);

/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
	if (!qp)
		return 0;

	return qp->qp_num;
}
EXPORT_SYMBOL_GPL(ntb_transport_qp_num);

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
{
	unsigned int max_size;
	unsigned int copy_align;
	struct dma_chan *rx_chan, *tx_chan;

	if (!qp)
		return 0;

	rx_chan = qp->rx_dma_chan;
	tx_chan = qp->tx_dma_chan;

	copy_align = max(rx_chan ? rx_chan->device->copy_align : 0,
			 tx_chan ? tx_chan->device->copy_align : 0);

	/* If DMA engine usage is possible, try to find the max size for that */
	max_size = qp->tx_max_frame - sizeof(struct ntb_payload_header);
	max_size = round_down(max_size, 1 << copy_align);

	return max_size;
}
EXPORT_SYMBOL_GPL(ntb_transport_max_size);
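
/*
 * For example (hypothetical numbers): with tx_max_frame = 64KiB the payload
 * limit is 65536 - sizeof(struct ntb_payload_header), and if either DMA
 * channel reports copy_align = 6 that limit is then rounded down to a
 * multiple of 1 << 6 = 64 bytes.  A client might size its buffers once at
 * link-up time:
 *
 *	buf_len = ntb_transport_max_size(qp);
 */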

/**
 * ntb_transport_tx_free_entry - Query free tx entries on a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the number of tx entries currently available on the given qp
 *
 * RETURNS: the number of free tx ring entries
 */
unsigned int ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
{
	unsigned int head = qp->tx_index;
	unsigned int tail = qp->remote_rx_info->entry;

	return tail > head ? tail - head : qp->tx_max_entry + tail - head;
}
EXPORT_SYMBOL_GPL(ntb_transport_tx_free_entry);
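
/*
 * Worked example for the ring arithmetic above (hypothetical values): with
 * tx_max_entry = 8, head = 2 and tail = 6 gives 6 - 2 = 4 free entries;
 * head = 6 and tail = 2 means the producer has wrapped, giving
 * 8 + 2 - 6 = 4 free entries.
 */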

static void ntb_transport_doorbell_callback(void *data, int vector)
{
	struct ntb_transport_ctx *nt = data;
	struct ntb_transport_qp *qp;
	u64 db_bits;
	unsigned int qp_num;

	db_bits = (nt->qp_bitmap & ~nt->qp_bitmap_free &
		   ntb_db_vector_mask(nt->ndev, vector));

	while (db_bits) {
		qp_num = __ffs(db_bits);
		qp = &nt->qp_vec[qp_num];

		if (qp->active)
			tasklet_schedule(&qp->rxc_db_work);

		db_bits &= ~BIT_ULL(qp_num);
	}
}

static const struct ntb_ctx_ops ntb_transport_ops = {
	.link_event = ntb_transport_event_callback,
	.db_event = ntb_transport_doorbell_callback,
};

static struct ntb_client ntb_transport_client = {
	.ops = {
		.probe = ntb_transport_probe,
		.remove = ntb_transport_free,
	},
};

static int __init ntb_transport_init(void)
{
	int rc;

	pr_info("%s, version %s\n", NTB_TRANSPORT_DESC, NTB_TRANSPORT_VER);

	if (debugfs_initialized())
		nt_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);

	rc = bus_register(&ntb_transport_bus);
	if (rc)
		goto err_bus;

	rc = ntb_register_client(&ntb_transport_client);
	if (rc)
		goto err_client;

	return 0;

err_client:
	bus_unregister(&ntb_transport_bus);
err_bus:
	debugfs_remove_recursive(nt_debugfs_dir);
	return rc;
}
module_init(ntb_transport_init);

static void __exit ntb_transport_exit(void)
{
	debugfs_remove_recursive(nt_debugfs_dir);

	ntb_unregister_client(&ntb_transport_client);
	bus_unregister(&ntb_transport_bus);
}
module_exit(ntb_transport_exit);