/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");

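/*
 * One ULD context exists per adapter.  ->dev is allocated when the LLD
 * reports CXGB4_STATE_UP and torn down again on DOWN/DETACH or a fatal
 * error (see c4iw_uld_state_change() below).
 */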
struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
};

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

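/*
 * Doorbell flow-control tuning: resume at most DB_FC_RESUME_SIZE stalled
 * QPs per pass, sleep DB_FC_RESUME_DELAY jiffies between passes, and only
 * resume while the hardware DB FIFO is below the drain threshold (see
 * resume_queues()).
 */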
#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
#define DB_FC_DRAIN_THRESH 0

static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};

/* registered cxgb4 netlink callbacks */
static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
};

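/*
 * idr_for_each() callback used by the debugfs dumpers below to size
 * their output buffers before a second pass formats the entries.
 */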
static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		if (qp->ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin = (struct sockaddr_in *)
				&qp->ep->com.local_addr;
			struct sockaddr_in *rsin = (struct sockaddr_in *)
				&qp->ep->com.remote_addr;
			struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
				&qp->ep->com.mapped_local_addr;
			struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
				&qp->ep->com.mapped_remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u/%u->%pI4:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      ntohs(mapped_lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port),
				      ntohs(mapped_rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.local_addr;
			struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.remote_addr;
			struct sockaddr_in6 *mapped_lsin6 =
				(struct sockaddr_in6 *)
				&qp->ep->com.mapped_local_addr;
			struct sockaddr_in6 *mapped_rsin6 =
				(struct sockaddr_in6 *)
				&qp->ep->com.mapped_remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u/%u->%pI6:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      ntohs(mapped_lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port),
				      ntohs(mapped_rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;
	if (!qpd) {
		printk(KERN_INFO "%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}

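/*
 * Two-pass dump: count the QPs under the device lock to size the buffer
 * (128 bytes per QP), then walk the idr again to format each entry into
 * the buffer that debugfs_read() serves.
 */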
static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int ret = 0;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd) {
		ret = -ENOMEM;
		goto out;
	}
	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 128;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	goto out;
err1:
	kfree(qpd);
out:
	return ret;
}

static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

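/*
 * The mmidr is keyed by STAG index (stag >> 8), so shifting the idr id
 * left 8 bits reconstructs the STAG value printed here.
 */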
static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
	if (cc < space)
		stagd->pos += cc;
	return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;
	if (!stagd) {
		printk(KERN_INFO "%s null stagd?\n", __func__);
		return 0;
	}
	kfree(stagd->buf);
	kfree(stagd);
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * sizeof("0x12345678\n");
	stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}

static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};

static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, "   Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
		   dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
		   dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
		   dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
		   dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
		   dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
		   dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions,
		   dev->rdev.stats.db_fc_interruptions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}

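/*
 * Writing anything to the "stats" file resets the high-water marks and
 * failure/drop counters; totals and current usage are left alone.
 */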
static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}

static const struct file_operations stats_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stats_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = stats_clear,
};

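/*
 * Endpoint dumpers: active endpoints live in the hwtid and atid idrs,
 * listening endpoints in the stid idr.  Both the native and the
 * port-mapped (mapped_*) addresses are printed.
 */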
static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *rsin = (struct sockaddr_in *)
			&ep->com.remote_addr;
		struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
			&ep->com.mapped_local_addr;
		struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
			&ep->com.mapped_remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(mapped_lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port),
			      ntohs(mapped_rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
			&ep->com.remote_addr;
		struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_local_addr;
		struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(mapped_lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
			      ntohs(mapped_rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
			&ep->com.mapped_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(mapped_lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(mapped_lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;
	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 160;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}

static const struct file_operations ep_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = ep_open,
	.release = ep_release,
	.read    = debugfs_read,
};

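/*
 * Create the per-device debugfs files and give each a nominal 4KB
 * i_size (debugfs files are otherwise created zero-length).
 */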
static int setup_debugfs(struct c4iw_dev *devp)
{
	struct dentry *de;

	if (!devp->debugfs_root)
		return -1;

	de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	return 0;
}

void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}

/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * qpshift is the number of bits to shift the qpid left in order
	 * to get the correct address of the doorbell for that qp.
	 */
	rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
	rdev->cqmask = rdev->lldi.ucq_density - 1;
	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu "
	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
	     (u64)pci_resource_start(rdev->lldi.pdev, 2),
	     rdev->lldi.db_reg,
	     rdev->lldi.gts_reg,
	     rdev->qpshift, rdev->qpmask,
	     rdev->cqshift, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0) {
		err = -EINVAL;
		goto err1;
	}

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing resources\n", err);
		goto err1;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
		goto err2;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
		goto err3;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
		goto err4;
	}
	rdev->status_page = (struct t4_dev_status_page *)
			    __get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		err = -ENOMEM;
		pr_err(MOD "error allocating status page\n");
		goto err4;
	}
	return 0;
err4:
	c4iw_rqtpool_destroy(rdev);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return err;
}

static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}

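/*
 * Tear down everything c4iw_alloc() set up: the rdev, the idrs, any
 * BAR2/on-chip mappings, the ib_device itself, and the port-mapper
 * registration.
 */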
static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	idr_destroy(&ctx->dev->cqidr);
	idr_destroy(&ctx->dev->qpidr);
	idr_destroy(&ctx->dev->mmidr);
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	if (ctx->dev->rdev.bar2_kva)
		iounmap(ctx->dev->rdev.bar2_kva);
	if (ctx->dev->rdev.oc_mw_kva)
		iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	iwpm_exit(RDMA_NL_C4IW);
	ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}

static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
		       pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device.\n",
			pci_name(infop->pdev));

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	/*
	 * For T5 devices, we map all of BAR2 with WC.
	 * For T4 devices with onchip qp mem, we map only that part
	 * of BAR2 with WC.
	 */
	devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
	if (is_t5(devp->rdev.lldi.adapter_type)) {
		devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
			pci_resource_len(devp->rdev.lldi.pdev, 2));
		if (!devp->rdev.bar2_kva) {
			pr_err(MOD "Unable to ioremap BAR2\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	} else if (ocqp_supported(infop)) {
		devp->rdev.oc_mw_pa =
			pci_resource_start(devp->rdev.lldi.pdev, 2) +
			pci_resource_len(devp->rdev.lldi.pdev, 2) -
			roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
		devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
			devp->rdev.lldi.vr->ocq.size);
		if (!devp->rdev.oc_mw_kva) {
			pr_err(MOD "Unable to ioremap onchip mem\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	}

	PDBG(MOD "ocq memory: "
	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
803
Steve Wisecfdda9d2010-04-21 15:30:06 -0700804 ret = c4iw_rdev_open(&devp->rdev);
805 if (ret) {
Steve Wisecfdda9d2010-04-21 15:30:06 -0700806 printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
807 ib_dealloc_device(&devp->ibdev);
Steve Wisebbe9a0a2011-05-09 22:06:22 -0700808 return ERR_PTR(ret);
Steve Wisecfdda9d2010-04-21 15:30:06 -0700809 }
810
811 idr_init(&devp->cqidr);
812 idr_init(&devp->qpidr);
813 idr_init(&devp->mmidr);
Vipul Pandya793dad92012-12-10 09:30:56 +0000814 idr_init(&devp->hwtid_idr);
815 idr_init(&devp->stid_idr);
816 idr_init(&devp->atid_idr);
Steve Wisecfdda9d2010-04-21 15:30:06 -0700817 spin_lock_init(&devp->lock);
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530818 mutex_init(&devp->rdev.stats.lock);
Vipul Pandya2c974782012-05-18 15:29:28 +0530819 mutex_init(&devp->db_mutex);
Steve Wise05eb2382014-03-14 21:52:08 +0530820 INIT_LIST_HEAD(&devp->db_fc_list);
Steve Wisecfdda9d2010-04-21 15:30:06 -0700821
Steve Wisecfdda9d2010-04-21 15:30:06 -0700822 if (c4iw_debugfs_root) {
823 devp->debugfs_root = debugfs_create_dir(
824 pci_name(devp->rdev.lldi.pdev),
825 c4iw_debugfs_root);
826 setup_debugfs(devp);
827 }
Steve Wise9eccfe12014-03-26 17:08:09 -0500828
829 ret = iwpm_init(RDMA_NL_C4IW);
830 if (ret) {
831 pr_err("port mapper initialization failed with %d\n", ret);
832 ib_dealloc_device(&devp->ibdev);
833 return ERR_PTR(ret);
834 }
835
Steve Wisecfdda9d2010-04-21 15:30:06 -0700836 return devp;
837}
838
static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq,
	     ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}

static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *      rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}

static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}

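/*
 * ULD rx entry point.  Three ingress cases: a NULL gl means the payload
 * is embedded in the response descriptor itself; CXGB4_MSG_AN is an
 * async notification carrying a CQ qid; otherwise the message arrived
 * via the free list, where an unexpected leading byte may be a
 * CPL_RX_PKT that recv_rx_pkt() turns into a synthesized
 * cpl_pass_accept_req.
 */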
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, " \
			"RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			(unsigned long long)be64_to_cpu(*rsp),
			(unsigned long long)be64_to_cpu(
				*(__force __be64 *)gl->va),
			gl->tot_len);

		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode]) {
		c4iw_handlers[opcode](dev, skb);
	} else {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
	}

	return 0;
nomem:
	return -1;
}

static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				printk(KERN_ERR MOD
				       "%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}

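/*
 * Doorbell flow-control plumbing driven by the LLD.  stop_queues()
 * marks the device STOPPED: if the shared status page is unavailable,
 * each QP's doorbells are disabled individually; otherwise user mode is
 * told to back off via the status page's db_off flag.
 */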
static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

static void stop_queues(struct uld_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->lock, flags);
	ctx->dev->rdev.stats.db_state_transitions++;
	ctx->dev->db_state = STOPPED;
	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	else
		ctx->dev->rdev.status_page->db_off = 1;
	spin_unlock_irqrestore(&ctx->dev->lock, flags);
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

static void resume_rc_qp(struct c4iw_qp *qp)
{
	spin_lock(&qp->lock);
	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc,
		      is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
	qp->wq.sq.wq_pidx_inc = 0;
	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc,
		      is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
	qp->wq.rq.wq_pidx_inc = 0;
	spin_unlock(&qp->lock);
}

static void resume_a_chunk(struct uld_ctx *ctx)
{
	int i;
	struct c4iw_qp *qp;

	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
				      db_fc_entry);
		list_del_init(&qp->db_fc_entry);
		resume_rc_qp(qp);
		if (list_empty(&ctx->dev->db_fc_list))
			break;
	}
}

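/*
 * Drain the db_fc_list in chunks, re-ringing each stalled QP's pending
 * doorbells, sleeping between chunks while the hardware DB FIFO is
 * above the drain threshold.  Any db_state change while the lock is
 * dropped aborts the resume.
 */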
static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state != STOPPED)
		goto out;
	ctx->dev->db_state = FLOW_CONTROL;
	while (1) {
		if (list_empty(&ctx->dev->db_fc_list)) {
			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
			ctx->dev->db_state = NORMAL;
			ctx->dev->rdev.stats.db_state_transitions++;
			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
					     NULL);
			} else {
				ctx->dev->rdev.status_page->db_off = 0;
			}
			break;
		} else {
			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
			       DB_FC_DRAIN_THRESH)) {
				resume_a_chunk(ctx);
			}
			if (!list_empty(&ctx->dev->db_fc_list)) {
				spin_unlock_irq(&ctx->dev->lock);
				if (DB_FC_RESUME_DELAY) {
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(DB_FC_RESUME_DELAY);
				}
				spin_lock_irq(&ctx->dev->lock);
				if (ctx->dev->db_state != FLOW_CONTROL)
					break;
			}
		}
	}
out:
	if (ctx->dev->db_state != NORMAL)
		ctx->dev->rdev.stats.db_fc_interruptions++;
	spin_unlock_irq(&ctx->dev->lock);
}

struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};

static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}

static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	(*countp)++;
	return 0;
}

static void deref_qps(struct qp_list *qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list->idx; idx++)
		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}

static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		spin_lock_irq(&qp->rhp->lock);
		spin_lock(&qp->lock);
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.sq.wq_pidx_inc = 0;

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));

		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.rq.wq_pidx_inc = 0;
		spin_unlock(&qp->lock);
		spin_unlock_irq(&qp->rhp->lock);

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}

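/*
 * DB_DROP recovery: the hardware dropped doorbells, so the SGE's view
 * of each queue's producer index is stale.  Flush the SGE context
 * cache, then, with every QP referenced and the device in RECOVERY,
 * resync each SQ/RQ host producer index into the hardware.
 */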
static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		return;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != STOPPED);
	ctx->dev->db_state = RECOVERY;
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);

	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
	if (!qp_list.qps) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		spin_unlock_irq(&ctx->dev->lock);
		return;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);

	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state */
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done! deref the qps and clean up */
	deref_qps(&qp_list);
	kfree(qp_list.qps);

	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != RECOVERY);
	ctx->dev->db_state = STOPPED;
	spin_unlock_irq(&ctx->dev->lock);
}

static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
		       pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};

static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");

	if (ibnl_add_client(RDMA_NL_C4IW, RDMA_NL_IWPM_NUM_OPS,
			    c4iw_nl_cb_table))
		pr_err("%s[%u]: Failed to add netlink callback\n",
		       __func__, __LINE__);

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}

static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	ibnl_remove_client(RDMA_NL_C4IW);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);