/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");

struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
};

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
#define DB_FC_DRAIN_THRESH 0

static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};

/* registered cxgb4 netlink callbacks */
static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
};

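/*
 * Generic plumbing for the debugfs dump files below: each open() walks
 * an idr twice under the device lock - once with count_idrs() to size a
 * buffer, then again to format one line per object - and debugfs_read()
 * simply copies the pre-formatted buffer out to user space.
 */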
static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

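/*
 * Emit one line describing a QP.  Connected QPs also show their endpoint
 * state and addresses as local:port/mapped-port -> remote:port/mapped-port.
 */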
static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		if (qp->ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin = (struct sockaddr_in *)
				&qp->ep->com.local_addr;
			struct sockaddr_in *rsin = (struct sockaddr_in *)
				&qp->ep->com.remote_addr;
			struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
				&qp->ep->com.mapped_local_addr;
			struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
				&qp->ep->com.mapped_remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u/%u->%pI4:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      ntohs(mapped_lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port),
				      ntohs(mapped_rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.local_addr;
			struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.remote_addr;
			struct sockaddr_in6 *mapped_lsin6 =
				(struct sockaddr_in6 *)
				&qp->ep->com.mapped_local_addr;
			struct sockaddr_in6 *mapped_rsin6 =
				(struct sockaddr_in6 *)
				&qp->ep->com.mapped_remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u/%u->%pI6:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      ntohs(mapped_lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port),
				      ntohs(mapped_rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;
	if (!qpd) {
		printk(KERN_INFO "%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}

static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int ret = 0;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd) {
		ret = -ENOMEM;
		goto out;
	}
	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 128;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	goto out;
err1:
	kfree(qpd);
out:
	return ret;
}

static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
	if (cc < space)
		stagd->pos += cc;
	return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;
	if (!stagd) {
		printk(KERN_INFO "%s null stagd?\n", __func__);
		return 0;
	}
	kfree(stagd->buf);
	kfree(stagd);
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * sizeof("0x12345678\n");
	stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}

static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};

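/*
 * "stats" debugfs file: one-shot dump of the resource and doorbell
 * counters.  Writing anything to the file clears them (see stats_clear()).
 */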
static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, "   Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
			dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
			dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
			dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
			dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
			dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
			dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions,
		   dev->rdev.stats.db_fc_interruptions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
	return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}

static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}

static const struct file_operations stats_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stats_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = stats_clear,
};

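/*
 * Emit one line describing an active endpoint, including its connection
 * state, flags, history, and (mapped) local/remote addresses.
 */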
static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *rsin = (struct sockaddr_in *)
			&ep->com.remote_addr;
		struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
			&ep->com.mapped_local_addr;
		struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
			&ep->com.mapped_remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(mapped_lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port),
			      ntohs(mapped_rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
			&ep->com.remote_addr;
		struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_local_addr;
		struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(mapped_lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
			      ntohs(mapped_rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
			&ep->com.mapped_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(mapped_lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(mapped_lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;
	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 160;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}

static const struct file_operations ep_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = ep_open,
	.release = ep_release,
	.read    = debugfs_read,
};

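/*
 * Create the per-device debugfs files.  The i_size of 4096 is only a
 * nominal size so the files don't appear empty; the actual dump length
 * is computed when a file is opened.
 */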
static int setup_debugfs(struct c4iw_dev *devp)
{
	struct dentry *de;

	if (!devp->debugfs_root)
		return -1;

	de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	return 0;
}

void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	/* Free the cqids queued for reuse as well. */
	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}

/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * qpshift is the number of bits to shift the qpid left in order
	 * to get the correct address of the doorbell for that qp.
	 */
	rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
	rdev->cqmask = rdev->lldi.ucq_density - 1;
	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu "
	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
	     (u64)pci_resource_start(rdev->lldi.pdev, 2),
	     rdev->lldi.db_reg,
	     rdev->lldi.gts_reg,
	     rdev->qpshift, rdev->qpmask,
	     rdev->cqshift, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0) {
		err = -EINVAL;
		goto err1;
	}

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing resources\n", err);
		goto err1;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
		goto err2;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
		goto err3;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
		goto err4;
	}
	rdev->status_page = (struct t4_dev_status_page *)
			    __get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		pr_err(MOD "error allocating status page\n");
		err = -ENOMEM;
		goto err5;
	}
	return 0;
err5:
	c4iw_ocqp_pool_destroy(rdev);
err4:
	c4iw_rqtpool_destroy(rdev);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return err;
}

static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_ocqp_pool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}

static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	idr_destroy(&ctx->dev->cqidr);
	idr_destroy(&ctx->dev->qpidr);
	idr_destroy(&ctx->dev->mmidr);
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	if (ctx->dev->rdev.bar2_kva)
		iounmap(ctx->dev->rdev.bar2_kva);
	if (ctx->dev->rdev.oc_mw_kva)
		iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	iwpm_exit(RDMA_NL_C4IW);
	ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}

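/*
 * Allocate the ib_device, size the HW queue limits from the LLD info,
 * map BAR2 write-combined (all of BAR2 on T5; only the on-chip queue
 * window on T4 devices that support it), and open the rdev.
 */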
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
		       pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device.\n",
			pci_name(infop->pdev));

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	/* init various hw-queue params based on lld info */
	PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
	     __func__, devp->rdev.lldi.sge_ingpadboundary,
	     devp->rdev.lldi.sge_egrstatuspagesize);

	devp->rdev.hw_queue.t4_eq_status_entries =
		devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
	devp->rdev.hw_queue.t4_max_eq_size =
		65520 - devp->rdev.hw_queue.t4_eq_status_entries;
	devp->rdev.hw_queue.t4_max_iq_size = 65520 - 1;
	devp->rdev.hw_queue.t4_max_rq_size =
		8192 - devp->rdev.hw_queue.t4_eq_status_entries;
	devp->rdev.hw_queue.t4_max_sq_size =
		devp->rdev.hw_queue.t4_max_eq_size - 1;
	devp->rdev.hw_queue.t4_max_qp_depth =
		devp->rdev.hw_queue.t4_max_rq_size - 1;
	devp->rdev.hw_queue.t4_max_cq_depth =
		devp->rdev.hw_queue.t4_max_iq_size - 1;
	devp->rdev.hw_queue.t4_stat_len =
		devp->rdev.lldi.sge_egrstatuspagesize;

	/*
	 * For T5 devices, we map all of BAR2 with WC.
	 * For T4 devices with onchip qp mem, we map only that part
	 * of BAR2 with WC.
	 */
	devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
	if (is_t5(devp->rdev.lldi.adapter_type)) {
		devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
			pci_resource_len(devp->rdev.lldi.pdev, 2));
		if (!devp->rdev.bar2_kva) {
			pr_err(MOD "Unable to ioremap BAR2\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	} else if (ocqp_supported(infop)) {
		devp->rdev.oc_mw_pa =
			pci_resource_start(devp->rdev.lldi.pdev, 2) +
			pci_resource_len(devp->rdev.lldi.pdev, 2) -
			roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
		devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
			devp->rdev.lldi.vr->ocq.size);
		if (!devp->rdev.oc_mw_kva) {
			pr_err(MOD "Unable to ioremap onchip mem\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	}

	PDBG(MOD "ocq memory: "
	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);
	INIT_LIST_HEAD(&devp->db_fc_list);
	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}

	ret = iwpm_init(RDMA_NL_C4IW);
	if (ret) {
		pr_err("port mapper initialization failed with %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	return devp;
}

static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq,
	     ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}

static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *      rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}

static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}

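/*
 * Main ULD rx hook.  A message can arrive as a bare response descriptor
 * (gl == NULL), as a firmware async notification (CXGB4_MSG_AN), or as
 * a free-list packet.  CPL messages are dispatched through
 * c4iw_handlers[], indexed by opcode.
 */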
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, "
			"RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			(unsigned long long)be64_to_cpu(*rsp),
			(unsigned long long)be64_to_cpu(
				*(__force __be64 *)gl->va),
			gl->tot_len);

		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode]) {
		c4iw_handlers[opcode](dev, skb);
	} else {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
	}

	return 0;
nomem:
	return -1;
}

static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				printk(KERN_ERR MOD
				       "%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}

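/*
 * Doorbell overflow avoidance: when the LLD signals DB_FULL, stop all
 * user doorbells - per-QP if the status page is disabled, otherwise via
 * the status page's db_off flag - and resume them in flow-controlled
 * chunks once the LLD signals DB_EMPTY.
 */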
static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

static void stop_queues(struct uld_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->lock, flags);
	ctx->dev->rdev.stats.db_state_transitions++;
	ctx->dev->db_state = STOPPED;
	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	else
		ctx->dev->rdev.status_page->db_off = 1;
	spin_unlock_irqrestore(&ctx->dev->lock, flags);
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

static void resume_rc_qp(struct c4iw_qp *qp)
{
	spin_lock(&qp->lock);
	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc,
		      is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
	qp->wq.sq.wq_pidx_inc = 0;
	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc,
		      is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
	qp->wq.rq.wq_pidx_inc = 0;
	spin_unlock(&qp->lock);
}

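/* Re-ring the pending SQ/RQ doorbells for one flow-controlled RC QP. */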
static void resume_a_chunk(struct uld_ctx *ctx)
{
	int i;
	struct c4iw_qp *qp;

	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
				      db_fc_entry);
		list_del_init(&qp->db_fc_entry);
		resume_rc_qp(qp);
		if (list_empty(&ctx->dev->db_fc_list))
			break;
	}
}

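/*
 * Resume QPs from the flow control list in chunks, waiting for the db
 * fifo to drop below the drain threshold between chunks, until the list
 * is empty and the device returns to NORMAL.
 */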
static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state != STOPPED)
		goto out;
	ctx->dev->db_state = FLOW_CONTROL;
	while (1) {
		if (list_empty(&ctx->dev->db_fc_list)) {
			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
			ctx->dev->db_state = NORMAL;
			ctx->dev->rdev.stats.db_state_transitions++;
			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
					     NULL);
			} else {
				ctx->dev->rdev.status_page->db_off = 0;
			}
			break;
		} else {
			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
			       DB_FC_DRAIN_THRESH)) {
				resume_a_chunk(ctx);
			}
			if (!list_empty(&ctx->dev->db_fc_list)) {
				spin_unlock_irq(&ctx->dev->lock);
				if (DB_FC_RESUME_DELAY) {
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(DB_FC_RESUME_DELAY);
				}
				spin_lock_irq(&ctx->dev->lock);
				if (ctx->dev->db_state != FLOW_CONTROL)
					break;
			}
		}
	}
out:
	if (ctx->dev->db_state != NORMAL)
		ctx->dev->rdev.stats.db_fc_interruptions++;
	spin_unlock_irq(&ctx->dev->lock);
}

struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};

static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}

static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	(*countp)++;
	return 0;
}

static void deref_qps(struct qp_list *qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list->idx; idx++)
		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}

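/*
 * After a doorbell drop, resync each QP's SQ and RQ producer index with
 * the hardware, then wait for the doorbell fifo to drain.
 */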
static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		spin_lock_irq(&qp->rhp->lock);
		spin_lock(&qp->lock);
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.sq.wq_pidx_inc = 0;

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));

		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.rq.wq_pidx_inc = 0;
		spin_unlock(&qp->lock);
		spin_unlock_irq(&qp->rhp->lock);

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}

static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		return;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != STOPPED);
	ctx->dev->db_state = RECOVERY;
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);

	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
	if (!qp_list.qps) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		spin_unlock_irq(&ctx->dev->lock);
		return;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);

	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state */
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done! deref the qps and clean up */
	deref_qps(&qp_list);
	kfree(qp_list.qps);

	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != RECOVERY);
	ctx->dev->db_state = STOPPED;
	spin_unlock_irq(&ctx->dev->lock);
}

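/* LLD control events drive the doorbell flow control state machine. */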
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
		       pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};

static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");

	if (ibnl_add_client(RDMA_NL_C4IW, RDMA_NL_IWPM_NUM_OPS,
			    c4iw_nl_cb_table))
		pr_err("%s[%u]: Failed to add netlink callback\n",
		       __func__, __LINE__);

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}

static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	ibnl_remove_client(RDMA_NL_C4IW);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);