/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");

int c4iw_wr_log = 0;
module_param(c4iw_wr_log, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");

static int c4iw_wr_log_size_order = 12;
module_param(c4iw_wr_log_size_order, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log_size_order,
		 "Number of entries (log2) in the work request timing log.");
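
/*
 * Illustrative usage (not part of the original source): the work request
 * timing log is enabled at module load time, e.g.:
 *
 *     modprobe iw_cxgb4 c4iw_wr_log=1 c4iw_wr_log_size_order=12
 *
 * Both parameters are read-only (0444) once loaded, so they cannot be
 * changed at runtime.
 */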
67
Vipul Pandya2c974782012-05-18 15:29:28 +053068struct uld_ctx {
69 struct list_head entry;
70 struct cxgb4_lld_info lldi;
71 struct c4iw_dev *dev;
72};
73
Steve Wise2f25e9a2011-05-09 22:06:23 -070074static LIST_HEAD(uld_ctx_list);
Steve Wisecfdda9d2010-04-21 15:30:06 -070075static DEFINE_MUTEX(dev_mutex);
76
Steve Wise05eb2382014-03-14 21:52:08 +053077#define DB_FC_RESUME_SIZE 64
78#define DB_FC_RESUME_DELAY 1
79#define DB_FC_DRAIN_THRESH 0
80
Steve Wisecfdda9d2010-04-21 15:30:06 -070081static struct dentry *c4iw_debugfs_root;
82
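/*
 * Snapshot buffer state shared by the debugfs "qps", "stags" and "eps"
 * files: the whole dump is rendered into buf at open() time and then
 * served to readers from there.
 */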
struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};

/* registered cxgb4 netlink callbacks */
static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
	[RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
};

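/* idr_for_each() callback: counts entries so open() can size the buffer. */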
static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

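/*
 * Record one entry in the work request timing log when a CQE is polled.
 * Each entry pairs the host timestamps with the SGE hardware timestamps
 * taken at post time and at poll time, for either an SQ or an RQ work
 * request depending on the CQE type.
 */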
void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
{
	struct wr_log_entry le;
	int idx;

	if (!wq->rdev->wr_log)
		return;

	idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
		(wq->rdev->wr_log_size - 1);
	le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
	getnstimeofday(&le.poll_host_ts);
	le.valid = 1;
	le.cqe_sge_ts = CQE_TS(cqe);
	if (SQ_TYPE(cqe)) {
		le.qid = wq->sq.qid;
		le.opcode = CQE_OPCODE(cqe);
		le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
		le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
		le.wr_id = CQE_WRID_SQ_IDX(cqe);
	} else {
		le.qid = wq->rq.qid;
		le.opcode = FW_RI_RECEIVE;
		le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
		le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
		le.wr_id = CQE_WRID_MSN(cqe);
	}
	wq->rdev->wr_log[idx] = le;
}

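/*
 * Render the timing log, oldest entry first.  ts2ns() scales an SGE
 * timestamp delta by the adapter core clock period (cclk_ps, in
 * picoseconds per tick), and the divide by 1000 converts to nanoseconds.
 */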
static int wr_log_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;
	struct timespec prev_ts = {0, 0};
	struct wr_log_entry *lep;
	int prev_ts_set = 0;
	int idx, end;

#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)

	idx = atomic_read(&dev->rdev.wr_log_idx) &
		(dev->rdev.wr_log_size - 1);
	end = idx - 1;
	if (end < 0)
		end = dev->rdev.wr_log_size - 1;
	lep = &dev->rdev.wr_log[idx];
	while (idx != end) {
		if (lep->valid) {
			if (!prev_ts_set) {
				prev_ts_set = 1;
				prev_ts = lep->poll_host_ts;
			}
			seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
				   "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
				   "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
				   "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
				   "cqe_poll_delta_ns %llu\n",
				   idx,
				   timespec_sub(lep->poll_host_ts,
						prev_ts).tv_sec,
				   timespec_sub(lep->poll_host_ts,
						prev_ts).tv_nsec,
				   lep->qid, lep->opcode,
				   lep->opcode == FW_RI_RECEIVE ?
							"msn" : "wrid",
				   lep->wr_id,
				   timespec_sub(lep->poll_host_ts,
						lep->post_host_ts).tv_sec,
				   timespec_sub(lep->poll_host_ts,
						lep->post_host_ts).tv_nsec,
				   lep->post_sge_ts, lep->cqe_sge_ts,
				   lep->poll_sge_ts,
				   ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
				   ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
			prev_ts = lep->poll_host_ts;
		}
		idx++;
		if (idx > (dev->rdev.wr_log_size - 1))
			idx = 0;
		lep = &dev->rdev.wr_log[idx];
	}
#undef ts2ns
	return 0;
}

static int wr_log_open(struct inode *inode, struct file *file)
{
	return single_open(file, wr_log_show, inode->i_private);
}

static ssize_t wr_log_clear(struct file *file, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
	int i;

	if (dev->rdev.wr_log)
		for (i = 0; i < dev->rdev.wr_log_size; i++)
			dev->rdev.wr_log[i].valid = 0;
	return count;
}

static const struct file_operations wr_log_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = wr_log_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = wr_log_clear,
};

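/*
 * idr_for_each() callback for the "qps" dump: formats one RC QP,
 * including endpoint addressing (and the iwpm-mapped ports) when the QP
 * has a connected endpoint.
 */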
static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		if (qp->ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin = (struct sockaddr_in *)
				&qp->ep->com.local_addr;
			struct sockaddr_in *rsin = (struct sockaddr_in *)
				&qp->ep->com.remote_addr;
			struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
				&qp->ep->com.mapped_local_addr;
			struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
				&qp->ep->com.mapped_remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u/%u->%pI4:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      ntohs(mapped_lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port),
				      ntohs(mapped_rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.local_addr;
			struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.remote_addr;
			struct sockaddr_in6 *mapped_lsin6 =
				(struct sockaddr_in6 *)
				&qp->ep->com.mapped_local_addr;
			struct sockaddr_in6 *mapped_rsin6 =
				(struct sockaddr_in6 *)
				&qp->ep->com.mapped_remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u/%u->%pI6:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      ntohs(mapped_lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port),
				      ntohs(mapped_rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;

	if (!qpd) {
		printk(KERN_INFO "%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}

static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd)
		return -ENOMEM;

	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 128;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		kfree(qpd);
		return -ENOMEM;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	return 0;
}

static const struct file_operations qp_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = qp_open,
	.release = qp_release,
	.read = debugfs_read,
	.llseek = default_llseek,
};

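/*
 * idr_for_each() callback for the "stags" dump: reads the hardware TPT
 * entry for each registered STAG via cxgb4_read_tpte() and decodes it.
 */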
static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;
	struct fw_ri_tpte tpte;
	int ret;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8,
			      (__be32 *)&tpte);
	if (ret) {
		dev_err(&stagd->devp->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return ret;
	}
	cc = snprintf(stagd->buf + stagd->pos, space,
		      "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
		      "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
		      (u32)id<<8,
		      FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
		      FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
		      ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
		      ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
	if (cc < space)
		stagd->pos += cc;
	return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;

	if (!stagd) {
		printk(KERN_INFO "%s null stagd?\n", __func__);
		return 0;
	}
	vfree(stagd->buf);
	kfree(stagd);
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * 256;
	stagd->buf = vmalloc(stagd->bufsize);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}

static const struct file_operations stag_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = stag_open,
	.release = stag_release,
	.read = debugfs_read,
	.llseek = default_llseek,
};

static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};

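/*
 * "stats" debugfs file: resource usage counters (total/current/max/fail)
 * plus doorbell state and connection failure counters.  Writing to the
 * file clears the max/fail counters (see stats_clear() below).
 */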
static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, "   Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
			dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
			dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
			dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
			dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
			dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
			dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions,
		   dev->rdev.stats.db_fc_interruptions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
	return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}

static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}

static const struct file_operations stats_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = stats_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = stats_clear,
};

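/*
 * idr_for_each() callback for the "eps" dump: formats one active
 * endpoint, including negative advice counters and both the local and
 * iwpm-mapped port numbers.
 */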
static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *rsin = (struct sockaddr_in *)
			&ep->com.remote_addr;
		struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
			&ep->com.mapped_local_addr;
		struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
			&ep->com.mapped_remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(mapped_lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port),
			      ntohs(mapped_rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
			&ep->com.remote_addr;
		struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_local_addr;
		struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(mapped_lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
			      ntohs(mapped_rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
			&ep->com.mapped_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(mapped_lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(mapped_lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;

	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 240;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}

static const struct file_operations ep_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = ep_open,
	.release = ep_release,
	.read = debugfs_read,
};

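/*
 * Create the per-device debugfs files.  The "wr_log" file is only
 * created when work request timing logging is enabled.
 */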
static int setup_debugfs(struct c4iw_dev *devp)
{
	if (!devp->debugfs_root)
		return -1;

	debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops, 4096);

	debugfs_create_file_size("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops, 4096);

	debugfs_create_file_size("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops, 4096);

	debugfs_create_file_size("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops, 4096);

	if (c4iw_wr_log)
		debugfs_create_file_size("wr_log", S_IWUSR, devp->debugfs_root,
					 (void *)devp, &wr_log_debugfs_fops,
					 4096);
	return 0;
}

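/*
 * Release the qids a user context still holds.  Qids are handed out in
 * blocks of qpmask + 1, so only the block base (the entry with no mask
 * bits set) is returned to the resource table.
 */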
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}

/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * This implementation assumes udb_density == ucq_density!  Eventually
	 * we might need to support this but for now fail the open.  Also the
	 * cqid and qpid range must match for now.
	 */
	if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
		pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
		       rdev->lldi.ucq_density);
		return -EINVAL;
	}
	if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
	    rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
		pr_err(MOD "%s: unsupported qp and cq id ranges "
		       "qp start %u size %u cq start %u size %u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
		       rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start,
		       rdev->lldi.vr->cq.size);
		return -EINVAL;
	}

	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqmask = rdev->lldi.ucq_density - 1;
	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb %pR db_reg %p gts_reg %p "
	     "qpmask 0x%x cqmask 0x%x\n",
	     &rdev->lldi.pdev->resource[2],
	     rdev->lldi.db_reg, rdev->lldi.gts_reg,
	     rdev->qpmask, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0)
		return -EINVAL;

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing resources\n", err);
		return err;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
		goto destroy_resource;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
		goto destroy_pblpool;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
		goto destroy_rqtpool;
	}
	rdev->status_page = (struct t4_dev_status_page *)
			    __get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		err = -ENOMEM;
		goto destroy_ocqp_pool;
	}
	rdev->status_page->qp_start = rdev->lldi.vr->qp.start;
	rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
	rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
	rdev->status_page->cq_size = rdev->lldi.vr->cq.size;

	if (c4iw_wr_log) {
		rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) *
				       sizeof(*rdev->wr_log), GFP_KERNEL);
		if (rdev->wr_log) {
			rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
			atomic_set(&rdev->wr_log_idx, 0);
		} else {
			pr_err(MOD "error allocating wr_log. Logging disabled\n");
		}
	}

	rdev->status_page->db_off = 0;

	return 0;
destroy_ocqp_pool:
	c4iw_ocqp_pool_destroy(rdev);
destroy_rqtpool:
	c4iw_rqtpool_destroy(rdev);
destroy_pblpool:
	c4iw_pblpool_destroy(rdev);
destroy_resource:
	c4iw_destroy_resource(&rdev->resource);
	return err;
}

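/* Tear down everything allocated by c4iw_rdev_open(). */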
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	kfree(rdev->wr_log);
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}

static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	idr_destroy(&ctx->dev->cqidr);
	idr_destroy(&ctx->dev->qpidr);
	idr_destroy(&ctx->dev->mmidr);
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	if (ctx->dev->rdev.bar2_kva)
		iounmap(ctx->dev->rdev.bar2_kva);
	if (ctx->dev->rdev.oc_mw_kva)
		iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}

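/*
 * Allocate and initialize a c4iw_dev for a newly probed adapter.  On
 * T5/T6 all of BAR2 is mapped write-combined; on T4 only the on-chip
 * queue region at the end of BAR2 is mapped, and only when the adapter
 * supports on-chip queues.
 */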
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
		       pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device.\n",
			pci_name(infop->pdev));

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	/* init various hw-queue params based on lld info */
	PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
	     __func__, devp->rdev.lldi.sge_ingpadboundary,
	     devp->rdev.lldi.sge_egrstatuspagesize);

	devp->rdev.hw_queue.t4_eq_status_entries =
		devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
	devp->rdev.hw_queue.t4_max_eq_size = 65520;
	devp->rdev.hw_queue.t4_max_iq_size = 65520;
	devp->rdev.hw_queue.t4_max_rq_size = 8192 -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_sq_size =
		devp->rdev.hw_queue.t4_max_eq_size -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_qp_depth =
		devp->rdev.hw_queue.t4_max_rq_size;
	devp->rdev.hw_queue.t4_max_cq_depth =
		devp->rdev.hw_queue.t4_max_iq_size - 2;
	devp->rdev.hw_queue.t4_stat_len =
		devp->rdev.lldi.sge_egrstatuspagesize;

	/*
	 * For T5/T6 devices, we map all of BAR2 with WC.
	 * For T4 devices with onchip qp mem, we map only that part
	 * of BAR2 with WC.
	 */
	devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
	if (!is_t4(devp->rdev.lldi.adapter_type)) {
		devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
			pci_resource_len(devp->rdev.lldi.pdev, 2));
		if (!devp->rdev.bar2_kva) {
			pr_err(MOD "Unable to ioremap BAR2\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	} else if (ocqp_supported(infop)) {
		devp->rdev.oc_mw_pa =
			pci_resource_start(devp->rdev.lldi.pdev, 2) +
			pci_resource_len(devp->rdev.lldi.pdev, 2) -
			roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
		devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
			devp->rdev.lldi.vr->ocq.size);
		if (!devp->rdev.oc_mw_kva) {
			pr_err(MOD "Unable to ioremap onchip mem\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	}

	PDBG(KERN_INFO MOD "ocq memory: "
	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);
	INIT_LIST_HEAD(&devp->db_fc_list);
	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}

	return devp;
}

static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq,
	     ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}

static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *     rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}

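/*
 * Intercept raw CPL_RX_PKT ingress messages: copy the packet into an skb
 * sized for the cpl_pass_accept_req that will be synthesized from it (see
 * copy_gl_to_skb_pkt() above) and dispatch it to the registered handler.
 * Returns 1 if the message was consumed here, 0 if normal processing
 * should continue.
 */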
static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}

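/*
 * ULD rx handler.  Three delivery forms are handled: a bare response
 * descriptor (no gather list), CXGB4_MSG_AN for async notifications
 * (dispatched to the event handler by qid), and a real gather list,
 * which is either an intercepted CPL_RX_PKT or a normal CPL copied into
 * an skb and dispatched by opcode.
 */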
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, "
			"RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			(unsigned long long)be64_to_cpu(*rsp),
			(unsigned long long)be64_to_cpu(
				*(__force __be64 *)gl->va),
			gl->tot_len);

		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode]) {
		c4iw_handlers[opcode](dev, skb);
	} else {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
	}

	return 0;
nomem:
	return -1;
}

static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				printk(KERN_ERR MOD
				       "%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}

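/*
 * Doorbell overflow avoidance: when the LLD reports that the doorbell
 * FIFO is filling, user queue doorbells are disabled (or, when a status
 * page is in use, user libraries are told to stop ringing them) until
 * the FIFO drains.
 */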
static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

static void stop_queues(struct uld_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->lock, flags);
	ctx->dev->rdev.stats.db_state_transitions++;
	ctx->dev->db_state = STOPPED;
	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	else
		ctx->dev->rdev.status_page->db_off = 1;
	spin_unlock_irqrestore(&ctx->dev->lock, flags);
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

static void resume_rc_qp(struct c4iw_qp *qp)
{
	spin_lock(&qp->lock);
	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL);
	qp->wq.sq.wq_pidx_inc = 0;
	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL);
	qp->wq.rq.wq_pidx_inc = 0;
	spin_unlock(&qp->lock);
}

static void resume_a_chunk(struct uld_ctx *ctx)
{
	int i;
	struct c4iw_qp *qp;

	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
				      db_fc_entry);
		list_del_init(&qp->db_fc_entry);
		resume_rc_qp(qp);
		if (list_empty(&ctx->dev->db_fc_list))
			break;
	}
}

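/*
 * Leave the STOPPED state: drain the db_fc_list in chunks of
 * DB_FC_RESUME_SIZE QPs, pausing whenever the doorbell FIFO is above
 * the drain threshold, then re-enable doorbells and return to NORMAL.
 */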
static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state != STOPPED)
		goto out;
	ctx->dev->db_state = FLOW_CONTROL;
	while (1) {
		if (list_empty(&ctx->dev->db_fc_list)) {
			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
			ctx->dev->db_state = NORMAL;
			ctx->dev->rdev.stats.db_state_transitions++;
			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
					     NULL);
			} else {
				ctx->dev->rdev.status_page->db_off = 0;
			}
			break;
		} else {
			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
			       DB_FC_DRAIN_THRESH)) {
				resume_a_chunk(ctx);
			}
			if (!list_empty(&ctx->dev->db_fc_list)) {
				spin_unlock_irq(&ctx->dev->lock);
				if (DB_FC_RESUME_DELAY) {
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(DB_FC_RESUME_DELAY);
				}
				spin_lock_irq(&ctx->dev->lock);
				if (ctx->dev->db_state != FLOW_CONTROL)
					break;
			}
		}
	}
out:
	if (ctx->dev->db_state != NORMAL)
		ctx->dev->rdev.stats.db_fc_interruptions++;
	spin_unlock_irq(&ctx->dev->lock);
}

struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};

static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}

static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	(*countp)++;
	return 0;
}

static void deref_qps(struct qp_list *qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list->idx; idx++)
		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}

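/*
 * After a doorbell drop, resynchronize the hardware producer index of
 * every SQ and RQ with the host's copy, then wait for the doorbell FIFO
 * to drain before moving on to the next QP.
 */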
static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		spin_lock_irq(&qp->rhp->lock);
		spin_lock(&qp->lock);
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			pr_err(MOD "%s: Fatal error - DB overflow recovery failed - error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.sq.wq_pidx_inc = 0;

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));
		if (ret) {
			pr_err(MOD "%s: Fatal error - DB overflow recovery failed - error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.rq.wq_pidx_inc = 0;
		spin_unlock(&qp->lock);
		spin_unlock_irq(&qp->rhp->lock);

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}

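/*
 * Full doorbell-drop recovery: flush the SGE EQ cache, snapshot and
 * reference every QP under the device lock, resync their doorbells in
 * a sleepable context, and leave the device STOPPED so that
 * resume_queues() can restart it on the next DB_EMPTY notification.
 */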
static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		pr_err(MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		return;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != STOPPED);
	ctx->dev->db_state = RECOVERY;
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);

	qp_list.qps = kzalloc(count * sizeof(*qp_list.qps), GFP_ATOMIC);
	if (!qp_list.qps) {
		pr_err(MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		spin_unlock_irq(&ctx->dev->lock);
		return;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);

	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state */
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done!  deref the qps and clean up */
	deref_qps(&qp_list);
	kfree(qp_list.qps);

	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != RECOVERY);
	ctx->dev->db_state = STOPPED;
	spin_unlock_irq(&ctx->dev->lock);
}

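/*
 * Control hook called by cxgb4 when the doorbell FIFO becomes full,
 * empties, or drops doorbells; drives the stop/resume/recover logic
 * above and updates the corresponding statistics.
 */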
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		pr_warn(MOD "%s: unknown control cmd %u\n",
			pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

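/* Upper-layer driver hooks registered with the cxgb4 LLD. */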
static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};

static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		pr_warn(MOD "could not create debugfs entry, continuing\n");

	if (ibnl_add_client(RDMA_NL_C4IW, RDMA_NL_IWPM_NUM_OPS,
			    c4iw_nl_cb_table))
		pr_err("%s[%u]: Failed to add netlink callback\n",
		       __func__, __LINE__);

	err = iwpm_init(RDMA_NL_C4IW);
	if (err) {
		pr_err("port mapper initialization failed with %d\n", err);
		ibnl_remove_client(RDMA_NL_C4IW);
		c4iw_cm_term();
		debugfs_remove_recursive(c4iw_debugfs_root);
		return err;
	}

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}

static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	iwpm_exit(RDMA_NL_C4IW);
	ibnl_remove_client(RDMA_NL_C4IW);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);