/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/math64.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static int allow_db_fc_on_t5;
module_param(allow_db_fc_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_fc_on_t5,
		 "Allow DB Flow Control on T5 (default = 0)");

static int allow_db_coalescing_on_t5;
module_param(allow_db_coalescing_on_t5, int, 0644);
MODULE_PARM_DESC(allow_db_coalescing_on_t5,
		 "Allow DB Coalescing on T5 (default = 0)");

int c4iw_wr_log = 0;
module_param(c4iw_wr_log, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");

static int c4iw_wr_log_size_order = 12;
module_param(c4iw_wr_log_size_order, int, 0444);
MODULE_PARM_DESC(c4iw_wr_log_size_order,
		 "Number of entries (log2) in the work request timing log.");

struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
};

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

#define DB_FC_RESUME_SIZE 64
#define DB_FC_RESUME_DELAY 1
#define DB_FC_DRAIN_THRESH 0

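/*
 * Doorbell flow-control tuning, used by resume_queues()/resume_a_chunk()
 * below: at most DB_FC_RESUME_SIZE QPs are resumed per pass,
 * DB_FC_RESUME_DELAY is the wait (in jiffies) between passes, and
 * DB_FC_DRAIN_THRESH is the shift applied to the SGE doorbell FIFO
 * interrupt threshold that gates when more queues may be resumed.
 */
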
static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};

/* registered cxgb4 netlink callbacks */
static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
	[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
	[RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
	[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
	[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
};

static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
{
	struct wr_log_entry le;
	int idx;

	if (!wq->rdev->wr_log)
		return;

	idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
		(wq->rdev->wr_log_size - 1);
	le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
	getnstimeofday(&le.poll_host_ts);
	le.valid = 1;
	le.cqe_sge_ts = CQE_TS(cqe);
	if (SQ_TYPE(cqe)) {
		le.qid = wq->sq.qid;
		le.opcode = CQE_OPCODE(cqe);
		le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
		le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
		le.wr_id = CQE_WRID_SQ_IDX(cqe);
	} else {
		le.qid = wq->rq.qid;
		le.opcode = FW_RI_RECEIVE;
		le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
		le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
		le.wr_id = CQE_WRID_MSN(cqe);
	}
	wq->rdev->wr_log[idx] = le;
}

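/*
 * The wr_log is a power-of-two ring of 1 << c4iw_wr_log_size_order entries.
 * c4iw_log_wr_stats() above claims a slot with an atomic increment and a
 * mask, so concurrent pollers never index past the end of the array.
 * wr_log_show() walks the ring oldest-to-newest starting at the current
 * index and prints host and SGE timestamp deltas for each valid entry.
 */
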
static int wr_log_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;
	struct timespec prev_ts = {0, 0};
	struct wr_log_entry *lep;
	int prev_ts_set = 0;
	int idx, end;

#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)

	idx = atomic_read(&dev->rdev.wr_log_idx) &
		(dev->rdev.wr_log_size - 1);
	end = idx - 1;
	if (end < 0)
		end = dev->rdev.wr_log_size - 1;
	lep = &dev->rdev.wr_log[idx];
	while (idx != end) {
		if (lep->valid) {
			if (!prev_ts_set) {
				prev_ts_set = 1;
				prev_ts = lep->poll_host_ts;
			}
			seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
				   "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
				   "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
				   "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
				   "cqe_poll_delta_ns %llu\n",
				   idx,
				   timespec_sub(lep->poll_host_ts,
						prev_ts).tv_sec,
				   timespec_sub(lep->poll_host_ts,
						prev_ts).tv_nsec,
				   lep->qid, lep->opcode,
				   lep->opcode == FW_RI_RECEIVE ?
							"msn" : "wrid",
				   lep->wr_id,
				   timespec_sub(lep->poll_host_ts,
						lep->post_host_ts).tv_sec,
				   timespec_sub(lep->poll_host_ts,
						lep->post_host_ts).tv_nsec,
				   lep->post_sge_ts, lep->cqe_sge_ts,
				   lep->poll_sge_ts,
				   ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
				   ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
			prev_ts = lep->poll_host_ts;
		}
		idx++;
		if (idx > (dev->rdev.wr_log_size - 1))
			idx = 0;
		lep = &dev->rdev.wr_log[idx];
	}
#undef ts2ns
	return 0;
}

static int wr_log_open(struct inode *inode, struct file *file)
{
	return single_open(file, wr_log_show, inode->i_private);
}

static ssize_t wr_log_clear(struct file *file, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
	int i;

	if (dev->rdev.wr_log)
		for (i = 0; i < dev->rdev.wr_log_size; i++)
			dev->rdev.wr_log[i].valid = 0;
	return count;
}

static const struct file_operations wr_log_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = wr_log_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = wr_log_clear,
};

static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep) {
		if (qp->ep->com.local_addr.ss_family == AF_INET) {
			struct sockaddr_in *lsin = (struct sockaddr_in *)
				&qp->ep->com.local_addr;
			struct sockaddr_in *rsin = (struct sockaddr_in *)
				&qp->ep->com.remote_addr;
			struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
				&qp->ep->com.mapped_local_addr;
			struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
				&qp->ep->com.mapped_remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI4:%u/%u->%pI4:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin->sin_addr, ntohs(lsin->sin_port),
				      ntohs(mapped_lsin->sin_port),
				      &rsin->sin_addr, ntohs(rsin->sin_port),
				      ntohs(mapped_rsin->sin_port));
		} else {
			struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.local_addr;
			struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
				&qp->ep->com.remote_addr;
			struct sockaddr_in6 *mapped_lsin6 =
				(struct sockaddr_in6 *)
				&qp->ep->com.mapped_local_addr;
			struct sockaddr_in6 *mapped_rsin6 =
				(struct sockaddr_in6 *)
				&qp->ep->com.mapped_remote_addr;

			cc = snprintf(qpd->buf + qpd->pos, space,
				      "rc qp sq id %u rq id %u state %u "
				      "onchip %u ep tid %u state %u "
				      "%pI6:%u/%u->%pI6:%u/%u\n",
				      qp->wq.sq.qid, qp->wq.rq.qid,
				      (int)qp->attr.state,
				      qp->wq.sq.flags & T4_SQ_ONCHIP,
				      qp->ep->hwtid, (int)qp->ep->com.state,
				      &lsin6->sin6_addr,
				      ntohs(lsin6->sin6_port),
				      ntohs(mapped_lsin6->sin6_port),
				      &rsin6->sin6_addr,
				      ntohs(rsin6->sin6_port),
				      ntohs(mapped_rsin6->sin6_port));
		}
	} else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;

	if (!qpd) {
		printk(KERN_INFO "%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}

static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int ret = 0;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd) {
		ret = -ENOMEM;
		goto out;
	}
	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 128;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	goto out;
err1:
	kfree(qpd);
out:
	return ret;
}

static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

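/*
 * The "qps", "stags" and "eps" debugfs files below all follow the same
 * pattern: on open, count the idr entries under the device lock, vmalloc a
 * buffer sized from that count, then walk the idr again and snprintf one
 * line per object into the buffer.  Reads are then served straight from the
 * buffer by debugfs_read().
 */
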
static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;
	struct fw_ri_tpte tpte;
	int ret;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8,
			      (__be32 *)&tpte);
	if (ret) {
		dev_err(&stagd->devp->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return ret;
	}
	cc = snprintf(stagd->buf + stagd->pos, space,
		      "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
		      "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
		      (u32)id<<8,
		      FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
		      FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
		      FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
		      ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
		      ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
	if (cc < space)
		stagd->pos += cc;
	return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;

	if (!stagd) {
		printk(KERN_INFO "%s null stagd?\n", __func__);
		return 0;
	}
	vfree(stagd->buf);
	kfree(stagd);
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * 256;
	stagd->buf = vmalloc(stagd->bufsize);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}

static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};

static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, "   Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
			dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
			dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
			dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
			dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
			dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
			dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
			dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, "  DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, "  DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions,
		   dev->rdev.stats.db_fc_interruptions);
	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.act_ofld_conn_fails);
	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
		   dev->rdev.stats.pas_ofld_conn_fails);
	seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
	seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
	return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}

static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	dev->rdev.stats.tcam_full = 0;
	dev->rdev.stats.act_ofld_conn_fails = 0;
	dev->rdev.stats.pas_ofld_conn_fails = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}

static const struct file_operations stats_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stats_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = stats_clear,
};

static int dump_ep(int id, void *p, void *data)
{
	struct c4iw_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *rsin = (struct sockaddr_in *)
			&ep->com.remote_addr;
		struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
			&ep->com.mapped_local_addr;
		struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
			&ep->com.mapped_remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI4:%d/%d <-> %pI4:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(mapped_lsin->sin_port),
			      &rsin->sin_addr, ntohs(rsin->sin_port),
			      ntohs(mapped_rsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
			&ep->com.remote_addr;
		struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_local_addr;
		struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_remote_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p qp %p state %d flags 0x%lx "
			      "history 0x%lx hwtid %d atid %d "
			      "conn_na %u abort_na %u "
			      "%pI6:%d/%d <-> %pI6:%d/%d\n",
			      ep, ep->com.cm_id, ep->com.qp,
			      (int)ep->com.state, ep->com.flags,
			      ep->com.history, ep->hwtid, ep->atid,
			      ep->stats.connect_neg_adv,
			      ep->stats.abort_neg_adv,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(mapped_lsin6->sin6_port),
			      &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
			      ntohs(mapped_rsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int dump_listen_ep(int id, void *p, void *data)
{
	struct c4iw_listen_ep *ep = p;
	struct c4iw_debugfs_data *epd = data;
	int space;
	int cc;

	space = epd->bufsize - epd->pos - 1;
	if (space == 0)
		return 1;

	if (ep->com.local_addr.ss_family == AF_INET) {
		struct sockaddr_in *lsin = (struct sockaddr_in *)
			&ep->com.local_addr;
		struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
			&ep->com.mapped_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI4:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin->sin_addr, ntohs(lsin->sin_port),
			      ntohs(mapped_lsin->sin_port));
	} else {
		struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
			&ep->com.local_addr;
		struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
			&ep->com.mapped_local_addr;

		cc = snprintf(epd->buf + epd->pos, space,
			      "ep %p cm_id %p state %d flags 0x%lx stid %d "
			      "backlog %d %pI6:%d/%d\n",
			      ep, ep->com.cm_id, (int)ep->com.state,
			      ep->com.flags, ep->stid, ep->backlog,
			      &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
			      ntohs(mapped_lsin6->sin6_port));
	}
	if (cc < space)
		epd->pos += cc;
	return 0;
}

static int ep_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd = file->private_data;

	if (!epd) {
		pr_info("%s null epd?\n", __func__);
		return 0;
	}
	vfree(epd->buf);
	kfree(epd);
	return 0;
}

static int ep_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *epd;
	int ret = 0;
	int count = 1;

	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
	if (!epd) {
		ret = -ENOMEM;
		goto out;
	}
	epd->devp = inode->i_private;
	epd->pos = 0;

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
	spin_unlock_irq(&epd->devp->lock);

	epd->bufsize = count * 240;
	epd->buf = vmalloc(epd->bufsize);
	if (!epd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&epd->devp->lock);
	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
	spin_unlock_irq(&epd->devp->lock);

	file->private_data = epd;
	goto out;
err1:
	kfree(epd);
out:
	return ret;
}

static const struct file_operations ep_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = ep_open,
	.release = ep_release,
	.read    = debugfs_read,
};

static int setup_debugfs(struct c4iw_dev *devp)
{
	if (!devp->debugfs_root)
		return -1;

	debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops, 4096);

	debugfs_create_file_size("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops, 4096);

	debugfs_create_file_size("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops, 4096);

	debugfs_create_file_size("eps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &ep_debugfs_fops, 4096);

	if (c4iw_wr_log)
		debugfs_create_file_size("wr_log", S_IWUSR, devp->debugfs_root,
					 (void *)devp, &wr_log_debugfs_fops,
					 4096);
	return 0;
}

void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	/* free the cached cqids as well, not the qpid list a second time */
	list_for_each_safe(pos, nxt, &uctx->cqids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}

/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * This implementation assumes udb_density == ucq_density!  Eventually
	 * we might need to support unequal densities, but for now fail the
	 * open.  Also the cqid and qpid ranges must match for now.
	 */
	if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
		pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
		       rdev->lldi.ucq_density);
		err = -EINVAL;
		goto err1;
	}
	if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
	    rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
		pr_err(MOD "%s: unsupported qp and cq id ranges "
		       "qp start %u size %u cq start %u size %u\n",
		       pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
		       rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start,
		       rdev->lldi.vr->cq.size);
		err = -EINVAL;
		goto err1;
	}

	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqmask = rdev->lldi.ucq_density - 1;
	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p "
	     "qpmask 0x%x cqmask 0x%x\n",
	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
	     (void *)pci_resource_start(rdev->lldi.pdev, 2),
	     rdev->lldi.db_reg, rdev->lldi.gts_reg,
	     rdev->qpmask, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0) {
		err = -EINVAL;
		goto err1;
	}

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing resources\n", err);
		goto err1;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
		goto err2;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
		goto err3;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
		goto err4;
	}
	rdev->status_page = (struct t4_dev_status_page *)
			    __get_free_page(GFP_KERNEL);
	if (!rdev->status_page) {
		pr_err(MOD "error allocating status page\n");
		err = -ENOMEM;
		goto err4;
	}
	rdev->status_page->qp_start = rdev->lldi.vr->qp.start;
	rdev->status_page->qp_size = rdev->lldi.vr->qp.size;
	rdev->status_page->cq_start = rdev->lldi.vr->cq.start;
	rdev->status_page->cq_size = rdev->lldi.vr->cq.size;

	if (c4iw_wr_log) {
		rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) *
				       sizeof(*rdev->wr_log), GFP_KERNEL);
		if (rdev->wr_log) {
			rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
			atomic_set(&rdev->wr_log_idx, 0);
		} else {
			pr_err(MOD "error allocating wr_log. Logging disabled\n");
		}
	}

	rdev->status_page->db_off = 0;

	return 0;
err4:
	c4iw_rqtpool_destroy(rdev);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return err;
}

static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	kfree(rdev->wr_log);
	free_page((unsigned long)rdev->status_page);
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}

static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	idr_destroy(&ctx->dev->cqidr);
	idr_destroy(&ctx->dev->qpidr);
	idr_destroy(&ctx->dev->mmidr);
	idr_destroy(&ctx->dev->hwtid_idr);
	idr_destroy(&ctx->dev->stid_idr);
	idr_destroy(&ctx->dev->atid_idr);
	if (ctx->dev->rdev.bar2_kva)
		iounmap(ctx->dev->rdev.bar2_kva);
	if (ctx->dev->rdev.oc_mw_kva)
		iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0;
}

static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
		       pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	if (!ocqp_supported(infop))
		pr_info("%s: On-Chip Queues not supported on this device.\n",
			pci_name(infop->pdev));

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	/* init various hw-queue params based on lld info */
	PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
	     __func__, devp->rdev.lldi.sge_ingpadboundary,
	     devp->rdev.lldi.sge_egrstatuspagesize);

	devp->rdev.hw_queue.t4_eq_status_entries =
		devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
	devp->rdev.hw_queue.t4_max_eq_size = 65520;
	devp->rdev.hw_queue.t4_max_iq_size = 65520;
	devp->rdev.hw_queue.t4_max_rq_size = 8192 -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_sq_size =
		devp->rdev.hw_queue.t4_max_eq_size -
		devp->rdev.hw_queue.t4_eq_status_entries - 1;
	devp->rdev.hw_queue.t4_max_qp_depth =
		devp->rdev.hw_queue.t4_max_rq_size;
	devp->rdev.hw_queue.t4_max_cq_depth =
		devp->rdev.hw_queue.t4_max_iq_size - 2;
	devp->rdev.hw_queue.t4_stat_len =
		devp->rdev.lldi.sge_egrstatuspagesize;

	/*
	 * For T5/T6 devices, we map all of BAR2 with WC.
	 * For T4 devices with onchip qp mem, we map only that part
	 * of BAR2 with WC.
	 */
	devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
	if (!is_t4(devp->rdev.lldi.adapter_type)) {
		devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
			pci_resource_len(devp->rdev.lldi.pdev, 2));
		if (!devp->rdev.bar2_kva) {
			pr_err(MOD "Unable to ioremap BAR2\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	} else if (ocqp_supported(infop)) {
		devp->rdev.oc_mw_pa =
			pci_resource_start(devp->rdev.lldi.pdev, 2) +
			pci_resource_len(devp->rdev.lldi.pdev, 2) -
			roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
		devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
			devp->rdev.lldi.vr->ocq.size);
		if (!devp->rdev.oc_mw_kva) {
			pr_err(MOD "Unable to ioremap onchip mem\n");
			ib_dealloc_device(&devp->ibdev);
			return ERR_PTR(-EINVAL);
		}
	}

	PDBG(KERN_INFO MOD "ocq memory: "
	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	idr_init(&devp->hwtid_idr);
	idr_init(&devp->stid_idr);
	idr_init(&devp->atid_idr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);
	INIT_LIST_HEAD(&devp->db_fc_list);
	devp->avail_ird = devp->rdev.lldi.max_ird_adapter;

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}

	return devp;
}

static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
			DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq,
	     ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}

static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
						 const __be64 *rsp,
						 u32 pktshift)
{
	struct sk_buff *skb;

	/*
	 * Allocate space for cpl_pass_accept_req which will be synthesized by
	 * driver. Once the driver synthesizes the request the skb will go
	 * through the regular cpl_pass_accept_req processing.
	 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
	 * cpl_rx_pkt.
	 */
	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
			sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
		  sizeof(struct rss_header) - pktshift);

	/*
	 * This skb will contain:
	 *   rss_header from the rspq descriptor (1 flit)
	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits)
	 *   space for the difference between the size of an
	 *     rx_pkt and pass_accept_req cpl (1 flit)
	 *   the packet data from the gl
	 */
	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
				sizeof(struct rss_header));
	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
				       sizeof(struct cpl_pass_accept_req),
				       gl->va + pktshift,
				       gl->tot_len - pktshift);
	return skb;
}

static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
			      const __be64 *rsp)
{
	unsigned int opcode = *(u8 *)rsp;
	struct sk_buff *skb;

	if (opcode != CPL_RX_PKT)
		goto out;

	skb = copy_gl_to_skb_pkt(gl, rsp, dev->rdev.lldi.sge_pktshift);
	if (skb == NULL)
		goto out;

	if (c4iw_handlers[opcode] == NULL) {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
		goto out;
	}
	c4iw_handlers[opcode](dev, skb);
	return 1;
out:
	return 0;
}

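/*
 * c4iw_uld_rx_handler() below is the LLD ingress hook.  It handles three
 * delivery forms: a response with no gather list (the message is copied out
 * of the descriptor itself), CXGB4_MSG_AN (an asynchronous notification that
 * only carries a CQ id to service), and a normal gather list that is
 * converted to an skb.  In every case the CPL opcode in the first byte of
 * the response indexes the c4iw_handlers[] dispatch table.
 */
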
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	u8 opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
		if (recv_rx_pkt(dev, gl, rsp))
			return 0;

		pr_info("%s: unexpected FL contents at %p, " \
			"RSS %#llx, FL %#llx, len %u\n",
			pci_name(ctx->lldi.pdev), gl->va,
			(unsigned long long)be64_to_cpu(*rsp),
			(unsigned long long)be64_to_cpu(
				*(__force __be64 *)gl->va),
			gl->tot_len);

		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	opcode = *(u8 *)rsp;
	if (c4iw_handlers[opcode]) {
		c4iw_handlers[opcode](dev, skb);
	} else {
		pr_info("%s no handler opcode 0x%x...\n", __func__,
			opcode);
		kfree_skb(skb);
	}

	return 0;
nomem:
	return -1;
}

static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				printk(KERN_ERR MOD
				       "%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}

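/*
 * Doorbell flow-control state machine.  The device starts in NORMAL.  A
 * CXGB4_CONTROL_DB_FULL event moves it to STOPPED (stop_queues()); a
 * CXGB4_CONTROL_DB_EMPTY event resumes QPs in chunks through FLOW_CONTROL
 * back to NORMAL (resume_queues()); a CXGB4_CONTROL_DB_DROP event enters
 * RECOVERY (recover_queues()) to resync every QP's host and hardware
 * producer indices before doorbells are re-enabled.
 */
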
static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

static void stop_queues(struct uld_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->dev->lock, flags);
	ctx->dev->rdev.stats.db_state_transitions++;
	ctx->dev->db_state = STOPPED;
	if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	else
		ctx->dev->rdev.status_page->db_off = 1;
	spin_unlock_irqrestore(&ctx->dev->lock, flags);
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

static void resume_rc_qp(struct c4iw_qp *qp)
{
	spin_lock(&qp->lock);
	t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL);
	qp->wq.sq.wq_pidx_inc = 0;
	t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL);
	qp->wq.rq.wq_pidx_inc = 0;
	spin_unlock(&qp->lock);
}

static void resume_a_chunk(struct uld_ctx *ctx)
{
	int i;
	struct c4iw_qp *qp;

	for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
		qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
				      db_fc_entry);
		list_del_init(&qp->db_fc_entry);
		resume_rc_qp(qp);
		if (list_empty(&ctx->dev->db_fc_list))
			break;
	}
}

static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state != STOPPED)
		goto out;
	ctx->dev->db_state = FLOW_CONTROL;
	while (1) {
		if (list_empty(&ctx->dev->db_fc_list)) {
			WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
			ctx->dev->db_state = NORMAL;
			ctx->dev->rdev.stats.db_state_transitions++;
			if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
				idr_for_each(&ctx->dev->qpidr, enable_qp_db,
					     NULL);
			} else {
				ctx->dev->rdev.status_page->db_off = 0;
			}
			break;
		} else {
			if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
			    < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
			       DB_FC_DRAIN_THRESH)) {
				resume_a_chunk(ctx);
			}
			if (!list_empty(&ctx->dev->db_fc_list)) {
				spin_unlock_irq(&ctx->dev->lock);
				if (DB_FC_RESUME_DELAY) {
					set_current_state(TASK_UNINTERRUPTIBLE);
					schedule_timeout(DB_FC_RESUME_DELAY);
				}
				spin_lock_irq(&ctx->dev->lock);
				if (ctx->dev->db_state != FLOW_CONTROL)
					break;
			}
		}
	}
out:
	if (ctx->dev->db_state != NORMAL)
		ctx->dev->rdev.stats.db_fc_interruptions++;
	spin_unlock_irq(&ctx->dev->lock);
}

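/*
 * DB drop recovery: when the hardware reports dropped doorbells, every
 * active QP is referenced under the device lock, the SGE EQ context cache
 * is flushed, and cxgb4_sync_txq_pidx() brings the hardware producer index
 * of each SQ and RQ back in line with the host copy before doorbells are
 * rung again.
 */
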
struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};

static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}

static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	(*countp)++;
	return 0;
}

static void deref_qps(struct qp_list *qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list->idx; idx++)
		c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
}

static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		spin_lock_irq(&qp->rhp->lock);
		spin_lock(&qp->lock);
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.sq.wq_pidx_inc = 0;

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));

		if (ret) {
			pr_err(MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			spin_unlock(&qp->lock);
			spin_unlock_irq(&qp->rhp->lock);
			return;
		}
		qp->wq.rq.wq_pidx_inc = 0;
		spin_unlock(&qp->lock);
		spin_unlock_irq(&qp->rhp->lock);

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}

static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		return;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != STOPPED);
	ctx->dev->db_state = RECOVERY;
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);

	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
	if (!qp_list.qps) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		spin_unlock_irq(&ctx->dev->lock);
		return;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);

	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state */
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done! deref the qps and clean up */
	deref_qps(&qp_list);
	kfree(qp_list.qps);

	spin_lock_irq(&ctx->dev->lock);
	WARN_ON(ctx->dev->db_state != RECOVERY);
	ctx->dev->db_state = STOPPED;
	spin_unlock_irq(&ctx->dev->lock);
}

static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
		       pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};

static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");

	if (ibnl_add_client(RDMA_NL_C4IW, RDMA_NL_IWPM_NUM_OPS,
			    c4iw_nl_cb_table))
		pr_err("%s[%u]: Failed to add netlink callback\n",
		       __func__, __LINE__);

	err = iwpm_init(RDMA_NL_C4IW);
	if (err) {
		pr_err("port mapper initialization failed with %d\n", err);
		ibnl_remove_client(RDMA_NL_C4IW);
		c4iw_cm_term();
		debugfs_remove_recursive(c4iw_debugfs_root);
		return err;
	}

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}

static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	iwpm_exit(RDMA_NL_C4IW);
	ibnl_remove_client(RDMA_NL_C4IW);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);