Steve Wisecfdda9d2010-04-21 15:30:06 -07001/*
2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/debugfs.h>
Vipul Pandyae5725682012-05-21 17:31:13 +053035#include <linux/vmalloc.h>
Hariprasad Shenaida388972014-07-17 22:31:03 +053036#include <linux/math64.h>
Steve Wisecfdda9d2010-04-21 15:30:06 -070037
38#include <rdma/ib_verbs.h>
39
40#include "iw_cxgb4.h"
41
42#define DRV_VERSION "0.1"
43
44MODULE_AUTHOR("Steve Wise");
Vipul Pandyaf079af72013-03-14 05:08:58 +000045MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
Steve Wisecfdda9d2010-04-21 15:30:06 -070046MODULE_LICENSE("Dual BSD/GPL");
47MODULE_VERSION(DRV_VERSION);
48
Vipul Pandya80ccdd62013-03-14 05:09:00 +000049static int allow_db_fc_on_t5;
50module_param(allow_db_fc_on_t5, int, 0644);
51MODULE_PARM_DESC(allow_db_fc_on_t5,
52 "Allow DB Flow Control on T5 (default = 0)");
53
54static int allow_db_coalescing_on_t5;
55module_param(allow_db_coalescing_on_t5, int, 0644);
56MODULE_PARM_DESC(allow_db_coalescing_on_t5,
57 "Allow DB Coalescing on T5 (default = 0)");
58
Hariprasad Shenai7730b4c2014-07-14 21:34:54 +053059int c4iw_wr_log = 0;
60module_param(c4iw_wr_log, int, 0444);
61MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");
62
Steve Wise65d4c012014-08-29 11:19:29 -050063static int c4iw_wr_log_size_order = 12;
Hariprasad Shenai7730b4c2014-07-14 21:34:54 +053064module_param(c4iw_wr_log_size_order, int, 0444);
65MODULE_PARM_DESC(c4iw_wr_log_size_order,
66 "Number of entries (log2) in the work request timing log.");
67
Vipul Pandya2c974782012-05-18 15:29:28 +053068struct uld_ctx {
69 struct list_head entry;
70 struct cxgb4_lld_info lldi;
71 struct c4iw_dev *dev;
72};
73
Steve Wise2f25e9a2011-05-09 22:06:23 -070074static LIST_HEAD(uld_ctx_list);
Steve Wisecfdda9d2010-04-21 15:30:06 -070075static DEFINE_MUTEX(dev_mutex);
76
Steve Wise05eb2382014-03-14 21:52:08 +053077#define DB_FC_RESUME_SIZE 64
78#define DB_FC_RESUME_DELAY 1
79#define DB_FC_DRAIN_THRESH 0
80
Steve Wisecfdda9d2010-04-21 15:30:06 -070081static struct dentry *c4iw_debugfs_root;
82
Steve Wise9e8d1fa32010-09-10 11:15:20 -050083struct c4iw_debugfs_data {
Steve Wisecfdda9d2010-04-21 15:30:06 -070084 struct c4iw_dev *devp;
85 char *buf;
86 int bufsize;
87 int pos;
88};
89
Steve Wise9eccfe12014-03-26 17:08:09 -050090/* registered cxgb4 netlink callbacks */
91static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
92 [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
93 [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
94 [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
95 [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
Steve Wise5b6b8fe2015-04-21 16:28:41 -040096 [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
Steve Wise9eccfe12014-03-26 17:08:09 -050097 [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
98 [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
99};
100
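/* idr_for_each() callback: count entries so a dump buffer can be sized. */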
Steve Wise9e8d1fa32010-09-10 11:15:20 -0500101static int count_idrs(int id, void *p, void *data)
Steve Wisecfdda9d2010-04-21 15:30:06 -0700102{
Steve Wisecfdda9d2010-04-21 15:30:06 -0700103 int *countp = data;
104
Steve Wisecfdda9d2010-04-21 15:30:06 -0700105 *countp = *countp + 1;
106 return 0;
107}
108
Steve Wise9e8d1fa32010-09-10 11:15:20 -0500109static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
110 loff_t *ppos)
111{
112 struct c4iw_debugfs_data *d = file->private_data;
Steve Wise9e8d1fa32010-09-10 11:15:20 -0500113
Steve Wise31609772010-09-29 18:21:33 +0000114 return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
Steve Wise9e8d1fa32010-09-10 11:15:20 -0500115}
116
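/*
 * Record one work-request timing sample (host and SGE timestamps for the
 * completed SQ or RQ entry) into the circular wr_log array.
 */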
Hariprasad Shenai7730b4c2014-07-14 21:34:54 +0530117void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
118{
119 struct wr_log_entry le;
120 int idx;
121
122 if (!wq->rdev->wr_log)
123 return;
124
125 idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
126 (wq->rdev->wr_log_size - 1);
127 le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
128 getnstimeofday(&le.poll_host_ts);
129 le.valid = 1;
130 le.cqe_sge_ts = CQE_TS(cqe);
131 if (SQ_TYPE(cqe)) {
132 le.qid = wq->sq.qid;
133 le.opcode = CQE_OPCODE(cqe);
134 le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
135 le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
136 le.wr_id = CQE_WRID_SQ_IDX(cqe);
137 } else {
138 le.qid = wq->rq.qid;
139 le.opcode = FW_RI_RECEIVE;
140 le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
141 le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
142 le.wr_id = CQE_WRID_MSN(cqe);
143 }
144 wq->rdev->wr_log[idx] = le;
145}
146
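/*
 * debugfs dump of the wr_log ring: walk oldest to newest and print host and
 * SGE timestamp deltas, converting SGE ticks to nanoseconds via cclk_ps.
 */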
147static int wr_log_show(struct seq_file *seq, void *v)
148{
149 struct c4iw_dev *dev = seq->private;
150 struct timespec prev_ts = {0, 0};
151 struct wr_log_entry *lep;
152 int prev_ts_set = 0;
153 int idx, end;
154
Hariprasad S6198dd82015-04-22 01:44:59 +0530155#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)
Hariprasad Shenai7730b4c2014-07-14 21:34:54 +0530156
157 idx = atomic_read(&dev->rdev.wr_log_idx) &
158 (dev->rdev.wr_log_size - 1);
159 end = idx - 1;
160 if (end < 0)
161 end = dev->rdev.wr_log_size - 1;
162 lep = &dev->rdev.wr_log[idx];
163 while (idx != end) {
164 if (lep->valid) {
165 if (!prev_ts_set) {
166 prev_ts_set = 1;
167 prev_ts = lep->poll_host_ts;
168 }
169 seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
170 "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
171 "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
172 "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
173 "cqe_poll_delta_ns %llu\n",
174 idx,
175 timespec_sub(lep->poll_host_ts,
176 prev_ts).tv_sec,
177 timespec_sub(lep->poll_host_ts,
178 prev_ts).tv_nsec,
179 lep->qid, lep->opcode,
180 lep->opcode == FW_RI_RECEIVE ?
181 "msn" : "wrid",
182 lep->wr_id,
183 timespec_sub(lep->poll_host_ts,
184 lep->post_host_ts).tv_sec,
185 timespec_sub(lep->poll_host_ts,
186 lep->post_host_ts).tv_nsec,
187 lep->post_sge_ts, lep->cqe_sge_ts,
188 lep->poll_sge_ts,
189 ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
190 ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
191 prev_ts = lep->poll_host_ts;
192 }
193 idx++;
194 if (idx > (dev->rdev.wr_log_size - 1))
195 idx = 0;
196 lep = &dev->rdev.wr_log[idx];
197 }
198#undef ts2ns
199 return 0;
200}
201
202static int wr_log_open(struct inode *inode, struct file *file)
203{
204 return single_open(file, wr_log_show, inode->i_private);
205}
206
207static ssize_t wr_log_clear(struct file *file, const char __user *buf,
208 size_t count, loff_t *pos)
209{
210 struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
211 int i;
212
213 if (dev->rdev.wr_log)
214 for (i = 0; i < dev->rdev.wr_log_size; i++)
215 dev->rdev.wr_log[i].valid = 0;
216 return count;
217}
218
219static const struct file_operations wr_log_debugfs_fops = {
220 .owner = THIS_MODULE,
221 .open = wr_log_open,
222 .release = single_release,
223 .read = seq_read,
224 .llseek = seq_lseek,
225 .write = wr_log_clear,
226};
227
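/*
 * idr_for_each() callback: format one QP (with endpoint addresses when
 * connected) into the debugfs buffer; returns 1 to stop once the buffer fills.
 */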
Steve Wise9e8d1fa32010-09-10 11:15:20 -0500228static int dump_qp(int id, void *p, void *data)
Steve Wisecfdda9d2010-04-21 15:30:06 -0700229{
230 struct c4iw_qp *qp = p;
Steve Wise9e8d1fa32010-09-10 11:15:20 -0500231 struct c4iw_debugfs_data *qpd = data;
Steve Wisecfdda9d2010-04-21 15:30:06 -0700232 int space;
233 int cc;
234
235 if (id != qp->wq.sq.qid)
236 return 0;
237
238 space = qpd->bufsize - qpd->pos - 1;
239 if (space == 0)
240 return 1;
241
Vipul Pandya830662f2013-07-04 16:10:47 +0530242 if (qp->ep) {
243 if (qp->ep->com.local_addr.ss_family == AF_INET) {
244 struct sockaddr_in *lsin = (struct sockaddr_in *)
245 &qp->ep->com.local_addr;
246 struct sockaddr_in *rsin = (struct sockaddr_in *)
247 &qp->ep->com.remote_addr;
Steve Wise9eccfe12014-03-26 17:08:09 -0500248 struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
249 &qp->ep->com.mapped_local_addr;
250 struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
251 &qp->ep->com.mapped_remote_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +0530252
253 cc = snprintf(qpd->buf + qpd->pos, space,
254 "rc qp sq id %u rq id %u state %u "
255 "onchip %u ep tid %u state %u "
Steve Wise9eccfe12014-03-26 17:08:09 -0500256 "%pI4:%u/%u->%pI4:%u/%u\n",
Vipul Pandya830662f2013-07-04 16:10:47 +0530257 qp->wq.sq.qid, qp->wq.rq.qid,
258 (int)qp->attr.state,
259 qp->wq.sq.flags & T4_SQ_ONCHIP,
260 qp->ep->hwtid, (int)qp->ep->com.state,
261 &lsin->sin_addr, ntohs(lsin->sin_port),
Steve Wise9eccfe12014-03-26 17:08:09 -0500262 ntohs(mapped_lsin->sin_port),
263 &rsin->sin_addr, ntohs(rsin->sin_port),
264 ntohs(mapped_rsin->sin_port));
Vipul Pandya830662f2013-07-04 16:10:47 +0530265 } else {
266 struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
267 &qp->ep->com.local_addr;
268 struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
269 &qp->ep->com.remote_addr;
Steve Wise9eccfe12014-03-26 17:08:09 -0500270 struct sockaddr_in6 *mapped_lsin6 =
271 (struct sockaddr_in6 *)
272 &qp->ep->com.mapped_local_addr;
273 struct sockaddr_in6 *mapped_rsin6 =
274 (struct sockaddr_in6 *)
275 &qp->ep->com.mapped_remote_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +0530276
277 cc = snprintf(qpd->buf + qpd->pos, space,
278 "rc qp sq id %u rq id %u state %u "
279 "onchip %u ep tid %u state %u "
Steve Wise9eccfe12014-03-26 17:08:09 -0500280 "%pI6:%u/%u->%pI6:%u/%u\n",
Vipul Pandya830662f2013-07-04 16:10:47 +0530281 qp->wq.sq.qid, qp->wq.rq.qid,
282 (int)qp->attr.state,
283 qp->wq.sq.flags & T4_SQ_ONCHIP,
284 qp->ep->hwtid, (int)qp->ep->com.state,
285 &lsin6->sin6_addr,
286 ntohs(lsin6->sin6_port),
Steve Wise9eccfe12014-03-26 17:08:09 -0500287 ntohs(mapped_lsin6->sin6_port),
Vipul Pandya830662f2013-07-04 16:10:47 +0530288 &rsin6->sin6_addr,
Steve Wise9eccfe12014-03-26 17:08:09 -0500289 ntohs(rsin6->sin6_port),
290 ntohs(mapped_rsin6->sin6_port));
Vipul Pandya830662f2013-07-04 16:10:47 +0530291 }
292 } else
Steve Wisedb5d0402011-03-11 22:29:50 +0000293 cc = snprintf(qpd->buf + qpd->pos, space,
294 "qp sq id %u rq id %u state %u onchip %u\n",
295 qp->wq.sq.qid, qp->wq.rq.qid,
296 (int)qp->attr.state,
297 qp->wq.sq.flags & T4_SQ_ONCHIP);
Steve Wisecfdda9d2010-04-21 15:30:06 -0700298 if (cc < space)
299 qpd->pos += cc;
300 return 0;
301}
302
303static int qp_release(struct inode *inode, struct file *file)
304{
Steve Wise9e8d1fa32010-09-10 11:15:20 -0500305 struct c4iw_debugfs_data *qpd = file->private_data;
Steve Wisecfdda9d2010-04-21 15:30:06 -0700306 if (!qpd) {
307 printk(KERN_INFO "%s null qpd?\n", __func__);
308 return 0;
309 }
Vipul Pandyad716a2a2012-05-18 15:29:31 +0530310 vfree(qpd->buf);
Steve Wisecfdda9d2010-04-21 15:30:06 -0700311 kfree(qpd);
312 return 0;
313}
314
315static int qp_open(struct inode *inode, struct file *file)
316{
Steve Wise9e8d1fa32010-09-10 11:15:20 -0500317 struct c4iw_debugfs_data *qpd;
Steve Wisecfdda9d2010-04-21 15:30:06 -0700318 int ret = 0;
319 int count = 1;
320
321 qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
322 if (!qpd) {
323 ret = -ENOMEM;
324 goto out;
325 }
326 qpd->devp = inode->i_private;
327 qpd->pos = 0;
328
329 spin_lock_irq(&qpd->devp->lock);
Steve Wise9e8d1fa32010-09-10 11:15:20 -0500330 idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
Steve Wisecfdda9d2010-04-21 15:30:06 -0700331 spin_unlock_irq(&qpd->devp->lock);
332
333 qpd->bufsize = count * 128;
Vipul Pandyad716a2a2012-05-18 15:29:31 +0530334 qpd->buf = vmalloc(qpd->bufsize);
Steve Wisecfdda9d2010-04-21 15:30:06 -0700335 if (!qpd->buf) {
336 ret = -ENOMEM;
337 goto err1;
338 }
339
340 spin_lock_irq(&qpd->devp->lock);
Steve Wise9e8d1fa32010-09-10 11:15:20 -0500341 idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
Steve Wisecfdda9d2010-04-21 15:30:06 -0700342 spin_unlock_irq(&qpd->devp->lock);
343
344 qpd->buf[qpd->pos++] = 0;
345 file->private_data = qpd;
346 goto out;
347err1:
348 kfree(qpd);
349out:
350 return ret;
351}
352
Steve Wisecfdda9d2010-04-21 15:30:06 -0700353static const struct file_operations qp_debugfs_fops = {
354 .owner = THIS_MODULE,
355 .open = qp_open,
356 .release = qp_release,
Steve Wise9e8d1fa32010-09-10 11:15:20 -0500357 .read = debugfs_read,
Steve Wise8bbac892010-09-29 14:11:12 +0000358 .llseek = default_llseek,
Steve Wise9e8d1fa32010-09-10 11:15:20 -0500359};
360
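/*
 * idr_for_each() callback: read the STAG's TPT entry from adapter memory via
 * cxgb4_read_tpte() and format the decoded fields into the debugfs buffer.
 */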
361static int dump_stag(int id, void *p, void *data)
362{
363 struct c4iw_debugfs_data *stagd = data;
364 int space;
365 int cc;
Hariprasad Shenai031cf472014-07-14 21:34:53 +0530366 struct fw_ri_tpte tpte;
367 int ret;
Steve Wise9e8d1fa32010-09-10 11:15:20 -0500368
369 space = stagd->bufsize - stagd->pos - 1;
370 if (space == 0)
371 return 1;
372
Hariprasad Shenai031cf472014-07-14 21:34:53 +0530373 ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8,
374 (__be32 *)&tpte);
375 if (ret) {
376 dev_err(&stagd->devp->rdev.lldi.pdev->dev,
377 "%s cxgb4_read_tpte err %d\n", __func__, ret);
378 return ret;
379 }
380 cc = snprintf(stagd->buf + stagd->pos, space,
381 "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
382 "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
383 (u32)id<<8,
Hariprasad Shenaicf7fe642015-01-16 09:24:48 +0530384 FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid)),
385 FW_RI_TPTE_STAGKEY_G(ntohl(tpte.valid_to_pdid)),
386 FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid)),
387 FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid)),
388 FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid)),
389 FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid)),
Hariprasad Shenai031cf472014-07-14 21:34:53 +0530390 ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
391 ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
Steve Wise9e8d1fa32010-09-10 11:15:20 -0500392 if (cc < space)
393 stagd->pos += cc;
394 return 0;
395}
396
397static int stag_release(struct inode *inode, struct file *file)
398{
399 struct c4iw_debugfs_data *stagd = file->private_data;
400 if (!stagd) {
401 printk(KERN_INFO "%s null stagd?\n", __func__);
402 return 0;
403 }
Hariprasad Shenai031cf472014-07-14 21:34:53 +0530404 vfree(stagd->buf);
Steve Wise9e8d1fa32010-09-10 11:15:20 -0500405 kfree(stagd);
406 return 0;
407}
408
409static int stag_open(struct inode *inode, struct file *file)
410{
411 struct c4iw_debugfs_data *stagd;
412 int ret = 0;
413 int count = 1;
414
415 stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
416 if (!stagd) {
417 ret = -ENOMEM;
418 goto out;
419 }
420 stagd->devp = inode->i_private;
421 stagd->pos = 0;
422
423 spin_lock_irq(&stagd->devp->lock);
424 idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
425 spin_unlock_irq(&stagd->devp->lock);
426
Hariprasad Shenai031cf472014-07-14 21:34:53 +0530427 stagd->bufsize = count * 256;
428 stagd->buf = vmalloc(stagd->bufsize);
Steve Wise9e8d1fa32010-09-10 11:15:20 -0500429 if (!stagd->buf) {
430 ret = -ENOMEM;
431 goto err1;
432 }
433
434 spin_lock_irq(&stagd->devp->lock);
435 idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
436 spin_unlock_irq(&stagd->devp->lock);
437
438 stagd->buf[stagd->pos++] = 0;
439 file->private_data = stagd;
440 goto out;
441err1:
442 kfree(stagd);
443out:
444 return ret;
445}
446
447static const struct file_operations stag_debugfs_fops = {
448 .owner = THIS_MODULE,
449 .open = stag_open,
450 .release = stag_release,
451 .read = debugfs_read,
Steve Wise8bbac892010-09-29 14:11:12 +0000452 .llseek = default_llseek,
Steve Wisecfdda9d2010-04-21 15:30:06 -0700453};
454
Steve Wise05eb2382014-03-14 21:52:08 +0530455static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY", "STOPPED"};
Vipul Pandya422eea02012-05-18 15:29:30 +0530456
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530457static int stats_show(struct seq_file *seq, void *v)
458{
459 struct c4iw_dev *dev = seq->private;
460
Vipul Pandyaec3eead2012-05-18 15:29:32 +0530461 seq_printf(seq, " Object: %10s %10s %10s %10s\n", "Total", "Current",
462 "Max", "Fail");
463 seq_printf(seq, " PDID: %10llu %10llu %10llu %10llu\n",
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530464 dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
Vipul Pandyaec3eead2012-05-18 15:29:32 +0530465 dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
466 seq_printf(seq, " QID: %10llu %10llu %10llu %10llu\n",
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530467 dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
Vipul Pandyaec3eead2012-05-18 15:29:32 +0530468 dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
469 seq_printf(seq, " TPTMEM: %10llu %10llu %10llu %10llu\n",
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530470 dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
Vipul Pandyaec3eead2012-05-18 15:29:32 +0530471 dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
472 seq_printf(seq, " PBLMEM: %10llu %10llu %10llu %10llu\n",
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530473 dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
Vipul Pandyaec3eead2012-05-18 15:29:32 +0530474 dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
475 seq_printf(seq, " RQTMEM: %10llu %10llu %10llu %10llu\n",
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530476 dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
Vipul Pandyaec3eead2012-05-18 15:29:32 +0530477 dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
478 seq_printf(seq, " OCQPMEM: %10llu %10llu %10llu %10llu\n",
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530479 dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
Vipul Pandyaec3eead2012-05-18 15:29:32 +0530480 dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
Vipul Pandya2c974782012-05-18 15:29:28 +0530481 seq_printf(seq, " DB FULL: %10llu\n", dev->rdev.stats.db_full);
482 seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
483 seq_printf(seq, " DB DROP: %10llu\n", dev->rdev.stats.db_drop);
Steve Wise05eb2382014-03-14 21:52:08 +0530484 seq_printf(seq, " DB State: %s Transitions %llu FC Interruptions %llu\n",
Vipul Pandya422eea02012-05-18 15:29:30 +0530485 db_state_str[dev->db_state],
Steve Wise05eb2382014-03-14 21:52:08 +0530486 dev->rdev.stats.db_state_transitions,
487 dev->rdev.stats.db_fc_interruptions);
Vipul Pandya1cab7752012-12-10 09:30:55 +0000488 seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
Vipul Pandya793dad92012-12-10 09:30:56 +0000489 seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
490 dev->rdev.stats.act_ofld_conn_fails);
491 seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
492 dev->rdev.stats.pas_ofld_conn_fails);
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +0530493 seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530494 return 0;
495}
496
497static int stats_open(struct inode *inode, struct file *file)
498{
499 return single_open(file, stats_show, inode->i_private);
500}
501
502static ssize_t stats_clear(struct file *file, const char __user *buf,
503 size_t count, loff_t *pos)
504{
505 struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
506
507 mutex_lock(&dev->rdev.stats.lock);
508 dev->rdev.stats.pd.max = 0;
Vipul Pandyaec3eead2012-05-18 15:29:32 +0530509 dev->rdev.stats.pd.fail = 0;
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530510 dev->rdev.stats.qid.max = 0;
Vipul Pandyaec3eead2012-05-18 15:29:32 +0530511 dev->rdev.stats.qid.fail = 0;
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530512 dev->rdev.stats.stag.max = 0;
Vipul Pandyaec3eead2012-05-18 15:29:32 +0530513 dev->rdev.stats.stag.fail = 0;
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530514 dev->rdev.stats.pbl.max = 0;
Vipul Pandyaec3eead2012-05-18 15:29:32 +0530515 dev->rdev.stats.pbl.fail = 0;
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530516 dev->rdev.stats.rqt.max = 0;
Vipul Pandyaec3eead2012-05-18 15:29:32 +0530517 dev->rdev.stats.rqt.fail = 0;
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530518 dev->rdev.stats.ocqp.max = 0;
Vipul Pandyaec3eead2012-05-18 15:29:32 +0530519 dev->rdev.stats.ocqp.fail = 0;
Vipul Pandya2c974782012-05-18 15:29:28 +0530520 dev->rdev.stats.db_full = 0;
521 dev->rdev.stats.db_empty = 0;
522 dev->rdev.stats.db_drop = 0;
Vipul Pandya422eea02012-05-18 15:29:30 +0530523 dev->rdev.stats.db_state_transitions = 0;
Vipul Pandya793dad92012-12-10 09:30:56 +0000524 dev->rdev.stats.tcam_full = 0;
525 dev->rdev.stats.act_ofld_conn_fails = 0;
526 dev->rdev.stats.pas_ofld_conn_fails = 0;
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530527 mutex_unlock(&dev->rdev.stats.lock);
528 return count;
529}
530
531static const struct file_operations stats_debugfs_fops = {
532 .owner = THIS_MODULE,
533 .open = stats_open,
534 .release = single_release,
535 .read = seq_read,
536 .llseek = seq_lseek,
537 .write = stats_clear,
538};
539
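/*
 * idr_for_each() callback: format one active endpoint, including its local,
 * remote and port-mapped addresses, into the debugfs buffer.
 */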
Vipul Pandya793dad92012-12-10 09:30:56 +0000540static int dump_ep(int id, void *p, void *data)
541{
542 struct c4iw_ep *ep = p;
543 struct c4iw_debugfs_data *epd = data;
544 int space;
545 int cc;
546
547 space = epd->bufsize - epd->pos - 1;
548 if (space == 0)
549 return 1;
550
Vipul Pandya830662f2013-07-04 16:10:47 +0530551 if (ep->com.local_addr.ss_family == AF_INET) {
552 struct sockaddr_in *lsin = (struct sockaddr_in *)
553 &ep->com.local_addr;
554 struct sockaddr_in *rsin = (struct sockaddr_in *)
555 &ep->com.remote_addr;
Steve Wise9eccfe12014-03-26 17:08:09 -0500556 struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
557 &ep->com.mapped_local_addr;
558 struct sockaddr_in *mapped_rsin = (struct sockaddr_in *)
559 &ep->com.mapped_remote_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +0530560
561 cc = snprintf(epd->buf + epd->pos, space,
562 "ep %p cm_id %p qp %p state %d flags 0x%lx "
563 "history 0x%lx hwtid %d atid %d "
Steve Wise9eccfe12014-03-26 17:08:09 -0500564 "%pI4:%d/%d <-> %pI4:%d/%d\n",
Vipul Pandya830662f2013-07-04 16:10:47 +0530565 ep, ep->com.cm_id, ep->com.qp,
566 (int)ep->com.state, ep->com.flags,
567 ep->com.history, ep->hwtid, ep->atid,
568 &lsin->sin_addr, ntohs(lsin->sin_port),
Steve Wise9eccfe12014-03-26 17:08:09 -0500569 ntohs(mapped_lsin->sin_port),
570 &rsin->sin_addr, ntohs(rsin->sin_port),
571 ntohs(mapped_rsin->sin_port));
Vipul Pandya830662f2013-07-04 16:10:47 +0530572 } else {
573 struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
574 &ep->com.local_addr;
575 struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
576 &ep->com.remote_addr;
Steve Wise9eccfe12014-03-26 17:08:09 -0500577 struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
578 &ep->com.mapped_local_addr;
579 struct sockaddr_in6 *mapped_rsin6 = (struct sockaddr_in6 *)
580 &ep->com.mapped_remote_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +0530581
582 cc = snprintf(epd->buf + epd->pos, space,
583 "ep %p cm_id %p qp %p state %d flags 0x%lx "
584 "history 0x%lx hwtid %d atid %d "
Steve Wise9eccfe12014-03-26 17:08:09 -0500585 "%pI6:%d/%d <-> %pI6:%d/%d\n",
Vipul Pandya830662f2013-07-04 16:10:47 +0530586 ep, ep->com.cm_id, ep->com.qp,
587 (int)ep->com.state, ep->com.flags,
588 ep->com.history, ep->hwtid, ep->atid,
589 &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
Steve Wise9eccfe12014-03-26 17:08:09 -0500590 ntohs(mapped_lsin6->sin6_port),
591 &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
592 ntohs(mapped_rsin6->sin6_port));
Vipul Pandya830662f2013-07-04 16:10:47 +0530593 }
Vipul Pandya793dad92012-12-10 09:30:56 +0000594 if (cc < space)
595 epd->pos += cc;
596 return 0;
597}
598
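/* As dump_ep(), but for listening endpoints, which only have a local address. */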
599static int dump_listen_ep(int id, void *p, void *data)
600{
601 struct c4iw_listen_ep *ep = p;
602 struct c4iw_debugfs_data *epd = data;
603 int space;
604 int cc;
605
606 space = epd->bufsize - epd->pos - 1;
607 if (space == 0)
608 return 1;
609
Vipul Pandya830662f2013-07-04 16:10:47 +0530610 if (ep->com.local_addr.ss_family == AF_INET) {
611 struct sockaddr_in *lsin = (struct sockaddr_in *)
612 &ep->com.local_addr;
Steve Wise9eccfe12014-03-26 17:08:09 -0500613 struct sockaddr_in *mapped_lsin = (struct sockaddr_in *)
614 &ep->com.mapped_local_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +0530615
616 cc = snprintf(epd->buf + epd->pos, space,
617 "ep %p cm_id %p state %d flags 0x%lx stid %d "
Steve Wise9eccfe12014-03-26 17:08:09 -0500618 "backlog %d %pI4:%d/%d\n",
Vipul Pandya830662f2013-07-04 16:10:47 +0530619 ep, ep->com.cm_id, (int)ep->com.state,
620 ep->com.flags, ep->stid, ep->backlog,
Steve Wise9eccfe12014-03-26 17:08:09 -0500621 &lsin->sin_addr, ntohs(lsin->sin_port),
622 ntohs(mapped_lsin->sin_port));
Vipul Pandya830662f2013-07-04 16:10:47 +0530623 } else {
624 struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
625 &ep->com.local_addr;
Steve Wise9eccfe12014-03-26 17:08:09 -0500626 struct sockaddr_in6 *mapped_lsin6 = (struct sockaddr_in6 *)
627 &ep->com.mapped_local_addr;
Vipul Pandya830662f2013-07-04 16:10:47 +0530628
629 cc = snprintf(epd->buf + epd->pos, space,
630 "ep %p cm_id %p state %d flags 0x%lx stid %d "
Steve Wise9eccfe12014-03-26 17:08:09 -0500631 "backlog %d %pI6:%d/%d\n",
Vipul Pandya830662f2013-07-04 16:10:47 +0530632 ep, ep->com.cm_id, (int)ep->com.state,
633 ep->com.flags, ep->stid, ep->backlog,
Steve Wise9eccfe12014-03-26 17:08:09 -0500634 &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
635 ntohs(mapped_lsin6->sin6_port));
Vipul Pandya830662f2013-07-04 16:10:47 +0530636 }
Vipul Pandya793dad92012-12-10 09:30:56 +0000637 if (cc < space)
638 epd->pos += cc;
639 return 0;
640}
641
642static int ep_release(struct inode *inode, struct file *file)
643{
644 struct c4iw_debugfs_data *epd = file->private_data;
645 if (!epd) {
 646 pr_info("%s null epd?\n", __func__);
647 return 0;
648 }
649 vfree(epd->buf);
650 kfree(epd);
651 return 0;
652}
653
654static int ep_open(struct inode *inode, struct file *file)
655{
656 struct c4iw_debugfs_data *epd;
657 int ret = 0;
658 int count = 1;
659
660 epd = kmalloc(sizeof(*epd), GFP_KERNEL);
661 if (!epd) {
662 ret = -ENOMEM;
663 goto out;
664 }
665 epd->devp = inode->i_private;
666 epd->pos = 0;
667
668 spin_lock_irq(&epd->devp->lock);
669 idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
670 idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
671 idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
672 spin_unlock_irq(&epd->devp->lock);
673
Pramod Kumar63a71ba2014-11-21 09:36:35 -0600674 epd->bufsize = count * 240;
Vipul Pandya793dad92012-12-10 09:30:56 +0000675 epd->buf = vmalloc(epd->bufsize);
676 if (!epd->buf) {
677 ret = -ENOMEM;
678 goto err1;
679 }
680
681 spin_lock_irq(&epd->devp->lock);
682 idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
683 idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
684 idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
685 spin_unlock_irq(&epd->devp->lock);
686
687 file->private_data = epd;
688 goto out;
689err1:
690 kfree(epd);
691out:
692 return ret;
693}
694
695static const struct file_operations ep_debugfs_fops = {
696 .owner = THIS_MODULE,
697 .open = ep_open,
698 .release = ep_release,
699 .read = debugfs_read,
700};
701
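/*
 * Create the per-device debugfs files (qps, stags, stats, eps and, when
 * c4iw_wr_log is set, wr_log) under this device's debugfs directory.
 */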
Steve Wisecfdda9d2010-04-21 15:30:06 -0700702static int setup_debugfs(struct c4iw_dev *devp)
703{
Steve Wisecfdda9d2010-04-21 15:30:06 -0700704 if (!devp->debugfs_root)
705 return -1;
706
David Howellse59b4e92015-01-21 20:03:40 +0000707 debugfs_create_file_size("qps", S_IWUSR, devp->debugfs_root,
708 (void *)devp, &qp_debugfs_fops, 4096);
Steve Wise9e8d1fa32010-09-10 11:15:20 -0500709
David Howellse59b4e92015-01-21 20:03:40 +0000710 debugfs_create_file_size("stags", S_IWUSR, devp->debugfs_root,
711 (void *)devp, &stag_debugfs_fops, 4096);
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530712
David Howellse59b4e92015-01-21 20:03:40 +0000713 debugfs_create_file_size("stats", S_IWUSR, devp->debugfs_root,
714 (void *)devp, &stats_debugfs_fops, 4096);
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530715
David Howellse59b4e92015-01-21 20:03:40 +0000716 debugfs_create_file_size("eps", S_IWUSR, devp->debugfs_root,
717 (void *)devp, &ep_debugfs_fops, 4096);
Vipul Pandya793dad92012-12-10 09:30:56 +0000718
David Howellse59b4e92015-01-21 20:03:40 +0000719 if (c4iw_wr_log)
720 debugfs_create_file_size("wr_log", S_IWUSR, devp->debugfs_root,
721 (void *)devp, &wr_log_debugfs_fops, 4096);
Steve Wisecfdda9d2010-04-21 15:30:06 -0700722 return 0;
723}
724
725void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
726 struct c4iw_dev_ucontext *uctx)
727{
728 struct list_head *pos, *nxt;
729 struct c4iw_qid_list *entry;
730
731 mutex_lock(&uctx->lock);
732 list_for_each_safe(pos, nxt, &uctx->qpids) {
733 entry = list_entry(pos, struct c4iw_qid_list, entry);
734 list_del_init(&entry->entry);
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530735 if (!(entry->qid & rdev->qpmask)) {
Vipul Pandyaec3eead2012-05-18 15:29:32 +0530736 c4iw_put_resource(&rdev->resource.qid_table,
737 entry->qid);
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530738 mutex_lock(&rdev->stats.lock);
739 rdev->stats.qid.cur -= rdev->qpmask + 1;
740 mutex_unlock(&rdev->stats.lock);
741 }
Steve Wisecfdda9d2010-04-21 15:30:06 -0700742 kfree(entry);
743 }
744
 745 list_for_each_safe(pos, nxt, &uctx->cqids) {
746 entry = list_entry(pos, struct c4iw_qid_list, entry);
747 list_del_init(&entry->entry);
748 kfree(entry);
749 }
750 mutex_unlock(&uctx->lock);
751}
752
753void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
754 struct c4iw_dev_ucontext *uctx)
755{
756 INIT_LIST_HEAD(&uctx->qpids);
757 INIT_LIST_HEAD(&uctx->cqids);
758 mutex_init(&uctx->lock);
759}
760
761/* Caller takes care of locking if needed */
762static int c4iw_rdev_open(struct c4iw_rdev *rdev)
763{
764 int err;
765
766 c4iw_init_dev_ucontext(rdev, &rdev->uctx);
767
768 /*
Hariprasad S4a75a862015-04-22 01:45:01 +0530769 * This implementation assumes udb_density == ucq_density! Eventually
 770 * we may need to support unequal densities, but for now fail the open.
 771 * The cqid and qpid ranges must also match for now.
772 */
773 if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
774 pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n",
775 pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
776 rdev->lldi.ucq_density);
777 err = -EINVAL;
778 goto err1;
779 }
780 if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
781 rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
782 pr_err(MOD "%s: unsupported qp and cq id ranges "
783 "qp start %u size %u cq start %u size %u\n",
784 pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
785 rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.size,
786 rdev->lldi.vr->cq.size);
787 err = -EINVAL;
788 goto err1;
789 }
790
791 /*
Steve Wisecfdda9d2010-04-21 15:30:06 -0700792 * qpshift is the number of bits to shift the qpid left in order
793 * to get the correct address of the doorbell for that qp.
794 */
795 rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
796 rdev->qpmask = rdev->lldi.udb_density - 1;
797 rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
798 rdev->cqmask = rdev->lldi.ucq_density - 1;
799 PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
Steve Wise93fb72e2010-06-23 15:46:55 +0000800 "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
801 "qp qid start %u size %u cq qid start %u size %u\n",
Steve Wisecfdda9d2010-04-21 15:30:06 -0700802 __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
803 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
804 rdev->lldi.vr->pbl.start,
805 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
Steve Wise93fb72e2010-06-23 15:46:55 +0000806 rdev->lldi.vr->rq.size,
807 rdev->lldi.vr->qp.start,
808 rdev->lldi.vr->qp.size,
809 rdev->lldi.vr->cq.start,
810 rdev->lldi.vr->cq.size);
Hariprasad S6198dd82015-04-22 01:44:59 +0530811 PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
Steve Wisecfdda9d2010-04-21 15:30:06 -0700812 "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
813 (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
Hariprasad S6198dd82015-04-22 01:44:59 +0530814 (void *)pci_resource_start(rdev->lldi.pdev, 2),
Steve Wisecfdda9d2010-04-21 15:30:06 -0700815 rdev->lldi.db_reg,
816 rdev->lldi.gts_reg,
817 rdev->qpshift, rdev->qpmask,
818 rdev->cqshift, rdev->cqmask);
819
820 if (c4iw_num_stags(rdev) == 0) {
821 err = -EINVAL;
822 goto err1;
823 }
824
Vipul Pandya8d81ef32012-05-18 15:29:27 +0530825 rdev->stats.pd.total = T4_MAX_NUM_PD;
826 rdev->stats.stag.total = rdev->lldi.vr->stag.size;
827 rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
828 rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
829 rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
830 rdev->stats.qid.total = rdev->lldi.vr->qp.size;
831
Steve Wisecfdda9d2010-04-21 15:30:06 -0700832 err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
833 if (err) {
834 printk(KERN_ERR MOD "error %d initializing resources\n", err);
835 goto err1;
836 }
837 err = c4iw_pblpool_create(rdev);
838 if (err) {
839 printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
840 goto err2;
841 }
842 err = c4iw_rqtpool_create(rdev);
843 if (err) {
844 printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
845 goto err3;
846 }
Steve Wisec6d7b262010-09-13 11:23:57 -0500847 err = c4iw_ocqp_pool_create(rdev);
848 if (err) {
849 printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
850 goto err4;
851 }
Steve Wise05eb2382014-03-14 21:52:08 +0530852 rdev->status_page = (struct t4_dev_status_page *)
853 __get_free_page(GFP_KERNEL);
854 if (!rdev->status_page) {
855 pr_err(MOD "error allocating status page\n");
856 goto err4;
857 }
David S. Miller8fd90bb2014-07-22 00:44:59 -0700858
Hariprasad Shenai7730b4c2014-07-14 21:34:54 +0530859 if (c4iw_wr_log) {
860 rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) *
861 sizeof(*rdev->wr_log), GFP_KERNEL);
862 if (rdev->wr_log) {
863 rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
864 atomic_set(&rdev->wr_log_idx, 0);
865 } else {
866 pr_err(MOD "error allocating wr_log. Logging disabled\n");
867 }
868 }
David S. Miller8fd90bb2014-07-22 00:44:59 -0700869
Steve Wise6b54d542014-07-08 10:20:35 -0500870 rdev->status_page->db_off = 0;
David S. Miller8fd90bb2014-07-22 00:44:59 -0700871
Steve Wisecfdda9d2010-04-21 15:30:06 -0700872 return 0;
Steve Wisec6d7b262010-09-13 11:23:57 -0500873err4:
874 c4iw_rqtpool_destroy(rdev);
Steve Wisecfdda9d2010-04-21 15:30:06 -0700875err3:
876 c4iw_pblpool_destroy(rdev);
877err2:
878 c4iw_destroy_resource(&rdev->resource);
879err1:
880 return err;
881}
882
883static void c4iw_rdev_close(struct c4iw_rdev *rdev)
884{
Hariprasad Shenai7730b4c2014-07-14 21:34:54 +0530885 kfree(rdev->wr_log);
Steve Wise05eb2382014-03-14 21:52:08 +0530886 free_page((unsigned long)rdev->status_page);
Steve Wisecfdda9d2010-04-21 15:30:06 -0700887 c4iw_pblpool_destroy(rdev);
888 c4iw_rqtpool_destroy(rdev);
889 c4iw_destroy_resource(&rdev->resource);
890}
891
Steve Wise9efe10a2011-10-06 09:32:44 -0700892static void c4iw_dealloc(struct uld_ctx *ctx)
Steve Wisecfdda9d2010-04-21 15:30:06 -0700893{
Steve Wise2f25e9a2011-05-09 22:06:23 -0700894 c4iw_rdev_close(&ctx->dev->rdev);
895 idr_destroy(&ctx->dev->cqidr);
896 idr_destroy(&ctx->dev->qpidr);
897 idr_destroy(&ctx->dev->mmidr);
Vipul Pandya793dad92012-12-10 09:30:56 +0000898 idr_destroy(&ctx->dev->hwtid_idr);
899 idr_destroy(&ctx->dev->stid_idr);
900 idr_destroy(&ctx->dev->atid_idr);
Steve Wisefa658a92014-04-09 09:38:25 -0500901 if (ctx->dev->rdev.bar2_kva)
902 iounmap(ctx->dev->rdev.bar2_kva);
903 if (ctx->dev->rdev.oc_mw_kva)
904 iounmap(ctx->dev->rdev.oc_mw_kva);
Steve Wise2f25e9a2011-05-09 22:06:23 -0700905 ib_dealloc_device(&ctx->dev->ibdev);
906 ctx->dev = NULL;
Steve Wisecfdda9d2010-04-21 15:30:06 -0700907}
908
Steve Wise9efe10a2011-10-06 09:32:44 -0700909static void c4iw_remove(struct uld_ctx *ctx)
910{
911 PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
912 c4iw_unregister_device(ctx->dev);
913 c4iw_dealloc(ctx);
914}
915
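/* RDMA requires non-zero stag, pbl, rq, qp and cq virtual-resource ranges. */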
916static int rdma_supported(const struct cxgb4_lld_info *infop)
917{
918 return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
919 infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
Vipul Pandyaf079af72013-03-14 05:08:58 +0000920 infop->vr->cq.size > 0;
Steve Wise9efe10a2011-10-06 09:32:44 -0700921}
922
Steve Wisecfdda9d2010-04-21 15:30:06 -0700923static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
924{
925 struct c4iw_dev *devp;
926 int ret;
927
Steve Wise9efe10a2011-10-06 09:32:44 -0700928 if (!rdma_supported(infop)) {
929 printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
930 pci_name(infop->pdev));
931 return ERR_PTR(-ENOSYS);
932 }
Vipul Pandyaf079af72013-03-14 05:08:58 +0000933 if (!ocqp_supported(infop))
934 pr_info("%s: On-Chip Queues not supported on this device.\n",
935 pci_name(infop->pdev));
Vipul Pandya80ccdd62013-03-14 05:09:00 +0000936
Steve Wisecfdda9d2010-04-21 15:30:06 -0700937 devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
938 if (!devp) {
939 printk(KERN_ERR MOD "Cannot allocate ib device\n");
Steve Wisebbe9a0a2011-05-09 22:06:22 -0700940 return ERR_PTR(-ENOMEM);
Steve Wisecfdda9d2010-04-21 15:30:06 -0700941 }
942 devp->rdev.lldi = *infop;
943
Hariprasad Shenai04e10e22014-07-14 21:34:51 +0530944 /* init various hw-queue params based on lld info */
945 PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
946 __func__, devp->rdev.lldi.sge_ingpadboundary,
947 devp->rdev.lldi.sge_egrstatuspagesize);
948
949 devp->rdev.hw_queue.t4_eq_status_entries =
950 devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
Hariprasad Shenai66eb19a2014-07-21 20:55:15 +0530951 devp->rdev.hw_queue.t4_max_eq_size = 65520;
952 devp->rdev.hw_queue.t4_max_iq_size = 65520;
953 devp->rdev.hw_queue.t4_max_rq_size = 8192 -
954 devp->rdev.hw_queue.t4_eq_status_entries - 1;
Hariprasad Shenai04e10e22014-07-14 21:34:51 +0530955 devp->rdev.hw_queue.t4_max_sq_size =
Hariprasad Shenai66eb19a2014-07-21 20:55:15 +0530956 devp->rdev.hw_queue.t4_max_eq_size -
957 devp->rdev.hw_queue.t4_eq_status_entries - 1;
Hariprasad Shenai04e10e22014-07-14 21:34:51 +0530958 devp->rdev.hw_queue.t4_max_qp_depth =
Hariprasad Shenai66eb19a2014-07-21 20:55:15 +0530959 devp->rdev.hw_queue.t4_max_rq_size;
Hariprasad Shenai04e10e22014-07-14 21:34:51 +0530960 devp->rdev.hw_queue.t4_max_cq_depth =
Hariprasad Shenai66eb19a2014-07-21 20:55:15 +0530961 devp->rdev.hw_queue.t4_max_iq_size - 2;
Hariprasad Shenai04e10e22014-07-14 21:34:51 +0530962 devp->rdev.hw_queue.t4_stat_len =
963 devp->rdev.lldi.sge_egrstatuspagesize;
964
Steve Wisefa658a92014-04-09 09:38:25 -0500965 /*
966 * For T5 devices, we map all of BAR2 with WC.
967 * For T4 devices with onchip qp mem, we map only that part
968 * of BAR2 with WC.
969 */
970 devp->rdev.bar2_pa = pci_resource_start(devp->rdev.lldi.pdev, 2);
971 if (is_t5(devp->rdev.lldi.adapter_type)) {
972 devp->rdev.bar2_kva = ioremap_wc(devp->rdev.bar2_pa,
973 pci_resource_len(devp->rdev.lldi.pdev, 2));
974 if (!devp->rdev.bar2_kva) {
975 pr_err(MOD "Unable to ioremap BAR2\n");
Christoph Jaeger65b302a2014-04-21 17:02:42 +0200976 ib_dealloc_device(&devp->ibdev);
Steve Wisefa658a92014-04-09 09:38:25 -0500977 return ERR_PTR(-EINVAL);
978 }
979 } else if (ocqp_supported(infop)) {
980 devp->rdev.oc_mw_pa =
981 pci_resource_start(devp->rdev.lldi.pdev, 2) +
982 pci_resource_len(devp->rdev.lldi.pdev, 2) -
983 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size);
984 devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
985 devp->rdev.lldi.vr->ocq.size);
986 if (!devp->rdev.oc_mw_kva) {
987 pr_err(MOD "Unable to ioremap onchip mem\n");
Christoph Jaeger65b302a2014-04-21 17:02:42 +0200988 ib_dealloc_device(&devp->ibdev);
Steve Wisefa658a92014-04-09 09:38:25 -0500989 return ERR_PTR(-EINVAL);
990 }
991 }
Steve Wisec6d7b262010-09-13 11:23:57 -0500992
Steve Wise2f25e9a2011-05-09 22:06:23 -0700993 PDBG(KERN_INFO MOD "ocq memory: "
Steve Wisec6d7b262010-09-13 11:23:57 -0500994 "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
995 devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
996 devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
997
Steve Wisecfdda9d2010-04-21 15:30:06 -0700998 ret = c4iw_rdev_open(&devp->rdev);
999 if (ret) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07001000 printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
1001 ib_dealloc_device(&devp->ibdev);
Steve Wisebbe9a0a2011-05-09 22:06:22 -07001002 return ERR_PTR(ret);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001003 }
1004
1005 idr_init(&devp->cqidr);
1006 idr_init(&devp->qpidr);
1007 idr_init(&devp->mmidr);
Vipul Pandya793dad92012-12-10 09:30:56 +00001008 idr_init(&devp->hwtid_idr);
1009 idr_init(&devp->stid_idr);
1010 idr_init(&devp->atid_idr);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001011 spin_lock_init(&devp->lock);
Vipul Pandya8d81ef32012-05-18 15:29:27 +05301012 mutex_init(&devp->rdev.stats.lock);
Vipul Pandya2c974782012-05-18 15:29:28 +05301013 mutex_init(&devp->db_mutex);
Steve Wise05eb2382014-03-14 21:52:08 +05301014 INIT_LIST_HEAD(&devp->db_fc_list);
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05301015 devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001016
Steve Wisecfdda9d2010-04-21 15:30:06 -07001017 if (c4iw_debugfs_root) {
1018 devp->debugfs_root = debugfs_create_dir(
1019 pci_name(devp->rdev.lldi.pdev),
1020 c4iw_debugfs_root);
1021 setup_debugfs(devp);
1022 }
Steve Wise9eccfe12014-03-26 17:08:09 -05001023
Steve Wise9eccfe12014-03-26 17:08:09 -05001024
Steve Wisecfdda9d2010-04-21 15:30:06 -07001025 return devp;
1026}
1027
1028static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
1029{
Steve Wise2f25e9a2011-05-09 22:06:23 -07001030 struct uld_ctx *ctx;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001031 static int vers_printed;
1032 int i;
1033
1034 if (!vers_printed++)
Vipul Pandyaf079af72013-03-14 05:08:58 +00001035 pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
1036 DRV_VERSION);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001037
Steve Wise2f25e9a2011-05-09 22:06:23 -07001038 ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
1039 if (!ctx) {
1040 ctx = ERR_PTR(-ENOMEM);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001041 goto out;
Steve Wise2f25e9a2011-05-09 22:06:23 -07001042 }
1043 ctx->lldi = *infop;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001044
1045 PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
Steve Wise2f25e9a2011-05-09 22:06:23 -07001046 __func__, pci_name(ctx->lldi.pdev),
1047 ctx->lldi.nchan, ctx->lldi.nrxq,
1048 ctx->lldi.ntxq, ctx->lldi.nports);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001049
Steve Wise2f25e9a2011-05-09 22:06:23 -07001050 mutex_lock(&dev_mutex);
1051 list_add_tail(&ctx->entry, &uld_ctx_list);
1052 mutex_unlock(&dev_mutex);
1053
1054 for (i = 0; i < ctx->lldi.nrxq; i++)
1055 PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001056out:
Steve Wise2f25e9a2011-05-09 22:06:23 -07001057 return ctx;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001058}
1059
Vipul Pandya1cab7752012-12-10 09:30:55 +00001060static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
1061 const __be64 *rsp,
1062 u32 pktshift)
1063{
1064 struct sk_buff *skb;
1065
1066 /*
1067 * Allocate space for cpl_pass_accept_req which will be synthesized by
1068 * driver. Once the driver synthesizes the request the skb will go
1069 * through the regular cpl_pass_accept_req processing.
1070 * The math here assumes sizeof cpl_pass_accept_req >= sizeof
1071 * cpl_rx_pkt.
1072 */
1073 skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +
1074 sizeof(struct rss_header) - pktshift, GFP_ATOMIC);
1075 if (unlikely(!skb))
1076 return NULL;
1077
1078 __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
1079 sizeof(struct rss_header) - pktshift);
1080
1081 /*
1082 * This skb will contain:
1083 * rss_header from the rspq descriptor (1 flit)
1084 * cpl_rx_pkt struct from the rspq descriptor (2 flits)
1085 * space for the difference between the size of an
1086 * rx_pkt and pass_accept_req cpl (1 flit)
1087 * the packet data from the gl
1088 */
1089 skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) +
1090 sizeof(struct rss_header));
1091 skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) +
1092 sizeof(struct cpl_pass_accept_req),
1093 gl->va + pktshift,
1094 gl->tot_len - pktshift);
1095 return skb;
1096}
1097
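/*
 * Handle a bare CPL_RX_PKT seen on the offload queue: copy the GL into an skb
 * with room for the cpl_pass_accept_req the driver will synthesize, then run
 * the registered CPL_RX_PKT handler on it.
 */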
1098static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,
1099 const __be64 *rsp)
1100{
1101 unsigned int opcode = *(u8 *)rsp;
1102 struct sk_buff *skb;
1103
1104 if (opcode != CPL_RX_PKT)
1105 goto out;
1106
1107 skb = copy_gl_to_skb_pkt(gl , rsp, dev->rdev.lldi.sge_pktshift);
1108 if (skb == NULL)
1109 goto out;
1110
1111 if (c4iw_handlers[opcode] == NULL) {
1112 pr_info("%s no handler opcode 0x%x...\n", __func__,
1113 opcode);
1114 kfree_skb(skb);
1115 goto out;
1116 }
1117 c4iw_handlers[opcode](dev, skb);
1118 return 1;
1119out:
1120 return 0;
1121}
1122
Steve Wisecfdda9d2010-04-21 15:30:06 -07001123static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
1124 const struct pkt_gl *gl)
1125{
Steve Wise2f25e9a2011-05-09 22:06:23 -07001126 struct uld_ctx *ctx = handle;
1127 struct c4iw_dev *dev = ctx->dev;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001128 struct sk_buff *skb;
Vipul Pandya1cab7752012-12-10 09:30:55 +00001129 u8 opcode;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001130
1131 if (gl == NULL) {
1132 /* omit RSS and rsp_ctrl at end of descriptor */
1133 unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;
1134
1135 skb = alloc_skb(256, GFP_ATOMIC);
1136 if (!skb)
1137 goto nomem;
1138 __skb_put(skb, len);
1139 skb_copy_to_linear_data(skb, &rsp[1], len);
1140 } else if (gl == CXGB4_MSG_AN) {
1141 const struct rsp_ctrl *rc = (void *)rsp;
1142
1143 u32 qid = be32_to_cpu(rc->pldbuflen_qid);
1144 c4iw_ev_handler(dev, qid);
1145 return 0;
Vipul Pandya1cab7752012-12-10 09:30:55 +00001146 } else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) {
1147 if (recv_rx_pkt(dev, gl, rsp))
1148 return 0;
1149
1150 pr_info("%s: unexpected FL contents at %p, " \
1151 "RSS %#llx, FL %#llx, len %u\n",
1152 pci_name(ctx->lldi.pdev), gl->va,
1153 (unsigned long long)be64_to_cpu(*rsp),
Vipul Pandyaef5d6352013-01-07 13:12:00 +00001154 (unsigned long long)be64_to_cpu(
1155 *(__force __be64 *)gl->va),
Vipul Pandya1cab7752012-12-10 09:30:55 +00001156 gl->tot_len);
1157
1158 return 0;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001159 } else {
Steve Wiseda411ba2010-10-18 15:16:45 +00001160 skb = cxgb4_pktgl_to_skb(gl, 128, 128);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001161 if (unlikely(!skb))
1162 goto nomem;
1163 }
1164
Vipul Pandya1cab7752012-12-10 09:30:55 +00001165 opcode = *(u8 *)rsp;
Steve Wisedbb084c2014-03-21 20:40:30 +05301166 if (c4iw_handlers[opcode]) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07001167 c4iw_handlers[opcode](dev, skb);
Steve Wisedbb084c2014-03-21 20:40:30 +05301168 } else {
Vipul Pandya1cab7752012-12-10 09:30:55 +00001169 pr_info("%s no handler opcode 0x%x...\n", __func__,
Steve Wisecfdda9d2010-04-21 15:30:06 -07001170 opcode);
Steve Wisedbb084c2014-03-21 20:40:30 +05301171 kfree_skb(skb);
1172 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07001173
1174 return 0;
1175nomem:
1176 return -1;
1177}
1178
1179static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
1180{
Steve Wise2f25e9a2011-05-09 22:06:23 -07001181 struct uld_ctx *ctx = handle;
Steve Wise1c01c532010-05-20 16:57:32 -05001182
Steve Wisecfdda9d2010-04-21 15:30:06 -07001183 PDBG("%s new_state %u\n", __func__, new_state);
Steve Wise1c01c532010-05-20 16:57:32 -05001184 switch (new_state) {
1185 case CXGB4_STATE_UP:
Steve Wise2f25e9a2011-05-09 22:06:23 -07001186 printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
1187 if (!ctx->dev) {
Steve Wise9efe10a2011-10-06 09:32:44 -07001188 int ret;
Steve Wise2f25e9a2011-05-09 22:06:23 -07001189
1190 ctx->dev = c4iw_alloc(&ctx->lldi);
Steve Wise9efe10a2011-10-06 09:32:44 -07001191 if (IS_ERR(ctx->dev)) {
1192 printk(KERN_ERR MOD
1193 "%s: initialization failed: %ld\n",
1194 pci_name(ctx->lldi.pdev),
1195 PTR_ERR(ctx->dev));
1196 ctx->dev = NULL;
1197 break;
1198 }
1199 ret = c4iw_register_device(ctx->dev);
1200 if (ret) {
Steve Wise1c01c532010-05-20 16:57:32 -05001201 printk(KERN_ERR MOD
1202 "%s: RDMA registration failed: %d\n",
Steve Wise2f25e9a2011-05-09 22:06:23 -07001203 pci_name(ctx->lldi.pdev), ret);
Steve Wise9efe10a2011-10-06 09:32:44 -07001204 c4iw_dealloc(ctx);
1205 }
Steve Wise1c01c532010-05-20 16:57:32 -05001206 }
1207 break;
1208 case CXGB4_STATE_DOWN:
1209 printk(KERN_INFO MOD "%s: Down\n",
Steve Wise2f25e9a2011-05-09 22:06:23 -07001210 pci_name(ctx->lldi.pdev));
1211 if (ctx->dev)
1212 c4iw_remove(ctx);
Steve Wise1c01c532010-05-20 16:57:32 -05001213 break;
1214 case CXGB4_STATE_START_RECOVERY:
1215 printk(KERN_INFO MOD "%s: Fatal Error\n",
Steve Wise2f25e9a2011-05-09 22:06:23 -07001216 pci_name(ctx->lldi.pdev));
1217 if (ctx->dev) {
Steve Wise767fbe82011-03-11 22:30:53 +00001218 struct ib_event event;
1219
Steve Wise2f25e9a2011-05-09 22:06:23 -07001220 ctx->dev->rdev.flags |= T4_FATAL_ERROR;
Steve Wise767fbe82011-03-11 22:30:53 +00001221 memset(&event, 0, sizeof event);
1222 event.event = IB_EVENT_DEVICE_FATAL;
Steve Wise2f25e9a2011-05-09 22:06:23 -07001223 event.device = &ctx->dev->ibdev;
Steve Wise767fbe82011-03-11 22:30:53 +00001224 ib_dispatch_event(&event);
Steve Wise2f25e9a2011-05-09 22:06:23 -07001225 c4iw_remove(ctx);
Steve Wise767fbe82011-03-11 22:30:53 +00001226 }
Steve Wise1c01c532010-05-20 16:57:32 -05001227 break;
1228 case CXGB4_STATE_DETACH:
1229 printk(KERN_INFO MOD "%s: Detach\n",
Steve Wise2f25e9a2011-05-09 22:06:23 -07001230 pci_name(ctx->lldi.pdev));
1231 if (ctx->dev)
1232 c4iw_remove(ctx);
Steve Wise1c01c532010-05-20 16:57:32 -05001233 break;
1234 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07001235 return 0;
1236}
1237
Vipul Pandya2c974782012-05-18 15:29:28 +05301238static int disable_qp_db(int id, void *p, void *data)
1239{
1240 struct c4iw_qp *qp = p;
1241
1242 t4_disable_wq_db(&qp->wq);
1243 return 0;
1244}
1245
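/*
 * Doorbell flow control: mark the device STOPPED and stop user doorbell
 * writes, either by disabling each QP's doorbells (when the status page is
 * unavailable) or by setting db_off in the shared status page.
 */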
1246static void stop_queues(struct uld_ctx *ctx)
1247{
Steve Wise05eb2382014-03-14 21:52:08 +05301248 unsigned long flags;
1249
1250 spin_lock_irqsave(&ctx->dev->lock, flags);
1251 ctx->dev->rdev.stats.db_state_transitions++;
1252 ctx->dev->db_state = STOPPED;
1253 if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED)
Vipul Pandya422eea02012-05-18 15:29:30 +05301254 idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
Steve Wise05eb2382014-03-14 21:52:08 +05301255 else
1256 ctx->dev->rdev.status_page->db_off = 1;
1257 spin_unlock_irqrestore(&ctx->dev->lock, flags);
Vipul Pandya2c974782012-05-18 15:29:28 +05301258}
1259
1260static int enable_qp_db(int id, void *p, void *data)
1261{
1262 struct c4iw_qp *qp = p;
1263
1264 t4_enable_wq_db(&qp->wq);
1265 return 0;
1266}
1267
Steve Wise05eb2382014-03-14 21:52:08 +05301268static void resume_rc_qp(struct c4iw_qp *qp)
1269{
1270 spin_lock(&qp->lock);
Steve Wisefa658a92014-04-09 09:38:25 -05001271 t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc,
1272 is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
Steve Wise05eb2382014-03-14 21:52:08 +05301273 qp->wq.sq.wq_pidx_inc = 0;
Steve Wisefa658a92014-04-09 09:38:25 -05001274 t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc,
1275 is_t5(qp->rhp->rdev.lldi.adapter_type), NULL);
Steve Wise05eb2382014-03-14 21:52:08 +05301276 qp->wq.rq.wq_pidx_inc = 0;
1277 spin_unlock(&qp->lock);
1278}
1279
1280static void resume_a_chunk(struct uld_ctx *ctx)
1281{
1282 int i;
1283 struct c4iw_qp *qp;
1284
1285 for (i = 0; i < DB_FC_RESUME_SIZE; i++) {
1286 qp = list_first_entry(&ctx->dev->db_fc_list, struct c4iw_qp,
1287 db_fc_entry);
1288 list_del_init(&qp->db_fc_entry);
1289 resume_rc_qp(qp);
1290 if (list_empty(&ctx->dev->db_fc_list))
1291 break;
1292 }
1293}
1294
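/*
 * Leave doorbell flow control: drain db_fc_list in chunks while the doorbell
 * FIFO has room, then re-enable user doorbells and return to NORMAL.
 */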
Vipul Pandya2c974782012-05-18 15:29:28 +05301295static void resume_queues(struct uld_ctx *ctx)
1296{
1297 spin_lock_irq(&ctx->dev->lock);
Steve Wise05eb2382014-03-14 21:52:08 +05301298 if (ctx->dev->db_state != STOPPED)
1299 goto out;
1300 ctx->dev->db_state = FLOW_CONTROL;
1301 while (1) {
1302 if (list_empty(&ctx->dev->db_fc_list)) {
1303 WARN_ON(ctx->dev->db_state != FLOW_CONTROL);
1304 ctx->dev->db_state = NORMAL;
1305 ctx->dev->rdev.stats.db_state_transitions++;
1306 if (ctx->dev->rdev.flags & T4_STATUS_PAGE_DISABLED) {
1307 idr_for_each(&ctx->dev->qpidr, enable_qp_db,
1308 NULL);
1309 } else {
1310 ctx->dev->rdev.status_page->db_off = 0;
1311 }
1312 break;
1313 } else {
1314 if (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1)
1315 < (ctx->dev->rdev.lldi.dbfifo_int_thresh <<
1316 DB_FC_DRAIN_THRESH)) {
1317 resume_a_chunk(ctx);
1318 }
1319 if (!list_empty(&ctx->dev->db_fc_list)) {
1320 spin_unlock_irq(&ctx->dev->lock);
1321 if (DB_FC_RESUME_DELAY) {
1322 set_current_state(TASK_UNINTERRUPTIBLE);
1323 schedule_timeout(DB_FC_RESUME_DELAY);
1324 }
1325 spin_lock_irq(&ctx->dev->lock);
1326 if (ctx->dev->db_state != FLOW_CONTROL)
1327 break;
1328 }
1329 }
Vipul Pandya422eea02012-05-18 15:29:30 +05301330 }
Steve Wise05eb2382014-03-14 21:52:08 +05301331out:
1332 if (ctx->dev->db_state != NORMAL)
1333 ctx->dev->rdev.stats.db_fc_interruptions++;
Vipul Pandya2c974782012-05-18 15:29:28 +05301334 spin_unlock_irq(&ctx->dev->lock);
1335}
1336
Vipul Pandya422eea02012-05-18 15:29:30 +05301337struct qp_list {
1338 unsigned idx;
1339 struct c4iw_qp **qps;
1340};
1341
1342static int add_and_ref_qp(int id, void *p, void *data)
1343{
1344 struct qp_list *qp_listp = data;
1345 struct c4iw_qp *qp = p;
1346
1347 c4iw_qp_add_ref(&qp->ibqp);
1348 qp_listp->qps[qp_listp->idx++] = qp;
1349 return 0;
1350}
1351
1352static int count_qps(int id, void *p, void *data)
1353{
1354 unsigned *countp = data;
1355 (*countp)++;
1356 return 0;
1357}
1358
Steve Wise05eb2382014-03-14 21:52:08 +05301359static void deref_qps(struct qp_list *qp_list)
Vipul Pandya422eea02012-05-18 15:29:30 +05301360{
1361 int idx;
1362
Steve Wise05eb2382014-03-14 21:52:08 +05301363 for (idx = 0; idx < qp_list->idx; idx++)
1364 c4iw_qp_rem_ref(&qp_list->qps[idx]->ibqp);
Vipul Pandya422eea02012-05-18 15:29:30 +05301365}
1366
1367static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
1368{
1369 int idx;
1370 int ret;
1371
1372 for (idx = 0; idx < qp_list->idx; idx++) {
1373 struct c4iw_qp *qp = qp_list->qps[idx];
1374
Steve Wise05eb2382014-03-14 21:52:08 +05301375 spin_lock_irq(&qp->rhp->lock);
1376 spin_lock(&qp->lock);
Vipul Pandya422eea02012-05-18 15:29:30 +05301377 ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
1378 qp->wq.sq.qid,
1379 t4_sq_host_wq_pidx(&qp->wq),
1380 t4_sq_wq_size(&qp->wq));
1381 if (ret) {
Steve Wise05eb2382014-03-14 21:52:08 +05301382 pr_err(MOD "%s: Fatal error - "
Vipul Pandya422eea02012-05-18 15:29:30 +05301383 "DB overflow recovery failed - "
1384 "error syncing SQ qid %u\n",
1385 pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
Steve Wise05eb2382014-03-14 21:52:08 +05301386 spin_unlock(&qp->lock);
1387 spin_unlock_irq(&qp->rhp->lock);
Vipul Pandya422eea02012-05-18 15:29:30 +05301388 return;
1389 }
Steve Wise05eb2382014-03-14 21:52:08 +05301390 qp->wq.sq.wq_pidx_inc = 0;
Vipul Pandya422eea02012-05-18 15:29:30 +05301391
1392 ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
1393 qp->wq.rq.qid,
1394 t4_rq_host_wq_pidx(&qp->wq),
1395 t4_rq_wq_size(&qp->wq));
1396
1397 if (ret) {
Steve Wise05eb2382014-03-14 21:52:08 +05301398 pr_err(MOD "%s: Fatal error - "
Vipul Pandya422eea02012-05-18 15:29:30 +05301399 "DB overflow recovery failed - "
1400 "error syncing RQ qid %u\n",
1401 pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
Steve Wise05eb2382014-03-14 21:52:08 +05301402 spin_unlock(&qp->lock);
1403 spin_unlock_irq(&qp->rhp->lock);
Vipul Pandya422eea02012-05-18 15:29:30 +05301404 return;
1405 }
Steve Wise05eb2382014-03-14 21:52:08 +05301406 qp->wq.rq.wq_pidx_inc = 0;
1407 spin_unlock(&qp->lock);
1408 spin_unlock_irq(&qp->rhp->lock);
Vipul Pandya422eea02012-05-18 15:29:30 +05301409
1410 /* Wait for the dbfifo to drain */
1411 while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
1412 set_current_state(TASK_UNINTERRUPTIBLE);
1413 schedule_timeout(usecs_to_jiffies(10));
1414 }
1415 }
1416}
1417
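/*
 * Doorbell-drop recovery: flush the SGE EQ cache, then reference every QP and
 * resync its SQ/RQ host producer indices with the hardware.
 */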
1418static void recover_queues(struct uld_ctx *ctx)
1419{
1420 int count = 0;
1421 struct qp_list qp_list;
1422 int ret;
1423
Vipul Pandya422eea02012-05-18 15:29:30 +05301424 /* slow everybody down */
1425 set_current_state(TASK_UNINTERRUPTIBLE);
1426 schedule_timeout(usecs_to_jiffies(1000));
1427
Vipul Pandya422eea02012-05-18 15:29:30 +05301428 /* flush the SGE contexts */
1429 ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
1430 if (ret) {
1431 printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
1432 pci_name(ctx->lldi.pdev));
Steve Wise05eb2382014-03-14 21:52:08 +05301433 return;
Vipul Pandya422eea02012-05-18 15:29:30 +05301434 }
1435
1436 /* Count active queues so we can build a list of queues to recover */
1437 spin_lock_irq(&ctx->dev->lock);
Steve Wise05eb2382014-03-14 21:52:08 +05301438 WARN_ON(ctx->dev->db_state != STOPPED);
1439 ctx->dev->db_state = RECOVERY;
Vipul Pandya422eea02012-05-18 15:29:30 +05301440 idr_for_each(&ctx->dev->qpidr, count_qps, &count);
1441
1442 qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
1443 if (!qp_list.qps) {
1444 printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
1445 pci_name(ctx->lldi.pdev));
1446 spin_unlock_irq(&ctx->dev->lock);
Steve Wise05eb2382014-03-14 21:52:08 +05301447 return;
Vipul Pandya422eea02012-05-18 15:29:30 +05301448 }
1449 qp_list.idx = 0;
1450
1451 /* add and ref each qp so it doesn't get freed */
1452 idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);
1453
1454 spin_unlock_irq(&ctx->dev->lock);
1455
1456 /* now traverse the list in a safe context to recover the db state*/
1457 recover_lost_dbs(ctx, &qp_list);
1458
1459 /* we're almost done! deref the qps and clean up */
Steve Wise05eb2382014-03-14 21:52:08 +05301460 deref_qps(&qp_list);
Vipul Pandya422eea02012-05-18 15:29:30 +05301461 kfree(qp_list.qps);
1462
Vipul Pandya422eea02012-05-18 15:29:30 +05301463 spin_lock_irq(&ctx->dev->lock);
Steve Wise05eb2382014-03-14 21:52:08 +05301464 WARN_ON(ctx->dev->db_state != RECOVERY);
1465 ctx->dev->db_state = STOPPED;
Vipul Pandya422eea02012-05-18 15:29:30 +05301466 spin_unlock_irq(&ctx->dev->lock);
Vipul Pandya422eea02012-05-18 15:29:30 +05301467}
1468
Vipul Pandya2c974782012-05-18 15:29:28 +05301469static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
1470{
1471 struct uld_ctx *ctx = handle;
1472
1473 switch (control) {
1474 case CXGB4_CONTROL_DB_FULL:
1475 stop_queues(ctx);
Vipul Pandya2c974782012-05-18 15:29:28 +05301476 ctx->dev->rdev.stats.db_full++;
Vipul Pandya2c974782012-05-18 15:29:28 +05301477 break;
1478 case CXGB4_CONTROL_DB_EMPTY:
1479 resume_queues(ctx);
1480 mutex_lock(&ctx->dev->rdev.stats.lock);
1481 ctx->dev->rdev.stats.db_empty++;
1482 mutex_unlock(&ctx->dev->rdev.stats.lock);
1483 break;
1484 case CXGB4_CONTROL_DB_DROP:
Vipul Pandya422eea02012-05-18 15:29:30 +05301485 recover_queues(ctx);
Vipul Pandya2c974782012-05-18 15:29:28 +05301486 mutex_lock(&ctx->dev->rdev.stats.lock);
1487 ctx->dev->rdev.stats.db_drop++;
1488 mutex_unlock(&ctx->dev->rdev.stats.lock);
1489 break;
1490 default:
1491 printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
1492 pci_name(ctx->lldi.pdev), control);
1493 break;
1494 }
1495 return 0;
1496}
1497
Steve Wisecfdda9d2010-04-21 15:30:06 -07001498static struct cxgb4_uld_info c4iw_uld_info = {
1499 .name = DRV_NAME,
1500 .add = c4iw_uld_add,
1501 .rx_handler = c4iw_uld_rx_handler,
1502 .state_change = c4iw_uld_state_change,
Vipul Pandya2c974782012-05-18 15:29:28 +05301503 .control = c4iw_uld_control,
Steve Wisecfdda9d2010-04-21 15:30:06 -07001504};
1505
1506static int __init c4iw_init_module(void)
1507{
1508 int err;
1509
1510 err = c4iw_cm_init();
1511 if (err)
1512 return err;
1513
1514 c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
1515 if (!c4iw_debugfs_root)
1516 printk(KERN_WARNING MOD
1517 "could not create debugfs entry, continuing\n");
1518
Steve Wise9eccfe12014-03-26 17:08:09 -05001519 if (ibnl_add_client(RDMA_NL_C4IW, RDMA_NL_IWPM_NUM_OPS,
1520 c4iw_nl_cb_table))
1521 pr_err("%s[%u]: Failed to add netlink callback\n"
1522 , __func__, __LINE__);
1523
Steve Wise46c13762014-06-20 14:26:25 -05001524 err = iwpm_init(RDMA_NL_C4IW);
1525 if (err) {
1526 pr_err("port mapper initialization failed with %d\n", err);
1527 ibnl_remove_client(RDMA_NL_C4IW);
1528 c4iw_cm_term();
1529 debugfs_remove_recursive(c4iw_debugfs_root);
1530 return err;
1531 }
1532
Steve Wisecfdda9d2010-04-21 15:30:06 -07001533 cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
1534
1535 return 0;
1536}
1537
1538static void __exit c4iw_exit_module(void)
1539{
Steve Wise2f25e9a2011-05-09 22:06:23 -07001540 struct uld_ctx *ctx, *tmp;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001541
Steve Wisecfdda9d2010-04-21 15:30:06 -07001542 mutex_lock(&dev_mutex);
Steve Wise2f25e9a2011-05-09 22:06:23 -07001543 list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
1544 if (ctx->dev)
1545 c4iw_remove(ctx);
1546 kfree(ctx);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001547 }
1548 mutex_unlock(&dev_mutex);
Steve Wisefd388ce2010-05-20 16:57:27 -05001549 cxgb4_unregister_uld(CXGB4_ULD_RDMA);
Steve Wise46c13762014-06-20 14:26:25 -05001550 iwpm_exit(RDMA_NL_C4IW);
Steve Wise9eccfe12014-03-26 17:08:09 -05001551 ibnl_remove_client(RDMA_NL_C4IW);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001552 c4iw_cm_term();
1553 debugfs_remove_recursive(c4iw_debugfs_root);
1554}
1555
1556module_init(c4iw_init_module);
1557module_exit(c4iw_exit_module);