/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <asm/delay.h>

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <net/net_namespace.h>

#include "cxio_resource.h"
#include "cxio_hal.h"
#include "cxgb3_offload.h"
#include "sge_defs.h"

static LIST_HEAD(rdev_list);
static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL;

static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
{
	struct cxio_rdev *rdev;

	list_for_each_entry(rdev, &rdev_list, entry)
		if (!strcmp(rdev->dev_name, dev_name))
			return rdev;
	return NULL;
}

static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev)
{
	struct cxio_rdev *rdev;

	list_for_each_entry(rdev, &rdev_list, entry)
		if (rdev->t3cdev_p == tdev)
			return rdev;
	return NULL;
}

int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
		   enum t3_cq_opcode op, u32 credit)
{
	int ret;
	struct t3_cqe *cqe;
	u32 rptr;
	struct rdma_cq_op setup;

	setup.id = cq->cqid;
	setup.credits = (op == CQ_CREDIT_UPDATE) ? credit : 0;
	setup.op = op;
	ret = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_OP, &setup);

	if ((ret < 0) || (op == CQ_CREDIT_UPDATE))
		return ret;

	/*
	 * If the rearm returned an index other than our current index,
	 * then there might be CQEs in flight (being DMA'd). We must wait
	 * here for them to complete or the consumer can miss a notification.
	 */
	if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) {
		int i = 0;

		rptr = cq->rptr;

		/*
		 * Keep the generation correct by bumping rptr until it
		 * matches the index returned by the rearm - 1.
		 */
		while (Q_PTR2IDX((rptr + 1), cq->size_log2) != ret)
			rptr++;

		/*
		 * Now rptr is the index for the (last) cqe that was
		 * in-flight at the time the HW rearmed the CQ. We
		 * spin until that CQE is valid.
		 */
		cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
		while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
			udelay(1);
			if (i++ > 1000000) {
				printk(KERN_ERR "%s: stalled rnic\n",
				       rdev_p->dev_name);
				return -EIO;
			}
		}

		return 1;
	}

	return 0;
}
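
/*
 * Illustrative sketch (not driver code): a hypothetical caller that
 * rearms a CQ for notifications and handles the three possible outcomes
 * of cxio_hal_cq_op() might look like this (poll_again() is made up for
 * the example):
 *
 *	ret = cxio_hal_cq_op(rdev_p, cq, CQ_ARM_AN, 0);
 *	if (ret < 0)
 *		return ret;		// control operation failed
 *	else if (ret == 1)
 *		poll_again(cq);		// CQEs raced with the rearm
 *	// ret == 0: armed, nothing was in flight
 */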

static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
{
	struct rdma_cq_setup setup;

	setup.id = cqid;
	setup.base_addr = 0;	/* NULL address */
	setup.size = 0;		/* disable the CQ */
	setup.credits = 0;
	setup.credit_thres = 0;
	setup.ovfl_mode = 0;
	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}

static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
{
	u64 sge_cmd;
	struct t3_modify_qp_wr *wqe;
	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);

	if (!skb) {
		PDBG("%s alloc_skb failed\n", __func__);
		return -ENOMEM;
	}
	wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof(*wqe));
	build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 3, 0, qpid, 7);
	wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
	sge_cmd = qpid << 8 | 3;
	wqe->sge_cmd = cpu_to_be64(sge_cmd);
	skb->priority = CPL_PRIORITY_CONTROL;
	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
}

int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
	struct rdma_cq_setup setup;
	int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);

	cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
	if (!cq->cqid)
		return -ENOMEM;
	cq->sw_queue = kzalloc(size, GFP_KERNEL);
	if (!cq->sw_queue)
		return -ENOMEM;
	cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
				       (1UL << (cq->size_log2)) *
				       sizeof(struct t3_cqe),
				       &(cq->dma_addr), GFP_KERNEL);
	if (!cq->queue) {
		kfree(cq->sw_queue);
		return -ENOMEM;
	}
	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, size);
	setup.id = cq->cqid;
	setup.base_addr = (u64) (cq->dma_addr);
	setup.size = 1UL << cq->size_log2;
	setup.credits = 65535;
	setup.credit_thres = 1;
	if (rdev_p->t3cdev_p->type != T3A)
		setup.ovfl_mode = 0;
	else
		setup.ovfl_mode = 1;
	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}
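
/*
 * Illustrative sketch (not driver code): queues in this HAL are sized
 * as powers of two, so a hypothetical caller wanting a 256-entry CQ
 * sets size_log2 before calling cxio_create_cq():
 *
 *	struct t3_cq cq = { .size_log2 = 8 };	// 1 << 8 = 256 CQEs
 *
 *	err = cxio_create_cq(rdev_p, &cq);
 *	...
 *	cxio_destroy_cq(rdev_p, &cq);
 */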

int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
	struct rdma_cq_setup setup;

	setup.id = cq->cqid;
	setup.base_addr = (u64) (cq->dma_addr);
	setup.size = 1UL << cq->size_log2;
	setup.credits = setup.size;
	setup.credit_thres = setup.size;	/* TBD: overflow recovery */
	setup.ovfl_mode = 1;
	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}

static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
	struct cxio_qpid_list *entry;
	u32 qpid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->qpids)) {
		entry = list_entry(uctx->qpids.next, struct cxio_qpid_list,
				   entry);
		list_del(&entry->entry);
		qpid = entry->qpid;
		kfree(entry);
	} else {
		qpid = cxio_hal_get_qpid(rdev_p->rscp);
		if (!qpid)
			goto out;
		for (i = qpid + 1; i & rdev_p->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				break;
			entry->qpid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qpid 0x%x\n", __func__, qpid);
	return qpid;
}
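
/*
 * Illustrative note (not driver code): qpids are handed out by the
 * resource pool in aligned blocks, and get_qpid() caches the unused
 * siblings of a block on the per-context free list.  For example, with
 * qpmask == 3 (blocks of 4), a pool allocation returning qpid 16 also
 * stocks the uctx list so later calls avoid the pool entirely:
 *
 *	get_qpid() -> 16, and 17, 18, 19 are queued on uctx->qpids
 *	get_qpid() -> 17	// served from the cached list
 */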

static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
		     struct cxio_ucontext *uctx)
{
	struct cxio_qpid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qpid 0x%x\n", __func__, qpid);
	entry->qpid = qpid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->qpids);
	mutex_unlock(&uctx->lock);
}

void cxio_release_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct cxio_qpid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct cxio_qpid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qpid & rdev_p->qpmask))
			cxio_hal_put_qpid(rdev_p->rscp, entry->qpid);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void cxio_init_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	mutex_init(&uctx->lock);
}

int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
		   struct t3_wq *wq, struct cxio_ucontext *uctx)
{
	int depth = 1UL << wq->size_log2;
	int rqsize = 1UL << wq->rq_size_log2;

	wq->qpid = get_qpid(rdev_p, uctx);
	if (!wq->qpid)
		return -ENOMEM;

	wq->rq = kzalloc(depth * sizeof(u64), GFP_KERNEL);
	if (!wq->rq)
		goto err1;

	wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize);
	if (!wq->rq_addr)
		goto err2;

	wq->sq = kzalloc(depth * sizeof(struct t3_swsq), GFP_KERNEL);
	if (!wq->sq)
		goto err3;

	wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
				       depth * sizeof(union t3_wr),
				       &(wq->dma_addr), GFP_KERNEL);
	if (!wq->queue)
		goto err4;

	memset(wq->queue, 0, depth * sizeof(union t3_wr));
	pci_unmap_addr_set(wq, mapping, wq->dma_addr);
	wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
	if (!kernel_domain)
		wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
					(wq->qpid << rdev_p->qpshift);
	PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __func__,
	     wq->qpid, wq->doorbell, (unsigned long long) wq->udb);
	return 0;
err4:
	kfree(wq->sq);
err3:
	cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, rqsize);
err2:
	kfree(wq->rq);
err1:
	put_qpid(rdev_p, wq->qpid, uctx);
	return -ENOMEM;
}
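
/*
 * Illustrative note (not driver code): for a user-mode QP, wq->udb is
 * simply the physical base of the user doorbell region plus a per-QP
 * offset.  E.g. if qpshift were 12 (one 4KB doorbell page per QP),
 * qpid 5 would map to:
 *
 *	udb = udbell_physbase + (5 << 12)
 *
 * which userspace then mmap()s to ring its own doorbell.
 */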

int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
	int err;

	err = cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
			  (1UL << (cq->size_log2))
			  * sizeof(struct t3_cqe), cq->queue,
			  pci_unmap_addr(cq, mapping));
	cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
	return err;
}

int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
		    struct cxio_ucontext *uctx)
{
	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
			  (1UL << (wq->size_log2))
			  * sizeof(union t3_wr), wq->queue,
			  pci_unmap_addr(wq, mapping));
	kfree(wq->sq);
	cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
	kfree(wq->rq);
	put_qpid(rdev_p, wq->qpid, uctx);
	return 0;
}

static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
{
	struct t3_cqe cqe;

	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
	     wq, cq, cq->sw_rptr, cq->sw_wptr);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
				 V_CQE_OPCODE(T3_SEND) |
				 V_CQE_TYPE(0) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->qpid) |
				 V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
						       cq->size_log2)));
	*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
	cq->sw_wptr++;
}

void cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
	u32 ptr;

	PDBG("%s wq %p cq %p\n", __func__, wq, cq);

	/* flush RQ */
	PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
	     wq->rq_rptr, wq->rq_wptr, count);
	ptr = wq->rq_rptr + count;
	while (ptr++ != wq->rq_wptr)
		insert_recv_cqe(wq, cq);
}
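
/*
 * Illustrative note (not driver code): "count" is the number of RQ
 * entries the caller has already accounted for (typically from
 * cxio_count_rcqes()), so only the remainder gets synthetic
 * TPT_ERR_SWFLUSH completions.  With rq_rptr == 10, rq_wptr == 14 and
 * count == 1, entries 11, 12 and 13 receive flush CQEs:
 *
 *	cxio_count_rcqes(cq, wq, &count);
 *	cxio_flush_rq(wq, cq, count);
 */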

static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
			  struct t3_swsq *sqp)
{
	struct t3_cqe cqe;

	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
	     wq, cq, cq->sw_rptr, cq->sw_wptr);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
				 V_CQE_OPCODE(sqp->opcode) |
				 V_CQE_TYPE(1) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->qpid) |
				 V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
						       cq->size_log2)));
	cqe.u.scqe.wrid_hi = sqp->sq_wptr;

	*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
	cq->sw_wptr++;
}

void cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
	__u32 ptr = wq->sq_rptr + count;
	struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);

	while (ptr != wq->sq_wptr) {
		insert_sq_cqe(wq, cq, sqp);
		ptr++;
		/* re-index so the sw sq pointer wraps with the ring */
		sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
	}
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 */
void cxio_flush_hw_cq(struct t3_cq *cq)
{
	struct t3_cqe *cqe, *swcqe;

	PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
	cqe = cxio_next_hw_cqe(cq);
	while (cqe) {
		PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
		     __func__, cq->rptr, cq->sw_wptr);
		swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
		*swcqe = *cqe;
		swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
		cq->sw_wptr++;
		cq->rptr++;
		cqe = cxio_next_hw_cqe(cq);
	}
}

static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
{
	if (CQE_OPCODE(*cqe) == T3_TERMINATE)
		return 0;

	if ((CQE_OPCODE(*cqe) == T3_RDMA_WRITE) && RQ_TYPE(*cqe))
		return 0;

	if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
		return 0;

	if ((CQE_OPCODE(*cqe) == T3_SEND) && RQ_TYPE(*cqe) &&
	    Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
		return 0;

	return 1;
}

void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
{
	struct t3_cqe *cqe;
	u32 ptr;

	*count = 0;
	ptr = cq->sw_rptr;
	while (!Q_EMPTY(ptr, cq->sw_wptr)) {
		cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
		if ((SQ_TYPE(*cqe) ||
		     ((CQE_OPCODE(*cqe) == T3_READ_RESP) && wq->oldest_read)) &&
		    (CQE_QPID(*cqe) == wq->qpid))
			(*count)++;
		ptr++;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
{
	struct t3_cqe *cqe;
	u32 ptr;

	*count = 0;
	PDBG("%s count zero %d\n", __func__, *count);
	ptr = cq->sw_rptr;
	while (!Q_EMPTY(ptr, cq->sw_wptr)) {
		cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
		if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) &&
		    (CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		ptr++;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
{
	struct rdma_cq_setup setup;

	setup.id = 0;
	setup.base_addr = 0;	/* NULL address */
	setup.size = 1;		/* enable the CQ */
	setup.credits = 0;

	/* force SGE to redirect to RspQ and interrupt */
	setup.credit_thres = 0;
	setup.ovfl_mode = 1;
	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}

static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
{
	int err;
	u64 sge_cmd, ctx0, ctx1;
	u64 base_addr;
	struct t3_modify_qp_wr *wqe;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
	if (!skb) {
		PDBG("%s alloc_skb failed\n", __func__);
		return -ENOMEM;
	}
	err = cxio_hal_init_ctrl_cq(rdev_p);
	if (err) {
		PDBG("%s err %d initializing ctrl_cq\n", __func__, err);
		goto err;
	}
	rdev_p->ctrl_qp.workq = dma_alloc_coherent(
					&(rdev_p->rnic_info.pdev->dev),
					(1 << T3_CTRL_QP_SIZE_LOG2) *
					sizeof(union t3_wr),
					&(rdev_p->ctrl_qp.dma_addr),
					GFP_KERNEL);
	if (!rdev_p->ctrl_qp.workq) {
		PDBG("%s dma_alloc_coherent failed\n", __func__);
		err = -ENOMEM;
		goto err;
	}
	pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
			   rdev_p->ctrl_qp.dma_addr);
	rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
	memset(rdev_p->ctrl_qp.workq, 0,
	       (1 << T3_CTRL_QP_SIZE_LOG2) * sizeof(union t3_wr));

	mutex_init(&rdev_p->ctrl_qp.lock);
	init_waitqueue_head(&rdev_p->ctrl_qp.waitq);

	/* update HW Ctrl QP context */
	base_addr = rdev_p->ctrl_qp.dma_addr;
	base_addr >>= 12;
	ctx0 = (V_EC_SIZE((1 << T3_CTRL_QP_SIZE_LOG2)) |
		V_EC_BASE_LO((u32) base_addr & 0xffff));
	ctx0 <<= 32;
	ctx0 |= V_EC_CREDITS(FW_WR_NUM);
	base_addr >>= 16;
	ctx1 = (u32) base_addr;
	base_addr >>= 32;
	ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) | V_EC_RESPQ(0) |
			V_EC_TYPE(0) | V_EC_GEN(1) |
			V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32;
	wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof(*wqe));
	build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,
		       T3_CTL_QP_TID, 7);
	wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
	sge_cmd = (3ULL << 56) | FW_RI_SGEEC_START << 8 | 3;
	wqe->sge_cmd = cpu_to_be64(sge_cmd);
	wqe->ctx1 = cpu_to_be64(ctx1);
	wqe->ctx0 = cpu_to_be64(ctx0);
	PDBG("CtrlQP dma_addr 0x%llx workq %p size %d\n",
	     (unsigned long long) rdev_p->ctrl_qp.dma_addr,
	     rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2);
	skb->priority = CPL_PRIORITY_CONTROL;
	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
err:
	kfree_skb(skb);
	return err;
}

static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
{
	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
			  (1UL << T3_CTRL_QP_SIZE_LOG2)
			  * sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
			  pci_unmap_addr(&rdev_p->ctrl_qp, mapping));
	return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
}

/* Write len bytes of data into addr (a 32B-aligned adapter address).
 * If data is NULL, clear len bytes of memory to zero.
 * The caller acquires the ctrl_qp lock before the call.
 */
static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
				      u32 len, void *data, int completion)
{
	u32 i, nr_wqe, copy_len;
	u8 *copy_data;
	u8 wr_len, utx_len;	/* length in 8 byte flit */
	enum t3_wr_flags flag;
	__be64 *wqe;
	u64 utx_cmd;

	addr &= 0x7FFFFFF;
	nr_wqe = len % 96 ? len / 96 + 1 : len / 96;	/* 96B max per WQE */
	PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
	     __func__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
	     nr_wqe, data, addr);
	utx_len = 3;	/* in 32B unit */
	for (i = 0; i < nr_wqe; i++) {
		if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
			   T3_CTRL_QP_SIZE_LOG2)) {
			PDBG("%s ctrl_qp full wptr 0x%0x rptr 0x%0x, "
			     "wait for more space i %d\n", __func__,
			     rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
			if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
					!Q_FULL(rdev_p->ctrl_qp.rptr,
						rdev_p->ctrl_qp.wptr,
						T3_CTRL_QP_SIZE_LOG2))) {
				PDBG("%s ctrl_qp workq interrupted\n",
				     __func__);
				return -ERESTARTSYS;
			}
			PDBG("%s ctrl_qp wakeup, continue posting work request "
			     "i %d\n", __func__, i);
		}
		wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
					(1 << T3_CTRL_QP_SIZE_LOG2)));
		flag = 0;
		if (i == (nr_wqe - 1)) {
			/* last WQE */
			flag = completion ? T3_COMPLETION_FLAG : 0;
			if (len % 32)
				utx_len = len / 32 + 1;
			else
				utx_len = len / 32;
		}

		/*
		 * Force a CQE to return the credit to the workq in case
		 * we posted more than half the max QP size of WRs
		 */
		if ((i != 0) &&
		    (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
			flag = T3_COMPLETION_FLAG;
			PDBG("%s force completion at i %d\n", __func__, i);
		}

		/* build the utx mem command */
		wqe += (sizeof(struct t3_bypass_wr) >> 3);
		utx_cmd = (T3_UTX_MEM_WRITE << 28) | (addr + i * 3);
		utx_cmd <<= 32;
		utx_cmd |= (utx_len << 28) | ((utx_len << 2) + 1);
		*wqe = cpu_to_be64(utx_cmd);
		wqe++;
		copy_data = (u8 *) data + i * 96;
		copy_len = len > 96 ? 96 : len;

		/* clear memory content if data is NULL */
		if (data)
			memcpy(wqe, copy_data, copy_len);
		else
			memset(wqe, 0, copy_len);
		if (copy_len % 32)
			memset(((u8 *) wqe) + copy_len, 0,
			       32 - (copy_len % 32));
		wr_len = ((sizeof(struct t3_bypass_wr)) >> 3) + 1 +
			 (utx_len << 2);
		wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
					(1 << T3_CTRL_QP_SIZE_LOG2)));

		/* wptr in the WRID[31:0] */
		((union t3_wrid *)(wqe + 1))->id0.low = rdev_p->ctrl_qp.wptr;

		/*
		 * This must be the last write with a memory barrier
		 * for the genbit
		 */
		build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_BP, flag,
			       Q_GENBIT(rdev_p->ctrl_qp.wptr,
					T3_CTRL_QP_SIZE_LOG2), T3_CTRL_QP_ID,
			       wr_len);
		if (flag == T3_COMPLETION_FLAG)
			ring_doorbell(rdev_p->ctrl_qp.doorbell, T3_CTRL_QP_ID);
		len -= 96;
		rdev_p->ctrl_qp.wptr++;
	}
	return 0;
}
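
/*
 * Illustrative arithmetic (not driver code): each bypass WQE carries at
 * most 96B of payload, moved as up to three 32B "utx" units.  Writing a
 * 100-byte TPT/PBL image therefore takes two WQEs:
 *
 *	nr_wqe = 100 % 96 ? 100 / 96 + 1 : 100 / 96;	// = 2
 *	// WQE 0: 96 bytes (utx_len 3); WQE 1: 4 bytes padded to 32
 *	// (utx_len 1), with the completion flag only on the last WQE
 */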

/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl, and pbl_size
 * OUT: stag index, actual pbl_size, pbl_addr allocated.
 * TBD: shared memory region support
 */
static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
			 u32 *stag, u8 stag_state, u32 pdid,
			 enum tpt_mem_type type, enum tpt_mem_perm perm,
			 u32 zbva, u64 to, u32 len, u8 page_size, __be64 *pbl,
			 u32 *pbl_size, u32 *pbl_addr)
{
	int err;
	struct tpt_entry tpt;
	u32 stag_idx;
	u32 wptr;
	int rereg = (*stag != T3_STAG_UNSET);

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if (!reset_tpt_entry && *stag == T3_STAG_UNSET) {
		stag_idx = cxio_hal_get_stag(rdev_p->rscp);
		if (!stag_idx)
			return -ENOMEM;
		*stag = (stag_idx << 8) | ((*stag) & 0xFF);
	}
	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
	     __func__, stag_state, type, pdid, stag_idx);

	if (reset_tpt_entry)
		cxio_hal_pblpool_free(rdev_p, *pbl_addr, *pbl_size << 3);
	else if (!rereg) {
		*pbl_addr = cxio_hal_pblpool_alloc(rdev_p, *pbl_size << 3);
		if (!*pbl_addr)
			return -ENOMEM;
	}

	mutex_lock(&rdev_p->ctrl_qp.lock);

	/* write PBL first if any - update pbl only if pbl list exist */
	if (pbl) {
		PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
		     __func__, *pbl_addr, rdev_p->rnic_info.pbl_base,
		     *pbl_size);
		err = cxio_hal_ctrl_qp_write_mem(rdev_p,
						 (*pbl_addr >> 5),
						 (*pbl_size << 3), pbl, 0);
		if (err)
			goto ret;
	}

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_stag_pdid = cpu_to_be32(F_TPT_VALID |
				V_TPT_STAG_KEY((*stag) & M_TPT_STAG_KEY) |
				V_TPT_STAG_STATE(stag_state) |
				V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
		BUG_ON(page_size >= 28);
		tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
				F_TPT_MW_BIND_ENABLE |
				V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
				V_TPT_PAGE_SIZE(page_size));
		tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
			cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, *pbl_addr) >> 3));
		tpt.len = cpu_to_be32(len);
		tpt.va_hi = cpu_to_be32((u32) (to >> 32));
		tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
		tpt.rsvd_bind_cnt_or_pstag = 0;
		tpt.rsvd_pbl_size = reset_tpt_entry ? 0 :
			cpu_to_be32(V_TPT_PBL_SIZE((*pbl_size) >> 2));
	}
	err = cxio_hal_ctrl_qp_write_mem(rdev_p,
					 stag_idx +
					 (rdev_p->rnic_info.tpt_base >> 5),
					 sizeof(tpt), &tpt, 1);

	/* release the stag index to free pool */
	if (reset_tpt_entry)
		cxio_hal_put_stag(rdev_p->rscp, stag_idx);
ret:
	wptr = rdev_p->ctrl_qp.wptr;
	mutex_unlock(&rdev_p->ctrl_qp.lock);
	if (!err)
		if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
					     SEQ32_GE(rdev_p->ctrl_qp.rptr,
						      wptr)))
			return -ERESTARTSYS;
	return err;
}
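
/*
 * Illustrative note (not driver code): the code above keeps the low 8
 * bits of a stag as the consumer-owned key and uses the remaining bits
 * as the TPT table index, hence the shifts by 8.  For stag_idx 0x12 and
 * key 0xAB:
 *
 *	*stag = (0x12 << 8) | 0xAB;	// 0x12AB
 *	stag_idx = (*stag) >> 8;	// back to 0x12
 */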

int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
			   enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
			   u8 page_size, __be64 *pbl, u32 *pbl_size,
			   u32 *pbl_addr)
{
	*stag = T3_STAG_UNSET;
	return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
			     zbva, to, len, page_size, pbl, pbl_size, pbl_addr);
}

int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
			     enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
			     u8 page_size, __be64 *pbl, u32 *pbl_size,
			     u32 *pbl_addr)
{
	return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
			     zbva, to, len, page_size, pbl, pbl_size, pbl_addr);
}

int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size,
		   u32 pbl_addr)
{
	return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL,
			     &pbl_size, &pbl_addr);
}

int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid)
{
	u32 pbl_size = 0;

	*stag = T3_STAG_UNSET;
	return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,
			     NULL, &pbl_size, NULL);
}

int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
{
	return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0, NULL,
			     NULL, NULL);
}

int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
{
	struct t3_rdma_init_wr *wqe;
	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;
	PDBG("%s rdev_p %p\n", __func__, rdev_p);
	wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
	wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
	wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
					   V_FW_RIWR_LEN(sizeof(*wqe) >> 3));
	wqe->wrid.id1 = 0;
	wqe->qpid = cpu_to_be32(attr->qpid);
	wqe->pdid = cpu_to_be32(attr->pdid);
	wqe->scqid = cpu_to_be32(attr->scqid);
	wqe->rcqid = cpu_to_be32(attr->rcqid);
	wqe->rq_addr = cpu_to_be32(attr->rq_addr - rdev_p->rnic_info.rqt_base);
	wqe->rq_size = cpu_to_be32(attr->rq_size);
	wqe->mpaattrs = attr->mpaattrs;
	wqe->qpcaps = attr->qpcaps;
	wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
	wqe->rqe_count = cpu_to_be16(attr->rqe_count);
	wqe->flags_rtr_type = cpu_to_be16(attr->flags |
					  V_RTR_TYPE(attr->rtr_type));
	wqe->ord = cpu_to_be32(attr->ord);
	wqe->ird = cpu_to_be32(attr->ird);
	wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
	wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
	wqe->irs = cpu_to_be32(attr->irs);
	skb->priority = 0;	/* 0=>ToeQ; 1=>CtrlQ */
	return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
}

void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
{
	cxio_ev_cb = ev_cb;
}

void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
{
	cxio_ev_cb = NULL;
}

static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
{
	static int cnt;
	struct cxio_rdev *rdev_p = NULL;
	struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;

	PDBG("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x"
	     " se %0x notify %0x cqbranch %0x creditth %0x\n",
	     cnt, __func__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
	     RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
	     RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
	     RSPQ_CREDIT_THRESH(rsp_msg));
	PDBG("CQE: QPID 0x%0x genbit %0x type 0x%0x status 0x%0x opcode %d "
	     "len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     CQE_QPID(rsp_msg->cqe), CQE_GENBIT(rsp_msg->cqe),
	     CQE_TYPE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
	     CQE_OPCODE(rsp_msg->cqe), CQE_LEN(rsp_msg->cqe),
	     CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
	rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
	if (!rdev_p) {
		PDBG("%s called by t3cdev %p with null ulp\n", __func__,
		     t3cdev_p);
		return 0;
	}
	if (CQE_QPID(rsp_msg->cqe) == T3_CTRL_QP_ID) {
		rdev_p->ctrl_qp.rptr = CQE_WRID_LOW(rsp_msg->cqe) + 1;
		wake_up_interruptible(&rdev_p->ctrl_qp.waitq);
		dev_kfree_skb_irq(skb);
	} else if (CQE_QPID(rsp_msg->cqe) == 0xfff8)
		dev_kfree_skb_irq(skb);
	else if (cxio_ev_cb)
		(*cxio_ev_cb) (rdev_p, skb);
	else
		dev_kfree_skb_irq(skb);
	cnt++;
	return 0;
}

/* Caller takes care of locking if needed */
int cxio_rdev_open(struct cxio_rdev *rdev_p)
{
	struct net_device *netdev_p = NULL;
	int err = 0;

	if (strlen(rdev_p->dev_name)) {
		if (cxio_hal_find_rdev_by_name(rdev_p->dev_name))
			return -EBUSY;
		netdev_p = dev_get_by_name(&init_net, rdev_p->dev_name);
		if (!netdev_p)
			return -EINVAL;
		dev_put(netdev_p);
	} else if (rdev_p->t3cdev_p) {
		if (cxio_hal_find_rdev_by_t3cdev(rdev_p->t3cdev_p))
			return -EBUSY;
		netdev_p = rdev_p->t3cdev_p->lldev;
		strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
			T3_MAX_DEV_NAME_LEN);
	} else {
		PDBG("%s t3cdev_p or dev_name must be set\n", __func__);
		return -EINVAL;
	}

	list_add_tail(&rdev_p->entry, &rdev_list);

	PDBG("%s opening rnic dev %s\n", __func__, rdev_p->dev_name);
	memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
	if (!rdev_p->t3cdev_p)
		rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
	rdev_p->t3cdev_p->ulp = (void *) rdev_p;
	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
				    &(rdev_p->rnic_info));
	if (err) {
		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
		       __func__, rdev_p->t3cdev_p, err);
		goto err1;
	}
	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
				    &(rdev_p->port_info));
	if (err) {
		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
		       __func__, rdev_p->t3cdev_p, err);
		goto err1;
	}

	/*
	 * qpshift is the number of bits to shift the qpid left in order
	 * to get the correct address of the doorbell for that qp.
	 */
	cxio_init_ucontext(rdev_p, &rdev_p->uctx);
	rdev_p->qpshift = PAGE_SHIFT -
			  ilog2(65536 >>
				ilog2(rdev_p->rnic_info.udbell_len >>
				      PAGE_SHIFT));
	rdev_p->qpnr = rdev_p->rnic_info.udbell_len >> PAGE_SHIFT;
	rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
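
	/*
	 * Worked example (illustrative numbers only): with 4KB pages and
	 * a 16MB user doorbell region, qpnr = 4096 doorbell pages,
	 * qpshift = 12 - ilog2(65536 >> ilog2(4096)) = 8, i.e. a 256B
	 * doorbell stride with 16 QPs per page, and qpmask = 15, the
	 * block size get_qpid() hands out to a single user context.
	 */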
	PDBG("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d "
	     "pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
	     __func__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
	     rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
	     rdev_p->rnic_info.pbl_base,
	     rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
	     rdev_p->rnic_info.rqt_top);
	PDBG("udbell_len 0x%0x udbell_physbase 0x%lx kdb_addr %p qpshift %lu "
	     "qpnr %d qpmask 0x%x\n",
	     rdev_p->rnic_info.udbell_len,
	     rdev_p->rnic_info.udbell_physbase, rdev_p->rnic_info.kdb_addr,
	     rdev_p->qpshift, rdev_p->qpnr, rdev_p->qpmask);

	err = cxio_hal_init_ctrl_qp(rdev_p);
	if (err) {
		printk(KERN_ERR "%s error %d initializing ctrl_qp.\n",
		       __func__, err);
		goto err1;
	}
	err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
				     0, T3_MAX_NUM_QP, T3_MAX_NUM_CQ,
				     T3_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR "%s error %d initializing hal resources.\n",
		       __func__, err);
		goto err2;
	}
	err = cxio_hal_pblpool_create(rdev_p);
	if (err) {
		printk(KERN_ERR "%s error %d initializing pbl mem pool.\n",
		       __func__, err);
		goto err3;
	}
	err = cxio_hal_rqtpool_create(rdev_p);
	if (err) {
		printk(KERN_ERR "%s error %d initializing rqt mem pool.\n",
		       __func__, err);
		goto err4;
	}
	return 0;
err4:
	cxio_hal_pblpool_destroy(rdev_p);
err3:
	cxio_hal_destroy_resource(rdev_p->rscp);
err2:
	cxio_hal_destroy_ctrl_qp(rdev_p);
err1:
	list_del(&rdev_p->entry);
	return err;
}

void cxio_rdev_close(struct cxio_rdev *rdev_p)
{
	if (rdev_p) {
		cxio_hal_pblpool_destroy(rdev_p);
		cxio_hal_rqtpool_destroy(rdev_p);
		list_del(&rdev_p->entry);
		rdev_p->t3cdev_p->ulp = NULL;
		cxio_hal_destroy_ctrl_qp(rdev_p);
		cxio_hal_destroy_resource(rdev_p->rscp);
	}
}

int __init cxio_hal_init(void)
{
	if (cxio_hal_init_rhdl_resource(T3_MAX_NUM_RI))
		return -ENOMEM;
	t3_register_cpl_handler(CPL_ASYNC_NOTIF, cxio_hal_ev_handler);
	return 0;
}

void __exit cxio_hal_exit(void)
{
	struct cxio_rdev *rdev, *tmp;

	t3_register_cpl_handler(CPL_ASYNC_NOTIF, NULL);
	list_for_each_entry_safe(rdev, tmp, &rdev_list, entry)
		cxio_rdev_close(rdev);
	cxio_hal_destroy_rhdl_resource();
}

static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
{
	struct t3_swsq *sqp;
	__u32 ptr = wq->sq_rptr;
	int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr);

	sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
	while (count--)
		if (!sqp->signaled) {
			ptr++;
			sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
		} else if (sqp->complete) {

			/*
			 * Insert this completed cqe into the swcq.
			 */
			PDBG("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
			     __func__, Q_PTR2IDX(ptr, wq->sq_size_log2),
			     Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
			sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
			*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
				= sqp->cqe;
			cq->sw_wptr++;
			sqp->signaled = 0;
			break;
		} else
			break;
}
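
/*
 * Illustrative walk-through (not driver code): suppose the SW SQ holds,
 * in order, an unsignaled write, a signaled send whose completion has
 * already arrived, and a signaled send still pending.
 * flush_completed_wrs() steps past the unsignaled entry, promotes the
 * completed send's saved CQE into the SW CQ (marking it SWCQE), and
 * then stops; at most one completion is moved per call, so repeated
 * polls drain the queue in submission order.
 */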

static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe,
				struct t3_cqe *read_cqe)
{
	read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr;
	read_cqe->len = wq->oldest_read->read_len;
	read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(*hw_cqe)) |
				 V_CQE_SWCQE(SW_CQE(*hw_cqe)) |
				 V_CQE_OPCODE(T3_READ_REQ) |
				 V_CQE_TYPE(1));
}

/*
 * Advance wq->oldest_read to the next read wr in the SWSQ, or set it
 * to NULL if there are no more.
 */
static void advance_oldest_read(struct t3_wq *wq)
{
	u32 rptr = wq->oldest_read - wq->sq + 1;
	u32 wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2);

	while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) {
		wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);

		if (wq->oldest_read->opcode == T3_READ_REQ)
			return;
		rptr++;
	}
	wq->oldest_read = NULL;
}

/*
 * cxio_poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0 CQE returned,
 *    -1 CQE skipped, try again.
 */
int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
		 u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t3_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	hw_cqe = cxio_next_cqe(cq);

	PDBG("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x"
	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     __func__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
	     CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
	     CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
	     CQE_WRID_LOW(*hw_cqe));

	/*
	 * Skip CQEs not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -1;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(*hw_cqe) && (CQE_OPCODE(*hw_cqe) == T3_READ_RESP)) {

		/*
		 * If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup. So ignore the completion.
		 */
		if (!wq->oldest_read) {
			if (CQE_STATUS(*hw_cqe))
				wq->error = 1;
			ret = -1;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	/*
	 * T3A: Discard TERMINATE CQEs.
	 */
	if (CQE_OPCODE(*hw_cqe) == T3_TERMINATE) {
		ret = -1;
		wq->error = 1;
		goto skip_cqe;
	}

	if (CQE_STATUS(*hw_cqe) || wq->error) {
		*cqe_flushed = wq->error;
		wq->error = 1;

		/*
		 * T3A inserts errors into the CQE. We cannot return
		 * these as work completions.
		 */
		/* incoming write failures */
		if ((CQE_OPCODE(*hw_cqe) == T3_RDMA_WRITE)
		    && RQ_TYPE(*hw_cqe)) {
			ret = -1;
			goto skip_cqe;
		}
		/* incoming read request failures */
		if ((CQE_OPCODE(*hw_cqe) == T3_READ_RESP) && SQ_TYPE(*hw_cqe)) {
			ret = -1;
			goto skip_cqe;
		}

		/* incoming SEND with no receive posted failures */
		if ((CQE_OPCODE(*hw_cqe) == T3_SEND) && RQ_TYPE(*hw_cqe) &&
		    Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
			ret = -1;
			goto skip_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(*hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN. So we must validate that
		 * the MSN in the SEND is the next expected MSN. If it's not,
		 * then we complete this with TPT_ERR_MSN and mark the wq in
		 * error.
		 */
		if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
			wq->error = 1;
			hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ. This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) {
		struct t3_swsq *sqp;

		PDBG("%s out of order completion going in swsq at idx %ld\n",
		     __func__,
		     Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2));
		sqp = wq->sq +
		      Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
		sqp->cqe = *hw_cqe;
		sqp->complete = 1;
		ret = -1;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(*hw_cqe)) {
		wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
		PDBG("%s completing sq idx %ld\n", __func__,
		     Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
		*cookie = (wq->sq +
			   Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2))->wr_id;
		wq->sq_rptr++;
	} else {
		PDBG("%s completing rq idx %ld\n", __func__,
		     Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
		*cookie = *(wq->rq + Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
		wq->rq_rptr++;
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(*hw_cqe)) {
		PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
		     __func__, cq, cq->cqid, cq->sw_rptr);
		++cq->sw_rptr;
	} else {
		PDBG("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",
		     __func__, cq, cq->cqid, cq->rptr);
		++cq->rptr;

		/*
		 * T3A: compute credits.
		 */
		if (((cq->rptr - cq->wptr) > (1 << (cq->size_log2 - 1)))
		    || ((cq->rptr - cq->wptr) >= 128)) {
			*credit = cq->rptr - cq->wptr;
			cq->wptr = cq->rptr;
		}
	}
	return ret;
}
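
/*
 * Illustrative sketch (not driver code): a poll loop built on
 * cxio_poll_cq() is expected to look roughly like this, with
 * get_wq_for_qpid() standing in for however the caller maps a qpid
 * back to its wq:
 *
 *	hw_cqe = cxio_next_cqe(cq);	// caller checks validity first
 *	wq = get_wq_for_qpid(CQE_QPID(*hw_cqe));	// hypothetical
 *	ret = cxio_poll_cq(wq, cq, &cqe, &flushed, &cookie, &credit);
 *	if (credit)
 *		cxio_hal_cq_op(rdev_p, cq, CQ_CREDIT_UPDATE, credit);
 *	if (!ret)
 *		...translate cqe + cookie into a work completion...
 *	// ret == -1: CQE was skipped, poll again
 */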