/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>

#include <asm/byteorder.h>

#include <net/net_namespace.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/rdma_netlink.h>
#include <rdma/iw_portmap.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include "user.h"

#define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":"

extern int c4iw_debug;
#define PDBG(fmt, args...) \
do { \
	if (c4iw_debug) \
		printk(MOD fmt, ## args); \
} while (0)
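
/*
 * Example (illustrative sketch; the qpid value is hypothetical): with the
 * c4iw_debug module parameter set, PDBG() emits a printk prefixed with
 * "iw_cxgb4:":
 *
 *	PDBG("%s qpid 0x%x\n", __func__, qpid);
 */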

#include "t4.h"

#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)

static inline void *cplhdr(struct sk_buff *skb)
{
	return skb->data;
}

#define C4IW_ID_TABLE_F_RANDOM	1	/* Pseudo-randomize the ids returned */
#define C4IW_ID_TABLE_F_EMPTY	2	/* Table is initially empty */

struct c4iw_id_table {
	u32 flags;
	u32 start;		/* logical minimal id */
	u32 last;		/* hint for find */
	u32 max;
	spinlock_t lock;
	unsigned long *table;
};

struct c4iw_resource {
	struct c4iw_id_table tpt_table;
	struct c4iw_id_table qid_table;
	struct c4iw_id_table pdid_table;
};

struct c4iw_qid_list {
	struct list_head entry;
	u32 qid;
};

struct c4iw_dev_ucontext {
	struct list_head qpids;
	struct list_head cqids;
	struct mutex lock;
};

enum c4iw_rdev_flags {
	T4_FATAL_ERROR = (1<<0),
	T4_STATUS_PAGE_DISABLED = (1<<1),
};

struct c4iw_stat {
	u64 total;
	u64 cur;
	u64 max;
	u64 fail;
};

struct c4iw_stats {
	struct mutex lock;
	struct c4iw_stat qid;
	struct c4iw_stat pd;
	struct c4iw_stat stag;
	struct c4iw_stat pbl;
	struct c4iw_stat rqt;
	struct c4iw_stat ocqp;
	u64  db_full;
	u64  db_empty;
	u64  db_drop;
	u64  db_state_transitions;
	u64  db_fc_interruptions;
	u64  tcam_full;
	u64  act_ofld_conn_fails;
	u64  pas_ofld_conn_fails;
};

struct c4iw_rdev {
	struct c4iw_resource resource;
	unsigned long qpshift;
	u32 qpmask;
	unsigned long cqshift;
	u32 cqmask;
	struct c4iw_dev_ucontext uctx;
	struct gen_pool *pbl_pool;
	struct gen_pool *rqt_pool;
	struct gen_pool *ocqp_pool;
	u32 flags;
	struct cxgb4_lld_info lldi;
	unsigned long bar2_pa;
	void __iomem *bar2_kva;
	unsigned long oc_mw_pa;
	void __iomem *oc_mw_kva;
	struct c4iw_stats stats;
	struct t4_dev_status_page *status_page;
};

static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
{
	return rdev->flags & T4_FATAL_ERROR;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
	return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
}

#define C4IW_WR_TO (30*HZ)

struct c4iw_wr_wait {
	struct completion completion;
	int ret;
};

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
	init_completion(&wr_waitp->completion);
}

static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{
	wr_waitp->ret = ret;
	complete(&wr_waitp->completion);
}

static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
				      struct c4iw_wr_wait *wr_waitp,
				      u32 hwtid, u32 qpid,
				      const char *func)
{
	unsigned to = C4IW_WR_TO;
	int ret;

	do {
		ret = wait_for_completion_timeout(&wr_waitp->completion, to);
		if (!ret) {
			printk(KERN_ERR MOD "%s - Device %s not responding - "
			       "tid %u qpid %u\n", func,
			       pci_name(rdev->lldi.pdev), hwtid, qpid);
			if (c4iw_fatal_error(rdev)) {
				wr_waitp->ret = -EIO;
				break;
			}
			to = to << 2;
		}
	} while (!ret);
	if (wr_waitp->ret)
		PDBG("%s: FW reply %d tid %u qpid %u\n",
		     pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
	return wr_waitp->ret;
}
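
/*
 * Typical firmware-wait pattern (illustrative sketch; post_fw_wr() is a
 * hypothetical helper standing in for the driver's FW work-request send
 * path, and the completion is assumed to be signalled from the CPL reply
 * handler via c4iw_wake_up()):
 *
 *	struct c4iw_wr_wait wr_wait;
 *	int ret;
 *
 *	c4iw_init_wr_wait(&wr_wait);
 *	ret = post_fw_wr(rdev, skb, &wr_wait);
 *	if (!ret)
 *		ret = c4iw_wait_for_reply(rdev, &wr_wait, hwtid, qpid,
 *					  __func__);
 */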

enum db_state {
	NORMAL = 0,
	FLOW_CONTROL = 1,
	RECOVERY = 2,
	STOPPED = 3
};

struct c4iw_dev {
	struct ib_device ibdev;
	struct c4iw_rdev rdev;
	u32 device_cap_flags;
	struct idr cqidr;
	struct idr qpidr;
	struct idr mmidr;
	spinlock_t lock;
	struct mutex db_mutex;
	struct dentry *debugfs_root;
	enum db_state db_state;
	struct idr hwtid_idr;
	struct idr atid_idr;
	struct idr stid_idr;
	struct list_head db_fc_list;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
{
	return container_of(rdev, struct c4iw_dev, rdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
	return idr_find(&rhp->cqidr, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
	return idr_find(&rhp->qpidr, qpid);
}

static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
{
	return idr_find(&rhp->mmidr, mmid);
}

static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
				 void *handle, u32 id, int lock)
{
	int ret;

	if (lock) {
		idr_preload(GFP_KERNEL);
		spin_lock_irq(&rhp->lock);
	}

	ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC);

	if (lock) {
		spin_unlock_irq(&rhp->lock);
		idr_preload_end();
	}

	BUG_ON(ret == -ENOSPC);
	return ret < 0 ? ret : 0;
}

static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
				void *handle, u32 id)
{
	return _insert_handle(rhp, idr, handle, id, 1);
}

static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
				       void *handle, u32 id)
{
	return _insert_handle(rhp, idr, handle, id, 0);
}

static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
				  u32 id, int lock)
{
	if (lock)
		spin_lock_irq(&rhp->lock);
	idr_remove(idr, id);
	if (lock)
		spin_unlock_irq(&rhp->lock);
}

static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
{
	_remove_handle(rhp, idr, id, 1);
}

static inline void remove_handle_nolock(struct c4iw_dev *rhp,
					struct idr *idr, u32 id)
{
	_remove_handle(rhp, idr, id, 0);
}
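
/*
 * Example handle-table round trip (illustrative sketch; qhp is assumed to
 * be a just-created struct c4iw_qp):
 *
 *	ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
 *	...
 *	qhp = get_qhp(rhp, qpid);	// lookup from CQ/async handlers
 *	...
 *	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
 */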

struct c4iw_pd {
	struct ib_pd ibpd;
	u32 pdid;
	struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct c4iw_pd, ibpd);
}

struct tpt_attributes {
	u64 len;
	u64 va_fbo;
	enum fw_ri_mem_perms perms;
	u32 stag;
	u32 pdid;
	u32 qpid;
	u32 pbl_addr;
	u32 pbl_size;
	u32 state:1;
	u32 type:2;
	u32 rsvd:1;
	u32 remote_invalidate_disable:1;
	u32 zbva:1;
	u32 mw_bind_enable:1;
	u32 page_size:5;
};

struct c4iw_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct c4iw_dev *rhp;
	u64 kva;
	struct tpt_attributes attr;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct c4iw_mr, ibmr);
}

struct c4iw_mw {
	struct ib_mw ibmw;
	struct c4iw_dev *rhp;
	u64 kva;
	struct tpt_attributes attr;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct c4iw_mw, ibmw);
}

struct c4iw_fr_page_list {
	struct ib_fast_reg_page_list ibpl;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	dma_addr_t dma_addr;
	struct c4iw_dev *dev;
	int pbl_len;
};

static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
					struct ib_fast_reg_page_list *ibpl)
{
	return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
}

struct c4iw_cq {
	struct ib_cq ibcq;
	struct c4iw_dev *rhp;
	struct t4_cq cq;
	spinlock_t lock;
	spinlock_t comp_handler_lock;
	atomic_t refcnt;
	wait_queue_head_t wait;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct c4iw_cq, ibcq);
}

struct c4iw_mpa_attributes {
	u8 initiator;
	u8 recv_marker_enabled;
	u8 xmit_marker_enabled;
	u8 crc_enabled;
	u8 enhanced_rdma_conn;
	u8 version;
	u8 p2p_type;
};

struct c4iw_qp_attributes {
	u32 scq;
	u32 rcq;
	u32 sq_num_entries;
	u32 rq_num_entries;
	u32 sq_max_sges;
	u32 sq_max_sges_rdma_write;
	u32 rq_max_sges;
	u32 state;
	u8 enable_rdma_read;
	u8 enable_rdma_write;
	u8 enable_bind;
	u8 enable_mmid0_fastreg;
	u32 max_ord;
	u32 max_ird;
	u32 pd;
	u32 next_state;
	char terminate_buffer[52];
	u32 terminate_msg_len;
	u8 is_terminate_local;
	struct c4iw_mpa_attributes mpa_attr;
	struct c4iw_ep *llp_stream_handle;
	u8 layer_etype;
	u8 ecode;
	u16 sq_db_inc;
	u16 rq_db_inc;
	u8 send_term;
};

struct c4iw_qp {
	struct ib_qp ibqp;
	struct list_head db_fc_entry;
	struct c4iw_dev *rhp;
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attr;
	struct t4_wq wq;
	spinlock_t lock;
	struct mutex mutex;
	atomic_t refcnt;
	wait_queue_head_t wait;
	struct timer_list timer;
	int sq_sig_all;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct c4iw_qp, ibqp);
}

struct c4iw_ucontext {
	struct ib_ucontext ibucontext;
	struct c4iw_dev_ucontext uctx;
	u32 key;
	spinlock_t mmap_lock;
	struct list_head mmaps;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
	return container_of(c, struct c4iw_ucontext, ibucontext);
}

struct c4iw_mm_entry {
	struct list_head entry;
	u64 addr;
	u32 key;
	unsigned len;
};

static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
						u32 key, unsigned len)
{
	struct list_head *pos, *nxt;
	struct c4iw_mm_entry *mm;

	spin_lock(&ucontext->mmap_lock);
	list_for_each_safe(pos, nxt, &ucontext->mmaps) {

		mm = list_entry(pos, struct c4iw_mm_entry, entry);
		if (mm->key == key && mm->len == len) {
			list_del_init(&mm->entry);
			spin_unlock(&ucontext->mmap_lock);
			PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
			     key, (unsigned long long) mm->addr, mm->len);
			return mm;
		}
	}
	spin_unlock(&ucontext->mmap_lock);
	return NULL;
}

static inline void insert_mmap(struct c4iw_ucontext *ucontext,
			       struct c4iw_mm_entry *mm)
{
	spin_lock(&ucontext->mmap_lock);
	PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
	     mm->key, (unsigned long long) mm->addr, mm->len);
	list_add_tail(&mm->entry, &ucontext->mmaps);
	spin_unlock(&ucontext->mmap_lock);
}
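
/*
 * Example mmap-key flow (illustrative sketch, assuming the usual verbs
 * pattern: create_cq/create_qp queue an entry keyed by a per-ucontext
 * cookie that userspace later passes back as the mmap offset):
 *
 *	mm->key = ucontext->key;	// cookie returned in the uresp
 *	ucontext->key += PAGE_SIZE;
 *	mm->addr = virt_to_phys(chp->cq.queue);
 *	mm->len = chp->cq.memsize;
 *	insert_mmap(ucontext, mm);
 *	...
 *	// in the driver's mmap handler:
 *	mm = remove_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
 */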

enum c4iw_qp_attr_mask {
	C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
	C4IW_QP_ATTR_SQ_DB = 1<<1,
	C4IW_QP_ATTR_RQ_DB = 1<<2,
	C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
	C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
	C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
	C4IW_QP_ATTR_MAX_ORD = 1 << 11,
	C4IW_QP_ATTR_MAX_IRD = 1 << 12,
	C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
	C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
	C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
	C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
	C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
				     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
				     C4IW_QP_ATTR_MAX_ORD |
				     C4IW_QP_ATTR_MAX_IRD |
				     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
				     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
				     C4IW_QP_ATTR_MPA_ATTR |
				     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};

int c4iw_modify_qp(struct c4iw_dev *rhp,
		   struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal);

enum c4iw_qp_state {
	C4IW_QP_STATE_IDLE,
	C4IW_QP_STATE_RTS,
	C4IW_QP_STATE_ERROR,
	C4IW_QP_STATE_TERMINATE,
	C4IW_QP_STATE_CLOSING,
	C4IW_QP_STATE_TOT
};

static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET:
	case IB_QPS_INIT:
		return C4IW_QP_STATE_IDLE;
	case IB_QPS_RTS:
		return C4IW_QP_STATE_RTS;
	case IB_QPS_SQD:
		return C4IW_QP_STATE_CLOSING;
	case IB_QPS_SQE:
		return C4IW_QP_STATE_TERMINATE;
	case IB_QPS_ERR:
		return C4IW_QP_STATE_ERROR;
	default:
		return -1;
	}
}

static inline int to_ib_qp_state(int c4iw_qp_state)
{
	switch (c4iw_qp_state) {
	case C4IW_QP_STATE_IDLE:
		return IB_QPS_INIT;
	case C4IW_QP_STATE_RTS:
		return IB_QPS_RTS;
	case C4IW_QP_STATE_CLOSING:
		return IB_QPS_SQD;
	case C4IW_QP_STATE_TERMINATE:
		return IB_QPS_SQE;
	case C4IW_QP_STATE_ERROR:
		return IB_QPS_ERR;
	}
	return IB_QPS_ERR;
}
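
/*
 * Example (illustrative): moving a QP to ERROR through the modify path
 * uses the converted state plus the NEXT_STATE mask bit:
 *
 *	struct c4iw_qp_attributes attrs;
 *
 *	attrs.next_state = c4iw_convert_state(IB_QPS_ERR);
 *	ret = c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
 *
 * Note the mapping is not a strict inverse: IB_QPS_RESET and IB_QPS_INIT
 * both fold into C4IW_QP_STATE_IDLE, which to_ib_qp_state() reports back
 * as IB_QPS_INIT.
 */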

static inline u32 c4iw_ib_to_tpt_access(int a)
{
	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
	       (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
	       (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
	       FW_RI_MEM_ACCESS_LOCAL_READ;
}

static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
}
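
/*
 * Example (illustrative): a memory-region registration path folds the
 * verbs access flags into TPT permissions like so; local read is always
 * granted:
 *
 *	mhp->attr.perms = c4iw_ib_to_tpt_access(IB_ACCESS_LOCAL_WRITE |
 *						IB_ACCESS_REMOTE_WRITE);
 *	// == FW_RI_MEM_ACCESS_LOCAL_READ | FW_RI_MEM_ACCESS_LOCAL_WRITE |
 *	//    FW_RI_MEM_ACCESS_REM_WRITE
 */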

enum c4iw_mmid_state {
	C4IW_STAG_STATE_VALID,
	C4IW_STAG_STATE_INVALID
};

#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA	256
#define MPA_ENHANCED_RDMA_CONN	0x10
#define MPA_REJECT		0x20
#define MPA_CRC			0x40
#define MPA_MARKERS		0x80
#define MPA_FLAGS_MASK		0xE0

#define MPA_V2_PEER2PEER_MODEL		0x8000
#define MPA_V2_ZERO_LEN_FPDU_RTR	0x4000
#define MPA_V2_RDMA_WRITE_RTR		0x8000
#define MPA_V2_RDMA_READ_RTR		0x4000
#define MPA_V2_IRD_ORD_MASK		0x3FFF

#define c4iw_put_ep(ep) { \
	PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__,  \
	     ep, atomic_read(&((ep)->kref.refcount))); \
	WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
	kref_put(&((ep)->kref), _c4iw_free_ep); \
}

#define c4iw_get_ep(ep) { \
	PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
	     ep, atomic_read(&((ep)->kref.refcount))); \
	kref_get(&((ep)->kref)); \
}
void _c4iw_free_ep(struct kref *kref);
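
/*
 * Example (illustrative): handlers that hand an endpoint to another
 * context bracket the handoff with the refcount macros:
 *
 *	c4iw_get_ep(&ep->com);		// pin across async processing
 *	...
 *	c4iw_put_ep(&ep->com);		// may invoke _c4iw_free_ep()
 */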

struct mpa_message {
	u8 key[16];
	u8 flags;
	u8 revision;
	__be16 private_data_size;
	u8 private_data[0];
};

struct mpa_v2_conn_params {
	__be16 ird;
	__be16 ord;
};
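
/*
 * Example (illustrative sketch): decoding an enhanced-RDMA-connect
 * exchange. The top bits of ird/ord carry the RTR/peer-to-peer flags,
 * so the counts must be masked out; the v2 parameters are assumed to sit
 * at the start of the private data, right after the MPA header:
 *
 *	struct mpa_v2_conn_params *v2 =
 *		(struct mpa_v2_conn_params *)(mpa + 1);
 *	u16 ird = ntohs(v2->ird) & MPA_V2_IRD_ORD_MASK;
 *	u16 ord = ntohs(v2->ord) & MPA_V2_IRD_ORD_MASK;
 *	int p2p = ntohs(v2->ird) & MPA_V2_PEER2PEER_MODEL;
 */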

struct terminate_message {
	u8 layer_etype;
	u8 ecode;
	__be16 hdrct_rsvd;
	u8 len_hdrs[0];
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)

enum c4iw_layers_types {
	LAYER_RDMAP		= 0x00,
	LAYER_DDP		= 0x10,
	LAYER_MPA		= 0x20,
	RDMAP_LOCAL_CATA	= 0x00,
	RDMAP_REMOTE_PROT	= 0x01,
	RDMAP_REMOTE_OP		= 0x02,
	DDP_LOCAL_CATA		= 0x00,
	DDP_TAGGED_ERR		= 0x01,
	DDP_UNTAGGED_ERR	= 0x02,
	DDP_LLP			= 0x03
};

enum c4iw_rdma_ecodes {
	RDMAP_INV_STAG		= 0x00,
	RDMAP_BASE_BOUNDS	= 0x01,
	RDMAP_ACC_VIOL		= 0x02,
	RDMAP_STAG_NOT_ASSOC	= 0x03,
	RDMAP_TO_WRAP		= 0x04,
	RDMAP_INV_VERS		= 0x05,
	RDMAP_INV_OPCODE	= 0x06,
	RDMAP_STREAM_CATA	= 0x07,
	RDMAP_GLOBAL_CATA	= 0x08,
	RDMAP_CANT_INV_STAG	= 0x09,
	RDMAP_UNSPECIFIED	= 0xff
};

enum c4iw_ddp_ecodes {
	DDPT_INV_STAG		= 0x00,
	DDPT_BASE_BOUNDS	= 0x01,
	DDPT_STAG_NOT_ASSOC	= 0x02,
	DDPT_TO_WRAP		= 0x03,
	DDPT_INV_VERS		= 0x04,
	DDPU_INV_QN		= 0x01,
	DDPU_INV_MSN_NOBUF	= 0x02,
	DDPU_INV_MSN_RANGE	= 0x03,
	DDPU_INV_MO		= 0x04,
	DDPU_MSG_TOOBIG		= 0x05,
	DDPU_INV_VERS		= 0x06
};

enum c4iw_mpa_ecodes {
	MPA_CRC_ERR		= 0x02,
	MPA_MARKER_ERR		= 0x03,
	MPA_LOCAL_CATA		= 0x05,
	MPA_INSUFF_IRD		= 0x06,
	MPA_NOMATCH_RTR		= 0x07,
};

enum c4iw_ep_state {
	IDLE = 0,
	LISTEN,
	CONNECTING,
	MPA_REQ_WAIT,
	MPA_REQ_SENT,
	MPA_REQ_RCVD,
	MPA_REP_SENT,
	FPDU_MODE,
	ABORTING,
	CLOSING,
	MORIBUND,
	DEAD,
};

enum c4iw_ep_flags {
	PEER_ABORT_IN_PROGRESS	= 0,
	ABORT_REQ_IN_PROGRESS	= 1,
	RELEASE_RESOURCES	= 2,
	CLOSE_SENT		= 3,
	TIMEOUT			= 4,
	QP_REFERENCED		= 5,
	RELEASE_MAPINFO		= 6,
};

enum c4iw_ep_history {
	ACT_OPEN_REQ = 0,
	ACT_OFLD_CONN = 1,
	ACT_OPEN_RPL = 2,
	ACT_ESTAB = 3,
	PASS_ACCEPT_REQ = 4,
	PASS_ESTAB = 5,
	ABORT_UPCALL = 6,
	ESTAB_UPCALL = 7,
	CLOSE_UPCALL = 8,
	ULP_ACCEPT = 9,
	ULP_REJECT = 10,
	TIMEDOUT = 11,
	PEER_ABORT = 12,
	PEER_CLOSE = 13,
	CONNREQ_UPCALL = 14,
	ABORT_CONN = 15,
	DISCONN_UPCALL = 16,
	EP_DISC_CLOSE = 17,
	EP_DISC_ABORT = 18,
	CONN_RPL_UPCALL = 19,
	ACT_RETRY_NOMEM = 20,
	ACT_RETRY_INUSE = 21
};

struct c4iw_ep_common {
	struct iw_cm_id *cm_id;
	struct c4iw_qp *qp;
	struct c4iw_dev *dev;
	enum c4iw_ep_state state;
	struct kref kref;
	struct mutex mutex;
	struct sockaddr_storage local_addr;
	struct sockaddr_storage remote_addr;
	struct sockaddr_storage mapped_local_addr;
	struct sockaddr_storage mapped_remote_addr;
	struct c4iw_wr_wait wr_wait;
	unsigned long flags;
	unsigned long history;
};

struct c4iw_listen_ep {
	struct c4iw_ep_common com;
	unsigned int stid;
	int backlog;
};

struct c4iw_ep {
	struct c4iw_ep_common com;
	struct c4iw_ep *parent_ep;
	struct timer_list timer;
	struct list_head entry;
	unsigned int atid;
	u32 hwtid;
	u32 snd_seq;
	u32 rcv_seq;
	struct l2t_entry *l2t;
	struct dst_entry *dst;
	struct sk_buff *mpa_skb;
	struct c4iw_mpa_attributes mpa_attr;
	u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
	unsigned int mpa_pkt_len;
	u32 ird;
	u32 ord;
	u32 smac_idx;
	u32 tx_chan;
	u32 mtu;
	u16 mss;
	u16 emss;
	u16 plen;
	u16 rss_qid;
	u16 txq_idx;
	u16 ctrlq_idx;
	u8 tos;
	u8 retry_with_mpa_v1;
	u8 tried_with_mpa_v1;
	unsigned int retry_count;
};

static inline void print_addr(struct c4iw_ep_common *epc, const char *func,
			      const char *msg)
{

#define SINA(a) (&(((struct sockaddr_in *)(a))->sin_addr.s_addr))
#define SINP(a) ntohs(((struct sockaddr_in *)(a))->sin_port)
#define SIN6A(a) (&(((struct sockaddr_in6 *)(a))->sin6_addr))
#define SIN6P(a) ntohs(((struct sockaddr_in6 *)(a))->sin6_port)

	if (c4iw_debug) {
		switch (epc->local_addr.ss_family) {
		case AF_INET:
			PDBG("%s %s %pI4:%u/%u <-> %pI4:%u/%u\n",
			     func, msg, SINA(&epc->local_addr),
			     SINP(&epc->local_addr),
			     SINP(&epc->mapped_local_addr),
			     SINA(&epc->remote_addr),
			     SINP(&epc->remote_addr),
			     SINP(&epc->mapped_remote_addr));
			break;
		case AF_INET6:
			PDBG("%s %s %pI6:%u/%u <-> %pI6:%u/%u\n",
			     func, msg, SIN6A(&epc->local_addr),
			     SIN6P(&epc->local_addr),
			     SIN6P(&epc->mapped_local_addr),
			     SIN6A(&epc->remote_addr),
			     SIN6P(&epc->remote_addr),
			     SIN6P(&epc->mapped_remote_addr));
			break;
		default:
			break;
		}
	}
#undef SINA
#undef SINP
#undef SIN6A
#undef SIN6P
}

static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

static inline int compute_wscale(int win)
{
	int wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}
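
/*
 * Example (illustrative): compute_wscale() returns the smallest TCP window
 * scale shift that lets a 16-bit window cover the requested size, capped
 * at 14 per RFC 1323/7323:
 *
 *	compute_wscale(65535)	== 0
 *	compute_wscale(65536)	== 1
 *	compute_wscale(1 << 20)	== 5	(65535 << 4 < 2^20 <= 65535 << 5)
 */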

static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
	return infop->vr->ocq.size > 0;
#else
	return 0;
#endif
}

u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
			u32 reserved, u32 flags);
void c4iw_id_table_free(struct c4iw_id_table *alloc);
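
/*
 * Example id-table lifecycle (illustrative sketch; the table chosen and
 * the failure convention are assumptions):
 *
 *	err = c4iw_id_table_alloc(&rscp->tpt_table, 0, nr_tpt, 0,
 *				  C4IW_ID_TABLE_F_RANDOM);
 *	...
 *	stag_idx = c4iw_id_alloc(&rscp->tpt_table);	// ~0 on exhaustion
 *	...
 *	c4iw_id_free(&rscp->tpt_table, stag_idx);
 *	c4iw_id_table_free(&rscp->tpt_table);
 */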

typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);

int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
		   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void __exit c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
					struct ib_device *device,
					int page_list_len);
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
int c4iw_dealloc_mw(struct ib_mw *mw);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
			       u64 length, u64 virt, int acc,
			       struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf,
				     int acc,
				     u64 *iova_start);
int c4iw_reregister_phys_mem(struct ib_mr *mr,
			     int mr_rereg_mask,
			     struct ib_pd *pd,
			     struct ib_phys_buf *buffer_list,
			     int num_phys_buf,
			     int acc, u64 *iova_start);
int c4iw_dereg_mr(struct ib_mr *ib_mr);
int c4iw_destroy_cq(struct ib_cq *ib_cq);
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
			     int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata);
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_destroy_qp(struct ib_qp *ib_qp);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
void c4iw_flush_hw_cq(struct c4iw_cq *chp);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct c4iw_qp *qhp);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);

extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
extern int c4iw_max_read_depth;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;

#endif