/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <net/addrconf.h>
#include <linux/qed/qede_roce.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>

MODULE_DESCRIPTION("QLogic 40G/100G ROCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QEDR_MODULE_VERSION);

#define QEDR_WQ_MULTIPLIER_DFT	(3)

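/* Build an ib_event for the given port and hand it to the IB core. */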
void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
			    enum ib_event_type type)
{
	struct ib_event ibev;

	ibev.device = &dev->ibdev;
	ibev.element.port_num = port_num;
	ibev.event = type;

	ib_dispatch_event(&ibev);
}

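/* qedr exposes RoCE, so the link layer is always Ethernet. */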
static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
					    u8 port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

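/* Render the 32-bit firmware version as four dotted byte fields. */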
static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str,
				size_t str_len)
{
	struct qedr_dev *qedr = get_qedr_dev(ibdev);
	u32 fw_ver = (u32)qedr->attr.fw_ver;

	snprintf(str, str_len, "%d. %d. %d. %d",
		 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
		 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}

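/* Fill in the ib_device: name, node description, the uverbs commands we
 * support and the verbs callbacks implemented in verbs.c.
 */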
static int qedr_register_device(struct qedr_dev *dev)
{
	strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);

	memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
	dev->ibdev.owner = THIS_MODULE;
	dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION;

	dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
				     QEDR_UVERBS(QUERY_DEVICE) |
				     QEDR_UVERBS(QUERY_PORT) |
				     QEDR_UVERBS(ALLOC_PD) |
				     QEDR_UVERBS(DEALLOC_PD) |
				     QEDR_UVERBS(CREATE_COMP_CHANNEL) |
				     QEDR_UVERBS(CREATE_CQ) |
				     QEDR_UVERBS(RESIZE_CQ) |
				     QEDR_UVERBS(DESTROY_CQ) |
				     QEDR_UVERBS(REQ_NOTIFY_CQ) |
				     QEDR_UVERBS(CREATE_QP) |
				     QEDR_UVERBS(MODIFY_QP) |
				     QEDR_UVERBS(QUERY_QP) |
				     QEDR_UVERBS(DESTROY_QP) |
				     QEDR_UVERBS(REG_MR) |
				     QEDR_UVERBS(DEREG_MR) |
				     QEDR_UVERBS(POLL_CQ) |
				     QEDR_UVERBS(POST_SEND) |
				     QEDR_UVERBS(POST_RECV);

	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->num_cnq;
	dev->ibdev.node_type = RDMA_NODE_IB_CA;

	dev->ibdev.query_device = qedr_query_device;
	dev->ibdev.query_port = qedr_query_port;
	dev->ibdev.modify_port = qedr_modify_port;

	dev->ibdev.query_gid = qedr_query_gid;
	dev->ibdev.add_gid = qedr_add_gid;
	dev->ibdev.del_gid = qedr_del_gid;

	dev->ibdev.alloc_ucontext = qedr_alloc_ucontext;
	dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
	dev->ibdev.mmap = qedr_mmap;

	dev->ibdev.alloc_pd = qedr_alloc_pd;
	dev->ibdev.dealloc_pd = qedr_dealloc_pd;

	dev->ibdev.create_cq = qedr_create_cq;
	dev->ibdev.destroy_cq = qedr_destroy_cq;
	dev->ibdev.resize_cq = qedr_resize_cq;
	dev->ibdev.req_notify_cq = qedr_arm_cq;

	dev->ibdev.create_qp = qedr_create_qp;
	dev->ibdev.modify_qp = qedr_modify_qp;
	dev->ibdev.query_qp = qedr_query_qp;
	dev->ibdev.destroy_qp = qedr_destroy_qp;

	dev->ibdev.query_pkey = qedr_query_pkey;

	dev->ibdev.get_dma_mr = qedr_get_dma_mr;
	dev->ibdev.dereg_mr = qedr_dereg_mr;
	dev->ibdev.reg_user_mr = qedr_reg_user_mr;
	dev->ibdev.alloc_mr = qedr_alloc_mr;
	dev->ibdev.map_mr_sg = qedr_map_mr_sg;

	dev->ibdev.poll_cq = qedr_poll_cq;
	dev->ibdev.post_send = qedr_post_send;
	dev->ibdev.post_recv = qedr_post_recv;

	dev->ibdev.dma_device = &dev->pdev->dev;

	dev->ibdev.get_link_layer = qedr_link_layer;
	dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;

	return 0;
}

/* This function allocates fast-path status block memory */
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&dev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt)
		return -ENOMEM;

	rc = dev->ops->common->sb_init(dev->cdev, sb_info,
				       sb_virt, sb_phys, sb_id,
				       QED_SB_TYPE_CNQ);
	if (rc) {
		pr_err("Status block initialization failed\n");
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

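/* Release a CNQ status block and free its DMA-coherent memory. */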
static void qedr_free_mem_sb(struct qedr_dev *dev,
			     struct qed_sb_info *sb_info, int sb_id)
{
	if (sb_info->sb_virt) {
		dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
		dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
	}
}

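/* Undo qedr_alloc_resources(): free the per-CNQ status blocks and PBL
 * chains, then the CNQ/SB arrays and the SGID table.
 */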
static void qedr_free_resources(struct qedr_dev *dev)
{
	int i;

	for (i = 0; i < dev->num_cnq; i++) {
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
	}

	kfree(dev->cnq_array);
	kfree(dev->sb_array);
	kfree(dev->sgid_tbl);
}

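/* Allocate the SGID table plus, for each CNQ, a fast-path status block
 * and a PBL-mode chain from which qedr_irq_handler() consumes completions.
 */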
static int qedr_alloc_resources(struct qedr_dev *dev)
{
	struct qedr_cnq *cnq;
	__le16 *cons_pi;
	u16 n_entries;
	int i, rc;

	dev->sgid_tbl = kzalloc(sizeof(union ib_gid) *
				QEDR_MAX_SGID, GFP_KERNEL);
	if (!dev->sgid_tbl)
		return -ENOMEM;

	spin_lock_init(&dev->sgid_lock);

	/* Allocate Status blocks for CNQ */
	dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
				GFP_KERNEL);
	if (!dev->sb_array) {
		rc = -ENOMEM;
		goto err1;
	}

	dev->cnq_array = kcalloc(dev->num_cnq,
				 sizeof(*dev->cnq_array), GFP_KERNEL);
	if (!dev->cnq_array) {
		rc = -ENOMEM;
		goto err2;
	}

	dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

	/* Allocate CNQ PBLs */
	n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
	for (i = 0; i < dev->num_cnq; i++) {
		cnq = &dev->cnq_array[i];

		rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
				       dev->sb_start + i);
		if (rc)
			goto err3;

		rc = dev->ops->common->chain_alloc(dev->cdev,
						   QED_CHAIN_USE_TO_CONSUME,
						   QED_CHAIN_MODE_PBL,
						   QED_CHAIN_CNT_TYPE_U16,
						   n_entries,
						   sizeof(struct regpair *),
						   &cnq->pbl);
		if (rc)
			goto err4;

		cnq->dev = dev;
		cnq->sb = &dev->sb_array[i];
		cons_pi = dev->sb_array[i].sb_virt->pi_array;
		cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
		cnq->index = i;
		sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

		DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
			 i, qed_chain_get_cons_idx(&cnq->pbl));
	}

	return 0;
err4:
	qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
	for (--i; i >= 0; i--) {
		dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
		qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
	}
	kfree(dev->cnq_array);
err2:
	kfree(dev->sb_array);
err1:
	kfree(dev->sgid_tbl);
	return rc;
}

/* QEDR sysfs interface */
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct qedr_dev *dev = dev_get_drvdata(device);

	return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
}

static ssize_t show_hca_type(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);

static struct device_attribute *qedr_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_hca_type
};

static void qedr_remove_sysfiles(struct qedr_dev *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
		device_remove_file(&dev->ibdev.dev, qedr_attributes[i]);
}

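/* Walk the PCIe hierarchy up to the root complex to verify AtomicOp
 * routing, enable or disable the AtomicOp requester bit accordingly, and
 * record the resulting capability in dev->atomic_cap.
 */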
static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
	struct pci_dev *bridge;
	u32 val;

	dev->atomic_cap = IB_ATOMIC_NONE;

	bridge = pdev->bus->self;
	if (!bridge)
		return;

	/* Check whether we are connected directly or via a switch */
	while (bridge && bridge->bus->parent) {
		DP_DEBUG(dev, QEDR_MSG_INIT,
			 "Device is not connected directly to root. bridge->bus->number=%d primary=%d\n",
			 bridge->bus->number, bridge->bus->primary);
		/* Need to check Atomic Op Routing Supported all the way to
		 * root complex.
		 */
		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
		if (!(val & PCI_EXP_DEVCAP2_ATOMIC_ROUTE)) {
			pcie_capability_clear_word(pdev,
						   PCI_EXP_DEVCTL2,
						   PCI_EXP_DEVCTL2_ATOMIC_REQ);
			return;
		}
		bridge = bridge->bus->parent->self;
	}
	bridge = pdev->bus->self;

	/* according to bridge capability */
	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &val);
	if (val & PCI_EXP_DEVCAP2_ATOMIC_COMP64) {
		pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
					 PCI_EXP_DEVCTL2_ATOMIC_REQ);
		dev->atomic_cap = IB_ATOMIC_GLOB;
	} else {
		pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
					   PCI_EXP_DEVCTL2_ATOMIC_REQ);
	}
}

static const struct qed_rdma_ops *qed_ops;

#define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))

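/* CNQ interrupt handler: consume CQ handles from the chain, invoke each
 * CQ's completion handler, then report the new consumer index to qed and
 * re-enable the status block.
 */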
static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
	u16 hw_comp_cons, sw_comp_cons;
	struct qedr_cnq *cnq = handle;
	struct regpair *cq_handle;
	struct qedr_cq *cq;

	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

	qed_sb_update_sb_idx(cnq->sb);

	hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

	/* Align protocol-index and chain reads */
	rmb();

	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
							   cq_handle->lo);

		if (cq == NULL) {
			DP_ERR(cnq->dev,
			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
			       hw_comp_cons);

			break;
		}

		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
			DP_ERR(cnq->dev,
			       "Problem with cq signature, cq_handle->hi=%d cq_handle->lo=%d cq=%p\n",
			       cq_handle->hi, cq_handle->lo, cq);
			break;
		}

		cq->arm_flags = 0;

		if (cq->ibcq.comp_handler)
			(*cq->ibcq.comp_handler)
				(&cq->ibcq, cq->ibcq.cq_context);

		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

		cnq->n_comp++;
	}

	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
				      sw_comp_cons);

	qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

	return IRQ_HANDLED;
}

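/* Synchronize and release every MSI-X vector requested for the CNQs. */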
static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
	u32 vector;
	int i;

	for (i = 0; i < dev->int_info.used_cnt; i++) {
		if (dev->int_info.msix_cnt) {
			vector = dev->int_info.msix[i * dev->num_hwfns].vector;
			synchronize_irq(vector);
			free_irq(vector, &dev->cnq_array[i]);
		}
	}

	dev->int_info.used_cnt = 0;
}

static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
	int i, rc = 0;

	if (dev->num_cnq > dev->int_info.msix_cnt) {
		DP_ERR(dev,
		       "Interrupt mismatch: %d CNQ queues > %d MSI-x vectors\n",
		       dev->num_cnq, dev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < dev->num_cnq; i++) {
		rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
				 qedr_irq_handler, 0, dev->cnq_array[i].name,
				 &dev->cnq_array[i]);
		if (rc) {
			DP_ERR(dev, "Request cnq %d irq failed\n", i);
			qedr_sync_free_irqs(dev);
		} else {
			DP_DEBUG(dev, QEDR_MSG_INIT,
				 "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
				 dev->cnq_array[i].name, i,
				 &dev->cnq_array[i]);
			dev->int_info.used_cnt++;
		}
	}

	return rc;
}

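/* Negotiate the RDMA interrupt configuration with qed and request one
 * MSI-X vector per CNQ.
 */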
static int qedr_setup_irqs(struct qedr_dev *dev)
{
	int rc;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

	/* Learn Interrupt configuration */
	rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
	if (rc < 0)
		return rc;

	rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
	if (rc) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
		return rc;
	}

	if (dev->int_info.msix_cnt) {
		DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
			 dev->int_info.msix_cnt);
		rc = qedr_req_msix_irqs(dev);
		if (rc)
			return rc;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

	return 0;
}

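/* Query device capabilities from qed, verify the minimum supported page
 * size fits within PAGE_SIZE and cache the attributes in dev->attr.
 */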
static int qedr_set_device_attr(struct qedr_dev *dev)
{
	struct qed_rdma_device *qed_attr;
	struct qedr_device_attr *attr;
	u32 page_size;

	/* Part 1 - query core capabilities */
	qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

	/* Part 2 - check capabilities */
	page_size = ~dev->attr.page_size_caps + 1;
	if (page_size > PAGE_SIZE) {
		DP_ERR(dev,
		       "Kernel PAGE_SIZE is %ld which is smaller than minimum page size (%d) required by qedr\n",
		       PAGE_SIZE, page_size);
		return -ENODEV;
	}

	/* Part 3 - copy and update capabilities */
	attr = &dev->attr;
	attr->vendor_id = qed_attr->vendor_id;
	attr->vendor_part_id = qed_attr->vendor_part_id;
	attr->hw_ver = qed_attr->hw_ver;
	attr->fw_ver = qed_attr->fw_ver;
	attr->node_guid = qed_attr->node_guid;
	attr->sys_image_guid = qed_attr->sys_image_guid;
	attr->max_cnq = qed_attr->max_cnq;
	attr->max_sge = qed_attr->max_sge;
	attr->max_inline = qed_attr->max_inline;
	attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
	attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
	attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
	attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
	attr->max_dev_resp_rd_atomic_resc =
	    qed_attr->max_dev_resp_rd_atomic_resc;
	attr->max_cq = qed_attr->max_cq;
	attr->max_qp = qed_attr->max_qp;
	attr->max_mr = qed_attr->max_mr;
	attr->max_mr_size = qed_attr->max_mr_size;
	attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
	attr->max_mw = qed_attr->max_mw;
	attr->max_fmr = qed_attr->max_fmr;
	attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
	attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
	attr->max_pd = qed_attr->max_pd;
	attr->max_ah = qed_attr->max_ah;
	attr->max_pkey = qed_attr->max_pkey;
	attr->max_srq = qed_attr->max_srq;
	attr->max_srq_wr = qed_attr->max_srq_wr;
	attr->dev_caps = qed_attr->dev_caps;
	attr->page_size_caps = qed_attr->page_size_caps;
	attr->dev_ack_delay = qed_attr->dev_ack_delay;
	attr->reserved_lkey = qed_attr->reserved_lkey;
	attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
	attr->max_stats_queues = qed_attr->max_stats_queues;

	return 0;
}

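/* Start the RDMA engine: describe the CNQ PBLs and event callbacks to
 * qed, add a user to obtain doorbell (DPI) resources, then cache the
 * device attributes.
 */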
static int qedr_init_hw(struct qedr_dev *dev)
{
	struct qed_rdma_add_user_out_params out_params;
	struct qed_rdma_start_in_params *in_params;
	struct qed_rdma_cnq_params *cur_pbl;
	struct qed_rdma_events events;
	dma_addr_t p_phys_table;
	u32 page_cnt;
	int rc = 0;
	int i;

	in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
	if (!in_params) {
		rc = -ENOMEM;
		goto out;
	}

	in_params->desired_cnq = dev->num_cnq;
	for (i = 0; i < dev->num_cnq; i++) {
		cur_pbl = &in_params->cnq_pbl_list[i];

		page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
		cur_pbl->num_pbl_pages = page_cnt;

		p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
		cur_pbl->pbl_ptr = (u64)p_phys_table;
	}

	events.context = dev;

	in_params->events = &events;
	in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
	in_params->max_mtu = dev->ndev->mtu;
	ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

	rc = dev->ops->rdma_init(dev->cdev, in_params);
	if (rc)
		goto out;

	rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
	if (rc)
		goto out;

	dev->db_addr = (void *)(uintptr_t)out_params.dpi_addr;
	dev->db_phys_addr = out_params.dpi_phys_addr;
	dev->db_size = out_params.dpi_size;
	dev->dpi = out_params.dpi;

	rc = qedr_set_device_attr(dev);
out:
	kfree(in_params);
	if (rc)
		DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

	return rc;
}

void qedr_stop_hw(struct qedr_dev *dev)
{
	dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
	dev->ops->rdma_stop(dev->rdma_ctx);
}

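/* qede_roce add callback: allocate the ib_device, bind to the qed RDMA
 * ops, allocate resources, init the hardware, request IRQs and set up the
 * verbs device and its sysfs attributes.
 */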
static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
				 struct net_device *ndev)
{
	struct qed_dev_rdma_info dev_info;
	struct qedr_dev *dev;
	int rc = 0, i;

	dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev) {
		pr_err("Unable to allocate ib device\n");
		return NULL;
	}

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

	dev->pdev = pdev;
	dev->ndev = ndev;
	dev->cdev = cdev;

	qed_ops = qed_get_rdma_ops();
	if (!qed_ops) {
		DP_ERR(dev, "Failed to get qed roce operations\n");
		goto init_err;
	}

	dev->ops = qed_ops;
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto init_err;

	dev->num_hwfns = dev_info.common.num_hwfns;
	dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

	dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
	if (!dev->num_cnq) {
		DP_ERR(dev, "not enough CNQ resources.\n");
		goto init_err;
	}

	dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

	qedr_pci_set_atomic(dev, pdev);

	rc = qedr_alloc_resources(dev);
	if (rc)
		goto init_err;

	rc = qedr_init_hw(dev);
	if (rc)
		goto alloc_err;

	rc = qedr_setup_irqs(dev);
	if (rc)
		goto irq_err;

	rc = qedr_register_device(dev);
	if (rc) {
		DP_ERR(dev, "Unable to register device\n");
		goto reg_err;
	}

	for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
		if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
			goto reg_err;

	DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
	return dev;

reg_err:
	qedr_sync_free_irqs(dev);
irq_err:
	qedr_stop_hw(dev);
alloc_err:
	qedr_free_resources(dev);
init_err:
	DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);
	ib_dealloc_device(&dev->ibdev);

	return NULL;
}

static void qedr_remove(struct qedr_dev *dev)
{
	/* First unregister with stack to stop all the active traffic
	 * of the registered clients.
	 */
	qedr_remove_sysfiles(dev);

	qedr_stop_hw(dev);
	qedr_sync_free_irqs(dev);
	qedr_free_resources(dev);
	ib_dealloc_device(&dev->ibdev);
}

static int qedr_close(struct qedr_dev *dev)
{
	qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);

	return 0;
}

static void qedr_shutdown(struct qedr_dev *dev)
{
	qedr_close(dev);
	qedr_remove(dev);
}

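/* Rebuild SGID index 0 from the new MAC (EUI-64 with the link-local
 * prefix), update the LL2 MAC filter and dispatch a GID change event.
 */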
static void qedr_mac_address_change(struct qedr_dev *dev)
{
	union ib_gid *sgid = &dev->sgid_tbl[0];
	u8 guid[8], mac_addr[6];
	int rc;

	/* Update SGID */
	ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
	guid[0] = mac_addr[0] ^ 2;
	guid[1] = mac_addr[1];
	guid[2] = mac_addr[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac_addr[3];
	guid[6] = mac_addr[4];
	guid[7] = mac_addr[5];
	sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	memcpy(&sgid->raw[8], guid, sizeof(guid));

	/* Update LL2 */
	rc = dev->ops->roce_ll2_set_mac_filter(dev->cdev,
					       dev->gsi_ll2_mac_address,
					       dev->ndev->dev_addr);

	ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

	qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);

	if (rc)
		DP_ERR(dev, "Error updating mac filter\n");
}

/* Event handling via the NIC driver ensures that all NIC-specific
 * initialization is done before the RoCE driver notifies the stack of
 * the event.
 */
static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
{
	switch (event) {
	case QEDE_UP:
		qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
		break;
	case QEDE_DOWN:
		qedr_close(dev);
		break;
	case QEDE_CLOSE:
		qedr_shutdown(dev);
		break;
	case QEDE_CHANGE_ADDR:
		qedr_mac_address_change(dev);
		break;
	default:
		pr_err("Event not supported\n");
	}
}

static struct qedr_driver qedr_drv = {
	.name = "qedr_driver",
	.add = qedr_add,
	.remove = qedr_remove,
	.notify = qedr_notify,
};

static int __init qedr_init_module(void)
{
	return qede_roce_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
	qede_roce_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);