blob: a95f74bb556915f92d0fa1bdbd2e34eb9814c3b1 [file] [log] [blame]
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Definitions for IB environment
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
10
11#ifndef _SMC_IB_H
12#define _SMC_IB_H
13
Ursula Braun143c0172017-01-12 14:57:15 +010014#include <linux/if_ether.h>
Ursula Brauna4cf0442017-01-09 16:55:14 +010015#include <rdma/ib_verbs.h>
16
17#define SMC_MAX_PORTS 2 /* Max # of ports */
18#define SMC_GID_SIZE sizeof(union ib_gid)
19
Ursula Braunf38ba1792017-01-09 16:55:19 +010020#define SMC_IB_MAX_SEND_SGE 2
21
Ursula Brauna4cf0442017-01-09 16:55:14 +010022struct smc_ib_devices { /* list of smc ib devices definition */
23 struct list_head list;
24 spinlock_t lock; /* protects list of smc ib devices */
25};
26
27extern struct smc_ib_devices smc_ib_devices; /* list of smc ib devices */
28
29struct smc_ib_device { /* ib-device infos for smc */
30 struct list_head list;
31 struct ib_device *ibdev;
32 struct ib_port_attr pattr[SMC_MAX_PORTS]; /* ib dev. port attrs */
Ursula Braunbd4ad572017-01-09 16:55:20 +010033 struct ib_event_handler event_handler; /* global ib_event handler */
Ursula Braunf38ba1792017-01-09 16:55:19 +010034 struct ib_cq *roce_cq_send; /* send completion queue */
35 struct ib_cq *roce_cq_recv; /* recv completion queue */
36 struct tasklet_struct send_tasklet; /* called by send cq handler */
37 struct tasklet_struct recv_tasklet; /* called by recv cq handler */
Ursula Braun143c0172017-01-12 14:57:15 +010038 char mac[SMC_MAX_PORTS][ETH_ALEN];
39 /* mac address per port*/
Ursula Brauna4cf0442017-01-09 16:55:14 +010040 union ib_gid gid[SMC_MAX_PORTS]; /* gid per port */
41 u8 initialized : 1; /* ib dev CQ, evthdl done */
Ursula Braunbd4ad572017-01-09 16:55:20 +010042 struct work_struct port_event_work;
43 unsigned long port_event_mask;
Ursula Brauna4cf0442017-01-09 16:55:14 +010044};
45
Ursula Brauncd6851f2017-01-09 16:55:18 +010046struct smc_buf_desc;
Ursula Braunf38ba1792017-01-09 16:55:19 +010047struct smc_link;
Ursula Brauncd6851f2017-01-09 16:55:18 +010048
Ursula Brauna4cf0442017-01-09 16:55:14 +010049int smc_ib_register_client(void) __init;
50void smc_ib_unregister_client(void);
51bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport);
52int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport);
Ursula Brauncd6851f2017-01-09 16:55:18 +010053int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size,
54 struct smc_buf_desc *buf_slot,
55 enum dma_data_direction data_direction);
Ursula Braunbd4ad572017-01-09 16:55:20 +010056void smc_ib_buf_unmap(struct smc_ib_device *smcibdev, int bufsize,
57 struct smc_buf_desc *buf_slot,
58 enum dma_data_direction data_direction);
Ursula Braunf38ba1792017-01-09 16:55:19 +010059void smc_ib_dealloc_protection_domain(struct smc_link *lnk);
60int smc_ib_create_protection_domain(struct smc_link *lnk);
61void smc_ib_destroy_queue_pair(struct smc_link *lnk);
62int smc_ib_create_queue_pair(struct smc_link *lnk);
Ursula Braunbd4ad572017-01-09 16:55:20 +010063int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
64 struct ib_mr **mr);
65int smc_ib_ready_link(struct smc_link *lnk);
66int smc_ib_modify_qp_rts(struct smc_link *lnk);
67int smc_ib_modify_qp_reset(struct smc_link *lnk);
68long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
69
Ursula Brauna4cf0442017-01-09 16:55:14 +010070
71#endif