/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Monitoring SMC transport protocol sockets
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sock_diag.h>
#include <linux/inet_diag.h>
#include <linux/smc_diag.h>
#include <net/netlink.h>
#include <net/smc.h>

#include "smc.h"
#include "smc_core.h"

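/* Format a raw 16-byte RDMA GID as eight big-endian 16-bit groups in
 * IPv6-style hex notation; buf must hold at least 40 bytes including
 * the terminating NUL.
 */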
static void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw)
{
	sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
		be16_to_cpu(((__be16 *)gid_raw)[0]),
		be16_to_cpu(((__be16 *)gid_raw)[1]),
		be16_to_cpu(((__be16 *)gid_raw)[2]),
		be16_to_cpu(((__be16 *)gid_raw)[3]),
		be16_to_cpu(((__be16 *)gid_raw)[4]),
		be16_to_cpu(((__be16 *)gid_raw)[5]),
		be16_to_cpu(((__be16 *)gid_raw)[6]),
		be16_to_cpu(((__be16 *)gid_raw)[7]));
}

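/* Fill the fields every response record carries: address family, socket
 * cookie and, once the internal CLC TCP socket exists, the IPv4/IPv6
 * address and port tuple of that clcsock.
 */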
static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	memset(r, 0, sizeof(*r));
	r->diag_family = sk->sk_family;
	sock_diag_save_cookie(sk, r->id.idiag_cookie);
	if (!smc->clcsock)
		return;
	r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
	r->id.idiag_dport = smc->clcsock->sk->sk_dport;
	r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
	if (sk->sk_protocol == SMCPROTO_SMC) {
		r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
		r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (sk->sk_protocol == SMCPROTO_SMC6) {
		memcpy(&r->id.idiag_src, &smc->clcsock->sk->sk_v6_rcv_saddr,
		       sizeof(smc->clcsock->sk->sk_v6_rcv_saddr));
		memcpy(&r->id.idiag_dst, &smc->clcsock->sk->sk_v6_daddr,
		       sizeof(smc->clcsock->sk->sk_v6_daddr));
#endif
	}
}

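/* Add the shutdown state as a netlink attribute and report the owning
 * uid and inode; returns nonzero when the skb has no room left.
 */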
static int smc_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
				   struct smc_diag_msg *r,
				   struct user_namespace *user_ns)
{
	if (nla_put_u8(skb, SMC_DIAG_SHUTDOWN, sk->sk_shutdown))
		return 1;

	r->diag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
	r->diag_inode = sock_i_ino(sk);
	return 0;
}

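/* Dump one SMC socket into skb as a single NLM_F_MULTI record: common
 * fields and fallback info always, plus connection, link group (SMC-R)
 * or DMB (SMC-D) details when requested via req->diag_ext.  On overflow
 * the partial message is cancelled and -EMSGSIZE returned.
 */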
static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
			   struct netlink_callback *cb,
			   const struct smc_diag_req *req,
			   struct nlattr *bc)
{
	struct smc_sock *smc = smc_sk(sk);
	struct smc_diag_fallback fallback;
	struct user_namespace *user_ns;
	struct smc_diag_msg *r;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*r), NLM_F_MULTI);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	smc_diag_msg_common_fill(r, sk);
	r->diag_state = sk->sk_state;
	if (smc->use_fallback)
		r->diag_mode = SMC_DIAG_MODE_FALLBACK_TCP;
	else if (smc->conn.lgr && smc->conn.lgr->is_smcd)
		r->diag_mode = SMC_DIAG_MODE_SMCD;
	else
		r->diag_mode = SMC_DIAG_MODE_SMCR;
	user_ns = sk_user_ns(NETLINK_CB(cb->skb).sk);
	if (smc_diag_msg_attrs_fill(sk, skb, r, user_ns))
		goto errout;

	fallback.reason = smc->fallback_rsn;
	fallback.peer_diagnosis = smc->peer_diagnosis;
	if (nla_put(skb, SMC_DIAG_FALLBACK, sizeof(fallback), &fallback) < 0)
		goto errout;

	if ((req->diag_ext & (1 << (SMC_DIAG_CONNINFO - 1))) &&
	    smc->conn.alert_token_local) {
		struct smc_connection *conn = &smc->conn;
		struct smc_diag_conninfo cinfo = {
			.token = conn->alert_token_local,
			.sndbuf_size = conn->sndbuf_desc ?
				conn->sndbuf_desc->len : 0,
			.rmbe_size = conn->rmb_desc ? conn->rmb_desc->len : 0,
			.peer_rmbe_size = conn->peer_rmbe_size,

			.rx_prod.wrap = conn->local_rx_ctrl.prod.wrap,
			.rx_prod.count = conn->local_rx_ctrl.prod.count,
			.rx_cons.wrap = conn->local_rx_ctrl.cons.wrap,
			.rx_cons.count = conn->local_rx_ctrl.cons.count,

			.tx_prod.wrap = conn->local_tx_ctrl.prod.wrap,
			.tx_prod.count = conn->local_tx_ctrl.prod.count,
			.tx_cons.wrap = conn->local_tx_ctrl.cons.wrap,
			.tx_cons.count = conn->local_tx_ctrl.cons.count,

			.tx_prod_flags =
				*(u8 *)&conn->local_tx_ctrl.prod_flags,
			.tx_conn_state_flags =
				*(u8 *)&conn->local_tx_ctrl.conn_state_flags,
			.rx_prod_flags = *(u8 *)&conn->local_rx_ctrl.prod_flags,
			.rx_conn_state_flags =
				*(u8 *)&conn->local_rx_ctrl.conn_state_flags,

			.tx_prep.wrap = conn->tx_curs_prep.wrap,
			.tx_prep.count = conn->tx_curs_prep.count,
			.tx_sent.wrap = conn->tx_curs_sent.wrap,
			.tx_sent.count = conn->tx_curs_sent.count,
			.tx_fin.wrap = conn->tx_curs_fin.wrap,
			.tx_fin.count = conn->tx_curs_fin.count,
		};

		if (nla_put(skb, SMC_DIAG_CONNINFO, sizeof(cinfo), &cinfo) < 0)
			goto errout;
	}

	if (smc->conn.lgr && !smc->conn.lgr->is_smcd &&
	    (req->diag_ext & (1 << (SMC_DIAG_LGRINFO - 1))) &&
	    !list_empty(&smc->conn.lgr->list)) {
		struct smc_diag_lgrinfo linfo = {
			.role = smc->conn.lgr->role,
			.lnk[0].ibport = smc->conn.lgr->lnk[0].ibport,
			.lnk[0].link_id = smc->conn.lgr->lnk[0].link_id,
		};

		memcpy(linfo.lnk[0].ibname,
		       smc->conn.lgr->lnk[0].smcibdev->ibdev->name,
		       sizeof(smc->conn.lgr->lnk[0].smcibdev->ibdev->name));
		smc_gid_be16_convert(linfo.lnk[0].gid,
				     smc->conn.lgr->lnk[0].gid);
		smc_gid_be16_convert(linfo.lnk[0].peer_gid,
				     smc->conn.lgr->lnk[0].peer_gid);

		if (nla_put(skb, SMC_DIAG_LGRINFO, sizeof(linfo), &linfo) < 0)
			goto errout;
	}
	if (smc->conn.lgr && smc->conn.lgr->is_smcd &&
	    (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
	    !list_empty(&smc->conn.lgr->list)) {
		struct smc_connection *conn = &smc->conn;
		struct smcd_diag_dmbinfo dinfo;

		memset(&dinfo, 0, sizeof(dinfo));

		dinfo.linkid = *((u32 *)conn->lgr->id);
		dinfo.peer_gid = conn->lgr->peer_gid;
		dinfo.my_gid = conn->lgr->smcd->local_gid;
		dinfo.token = conn->rmb_desc->token;
		dinfo.peer_token = conn->peer_token;

		if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0)
			goto errout;
	}

	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

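/* Walk one protocol's socket hash under its read lock and dump every
 * socket belonging to the requesting netns; stops at the first error,
 * typically a full skb.
 */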
static int smc_diag_dump_proto(struct proto *prot, struct sk_buff *skb,
			       struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *bc = NULL;
	struct hlist_head *head;
	struct sock *sk;
	int rc = 0;

	read_lock(&prot->h.smc_hash->lock);
	head = &prot->h.smc_hash->ht;
	if (hlist_empty(head))
		goto out;

	sk_for_each(sk, head) {
		if (!net_eq(sock_net(sk), net))
			continue;
		rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh), bc);
		if (rc)
			break;
	}

out:
	read_unlock(&prot->h.smc_hash->lock);
	return rc;
}

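/* Dump the SMC sockets with an IPv4 clcsock (smc_proto) first, then
 * those with an IPv6 clcsock (smc_proto6), into the same response skb.
 */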
static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int rc = 0;

	rc = smc_diag_dump_proto(&smc_proto, skb, cb);
	if (!rc)
		rc = smc_diag_dump_proto(&smc_proto6, skb, cb);
	return rc;
}

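/* sock_diag entry point: start a netlink dump for SOCK_DIAG_BY_FAMILY
 * requests with NLM_F_DUMP set.  The generous min_dump_alloc (32KB
 * minus skb overhead) lets a single response skb carry many sockets.
 */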
static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
{
	struct net *net = sock_net(skb->sk);

	if (h->nlmsg_type == SOCK_DIAG_BY_FAMILY &&
	    h->nlmsg_flags & NLM_F_DUMP) {
		struct netlink_dump_control c = {
			.dump = smc_diag_dump,
			.min_dump_alloc = SKB_WITH_OVERHEAD(32768),
		};

		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
	}
	return 0;
}

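/* Register with the sock_diag core so AF_SMC queries arriving on
 * NETLINK_SOCK_DIAG sockets are dispatched to this module.
 */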
static const struct sock_diag_handler smc_diag_handler = {
	.family = AF_SMC,
	.dump = smc_diag_handler_dump,
};

static int __init smc_diag_init(void)
{
	return sock_diag_register(&smc_diag_handler);
}

static void __exit smc_diag_exit(void)
{
	sock_diag_unregister(&smc_diag_handler);
}

module_init(smc_diag_init);
module_exit(smc_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 43 /* AF_SMC */);