blob: aa9a17ac1f7b35749c5158a317b56a4998447e43 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Ursula Brauna046d572017-01-09 16:55:16 +01002/*
3 * Shared Memory Communications over RDMA (SMC-R) and RoCE
4 *
5 * CLC (connection layer control) handshake over initial TCP socket to
6 * prepare for RDMA traffic
7 *
Karsten Graul1a26d022018-03-16 15:06:40 +01008 * Copyright IBM Corp. 2016, 2018
Ursula Brauna046d572017-01-09 16:55:16 +01009 *
10 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
11 */
12
13#include <linux/in.h>
Karsten Graul696cd302018-03-01 13:51:27 +010014#include <linux/inetdevice.h>
Ursula Braun143c0172017-01-12 14:57:15 +010015#include <linux/if_ether.h>
Ingo Molnarc3edc402017-02-02 08:35:14 +010016#include <linux/sched/signal.h>
17
Karsten Graul1a26d022018-03-16 15:06:40 +010018#include <net/addrconf.h>
Ursula Brauna046d572017-01-09 16:55:16 +010019#include <net/sock.h>
20#include <net/tcp.h>
21
22#include "smc.h"
Ursula Braun0cfdd8f2017-01-09 16:55:17 +010023#include "smc_core.h"
Ursula Brauna046d572017-01-09 16:55:16 +010024#include "smc_clc.h"
25#include "smc_ib.h"
Hans Wippelc758dfd2018-06-28 19:05:09 +020026#include "smc_ism.h"
27
28#define SMCR_CLC_ACCEPT_CONFIRM_LEN 68
29#define SMCD_CLC_ACCEPT_CONFIRM_LEN 48
Ursula Brauna046d572017-01-09 16:55:16 +010030
Stefan Raspl0f627122018-03-01 13:51:26 +010031/* eye catcher "SMCR" EBCDIC for CLC messages */
32static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'};
Hans Wippelc758dfd2018-06-28 19:05:09 +020033/* eye catcher "SMCD" EBCDIC for CLC messages */
34static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'};
Stefan Raspl0f627122018-03-01 13:51:26 +010035
/* check if received message has a correct header length and contains valid
 * heading and trailing eyecatchers
 *
 * @clcm: header of the received CLC message (caller guarantees at least
 *	  ntohs(clcm->length) bytes are present behind it)
 * Returns true when the message type, path, total length and both
 * eyecatchers are consistent; false otherwise.
 */
static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct smc_clc_msg_accept_confirm *clc;
	struct smc_clc_msg_proposal *pclc;
	struct smc_clc_msg_decline *dclc;
	struct smc_clc_msg_trail *trl;

	/* message must start with either the SMC-R or the SMC-D eyecatcher */
	if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
	    memcmp(clcm->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
		return false;
	switch (clcm->type) {
	case SMC_CLC_PROPOSAL:
		/* a proposal may offer SMC-R, SMC-D, or both (SMC_TYPE_B) */
		if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
		    clcm->path != SMC_TYPE_B)
			return false;
		pclc = (struct smc_clc_msg_proposal *)clcm;
		pclc_prfx = smc_clc_proposal_get_prefix(pclc);
		/* total length must equal the fixed parts plus the variable
		 * ip area offset and the announced ipv6 prefix array
		 */
		if (ntohs(pclc->hdr.length) !=
			sizeof(*pclc) + ntohs(pclc->iparea_offset) +
			sizeof(*pclc_prfx) +
			pclc_prfx->ipv6_prefixes_cnt *
				sizeof(struct smc_clc_ipv6_prefix) +
			sizeof(*trl))
			return false;
		/* trailer occupies the last bytes of the message */
		trl = (struct smc_clc_msg_trail *)
			((u8 *)pclc + ntohs(pclc->hdr.length) - sizeof(*trl));
		break;
	case SMC_CLC_ACCEPT:
	case SMC_CLC_CONFIRM:
		/* accept/confirm commit to exactly one path */
		if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D)
			return false;
		clc = (struct smc_clc_msg_accept_confirm *)clcm;
		/* length is fixed per chosen path type */
		if ((clcm->path == SMC_TYPE_R &&
		     ntohs(clc->hdr.length) != SMCR_CLC_ACCEPT_CONFIRM_LEN) ||
		    (clcm->path == SMC_TYPE_D &&
		     ntohs(clc->hdr.length) != SMCD_CLC_ACCEPT_CONFIRM_LEN))
			return false;
		trl = (struct smc_clc_msg_trail *)
			((u8 *)clc + ntohs(clc->hdr.length) - sizeof(*trl));
		break;
	case SMC_CLC_DECLINE:
		dclc = (struct smc_clc_msg_decline *)clcm;
		if (ntohs(dclc->hdr.length) != sizeof(*dclc))
			return false;
		trl = &dclc->trl;
		break;
	default:
		return false;
	}
	/* trailing eyecatcher must be valid as well (either protocol) */
	if (memcmp(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER)) &&
	    memcmp(trl->eyecatcher, SMCD_EYECATCHER, sizeof(SMCD_EYECATCHER)))
		return false;
	return true;
}
94
/* find ipv4 addr on device and get the prefix len, fill CLC proposal msg
 *
 * @dst:  route destination entry whose device is scanned
 * @ipv4: address the internal TCP socket is bound to
 * @prop: proposal prefix area to fill (pre-zeroed by the caller)
 * Must be called under rcu_read_lock() (required by __in_dev_get_rcu()
 * and the for_ifa() walk).
 * Returns 0 on match, -ENODEV without ipv4 config, -ENOENT if no
 * interface address matches.
 */
static int smc_clc_prfx_set4_rcu(struct dst_entry *dst, __be32 ipv4,
				 struct smc_clc_msg_proposal_prefix *prop)
{
	struct in_device *in_dev = __in_dev_get_rcu(dst->dev);

	if (!in_dev)
		return -ENODEV;
	for_ifa(in_dev) {
		if (!inet_ifa_match(ipv4, ifa))
			continue;
		/* found the ifa the socket is bound to: advertise its subnet */
		prop->prefix_len = inet_mask_len(ifa->ifa_mask);
		prop->outgoing_subnet = ifa->ifa_address & ifa->ifa_mask;
		/* prop->ipv6_prefixes_cnt = 0; already done by memset before */
		return 0;
	} endfor_ifa(in_dev);
	return -ENOENT;
}
113
/* fill CLC proposal msg with ipv6 prefixes from device
 *
 * @dst:	route destination entry whose device is scanned
 * @prop:	proposal prefix area; ipv6_prefixes_cnt is set here
 * @ipv6_prfx:	caller-provided array of SMC_CLC_MAX_V6_PREFIX entries
 * Caller must hold rcu_read_lock() for the address-list walk.
 * Returns 0 if at least one usable prefix was collected, -ENODEV without
 * ipv6 config, -ENOENT if none found (or IPV6 is not built in).
 */
static int smc_clc_prfx_set6_rcu(struct dst_entry *dst,
				 struct smc_clc_msg_proposal_prefix *prop,
				 struct smc_clc_ipv6_prefix *ipv6_prfx)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_dev *in6_dev = __in6_dev_get(dst->dev);
	struct inet6_ifaddr *ifa;
	int cnt = 0;

	if (!in6_dev)
		return -ENODEV;
	/* use a maximum of 8 IPv6 prefixes from device */
	list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
		/* link-local addrs are not useful for peer matching */
		if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)
			continue;
		ipv6_addr_prefix(&ipv6_prfx[cnt].prefix,
				 &ifa->addr, ifa->prefix_len);
		ipv6_prfx[cnt].prefix_len = ifa->prefix_len;
		cnt++;
		if (cnt == SMC_CLC_MAX_V6_PREFIX)
			break;
	}
	prop->ipv6_prefixes_cnt = cnt;
	if (cnt)
		return 0;
#endif
	return -ENOENT;
}
143
Karsten Graulc246d942018-03-16 15:06:39 +0100144/* retrieve and set prefixes in CLC proposal msg */
145static int smc_clc_prfx_set(struct socket *clcsock,
Karsten Graul1a26d022018-03-16 15:06:40 +0100146 struct smc_clc_msg_proposal_prefix *prop,
147 struct smc_clc_ipv6_prefix *ipv6_prfx)
Karsten Graul696cd302018-03-01 13:51:27 +0100148{
149 struct dst_entry *dst = sk_dst_get(clcsock->sk);
Karsten Graulc246d942018-03-16 15:06:39 +0100150 struct sockaddr_storage addrs;
Karsten Graul1a26d022018-03-16 15:06:40 +0100151 struct sockaddr_in6 *addr6;
Karsten Graulc246d942018-03-16 15:06:39 +0100152 struct sockaddr_in *addr;
153 int rc = -ENOENT;
154
155 memset(prop, 0, sizeof(*prop));
156 if (!dst) {
157 rc = -ENOTCONN;
158 goto out;
159 }
160 if (!dst->dev) {
161 rc = -ENODEV;
162 goto out_rel;
163 }
164 /* get address to which the internal TCP socket is bound */
165 kernel_getsockname(clcsock, (struct sockaddr *)&addrs);
166 /* analyze IP specific data of net_device belonging to TCP socket */
Karsten Graul1a26d022018-03-16 15:06:40 +0100167 addr6 = (struct sockaddr_in6 *)&addrs;
Karsten Graulc246d942018-03-16 15:06:39 +0100168 rcu_read_lock();
169 if (addrs.ss_family == PF_INET) {
170 /* IPv4 */
171 addr = (struct sockaddr_in *)&addrs;
172 rc = smc_clc_prfx_set4_rcu(dst, addr->sin_addr.s_addr, prop);
Karsten Graul1a26d022018-03-16 15:06:40 +0100173 } else if (ipv6_addr_v4mapped(&addr6->sin6_addr)) {
174 /* mapped IPv4 address - peer is IPv4 only */
175 rc = smc_clc_prfx_set4_rcu(dst, addr6->sin6_addr.s6_addr32[3],
176 prop);
177 } else {
178 /* IPv6 */
179 rc = smc_clc_prfx_set6_rcu(dst, prop, ipv6_prfx);
Karsten Graulc246d942018-03-16 15:06:39 +0100180 }
181 rcu_read_unlock();
182out_rel:
183 dst_release(dst);
184out:
185 return rc;
186}
187
/* match ipv4 addrs of dev against addr in CLC proposal
 *
 * @dev:  local net_device to check
 * @prop: prefix info received in the peer's proposal
 * Caller must hold rcu_read_lock().
 * Returns 0 on a subnet match, -ENODEV or -ENOENT otherwise.
 */
static int smc_clc_prfx_match4_rcu(struct net_device *dev,
				   struct smc_clc_msg_proposal_prefix *prop)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);

	if (!in_dev)
		return -ENODEV;
	for_ifa(in_dev) {
		/* both the prefix length and the subnet must match */
		if (prop->prefix_len == inet_mask_len(ifa->ifa_mask) &&
		    inet_ifa_match(prop->outgoing_subnet, ifa))
			return 0;
	} endfor_ifa(in_dev);

	return -ENOENT;
}
204
/* match ipv6 addrs of dev against addrs in CLC proposal
 *
 * @dev:  local net_device to check
 * @prop: proposal prefix area; the ipv6 prefix array follows it in memory
 * Caller must hold rcu_read_lock().
 * Returns 0 if any device address matches any proposed prefix,
 * -ENODEV or -ENOENT otherwise (always -ENOENT without CONFIG_IPV6).
 */
static int smc_clc_prfx_match6_rcu(struct net_device *dev,
				   struct smc_clc_msg_proposal_prefix *prop)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct inet6_dev *in6_dev = __in6_dev_get(dev);
	struct smc_clc_ipv6_prefix *ipv6_prfx;
	struct inet6_ifaddr *ifa;
	int i, max;

	if (!in6_dev)
		return -ENODEV;
	/* ipv6 prefix list starts behind smc_clc_msg_proposal_prefix */
	ipv6_prfx = (struct smc_clc_ipv6_prefix *)((u8 *)prop + sizeof(*prop));
	/* clamp the peer-supplied count to the protocol maximum */
	max = min_t(u8, prop->ipv6_prefixes_cnt, SMC_CLC_MAX_V6_PREFIX);
	list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
		if (ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)
			continue;
		for (i = 0; i < max; i++) {
			if (ifa->prefix_len == ipv6_prfx[i].prefix_len &&
			    ipv6_prefix_equal(&ifa->addr, &ipv6_prfx[i].prefix,
					      ifa->prefix_len))
				return 0;
		}
	}
#endif
	return -ENOENT;
}
233
Karsten Graulc246d942018-03-16 15:06:39 +0100234/* check if proposed prefixes match one of our device prefixes */
235int smc_clc_prfx_match(struct socket *clcsock,
236 struct smc_clc_msg_proposal_prefix *prop)
237{
238 struct dst_entry *dst = sk_dst_get(clcsock->sk);
Karsten Graul1a26d022018-03-16 15:06:40 +0100239 int rc;
Karsten Graul696cd302018-03-01 13:51:27 +0100240
241 if (!dst) {
242 rc = -ENOTCONN;
243 goto out;
244 }
245 if (!dst->dev) {
246 rc = -ENODEV;
247 goto out_rel;
248 }
Karsten Graul696cd302018-03-01 13:51:27 +0100249 rcu_read_lock();
Karsten Graulc246d942018-03-16 15:06:39 +0100250 if (!prop->ipv6_prefixes_cnt)
251 rc = smc_clc_prfx_match4_rcu(dst->dev, prop);
Karsten Graul1a26d022018-03-16 15:06:40 +0100252 else
253 rc = smc_clc_prfx_match6_rcu(dst->dev, prop);
Karsten Graul696cd302018-03-01 13:51:27 +0100254 rcu_read_unlock();
Karsten Graul696cd302018-03-01 13:51:27 +0100255out_rel:
256 dst_release(dst);
257out:
258 return rc;
259}
260
/* Wait for data on the tcp-socket, analyze received data
 * Returns:
 * 0 if success and it was not a decline that we received.
 * SMC_CLC_DECL_REPLY if decline received for fallback w/o another decl send.
 * clcsock error, -EINTR, -ECONNRESET, -EPROTO otherwise.
 */
int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
		     u8 expected_type)
{
	/* remember the caller's receive timeout; restored on exit */
	long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo;
	struct sock *clc_sk = smc->clcsock->sk;
	struct smc_clc_msg_hdr *clcm = buf;
	struct msghdr msg = {NULL, 0};
	int reason_code = 0;
	struct kvec vec = {buf, buflen};
	int len, datlen;
	int krflags;

	/* peek the first few bytes to determine length of data to receive
	 * so we don't consume any subsequent CLC message or payload data
	 * in the TCP byte stream
	 */
	/*
	 * Caller must make sure that buflen is no less than
	 * sizeof(struct smc_clc_msg_hdr)
	 */
	krflags = MSG_PEEK | MSG_WAITALL;
	smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1,
		      sizeof(struct smc_clc_msg_hdr));
	len = sock_recvmsg(smc->clcsock, &msg, krflags);
	if (signal_pending(current)) {
		/* interrupted: propagate EINTR to both sockets */
		reason_code = -EINTR;
		clc_sk->sk_err = EINTR;
		smc->sk.sk_err = EINTR;
		goto out;
	}
	if (clc_sk->sk_err) {
		/* mirror the internal TCP socket's error onto the SMC sock */
		reason_code = -clc_sk->sk_err;
		smc->sk.sk_err = clc_sk->sk_err;
		goto out;
	}
	if (!len) { /* peer has performed orderly shutdown */
		smc->sk.sk_err = ECONNRESET;
		reason_code = -ECONNRESET;
		goto out;
	}
	if (len < 0) {
		smc->sk.sk_err = -len;
		reason_code = len;
		goto out;
	}
	datlen = ntohs(clcm->length);
	/* sanity-check header: complete, fits the buffer, known version,
	 * known path, and either the expected type or a decline
	 */
	if ((len < sizeof(struct smc_clc_msg_hdr)) ||
	    (datlen > buflen) ||
	    (clcm->version != SMC_CLC_V1) ||
	    (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D &&
	     clcm->path != SMC_TYPE_B) ||
	    ((clcm->type != SMC_CLC_DECLINE) &&
	     (clcm->type != expected_type))) {
		smc->sk.sk_err = EPROTO;
		reason_code = -EPROTO;
		goto out;
	}

	/* receive the complete CLC message */
	memset(&msg, 0, sizeof(struct msghdr));
	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen);
	krflags = MSG_WAITALL;
	len = sock_recvmsg(smc->clcsock, &msg, krflags);
	if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
		smc->sk.sk_err = EPROTO;
		reason_code = -EPROTO;
		goto out;
	}
	if (clcm->type == SMC_CLC_DECLINE) {
		struct smc_clc_msg_decline *dclc;

		dclc = (struct smc_clc_msg_decline *)clcm;
		reason_code = SMC_CLC_DECL_PEERDECL;
		smc->peer_diagnosis = ntohl(dclc->peer_diagnosis);
		/* peer flagged a sync error: the whole link group is bad */
		if (((struct smc_clc_msg_decline *)buf)->hdr.flag) {
			smc->conn.lgr->sync_err = 1;
			smc_lgr_terminate(smc->conn.lgr);
		}
	}

out:
	/* restore the receive timeout modified for the CLC handshake */
	smc->clcsock->sk->sk_rcvtimeo = rcvtimeo;
	return reason_code;
}
352
353/* send CLC DECLINE message across internal TCP socket */
Ursula Braunbfbedfd2017-09-21 09:16:32 +0200354int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
Ursula Brauna046d572017-01-09 16:55:16 +0100355{
356 struct smc_clc_msg_decline dclc;
357 struct msghdr msg;
358 struct kvec vec;
359 int len;
360
361 memset(&dclc, 0, sizeof(dclc));
362 memcpy(dclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
363 dclc.hdr.type = SMC_CLC_DECLINE;
364 dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
365 dclc.hdr.version = SMC_CLC_V1;
Ursula Braunbfbedfd2017-09-21 09:16:32 +0200366 dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0;
Ursula Braunc394e3d2020-02-14 08:59:00 +0100367 if (smc->conn.lgr && !smc->conn.lgr->is_smcd)
368 memcpy(dclc.id_for_peer, local_systemid,
369 sizeof(local_systemid));
Ursula Brauna046d572017-01-09 16:55:16 +0100370 dclc.peer_diagnosis = htonl(peer_diag_info);
371 memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
372
373 memset(&msg, 0, sizeof(msg));
374 vec.iov_base = &dclc;
375 vec.iov_len = sizeof(struct smc_clc_msg_decline);
376 len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
377 sizeof(struct smc_clc_msg_decline));
378 if (len < sizeof(struct smc_clc_msg_decline))
379 smc->sk.sk_err = EPROTO;
380 if (len < 0)
381 smc->sk.sk_err = -len;
Ursula Braun0c9f1512017-12-07 13:38:45 +0100382 return sock_error(&smc->sk);
Ursula Brauna046d572017-01-09 16:55:16 +0100383}
384
/* send CLC PROPOSAL message across internal TCP socket
 *
 * @smc:	connection proposing SMC
 * @smc_type:	SMC_TYPE_R, SMC_TYPE_D or SMC_TYPE_B (both)
 * @ibdev:	RoCE device (SMC-R part), may carry the MAC to advertise
 * @ibport:	1-based port on @ibdev
 * @gid:	RoCE GID to advertise (SMC-R part)
 * @ismdev:	ISM device (SMC-D part)
 * Returns 0 on success, SMC_CLC_DECL_CNFERR on configuration error,
 * or a negative socket error.
 */
int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
			  struct smc_ib_device *ibdev, u8 ibport, u8 gid[],
			  struct smcd_dev *ismdev)
{
	struct smc_clc_ipv6_prefix ipv6_prfx[SMC_CLC_MAX_V6_PREFIX];
	struct smc_clc_msg_proposal_prefix pclc_prfx;
	struct smc_clc_msg_smcd pclc_smcd;
	struct smc_clc_msg_proposal pclc;
	struct smc_clc_msg_trail trl;
	int len, i, plen, rc;
	int reason_code = 0;
	struct kvec vec[5];
	struct msghdr msg;

	/* retrieve ip prefixes for CLC proposal msg */
	rc = smc_clc_prfx_set(smc->clcsock, &pclc_prfx, ipv6_prfx);
	if (rc)
		return SMC_CLC_DECL_CNFERR; /* configuration error */

	/* send SMC Proposal CLC message */
	/* wire length: fixed parts + variable ipv6 prefix array + trailer */
	plen = sizeof(pclc) + sizeof(pclc_prfx) +
	       (pclc_prfx.ipv6_prefixes_cnt * sizeof(ipv6_prfx[0])) +
	       sizeof(trl);
	memset(&pclc, 0, sizeof(pclc));
	memcpy(pclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
	pclc.hdr.type = SMC_CLC_PROPOSAL;
	pclc.hdr.version = SMC_CLC_V1; /* SMC version */
	pclc.hdr.path = smc_type;
	if (smc_type == SMC_TYPE_R || smc_type == SMC_TYPE_B) {
		/* add SMC-R specifics */
		memcpy(pclc.lcl.id_for_peer, local_systemid,
		       sizeof(local_systemid));
		memcpy(&pclc.lcl.gid, gid, SMC_GID_SIZE);
		memcpy(&pclc.lcl.mac, &ibdev->mac[ibport - 1], ETH_ALEN);
		pclc.iparea_offset = htons(0);
	}
	if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) {
		/* add SMC-D specifics */
		memset(&pclc_smcd, 0, sizeof(pclc_smcd));
		plen += sizeof(pclc_smcd);
		/* SMC-D area is inserted before the ip area */
		pclc.iparea_offset = htons(SMC_CLC_PROPOSAL_MAX_OFFSET);
		pclc_smcd.gid = ismdev->local_gid;
	}
	pclc.hdr.length = htons(plen);

	memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
	memset(&msg, 0, sizeof(msg));
	/* assemble the on-wire message from its parts; order matters */
	i = 0;
	vec[i].iov_base = &pclc;
	vec[i++].iov_len = sizeof(pclc);
	if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) {
		vec[i].iov_base = &pclc_smcd;
		vec[i++].iov_len = sizeof(pclc_smcd);
	}
	vec[i].iov_base = &pclc_prfx;
	vec[i++].iov_len = sizeof(pclc_prfx);
	if (pclc_prfx.ipv6_prefixes_cnt > 0) {
		vec[i].iov_base = &ipv6_prfx[0];
		vec[i++].iov_len = pclc_prfx.ipv6_prefixes_cnt *
				   sizeof(ipv6_prfx[0]);
	}
	vec[i].iov_base = &trl;
	vec[i++].iov_len = sizeof(trl);
	/* due to the few bytes needed for clc-handshake this cannot block */
	len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen);
	if (len < 0) {
		smc->sk.sk_err = smc->clcsock->sk->sk_err;
		reason_code = -smc->sk.sk_err;
	} else if (len < (int)sizeof(pclc)) {
		/* short send: treat as unreachable peer */
		reason_code = -ENETUNREACH;
		smc->sk.sk_err = -reason_code;
	}

	return reason_code;
}
461
/* send CLC CONFIRM message across internal TCP socket
 *
 * @smc: client-side connection confirming the chosen SMC path
 * Returns 0 on success or a negative error; short sends map to
 * -ENETUNREACH, socket failures to the socket's error code.
 */
int smc_clc_send_confirm(struct smc_sock *smc)
{
	struct smc_connection *conn = &smc->conn;
	struct smc_clc_msg_accept_confirm cclc;
	struct smc_link *link;
	int reason_code = 0;
	struct msghdr msg;
	struct kvec vec;
	int len;

	/* send SMC Confirm CLC msg */
	memset(&cclc, 0, sizeof(cclc));
	cclc.hdr.type = SMC_CLC_CONFIRM;
	cclc.hdr.version = SMC_CLC_V1; /* SMC version */
	if (smc->conn.lgr->is_smcd) {
		/* SMC-D specific settings */
		memcpy(cclc.hdr.eyecatcher, SMCD_EYECATCHER,
		       sizeof(SMCD_EYECATCHER));
		cclc.hdr.path = SMC_TYPE_D;
		cclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
		cclc.gid = conn->lgr->smcd->local_gid;
		cclc.token = conn->rmb_desc->token;
		cclc.dmbe_size = conn->rmbe_size_short;
		cclc.dmbe_idx = 0;
		memcpy(&cclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
		memcpy(cclc.smcd_trl.eyecatcher, SMCD_EYECATCHER,
		       sizeof(SMCD_EYECATCHER));
	} else {
		/* SMC-R specific settings */
		link = &conn->lgr->lnk[SMC_SINGLE_LINK];
		memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER,
		       sizeof(SMC_EYECATCHER));
		cclc.hdr.path = SMC_TYPE_R;
		cclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
		memcpy(cclc.lcl.id_for_peer, local_systemid,
		       sizeof(local_systemid));
		memcpy(&cclc.lcl.gid, link->gid, SMC_GID_SIZE);
		memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1],
		       ETH_ALEN);
		hton24(cclc.qpn, link->roce_qp->qp_num);
		cclc.rmb_rkey =
			htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
		cclc.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */
		cclc.rmbe_alert_token = htonl(conn->alert_token_local);
		/* negotiate the smaller of local and peer path MTU */
		cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
		cclc.rmbe_size = conn->rmbe_size_short;
		cclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
				(conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
		hton24(cclc.psn, link->psn_initial);
		memcpy(cclc.smcr_trl.eyecatcher, SMC_EYECATCHER,
		       sizeof(SMC_EYECATCHER));
	}

	memset(&msg, 0, sizeof(msg));
	vec.iov_base = &cclc;
	vec.iov_len = ntohs(cclc.hdr.length);
	len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
			     ntohs(cclc.hdr.length));
	if (len < ntohs(cclc.hdr.length)) {
		if (len >= 0) {
			/* short send */
			reason_code = -ENETUNREACH;
			smc->sk.sk_err = -reason_code;
		} else {
			smc->sk.sk_err = smc->clcsock->sk->sk_err;
			reason_code = -smc->sk.sk_err;
		}
	}
	return reason_code;
}
532
533/* send CLC ACCEPT message across internal TCP socket */
Ursula Braun0cfdd8f2017-01-09 16:55:17 +0100534int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
Ursula Brauna046d572017-01-09 16:55:16 +0100535{
Ursula Braun0cfdd8f2017-01-09 16:55:17 +0100536 struct smc_connection *conn = &new_smc->conn;
Ursula Brauna046d572017-01-09 16:55:16 +0100537 struct smc_clc_msg_accept_confirm aclc;
Ursula Braun0cfdd8f2017-01-09 16:55:17 +0100538 struct smc_link *link;
Ursula Brauna046d572017-01-09 16:55:16 +0100539 struct msghdr msg;
540 struct kvec vec;
541 int rc = 0;
542 int len;
543
544 memset(&aclc, 0, sizeof(aclc));
Ursula Brauna046d572017-01-09 16:55:16 +0100545 aclc.hdr.type = SMC_CLC_ACCEPT;
Ursula Brauna046d572017-01-09 16:55:16 +0100546 aclc.hdr.version = SMC_CLC_V1; /* SMC version */
Ursula Braun0cfdd8f2017-01-09 16:55:17 +0100547 if (srv_first_contact)
548 aclc.hdr.flag = 1;
Hans Wippelc758dfd2018-06-28 19:05:09 +0200549
550 if (new_smc->conn.lgr->is_smcd) {
551 /* SMC-D specific settings */
552 aclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
553 memcpy(aclc.hdr.eyecatcher, SMCD_EYECATCHER,
554 sizeof(SMCD_EYECATCHER));
555 aclc.hdr.path = SMC_TYPE_D;
556 aclc.gid = conn->lgr->smcd->local_gid;
557 aclc.token = conn->rmb_desc->token;
558 aclc.dmbe_size = conn->rmbe_size_short;
559 aclc.dmbe_idx = 0;
560 memcpy(&aclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
561 memcpy(aclc.smcd_trl.eyecatcher, SMCD_EYECATCHER,
562 sizeof(SMCD_EYECATCHER));
563 } else {
564 /* SMC-R specific settings */
565 aclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
566 memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER,
567 sizeof(SMC_EYECATCHER));
568 aclc.hdr.path = SMC_TYPE_R;
569 link = &conn->lgr->lnk[SMC_SINGLE_LINK];
570 memcpy(aclc.lcl.id_for_peer, local_systemid,
571 sizeof(local_systemid));
Ursula Braun7005ada2018-07-25 16:35:31 +0200572 memcpy(&aclc.lcl.gid, link->gid, SMC_GID_SIZE);
Hans Wippelc758dfd2018-06-28 19:05:09 +0200573 memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1],
574 ETH_ALEN);
575 hton24(aclc.qpn, link->roce_qp->qp_num);
576 aclc.rmb_rkey =
577 htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
578 aclc.rmbe_idx = 1; /* as long as 1 RMB = 1 RMBE */
579 aclc.rmbe_alert_token = htonl(conn->alert_token_local);
580 aclc.qp_mtu = link->path_mtu;
581 aclc.rmbe_size = conn->rmbe_size_short,
582 aclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
583 (conn->rmb_desc->sgt[SMC_SINGLE_LINK].sgl));
584 hton24(aclc.psn, link->psn_initial);
585 memcpy(aclc.smcr_trl.eyecatcher, SMC_EYECATCHER,
586 sizeof(SMC_EYECATCHER));
587 }
Ursula Brauna046d572017-01-09 16:55:16 +0100588
589 memset(&msg, 0, sizeof(msg));
590 vec.iov_base = &aclc;
Hans Wippelc758dfd2018-06-28 19:05:09 +0200591 vec.iov_len = ntohs(aclc.hdr.length);
592 len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1,
593 ntohs(aclc.hdr.length));
594 if (len < ntohs(aclc.hdr.length)) {
Ursula Brauna046d572017-01-09 16:55:16 +0100595 if (len >= 0)
596 new_smc->sk.sk_err = EPROTO;
597 else
598 new_smc->sk.sk_err = new_smc->clcsock->sk->sk_err;
599 rc = sock_error(&new_smc->sk);
600 }
601
602 return rc;
603}