/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * AF_SMC protocol family socket handler keeping the AF_INET sock address type
 * applies to SOCK_STREAM sockets only
 * offers an alternative communication option for TCP-protocol sockets
 * applicable with RoCE-cards only
 *
 * Initial restrictions:
 *   - support for alternate links postponed
 *   - partial support for non-blocking sockets only
 *   - support for urgent data postponed
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *             based on prototype from Frank Blaschka
 */
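/*
 * Illustrative userspace usage (an example, not part of the code below):
 * an SMC socket is created like a TCP socket, e.g.
 *
 *	sd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);  // or SMCPROTO_SMC6
 *
 * and is then bound, connected, etc. as usual; the handlers in this file
 * fall back to the internal TCP (CLC) socket when the peer is not
 * SMC-capable.
 */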

#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/smc.h>
#include <asm/ioctls.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_pnet.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"

static DEFINE_MUTEX(smc_create_lgr_pending);	/* serialize link group
						 * creation
						 */

static void smc_tcp_listen_work(struct work_struct *);

static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}

static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};

static struct smc_hashinfo smc_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};

int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock_bh(&h->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(smc_hash_sk);

void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(smc_unhash_sk);

struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);

struct proto smc_proto6 = {
	.name		= "SMC6",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v6_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto6);

static int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = 0;

	if (!sk)
		goto out;

	smc = smc_sk(sk);
	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	if (!smc->use_fallback) {
		rc = smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	}
	if (smc->clcsock) {
		sock_release(smc->clcsock);
		smc->clcsock = NULL;
	}
	if (smc->use_fallback) {
		sock_put(sk); /* passive closing */
		sk->sk_state = SMC_CLOSED;
		sk->sk_state_change(sk);
	}

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	if (!smc->use_fallback && sk->sk_state == SMC_CLOSED)
		smc_conn_free(&smc->conn);
	release_sock(sk);

	sk->sk_prot->unhash(sk);
	sock_put(sk); /* final sock_put */
out:
	return rc;
}

static void smc_destruct(struct sock *sk)
{
	if (sk->sk_state != SMC_CLOSED)
		return;
	if (!sock_flag(sk, SOCK_DEAD))
		return;

	sk_refcnt_debug_dec(sk);
}

static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
				   int protocol)
{
	struct smc_sock *smc;
	struct proto *prot;
	struct sock *sk;

	prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = protocol;
	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	spin_lock_init(&smc->conn.send_lock);
	sk->sk_prot->hash(sk);
	sk_refcnt_debug_inc(sk);

	return sk;
}

static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
		    int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);

	/* replicate tests from inet_bind(), to be safe wrt. future changes */
	rc = -EINVAL;
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	rc = -EAFNOSUPPORT;
	if (addr->sin_family != AF_INET &&
	    addr->sin_family != AF_INET6 &&
	    addr->sin_family != AF_UNSPEC)
		goto out;
	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
	if (addr->sin_family == AF_UNSPEC &&
	    addr->sin_addr.s_addr != htonl(INADDR_ANY))
		goto out;

	lock_sock(sk);

	/* Check if socket is already active */
	rc = -EINVAL;
	if (sk->sk_state != SMC_INIT)
		goto out_rel;

	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
	rc = kernel_bind(smc->clcsock, uaddr, addr_len);

out_rel:
	release_sock(sk);
out:
	return rc;
}

static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we don't get control via setsockopt for */
	nsk->sk_type = osk->sk_type;
	nsk->sk_sndbuf = osk->sk_sndbuf;
	nsk->sk_rcvbuf = osk->sk_rcvbuf;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
	nsk->sk_mark = osk->sk_mark;
	nsk->sk_priority = osk->sk_priority;
	nsk->sk_rcvlowat = osk->sk_rcvlowat;
	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
	nsk->sk_err = osk->sk_err;

	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;
}

#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_BROADCAST) | \
			     (1UL << SOCK_TIMESTAMP) | \
			     (1UL << SOCK_DBG) | \
			     (1UL << SOCK_RCVTSTAMP) | \
			     (1UL << SOCK_RCVTSTAMPNS) | \
			     (1UL << SOCK_LOCALROUTE) | \
			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
			     (1UL << SOCK_RXQ_OVFL) | \
			     (1UL << SOCK_WIFI_STATUS) | \
			     (1UL << SOCK_NOFCS) | \
			     (1UL << SOCK_FILTER_LOCKED))
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}

#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))
/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}

/* register a new rmb, optionally send confirm_rkey msg to register with peer */
static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc,
		       bool conf_rkey)
{
	/* register memory region for new rmb */
	if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) {
		rmb_desc->regerr = 1;
		return -EFAULT;
	}
	if (!conf_rkey)
		return 0;
	/* exchange confirm_rkey msg with peer */
	if (smc_llc_do_confirm_rkey(link, rmb_desc)) {
		rmb_desc->regerr = 1;
		return -EFAULT;
	}
	return 0;
}

static int smc_clnt_conf_first_link(struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];
	/* receive CONFIRM LINK request from server over RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
		return rc;
	}

	if (link->llc_confirm_rc)
		return SMC_CLC_DECL_RMBE_EC;

	rc = smc_ib_modify_qp_rts(link);
	if (rc)
		return SMC_CLC_DECL_INTERR;

	smc_wr_remember_qp_attr(link);

	if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
		return SMC_CLC_DECL_INTERR;

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link,
				       link->smcibdev->mac[link->ibport - 1],
				       &link->smcibdev->gid[link->ibport - 1],
				       SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TCL;

	/* receive ADD LINK request from server over RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(&link->llc_add,
							 SMC_LLC_WAIT_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
		return rc;
	}

	/* send add link reject message, only one link supported for now */
	rc = smc_llc_send_add_link(link,
				   link->smcibdev->mac[link->ibport - 1],
				   &link->smcibdev->gid[link->ibport - 1],
				   SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TCL;

	smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);

	return 0;
}

static void smc_conn_save_peer_info(struct smc_sock *smc,
				    struct smc_clc_msg_accept_confirm *clc)
{
	smc->conn.peer_conn_idx = clc->conn_idx;
	smc->conn.local_tx_ctrl.token = ntohl(clc->rmbe_alert_token);
	smc->conn.peer_rmbe_size = smc_uncompress_bufsize(clc->rmbe_size);
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
}

static void smc_link_save_peer_info(struct smc_link *link,
				    struct smc_clc_msg_accept_confirm *clc)
{
	link->peer_qpn = ntoh24(clc->qpn);
	memcpy(link->peer_gid, clc->lcl.gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, clc->lcl.mac, sizeof(link->peer_mac));
	link->peer_psn = ntoh24(clc->psn);
	link->peer_mtu = clc->qp_mtu;
}

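/*
 * Client-side connection setup, implemented by smc_connect_rdma() below
 * (informal summary of the flow; the individual steps are in the code):
 * once the internal CLC/TCP connection is established and the peer has
 * signalled SMC capability, the client sends a CLC PROPOSAL, waits for the
 * CLC ACCEPT, creates or reuses a link group plus send/receive buffers, and
 * answers with a CLC CONFIRM; on first contact the RoCE link is additionally
 * confirmed via LLC messages in smc_clnt_conf_first_link(). Any failure
 * declines to the peer and falls back to plain TCP.
 */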
/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc)
{
	struct smc_clc_msg_accept_confirm aclc;
	int local_contact = SMC_FIRST_CONTACT;
	struct smc_ib_device *smcibdev;
	struct smc_link *link;
	u8 srv_first_contact;
	int reason_code = 0;
	int rc = 0;
	u8 ibport;

	sock_hold(&smc->sk); /* sock put in passive closing */

	if (smc->use_fallback)
		goto out_connected;

	if (!tcp_sk(smc->clcsock->sk)->syn_smc) {
		/* peer has not signalled SMC-capability */
		smc->use_fallback = true;
		goto out_connected;
	}

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(smc)) {
		reason_code = SMC_CLC_DECL_IPSEC;
		goto decline_rdma;
	}

	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, &smcibdev, &ibport);
	if (!smcibdev) {
		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
		goto decline_rdma;
	}

	/* do inband token exchange */
	reason_code = smc_clc_send_proposal(smc, smcibdev, ibport);
	if (reason_code < 0) {
		rc = reason_code;
		goto out_err;
	}
	if (reason_code > 0) /* configuration error */
		goto decline_rdma;
	/* receive SMC Accept CLC message */
	reason_code = smc_clc_wait_msg(smc, &aclc, sizeof(aclc),
				       SMC_CLC_ACCEPT);
	if (reason_code < 0) {
		rc = reason_code;
		goto out_err;
	}
	if (reason_code > 0)
		goto decline_rdma;

	srv_first_contact = aclc.hdr.flag;
	mutex_lock(&smc_create_lgr_pending);
	local_contact = smc_conn_create(smc, smcibdev, ibport, &aclc.lcl,
					srv_first_contact);
	if (local_contact < 0) {
		rc = local_contact;
		if (rc == -ENOMEM)
			reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
		else if (rc == -ENOLINK)
			reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */
		else
			reason_code = SMC_CLC_DECL_INTERR; /* other error */
		goto decline_rdma_unlock;
	}
	link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	smc_conn_save_peer_info(smc, &aclc);

	/* create send buffer and rmb */
	rc = smc_buf_create(smc);
	if (rc) {
		reason_code = SMC_CLC_DECL_MEM;
		goto decline_rdma_unlock;
	}

	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, &aclc);

	rc = smc_rmb_rtoken_handling(&smc->conn, &aclc);
	if (rc) {
		reason_code = SMC_CLC_DECL_INTERR;
		goto decline_rdma_unlock;
	}

	smc_close_init(smc);
	smc_rx_init(smc);

	if (local_contact == SMC_FIRST_CONTACT) {
		rc = smc_ib_ready_link(link);
		if (rc) {
			reason_code = SMC_CLC_DECL_INTERR;
			goto decline_rdma_unlock;
		}
	} else {
		if (!smc->conn.rmb_desc->reused) {
			if (smc_reg_rmb(link, smc->conn.rmb_desc, true)) {
				reason_code = SMC_CLC_DECL_INTERR;
				goto decline_rdma_unlock;
			}
		}
	}
	smc_rmb_sync_sg_for_device(&smc->conn);

	rc = smc_clc_send_confirm(smc);
	if (rc)
		goto out_err_unlock;

	if (local_contact == SMC_FIRST_CONTACT) {
		/* QP confirmation over RoCE fabric */
		reason_code = smc_clnt_conf_first_link(smc);
		if (reason_code < 0) {
			rc = reason_code;
			goto out_err_unlock;
		}
		if (reason_code > 0)
			goto decline_rdma_unlock;
	}

	mutex_unlock(&smc_create_lgr_pending);
	smc_tx_init(smc);

out_connected:
	smc_copy_sock_settings_to_clc(smc);
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return rc ? rc : local_contact;

decline_rdma_unlock:
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(smc->conn.lgr);
	mutex_unlock(&smc_create_lgr_pending);
	smc_conn_free(&smc->conn);
decline_rdma:
	/* RDMA setup failed, switch back to TCP */
	smc->use_fallback = true;
	if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
		rc = smc_clc_send_decline(smc, reason_code);
		if (rc < 0)
			goto out_err;
	}
	goto out_connected;

out_err_unlock:
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(smc->conn.lgr);
	mutex_unlock(&smc_create_lgr_pending);
	smc_conn_free(&smc->conn);
out_err:
	if (smc->sk.sk_state == SMC_INIT)
		sock_put(&smc->sk); /* passive closing */
	return rc;
}

static int smc_connect(struct socket *sock, struct sockaddr *addr,
		       int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;

	smc = smc_sk(sk);

	/* separate smc parameter checking to be safe */
	if (alen < sizeof(addr->sa_family))
		goto out_err;
	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		goto out_err;

	lock_sock(sk);
	switch (sk->sk_state) {
	default:
		goto out;
	case SMC_ACTIVE:
		rc = -EISCONN;
		goto out;
	case SMC_INIT:
		rc = 0;
		break;
	}

	smc_copy_sock_settings_to_clc(smc);
	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
	rc = kernel_connect(smc->clcsock, addr, alen, flags);
	if (rc)
		goto out;

	/* setup RDMA connection */
	rc = smc_connect_rdma(smc);
	if (rc < 0)
		goto out;
	else
		rc = 0; /* success cases including fallback */

out:
	release_sock(sk);
out_err:
	return rc;
}

static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
{
	struct socket *new_clcsock = NULL;
	struct sock *lsk = &lsmc->sk;
	struct sock *new_sk;
	int rc;

	release_sock(lsk);
	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
	if (!new_sk) {
		rc = -ENOMEM;
		lsk->sk_err = ENOMEM;
		*new_smc = NULL;
		lock_sock(lsk);
		goto out;
	}
	*new_smc = smc_sk(new_sk);

	rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
	lock_sock(lsk);
	if (rc < 0)
		lsk->sk_err = -rc;
	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
		if (new_clcsock)
			sock_release(new_clcsock);
		new_sk->sk_state = SMC_CLOSED;
		sock_set_flag(new_sk, SOCK_DEAD);
		new_sk->sk_prot->unhash(new_sk);
		sock_put(new_sk); /* final */
		*new_smc = NULL;
		goto out;
	}

	(*new_smc)->clcsock = new_clcsock;
out:
	return rc;
}

/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
	struct smc_sock *par = smc_sk(parent);

	sock_hold(sk); /* sock_put in smc_accept_unlink() */
	spin_lock(&par->accept_q_lock);
	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_added(parent);
}

/* remove a socket from the accept queue of its parental listening socket */
static void smc_accept_unlink(struct sock *sk)
{
	struct smc_sock *par = smc_sk(sk)->listen_smc;

	spin_lock(&par->accept_q_lock);
	list_del_init(&smc_sk(sk)->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
	sock_put(sk); /* sock_hold in smc_accept_enqueue */
}

/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
struct sock *smc_accept_dequeue(struct sock *parent,
				struct socket *new_sock)
{
	struct smc_sock *isk, *n;
	struct sock *new_sk;

	list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
		new_sk = (struct sock *)isk;

		smc_accept_unlink(new_sk);
		if (new_sk->sk_state == SMC_CLOSED) {
			if (isk->clcsock) {
				sock_release(isk->clcsock);
				isk->clcsock = NULL;
			}
			new_sk->sk_prot->unhash(new_sk);
			sock_put(new_sk); /* final */
			continue;
		}
		if (new_sock)
			sock_graft(new_sk, new_sock);
		return new_sk;
	}
	return NULL;
}

/* clean up for a created but never accepted sock */
void smc_close_non_accepted(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	lock_sock(sk);
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
	if (!smc->use_fallback) {
		smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	}
	if (smc->clcsock) {
		struct socket *tcp;

		tcp = smc->clcsock;
		smc->clcsock = NULL;
		sock_release(tcp);
	}
	if (smc->use_fallback) {
		sock_put(sk); /* passive closing */
		sk->sk_state = SMC_CLOSED;
	} else {
		if (sk->sk_state == SMC_CLOSED)
			smc_conn_free(&smc->conn);
	}
	release_sock(sk);
	sk->sk_prot->unhash(sk);
	sock_put(sk); /* final sock_put */
}

static int smc_serv_conf_first_link(struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];

	if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
		return SMC_CLC_DECL_INTERR;

	/* send CONFIRM LINK request to client over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link,
				       link->smcibdev->mac[link->ibport - 1],
				       &link->smcibdev->gid[link->ibport - 1],
				       SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TCL;

	/* receive CONFIRM LINK response from client over the RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm_resp,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
		return rc;
	}

	if (link->llc_confirm_resp_rc)
		return SMC_CLC_DECL_RMBE_EC;

	/* send ADD LINK request to client over the RoCE fabric */
	rc = smc_llc_send_add_link(link,
				   link->smcibdev->mac[link->ibport - 1],
				   &link->smcibdev->gid[link->ibport - 1],
				   SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TCL;

	/* receive ADD LINK response from client over the RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(&link->llc_add_resp,
							 SMC_LLC_WAIT_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
		return rc;
	}

	smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);

	return 0;
}

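/*
 * Server-side counterpart (informal summary): smc_listen_work() below runs
 * once per accepted CLC/TCP connection. It waits for the CLC PROPOSAL, looks
 * up a RoCE device via the PNET table and matches the proposed IP prefix,
 * creates or reuses a link group plus buffers, sends the CLC ACCEPT and waits
 * for the CLC CONFIRM; on first contact the RoCE link is confirmed via LLC
 * messages in smc_serv_conf_first_link(). Failures decline to the peer and
 * fall back to plain TCP.
 */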
/* setup for RDMA connection of server */
static void smc_listen_work(struct work_struct *work)
{
	struct smc_sock *new_smc = container_of(work, struct smc_sock,
						smc_listen_work);
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct socket *newclcsock = new_smc->clcsock;
	struct smc_sock *lsmc = new_smc->listen_smc;
	struct smc_clc_msg_accept_confirm cclc;
	int local_contact = SMC_REUSE_CONTACT;
	struct sock *newsmcsk = &new_smc->sk;
	struct smc_clc_msg_proposal *pclc;
	struct smc_ib_device *smcibdev;
	u8 buf[SMC_CLC_MAX_LEN];
	struct smc_link *link;
	int reason_code = 0;
	int rc = 0;
	u8 ibport;

	if (new_smc->use_fallback)
		goto out_connected;

	/* check if peer is smc capable */
	if (!tcp_sk(newclcsock->sk)->syn_smc) {
		new_smc->use_fallback = true;
		goto out_connected;
	}

	/* do inband token exchange -
	 * wait for and receive SMC Proposal CLC message
	 */
	reason_code = smc_clc_wait_msg(new_smc, &buf, sizeof(buf),
				       SMC_CLC_PROPOSAL);
	if (reason_code < 0)
		goto out_err;
	if (reason_code > 0)
		goto decline_rdma;

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(new_smc)) {
		reason_code = SMC_CLC_DECL_IPSEC;
		goto decline_rdma;
	}

	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(newclcsock->sk, &smcibdev, &ibport);
	if (!smcibdev) {
		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
		goto decline_rdma;
	}

	pclc = (struct smc_clc_msg_proposal *)&buf;
	pclc_prfx = smc_clc_proposal_get_prefix(pclc);

	rc = smc_clc_prfx_match(newclcsock, pclc_prfx);
	if (rc) {
		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */
		goto decline_rdma;
	}

	/* allocate connection / link group */
	mutex_lock(&smc_create_lgr_pending);
	local_contact = smc_conn_create(new_smc, smcibdev, ibport, &pclc->lcl,
					0);
	if (local_contact < 0) {
		rc = local_contact;
		if (rc == -ENOMEM)
			reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/
		goto decline_rdma_unlock;
	}
	link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	/* create send buffer and rmb */
	rc = smc_buf_create(new_smc);
	if (rc) {
		reason_code = SMC_CLC_DECL_MEM;
		goto decline_rdma_unlock;
	}

	smc_close_init(new_smc);
	smc_rx_init(new_smc);

	if (local_contact != SMC_FIRST_CONTACT) {
		if (!new_smc->conn.rmb_desc->reused) {
			if (smc_reg_rmb(link, new_smc->conn.rmb_desc, true)) {
				reason_code = SMC_CLC_DECL_INTERR;
				goto decline_rdma_unlock;
			}
		}
	}
	smc_rmb_sync_sg_for_device(&new_smc->conn);

	rc = smc_clc_send_accept(new_smc, local_contact);
	if (rc)
		goto out_err_unlock;

	/* receive SMC Confirm CLC message */
	reason_code = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
				       SMC_CLC_CONFIRM);
	if (reason_code < 0)
		goto out_err_unlock;
	if (reason_code > 0)
		goto decline_rdma_unlock;
	smc_conn_save_peer_info(new_smc, &cclc);
	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, &cclc);

	rc = smc_rmb_rtoken_handling(&new_smc->conn, &cclc);
	if (rc) {
		reason_code = SMC_CLC_DECL_INTERR;
		goto decline_rdma_unlock;
	}

	if (local_contact == SMC_FIRST_CONTACT) {
		rc = smc_ib_ready_link(link);
		if (rc) {
			reason_code = SMC_CLC_DECL_INTERR;
			goto decline_rdma_unlock;
		}
		/* QP confirmation over RoCE fabric */
		reason_code = smc_serv_conf_first_link(new_smc);
		if (reason_code < 0)
			/* peer is not aware of a problem */
			goto out_err_unlock;
		if (reason_code > 0)
			goto decline_rdma_unlock;
	}

	smc_tx_init(new_smc);
	mutex_unlock(&smc_create_lgr_pending);

out_connected:
	sk_refcnt_debug_inc(newsmcsk);
	if (newsmcsk->sk_state == SMC_INIT)
		newsmcsk->sk_state = SMC_ACTIVE;
enqueue:
	lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
	if (lsmc->sk.sk_state == SMC_LISTEN) {
		smc_accept_enqueue(&lsmc->sk, newsmcsk);
	} else { /* no longer listening */
		smc_close_non_accepted(newsmcsk);
	}
	release_sock(&lsmc->sk);

	/* Wake up accept */
	lsmc->sk.sk_data_ready(&lsmc->sk);
	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
	return;

decline_rdma_unlock:
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(new_smc->conn.lgr);
	mutex_unlock(&smc_create_lgr_pending);
decline_rdma:
	/* RDMA setup failed, switch back to TCP */
	smc_conn_free(&new_smc->conn);
	new_smc->use_fallback = true;
	if (reason_code && (reason_code != SMC_CLC_DECL_REPLY)) {
		if (smc_clc_send_decline(new_smc, reason_code) < 0)
			goto out_err;
	}
	goto out_connected;

out_err_unlock:
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(new_smc->conn.lgr);
	mutex_unlock(&smc_create_lgr_pending);
out_err:
	if (newsmcsk->sk_state == SMC_INIT)
		sock_put(&new_smc->sk); /* passive closing */
	newsmcsk->sk_state = SMC_CLOSED;
	smc_conn_free(&new_smc->conn);
	goto enqueue; /* queue new sock with sk_err set */
}

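/*
 * The listen worker below loops on accepting new connections of the
 * listening CLC socket and schedules one smc_listen_work() instance per
 * accepted connection; finished child sockets are queued on the parent's
 * accept_q (see smc_accept_enqueue()) until user space calls accept().
 */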
static void smc_tcp_listen_work(struct work_struct *work)
{
	struct smc_sock *lsmc = container_of(work, struct smc_sock,
					     tcp_listen_work);
	struct sock *lsk = &lsmc->sk;
	struct smc_sock *new_smc;
	int rc = 0;

	lock_sock(lsk);
	while (lsk->sk_state == SMC_LISTEN) {
		rc = smc_clcsock_accept(lsmc, &new_smc);
		if (rc)
			goto out;
		if (!new_smc)
			continue;

		new_smc->listen_smc = lsmc;
		new_smc->use_fallback = lsmc->use_fallback;
		sock_hold(lsk); /* sock_put in smc_listen_work */
		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
		smc_copy_sock_settings_to_smc(new_smc);
		sock_hold(&new_smc->sk); /* sock_put in passive closing */
		if (!schedule_work(&new_smc->smc_listen_work))
			sock_put(&new_smc->sk);
	}

out:
	release_sock(lsk);
	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
}

static int smc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);
	lock_sock(sk);

	rc = -EINVAL;
	if ((sk->sk_state != SMC_INIT) && (sk->sk_state != SMC_LISTEN))
		goto out;

	rc = 0;
	if (sk->sk_state == SMC_LISTEN) {
		sk->sk_max_ack_backlog = backlog;
		goto out;
	}
	/* some socket options are handled in core, so we could not apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
	smc_copy_sock_settings_to_clc(smc);
	if (!smc->use_fallback)
		tcp_sk(smc->clcsock->sk)->syn_smc = 1;

	rc = kernel_listen(smc->clcsock, backlog);
	if (rc)
		goto out;
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = SMC_LISTEN;
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	sock_hold(sk); /* sock_hold in tcp_listen_worker */
	if (!schedule_work(&smc->tcp_listen_work))
		sock_put(sk);

out:
	release_sock(sk);
	return rc;
}

static int smc_accept(struct socket *sock, struct socket *new_sock,
		      int flags, bool kern)
{
	struct sock *sk = sock->sk, *nsk;
	DECLARE_WAITQUEUE(wait, current);
	struct smc_sock *lsmc;
	long timeo;
	int rc = 0;

	lsmc = smc_sk(sk);
	sock_hold(sk); /* sock_put below */
	lock_sock(sk);

	if (lsmc->sk.sk_state != SMC_LISTEN) {
		rc = -EINVAL;
		release_sock(sk);
		goto out;
	}

	/* Wait for an incoming connection */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			rc = -EAGAIN;
			break;
		}
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		/* wakeup by sk_data_ready in smc_listen_work() */
		sched_annotate_sleep();
		lock_sock(sk);
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (!rc)
		rc = sock_error(nsk);
	release_sock(sk);
	if (rc)
		goto out;

	if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
		/* wait till data arrives on the socket */
		timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
					 MSEC_PER_SEC);
		if (smc_sk(nsk)->use_fallback) {
			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;

			lock_sock(clcsk);
			if (skb_queue_empty(&clcsk->sk_receive_queue))
				sk_wait_data(clcsk, &timeo, NULL);
			release_sock(clcsk);
		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
			lock_sock(nsk);
			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
			release_sock(nsk);
		}
	}

out:
	sock_put(sk); /* sock_hold above */
	return rc;
}

static int smc_getname(struct socket *sock, struct sockaddr *addr,
		       int peer)
{
	struct smc_sock *smc;

	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
		return -ENOTCONN;

	smc = smc_sk(sock->sk);

	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
}

static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_INIT))
		goto out;

	if (msg->msg_flags & MSG_FASTOPEN) {
		if (sk->sk_state == SMC_INIT) {
			smc->use_fallback = true;
		} else {
			rc = -EINVAL;
			goto out;
		}
	}

	if (smc->use_fallback)
		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
	else
		rc = smc_tx_sendmsg(smc, msg, len);
out:
	release_sock(sk);
	return rc;
}

static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state == SMC_INIT) ||
	    (sk->sk_state == SMC_LISTEN) ||
	    (sk->sk_state == SMC_CLOSED))
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
	} else {
		msg->msg_namelen = 0;
		rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
	}

out:
	release_sock(sk);
	return rc;
}

static __poll_t smc_accept_poll(struct sock *parent)
{
	struct smc_sock *isk = smc_sk(parent);
	__poll_t mask = 0;

	spin_lock(&isk->accept_q_lock);
	if (!list_empty(&isk->accept_q))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&isk->accept_q_lock);

	return mask;
}

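/*
 * Note on smc_poll() below: while the socket is still in SMC_INIT state or
 * after a fallback to TCP, polling is delegated to the internal CLC socket
 * (and a completed non-blocking connect triggers smc_connect_rdma());
 * otherwise the mask is derived from the SMC connection's own send/receive
 * space and shutdown state.
 */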
static __poll_t smc_poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;
	struct smc_sock *smc;
	int rc;

	if (!sk)
		return EPOLLNVAL;

	smc = smc_sk(sock->sk);
	sock_hold(sk);
	lock_sock(sk);
	if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
		/* delegate to CLC child sock */
		release_sock(sk);
		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
		lock_sock(sk);
		sk->sk_err = smc->clcsock->sk->sk_err;
		if (sk->sk_err) {
			mask |= EPOLLERR;
		} else {
			/* if non-blocking connect finished ... */
			if (sk->sk_state == SMC_INIT &&
			    mask & EPOLLOUT &&
			    smc->clcsock->sk->sk_state != TCP_CLOSE) {
				rc = smc_connect_rdma(smc);
				if (rc < 0)
					mask |= EPOLLERR;
				/* success cases including fallback */
				mask |= EPOLLOUT | EPOLLWRNORM;
			}
		}
	} else {
		if (sk->sk_state != SMC_CLOSED) {
			release_sock(sk);
			sock_poll_wait(file, sk_sleep(sk), wait);
			lock_sock(sk);
		}
		if (sk->sk_err)
			mask |= EPOLLERR;
		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
		    (sk->sk_state == SMC_CLOSED))
			mask |= EPOLLHUP;
		if (sk->sk_state == SMC_LISTEN) {
			/* woken up by sk_data_ready in smc_listen_work() */
			mask = smc_accept_poll(sk);
		} else {
			if (atomic_read(&smc->conn.sndbuf_space) ||
			    sk->sk_shutdown & SEND_SHUTDOWN) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			}
			if (atomic_read(&smc->conn.bytes_to_rcv))
				mask |= EPOLLIN | EPOLLRDNORM;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
			if (sk->sk_state == SMC_APPCLOSEWAIT1)
				mask |= EPOLLIN;
		}

	}
	release_sock(sk);
	sock_put(sk);

	return mask;
}

static int smc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;
	int rc1 = 0;

	smc = smc_sk(sk);

	if ((how < SHUT_RD) || (how > SHUT_RDWR))
		return rc;

	lock_sock(sk);

	rc = -ENOTCONN;
	if ((sk->sk_state != SMC_LISTEN) &&
	    (sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
		goto out;
	if (smc->use_fallback) {
		rc = kernel_sock_shutdown(smc->clcsock, how);
		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
		if (sk->sk_shutdown == SHUTDOWN_MASK)
			sk->sk_state = SMC_CLOSED;
		goto out;
	}
	switch (how) {
	case SHUT_RDWR:		/* shutdown in both directions */
		rc = smc_close_active(smc);
		break;
	case SHUT_WR:
		rc = smc_close_shutdown_write(smc);
		break;
	case SHUT_RD:
		rc = 0;
		/* nothing more to do because peer is not involved */
		break;
	}
	if (smc->clcsock)
		rc1 = kernel_sock_shutdown(smc->clcsock, how);
	/* map sock_shutdown_cmd constants to sk_shutdown value range */
	sk->sk_shutdown |= how + 1;

out:
	release_sock(sk);
	return rc ? rc : rc1;
}

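/*
 * Note on smc_setsockopt() below: generic socket options are always applied
 * to the internal CLC socket first. TCP_ULP and the TCP_FASTOPEN family are
 * not supported by SMC and force a fallback to TCP when set before the
 * connection exists; TCP_NODELAY and TCP_CORK may kick the deferred transmit
 * worker; TCP_DEFER_ACCEPT is remembered for smc_accept().
 */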
1321static int smc_setsockopt(struct socket *sock, int level, int optname,
1322 char __user *optval, unsigned int optlen)
1323{
1324 struct sock *sk = sock->sk;
1325 struct smc_sock *smc;
Ursula Braun01d2f7e2018-04-26 17:18:22 +02001326 int val, rc;
Ursula Braunac713872017-01-09 16:55:13 +01001327
1328 smc = smc_sk(sk);
1329
1330 /* generic setsockopts reaching us here always apply to the
1331 * CLC socket
1332 */
Ursula Braunee9dfbe2018-04-26 17:18:21 +02001333 rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
1334 optval, optlen);
1335 if (smc->clcsock->sk->sk_err) {
1336 sk->sk_err = smc->clcsock->sk->sk_err;
1337 sk->sk_error_report(sk);
1338 }
1339 if (rc)
1340 return rc;
1341
Ursula Braun01d2f7e2018-04-26 17:18:22 +02001342 if (optlen < sizeof(int))
1343 return rc;
1344 get_user(val, (int __user *)optval);

	lock_sock(sk);
	switch (optname) {
	case TCP_ULP:
	case TCP_FASTOPEN:
	case TCP_FASTOPEN_CONNECT:
	case TCP_FASTOPEN_KEY:
	case TCP_FASTOPEN_NO_COOKIE:
		/* option not supported by SMC */
		if (sk->sk_state == SMC_INIT) {
			smc->use_fallback = true;
		} else {
			if (!smc->use_fallback)
				rc = -EINVAL;
		}
		break;
	case TCP_NODELAY:
		if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
			if (val && !smc->use_fallback)
				mod_delayed_work(system_wq, &smc->conn.tx_work,
						 0);
		}
		break;
	case TCP_CORK:
		if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
			if (!val && !smc->use_fallback)
				mod_delayed_work(system_wq, &smc->conn.tx_work,
						 0);
		}
		break;
	case TCP_DEFER_ACCEPT:
		smc->sockopt_defer_accept = val;
		break;
	default:
		break;
	}
	release_sock(sk);

	return rc;
}

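/* smc_getsockopt() - socket option queries are answered by the internal
 * CLC (TCP) socket; SMC does not maintain separate option state.
 */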
static int smc_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct smc_sock *smc;

	smc = smc_sk(sock->sk);
	/* socket options apply to the CLC socket */
	return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
					     optval, optlen);
}

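/* smc_ioctl() - answer queue-size queries for SMC connections
 *
 * For fallback sockets the ioctl is forwarded to the CLC (TCP) socket.
 * Otherwise the answers are derived from the SMC connection state:
 * SIOCINQ reports the bytes available in the receive buffer (RMB),
 * SIOCOUTQ the used part of the send buffer (unsent plus unacked data),
 * and SIOCOUTQNSD only the data prepared but not yet sent.
 */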
static int smc_ioctl(struct socket *sock, unsigned int cmd,
		     unsigned long arg)
{
	struct smc_sock *smc;
	int answ;

	smc = smc_sk(sock->sk);
	if (smc->use_fallback) {
		if (!smc->clcsock)
			return -EBADF;
		return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
	}
	switch (cmd) {
	case SIOCINQ: /* same as FIONREAD */
		if (smc->sk.sk_state == SMC_LISTEN)
			return -EINVAL;
		answ = atomic_read(&smc->conn.bytes_to_rcv);
		break;
	case SIOCOUTQ:
		/* output queue size (not send + not acked) */
		if (smc->sk.sk_state == SMC_LISTEN)
			return -EINVAL;
		answ = smc->conn.sndbuf_desc->len -
				atomic_read(&smc->conn.sndbuf_space);
		break;
	case SIOCOUTQNSD:
		/* output queue size (not send only) */
		if (smc->sk.sk_state == SMC_LISTEN)
			return -EINVAL;
		answ = smc_tx_prepared_sends(&smc->conn);
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}

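/* smc_sendpage() - sendpage support
 *
 * Zero-copy sendpage is only available in fallback mode via the CLC (TCP)
 * socket; for native SMC connections sock_no_sendpage() routes the data
 * through the regular sendmsg path instead.
 */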
static ssize_t smc_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state != SMC_ACTIVE) {
		release_sock(sk);
		goto out;
	}
	release_sock(sk);
	if (smc->use_fallback)
		rc = kernel_sendpage(smc->clcsock, page, offset,
				     size, flags);
	else
		rc = sock_no_sendpage(sock, page, offset, size, flags);

out:
	return rc;
}

/* Map the affected portions of the rmbe into an spd, note the number of bytes
 * to splice in conn->splice_pending, and press 'go'. Consumer cursor updates
 * are delayed until the respective page has been fully processed.
 * Note that subsequent recv() calls have to wait until all splice() processing
 * is completed.
 */
static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);

	if (sk->sk_state == SMC_INIT ||
	    sk->sk_state == SMC_LISTEN ||
	    sk->sk_state == SMC_CLOSED)
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
						    pipe, len, flags);
	} else {
		if (*ppos) {
			rc = -ESPIPE;
			goto out;
		}
		if (flags & SPLICE_F_NONBLOCK)
			flags = MSG_DONTWAIT;
		else
			flags = 0;
		rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
	}
out:
	release_sock(sk);

	return rc;
}

/* must look like tcp */
static const struct proto_ops smc_sock_ops = {
	.family		= PF_SMC,
	.owner		= THIS_MODULE,
	.release	= smc_release,
	.bind		= smc_bind,
	.connect	= smc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= smc_accept,
	.getname	= smc_getname,
	.poll		= smc_poll,
	.ioctl		= smc_ioctl,
	.listen		= smc_listen,
	.shutdown	= smc_shutdown,
	.setsockopt	= smc_setsockopt,
	.getsockopt	= smc_getsockopt,
	.sendmsg	= smc_sendmsg,
	.recvmsg	= smc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= smc_sendpage,
	.splice_read	= smc_splice_read,
};

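/* smc_create() - create a new AF_SMC socket
 *
 * The protocol (SMCPROTO_SMC or SMCPROTO_SMC6) selects whether the internal
 * CLC socket uses IPv4 or IPv6. The CLC socket is a regular kernel TCP
 * socket used for the CLC handshake and as fallback transport.
 *
 * Illustrative userspace sketch (not part of this file):
 *
 *	int fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);
 *
 * followed by the usual bind()/listen()/connect() calls as with TCP.
 */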
static int smc_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
	struct smc_sock *smc;
	struct sock *sk;
	int rc;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_STREAM)
		goto out;

	rc = -EPROTONOSUPPORT;
	if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
		goto out;

	rc = -ENOBUFS;
	sock->ops = &smc_sock_ops;
	sk = smc_sock_alloc(net, sock, protocol);
	if (!sk)
		goto out;

	/* create internal TCP socket for CLC handshake and fallback */
	smc = smc_sk(sk);
	smc->use_fallback = false; /* assume rdma capability first */
	rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
			      &smc->clcsock);
	if (rc) {
		sk_common_release(sk);
		goto out;
	}
	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);

out:
	return rc;
}

static const struct net_proto_family smc_sock_family_ops = {
	.family	= PF_SMC,
	.owner	= THIS_MODULE,
	.create	= smc_create,
};

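/* smc_init() - module initialization
 *
 * Registers the building blocks in dependency order: pnet table handling,
 * LLC and CDC message handling, the SMC protocol (IPv4 and IPv6 variants),
 * the PF_SMC socket family, and the IB client. Finally the tcp_have_smc
 * static key is enabled so the TCP stack starts handling the SMC
 * experimental option.
 */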
static int __init smc_init(void)
{
	int rc;

	rc = smc_pnet_init();
	if (rc)
		return rc;

	rc = smc_llc_init();
	if (rc) {
		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = smc_cdc_init();
	if (rc) {
		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = proto_register(&smc_proto, 1);
	if (rc) {
		pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = proto_register(&smc_proto6, 1);
	if (rc) {
		pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
		goto out_proto;
	}

	rc = sock_register(&smc_sock_family_ops);
	if (rc) {
		pr_err("%s: sock_register fails with %d\n", __func__, rc);
		goto out_proto6;
	}
	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
	INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);

	rc = smc_ib_register_client();
	if (rc) {
		pr_err("%s: ib_register fails with %d\n", __func__, rc);
		goto out_sock;
	}

	static_branch_enable(&tcp_have_smc);
	return 0;

out_sock:
	sock_unregister(PF_SMC);
out_proto6:
	proto_unregister(&smc_proto6);
out_proto:
	proto_unregister(&smc_proto);
out_pnet:
	smc_pnet_exit();
	return rc;
}

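/* smc_exit() - module cleanup; tears down remaining link groups via
 * smc_core_exit(), disables the tcp_have_smc static key, and releases
 * the remaining resources in reverse order of smc_init().
 */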
static void __exit smc_exit(void)
{
	smc_core_exit();
	static_branch_disable(&tcp_have_smc);
	smc_ib_unregister_client();
	sock_unregister(PF_SMC);
	proto_unregister(&smc_proto6);
	proto_unregister(&smc_proto);
	smc_pnet_exit();
}

module_init(smc_init);
module_exit(smc_exit);

MODULE_AUTHOR("Ursula Braun <ubraun@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);