// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Socket Closing - normal and abnormal
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>

#include "smc.h"
#include "smc_tx.h"
#include "smc_cdc.h"
#include "smc_close.h"

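/* close all connections still queued on the parent's accept queue */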
static void smc_close_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = smc_accept_dequeue(parent, NULL)))
		smc_close_non_accepted(sk);
}

/* wait for sndbuf data being transmitted */
static void smc_close_stream_wait(struct smc_sock *smc, long timeout)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct sock *sk = &smc->sk;

	if (!timeout)
		return;

	if (!smc_tx_prepared_sends(&smc->conn))
		return;

	smc->wait_close_tx_prepared = 1;
	add_wait_queue(sk_sleep(sk), &wait);
	while (!signal_pending(current) && timeout) {
		int rc;

		rc = sk_wait_event(sk, &timeout,
				   !smc_tx_prepared_sends(&smc->conn) ||
				   (sk->sk_err == ECONNABORTED) ||
				   (sk->sk_err == ECONNRESET),
				   &wait);
		if (rc)
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	smc->wait_close_tx_prepared = 0;
}

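/* wake up a closing socket waiting in smc_close_stream_wait() */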
void smc_close_wake_tx_prepared(struct smc_sock *smc)
{
	if (smc->wait_close_tx_prepared)
		/* wake up socket closing */
		smc->sk.sk_state_change(&smc->sk);
}

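/* announce a write shutdown to the peer via a CDC message with
 * peer_done_writing set
 */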
static int smc_close_wr(struct smc_connection *conn)
{
	conn->local_tx_ctrl.conn_state_flags.peer_done_writing = 1;

	return smc_cdc_get_slot_and_msg_send(conn);
}

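/* announce the final close to the peer: peer_conn_closed if all received
 * data has been consumed, otherwise peer_conn_abort
 */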
static int smc_close_final(struct smc_connection *conn)
{
	if (atomic_read(&conn->bytes_to_rcv))
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	else
		conn->local_tx_ctrl.conn_state_flags.peer_conn_closed = 1;

	return smc_cdc_get_slot_and_msg_send(conn);
}

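/* announce an abnormal close to the peer via a CDC message with
 * peer_conn_abort set
 */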
static int smc_close_abort(struct smc_connection *conn)
{
	conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;

	return smc_cdc_get_slot_and_msg_send(conn);
}

/* terminate smc socket abnormally - active abort
 * RDMA communication no longer possible
 */
static void smc_close_active_abort(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;

	struct smc_cdc_conn_state_flags *txflags =
		&smc->conn.local_tx_ctrl.conn_state_flags;

	sk->sk_err = ECONNABORTED;
	if (smc->clcsock && smc->clcsock->sk) {
		smc->clcsock->sk->sk_err = ECONNABORTED;
		smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
	}
	switch (sk->sk_state) {
	case SMC_INIT:
	case SMC_ACTIVE:
		sk->sk_state = SMC_PEERABORTWAIT;
		release_sock(sk);
		cancel_delayed_work_sync(&smc->conn.tx_work);
		lock_sock(sk);
		break;
	case SMC_APPCLOSEWAIT1:
	case SMC_APPCLOSEWAIT2:
		txflags->peer_conn_abort = 1;
		sock_release(smc->clcsock);
		if (!smc_cdc_rxed_any_close(&smc->conn))
			sk->sk_state = SMC_PEERABORTWAIT;
		else
			sk->sk_state = SMC_CLOSED;
		release_sock(sk);
		cancel_delayed_work_sync(&smc->conn.tx_work);
		lock_sock(sk);
		break;
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
		if (!txflags->peer_conn_closed) {
			sk->sk_state = SMC_PEERABORTWAIT;
			txflags->peer_conn_abort = 1;
			sock_release(smc->clcsock);
		} else {
			sk->sk_state = SMC_CLOSED;
		}
		break;
	case SMC_PROCESSABORT:
	case SMC_APPFINCLOSEWAIT:
		if (!txflags->peer_conn_closed) {
			txflags->peer_conn_abort = 1;
			sock_release(smc->clcsock);
		}
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_PEERFINCLOSEWAIT:
	case SMC_PEERABORTWAIT:
	case SMC_CLOSED:
		break;
	}

	sock_set_flag(sk, SOCK_DEAD);
	sk->sk_state_change(sk);
}

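/* check whether any kind of close - normal or abort - has already been sent */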
static inline bool smc_close_sent_any_close(struct smc_connection *conn)
{
	return conn->local_tx_ctrl.conn_state_flags.peer_conn_abort ||
	       conn->local_tx_ctrl.conn_state_flags.peer_conn_closed;
}

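/* perform the active close: clean up listening sockets, wait for pending
 * sends and announce the close to the peer, advancing sk_state accordingly
 */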
int smc_close_active(struct smc_sock *smc)
{
	struct smc_cdc_conn_state_flags *txflags =
		&smc->conn.local_tx_ctrl.conn_state_flags;
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	int old_state;
	long timeout;
	int rc = 0;

	timeout = current->flags & PF_EXITING ?
		  0 : sock_flag(sk, SOCK_LINGER) ?
		      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;

	old_state = sk->sk_state;
again:
	switch (sk->sk_state) {
	case SMC_INIT:
		sk->sk_state = SMC_CLOSED;
		if (smc->smc_listen_work.func)
			cancel_work_sync(&smc->smc_listen_work);
		break;
	case SMC_LISTEN:
		sk->sk_state = SMC_CLOSED;
		sk->sk_state_change(sk); /* wake up accept */
		if (smc->clcsock && smc->clcsock->sk) {
			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
			/* wake up kernel_accept of smc_tcp_listen_worker */
			smc->clcsock->sk->sk_data_ready(smc->clcsock->sk);
		}
		release_sock(sk);
		smc_close_cleanup_listen(sk);
		cancel_work_sync(&smc->smc_listen_work);
		lock_sock(sk);
		break;
	case SMC_ACTIVE:
		smc_close_stream_wait(smc, timeout);
		release_sock(sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);
		if (sk->sk_state == SMC_ACTIVE) {
			/* send close request */
			rc = smc_close_final(conn);
			if (rc)
				break;
			sk->sk_state = SMC_PEERCLOSEWAIT1;
		} else {
			/* peer event has changed the state */
			goto again;
		}
		break;
	case SMC_APPFINCLOSEWAIT:
		/* socket already shutdown wr or both (active close) */
		if (txflags->peer_done_writing &&
		    !smc_close_sent_any_close(conn)) {
			/* just shutdown wr done, send close request */
			rc = smc_close_final(conn);
			if (rc)
				break;
		}
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_APPCLOSEWAIT1:
	case SMC_APPCLOSEWAIT2:
		if (!smc_cdc_rxed_any_close(conn))
			smc_close_stream_wait(smc, timeout);
		release_sock(sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);
		if (sk->sk_state != SMC_APPCLOSEWAIT1 &&
		    sk->sk_state != SMC_APPCLOSEWAIT2)
			goto again;
		/* confirm close from peer */
		rc = smc_close_final(conn);
		if (rc)
			break;
		if (smc_cdc_rxed_any_close(conn))
			/* peer has closed the socket already */
			sk->sk_state = SMC_CLOSED;
		else
			/* peer has just issued a shutdown write */
			sk->sk_state = SMC_PEERFINCLOSEWAIT;
		break;
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
		if (txflags->peer_done_writing &&
		    !smc_close_sent_any_close(conn)) {
			/* just shutdown wr done, send close request */
			rc = smc_close_final(conn);
			if (rc)
				break;
		}
		/* peer sending PeerConnectionClosed will cause transition */
		break;
	case SMC_PEERFINCLOSEWAIT:
		/* peer sending PeerConnectionClosed will cause transition */
		break;
	case SMC_PROCESSABORT:
		smc_close_abort(conn);
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_PEERABORTWAIT:
	case SMC_CLOSED:
		/* nothing to do, add tracing in future patch */
		break;
	}

	if (old_state != sk->sk_state)
		sk->sk_state_change(sk);
	return rc;
}

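/* a peer_conn_abort has been received; move sk_state to the matching
 * abort or closed state
 */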
static void smc_close_passive_abort_received(struct smc_sock *smc)
{
	struct smc_cdc_conn_state_flags *txflags =
		&smc->conn.local_tx_ctrl.conn_state_flags;
	struct sock *sk = &smc->sk;

	switch (sk->sk_state) {
	case SMC_ACTIVE:
	case SMC_APPFINCLOSEWAIT:
	case SMC_APPCLOSEWAIT1:
	case SMC_APPCLOSEWAIT2:
		sk->sk_state = SMC_PROCESSABORT;
		break;
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
		if (txflags->peer_done_writing &&
		    !smc_close_sent_any_close(&smc->conn)) {
			/* just shutdown, but not yet closed locally */
			sk->sk_state = SMC_PROCESSABORT;
		} else {
			sk->sk_state = SMC_CLOSED;
		}
		break;
	case SMC_PEERFINCLOSEWAIT:
	case SMC_PEERABORTWAIT:
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_INIT:
	case SMC_PROCESSABORT:
		/* nothing to do, add tracing in future patch */
		break;
	}
}

/* Some kind of closing has been received: peer_conn_closed, peer_conn_abort,
 * or peer_done_writing.
 */
static void smc_close_passive_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(work,
						   struct smc_connection,
						   close_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_cdc_conn_state_flags *rxflags;
	struct sock *sk = &smc->sk;
	int old_state;

	lock_sock(sk);
	old_state = sk->sk_state;

	if (!conn->alert_token_local) {
		/* abnormal termination */
		smc_close_active_abort(smc);
		goto wakeup;
	}

	rxflags = &conn->local_rx_ctrl.conn_state_flags;
	if (rxflags->peer_conn_abort) {
		smc_close_passive_abort_received(smc);
		release_sock(&smc->sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(&smc->sk);
		goto wakeup;
	}

	switch (sk->sk_state) {
	case SMC_INIT:
		if (atomic_read(&conn->bytes_to_rcv) ||
		    (rxflags->peer_done_writing &&
		     !smc_cdc_rxed_any_close(conn)))
			sk->sk_state = SMC_APPCLOSEWAIT1;
		else
			sk->sk_state = SMC_CLOSED;
		break;
	case SMC_ACTIVE:
		sk->sk_state = SMC_APPCLOSEWAIT1;
		break;
	case SMC_PEERCLOSEWAIT1:
		if (rxflags->peer_done_writing)
			sk->sk_state = SMC_PEERCLOSEWAIT2;
		/* fall through */
		/* to check for closing */
	case SMC_PEERCLOSEWAIT2:
		if (!smc_cdc_rxed_any_close(conn))
			break;
		if (sock_flag(sk, SOCK_DEAD) &&
		    smc_close_sent_any_close(conn)) {
			/* smc_release has already been called locally */
			sk->sk_state = SMC_CLOSED;
		} else {
			/* just shutdown, but not yet closed locally */
			sk->sk_state = SMC_APPFINCLOSEWAIT;
		}
		break;
	case SMC_PEERFINCLOSEWAIT:
		if (smc_cdc_rxed_any_close(conn))
			sk->sk_state = SMC_CLOSED;
		break;
	case SMC_APPCLOSEWAIT1:
	case SMC_APPCLOSEWAIT2:
	case SMC_APPFINCLOSEWAIT:
	case SMC_PEERABORTWAIT:
	case SMC_PROCESSABORT:
	case SMC_CLOSED:
		/* nothing to do, add tracing in future patch */
		break;
	}

wakeup:
	sk->sk_data_ready(sk); /* wakeup blocked rcvbuf consumers */
	sk->sk_write_space(sk); /* wakeup blocked sndbuf producers */

	if (old_state != sk->sk_state) {
		sk->sk_state_change(sk);
		if ((sk->sk_state == SMC_CLOSED) &&
		    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
			smc_conn_free(conn);
			schedule_delayed_work(&smc->sock_put_work,
					      SMC_CLOSE_SOCK_PUT_DELAY);
		}
	}
	release_sock(sk);
}

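/* delayed work to unhash the socket and release the final sock reference */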
void smc_close_sock_put_work(struct work_struct *work)
{
	struct smc_sock *smc = container_of(to_delayed_work(work),
					    struct smc_sock,
					    sock_put_work);

	smc->sk.sk_prot->unhash(&smc->sk);
	sock_put(&smc->sk);
}

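/* shut down the write direction only: wait for pending sends and announce
 * peer_done_writing to the peer
 */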
int smc_close_shutdown_write(struct smc_sock *smc)
{
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	int old_state;
	long timeout;
	int rc = 0;

	timeout = current->flags & PF_EXITING ?
		  0 : sock_flag(sk, SOCK_LINGER) ?
		      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;

	old_state = sk->sk_state;
again:
	switch (sk->sk_state) {
	case SMC_ACTIVE:
		smc_close_stream_wait(smc, timeout);
		release_sock(sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);
		if (sk->sk_state != SMC_ACTIVE)
			goto again;
		/* send close wr request */
		rc = smc_close_wr(conn);
		if (rc)
			break;
		sk->sk_state = SMC_PEERCLOSEWAIT1;
		break;
	case SMC_APPCLOSEWAIT1:
		/* passive close */
		if (!smc_cdc_rxed_any_close(conn))
			smc_close_stream_wait(smc, timeout);
		release_sock(sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);
		if (sk->sk_state != SMC_APPCLOSEWAIT1)
			goto again;
		/* confirm close from peer */
		rc = smc_close_wr(conn);
		if (rc)
			break;
		sk->sk_state = SMC_APPCLOSEWAIT2;
		break;
	case SMC_APPCLOSEWAIT2:
	case SMC_PEERFINCLOSEWAIT:
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
	case SMC_APPFINCLOSEWAIT:
	case SMC_PROCESSABORT:
	case SMC_PEERABORTWAIT:
		/* nothing to do, add tracing in future patch */
		break;
	}

	if (old_state != sk->sk_state)
		sk->sk_state_change(sk);
	return rc;
}

/* Initialize close properties on connection establishment. */
void smc_close_init(struct smc_sock *smc)
{
	INIT_WORK(&smc->conn.close_work, smc_close_passive_work);
}
469}