/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 *
 * Author: Zach Brown <zab@zabbo.net>
 * Author: Peter J. Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 * Author: Eric Barton <eric@bartonsoftware.com>
 *
 * This file is part of Portals, http://www.sf.net/projects/sandiaportals/
 *
 * Portals is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Portals is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include "socklnd.h"

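/*
 * Allocate a tx descriptor of 'size' bytes. NOOP txs (used for
 * zero-copy ACKs) are recycled through the ksnd_idle_noop_txs free
 * list to spare the allocator on the ACK path; everything else comes
 * straight from LIBCFS_ALLOC().
 */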
struct ksock_tx *
ksocknal_alloc_tx(int type, int size)
{
	struct ksock_tx *tx = NULL;

	if (type == KSOCK_MSG_NOOP) {
		LASSERT(size == KSOCK_NOOP_TX_SIZE);

		/* searching for a noop tx in free list */
		spin_lock(&ksocknal_data.ksnd_tx_lock);

		if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
			tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
					struct ksock_tx, tx_list);
			LASSERT(tx->tx_desc_size == size);
			list_del(&tx->tx_list);
		}

		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	}

	if (!tx)
		LIBCFS_ALLOC(tx, size);

	if (!tx)
		return NULL;

	atomic_set(&tx->tx_refcount, 1);
	tx->tx_zc_aborted = 0;
	tx->tx_zc_capable = 0;
	tx->tx_zc_checked = 0;
	tx->tx_desc_size = size;

	atomic_inc(&ksocknal_data.ksnd_nactive_txs);

	return tx;
}

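/*
 * Allocate a NOOP tx carrying a zero-copy ACK cookie in
 * ksm_zc_cookies[1]. Illustrative use only (a hypothetical caller;
 * the real callers are the protocol handlers), queued like any other
 * tx with the required locks held:
 *
 *	struct ksock_tx *tx = ksocknal_alloc_tx_noop(cookie, 1);
 *	if (tx != NULL)
 *		ksocknal_queue_tx_locked(tx, conn);
 */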
struct ksock_tx *
ksocknal_alloc_tx_noop(__u64 cookie, int nonblk)
{
	struct ksock_tx *tx;

	tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE);
	if (!tx) {
		CERROR("Can't allocate noop tx desc\n");
		return NULL;
	}

	tx->tx_conn = NULL;
	tx->tx_lnetmsg = NULL;
	tx->tx_kiov = NULL;
	tx->tx_nkiov = 0;
	tx->tx_iov = tx->tx_frags.virt.iov;
	tx->tx_niov = 1;
	tx->tx_nonblk = nonblk;

	socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP);
	tx->tx_msg.ksm_zc_cookies[1] = cookie;

	return tx;
}

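/*
 * Release a tx descriptor: NOOP txs go back on the shared free list,
 * anything else is freed outright.
 */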
void
ksocknal_free_tx(struct ksock_tx *tx)
{
	atomic_dec(&ksocknal_data.ksnd_nactive_txs);

	if (!tx->tx_lnetmsg && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) {
		/* it's a noop tx */
		spin_lock(&ksocknal_data.ksnd_tx_lock);

		list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs);

		spin_unlock(&ksocknal_data.ksnd_tx_lock);
	} else {
		LIBCFS_FREE(tx, tx->tx_desc_size);
	}
}

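/*
 * ksocknal_send_iov()/ksocknal_send_kiov() below push the current
 * fragment list at the socket, then "consume" whatever it accepted:
 * fully-sent fragments are dropped from the front of the list and a
 * partially-sent one has its base/offset advanced, so the next call
 * resumes exactly where the socket stopped.
 */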
static int
ksocknal_send_iov(struct ksock_conn *conn, struct ksock_tx *tx)
{
	struct kvec *iov = tx->tx_iov;
	int nob;
	int rc;

	LASSERT(tx->tx_niov > 0);

	/* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */
	rc = ksocknal_lib_send_iov(conn, tx);

	if (rc <= 0) /* sent nothing? */
		return rc;

	nob = rc;
	LASSERT(nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	/* "consume" iov */
	do {
		LASSERT(tx->tx_niov > 0);

		if (nob < (int)iov->iov_len) {
			iov->iov_base = (void *)((char *)iov->iov_base + nob);
			iov->iov_len -= nob;
			return rc;
		}

		nob -= iov->iov_len;
		tx->tx_iov = ++iov;
		tx->tx_niov--;
	} while (nob);

	return rc;
}

static int
ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx)
{
	lnet_kiov_t *kiov = tx->tx_kiov;
	int nob;
	int rc;

	LASSERT(!tx->tx_niov);
	LASSERT(tx->tx_nkiov > 0);

	/* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */
	rc = ksocknal_lib_send_kiov(conn, tx);

	if (rc <= 0) /* sent nothing? */
		return rc;

	nob = rc;
	LASSERT(nob <= tx->tx_resid);
	tx->tx_resid -= nob;

	/* "consume" kiov */
	do {
		LASSERT(tx->tx_nkiov > 0);

		if (nob < (int)kiov->kiov_len) {
			kiov->kiov_offset += nob;
			kiov->kiov_len -= nob;
			return rc;
		}

		nob -= (int)kiov->kiov_len;
		tx->tx_kiov = ++kiov;
		tx->tx_nkiov--;
	} while (nob);

	return rc;
}

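/*
 * Drive a tx until it blocks, fails or completes: returns 0 when
 * tx_resid reaches zero, -EAGAIN if the socket would block, -ENOMEM
 * if the EAGAIN was due to memory pressure, or -ESHUTDOWN if the conn
 * is already closing.
 */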
static int
ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
{
	int rc;
	int bufnob;

	if (ksocknal_data.ksnd_stall_tx) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
	}

	LASSERT(tx->tx_resid);

	rc = ksocknal_connsock_addref(conn);
	if (rc) {
		LASSERT(conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	do {
		if (ksocknal_data.ksnd_enomem_tx > 0) {
			/* testing... */
			ksocknal_data.ksnd_enomem_tx--;
			rc = -EAGAIN;
		} else if (tx->tx_niov) {
			rc = ksocknal_send_iov(conn, tx);
		} else {
			rc = ksocknal_send_kiov(conn, tx);
		}

		bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
		if (rc > 0) /* sent something? */
			conn->ksnc_tx_bufnob += rc; /* account it */

		if (bufnob < conn->ksnc_tx_bufnob) {
			/*
			 * allocated send buffer bytes < computed; infer
			 * something got ACKed
			 */
			conn->ksnc_tx_deadline =
				cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
			conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
			conn->ksnc_tx_bufnob = bufnob;
			mb();
		}

		if (rc <= 0) { /* Didn't write anything? */
			if (!rc) /* some stacks return 0 instead of -EAGAIN */
				rc = -EAGAIN;

			/* Check if EAGAIN is due to memory pressure */
			if (rc == -EAGAIN && ksocknal_lib_memory_pressure(conn))
				rc = -ENOMEM;

			break;
		}

		/* socket's wmem_queued now includes 'rc' bytes */
		atomic_sub(rc, &conn->ksnc_tx_nob);
		rc = 0;
	} while (tx->tx_resid);

	ksocknal_connsock_decref(conn);
	return rc;
}

static int
ksocknal_recv_iov(struct ksock_conn *conn)
{
	struct kvec *iov = conn->ksnc_rx_iov;
	int nob;
	int rc;

	LASSERT(conn->ksnc_rx_niov > 0);

	/*
	 * Never touch conn->ksnc_rx_iov or change connection
	 * status inside ksocknal_lib_recv_iov
	 */
	rc = ksocknal_lib_recv_iov(conn);

	if (rc <= 0)
		return rc;

	/* received something... */
	nob = rc;

	conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
	conn->ksnc_rx_deadline =
		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
	mb(); /* order with setting rx_started */
	conn->ksnc_rx_started = 1;

	conn->ksnc_rx_nob_wanted -= nob;
	conn->ksnc_rx_nob_left -= nob;

	do {
		LASSERT(conn->ksnc_rx_niov > 0);

		if (nob < (int)iov->iov_len) {
			iov->iov_len -= nob;
			iov->iov_base += nob;
			return -EAGAIN;
		}

		nob -= iov->iov_len;
		conn->ksnc_rx_iov = ++iov;
		conn->ksnc_rx_niov--;
	} while (nob);

	return rc;
}

Phong Tranf9cd474f2014-08-19 22:45:50 +0700296static int
James Simmonsff13fd42016-06-10 16:14:23 -0400297ksocknal_recv_kiov(struct ksock_conn *conn)
Peng Taod7e09d02013-05-02 16:46:55 +0800298{
Mike Shuey97d10d02015-05-19 10:14:37 -0400299 lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
300 int nob;
301 int rc;
Mike Rapoport50ffcb72015-10-13 16:03:40 +0300302
Mike Shuey97d10d02015-05-19 10:14:37 -0400303 LASSERT(conn->ksnc_rx_nkiov > 0);
Peng Taod7e09d02013-05-02 16:46:55 +0800304
James Simmons4420cfd2016-02-12 12:06:00 -0500305 /*
306 * Never touch conn->ksnc_rx_kiov or change connection
307 * status inside ksocknal_lib_recv_iov
308 */
Peng Taod7e09d02013-05-02 16:46:55 +0800309 rc = ksocknal_lib_recv_kiov(conn);
310
311 if (rc <= 0)
Masaru Nomura71397092014-05-15 18:54:05 +0100312 return rc;
Peng Taod7e09d02013-05-02 16:46:55 +0800313
314 /* received something... */
315 nob = rc;
316
317 conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
318 conn->ksnc_rx_deadline =
319 cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
320 mb(); /* order with setting rx_started */
321 conn->ksnc_rx_started = 1;
322
323 conn->ksnc_rx_nob_wanted -= nob;
324 conn->ksnc_rx_nob_left -= nob;
325
326 do {
Mike Shuey97d10d02015-05-19 10:14:37 -0400327 LASSERT(conn->ksnc_rx_nkiov > 0);
Peng Taod7e09d02013-05-02 16:46:55 +0800328
Oleg Drokin9797fb02016-06-18 23:53:12 -0400329 if (nob < (int)kiov->kiov_len) {
Peng Taod7e09d02013-05-02 16:46:55 +0800330 kiov->kiov_offset += nob;
331 kiov->kiov_len -= nob;
332 return -EAGAIN;
333 }
334
335 nob -= kiov->kiov_len;
336 conn->ksnc_rx_kiov = ++kiov;
337 conn->ksnc_rx_nkiov--;
James Simmons5fd88332016-02-12 12:06:09 -0500338 } while (nob);
Peng Taod7e09d02013-05-02 16:46:55 +0800339
340 return 1;
341}
342
static int
ksocknal_receive(struct ksock_conn *conn)
{
	/*
	 * Return 1 on success, 0 on EOF, < 0 on error.
	 * Caller checks ksnc_rx_nob_wanted to determine
	 * progress/completion.
	 */
	int rc;

	if (ksocknal_data.ksnd_stall_rx) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
	}

	rc = ksocknal_connsock_addref(conn);
	if (rc) {
		LASSERT(conn->ksnc_closing);
		return -ESHUTDOWN;
	}

	for (;;) {
		if (conn->ksnc_rx_niov)
			rc = ksocknal_recv_iov(conn);
		else
			rc = ksocknal_recv_kiov(conn);

		if (rc <= 0) {
			/* error/EOF or partial receive */
			if (rc == -EAGAIN) {
				rc = 1;
			} else if (!rc && conn->ksnc_rx_started) {
				/* EOF in the middle of a message */
				rc = -EPROTO;
			}
			break;
		}

		/* Completed a fragment */

		if (!conn->ksnc_rx_nob_wanted) {
			rc = 1;
			break;
		}
	}

	ksocknal_connsock_decref(conn);
	return rc;
}

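/*
 * Finalize a tx: drop its conn ref, recycle the descriptor and, if it
 * carried an lnet message, report the outcome to LNet via
 * lnet_finalize() (0 on a full send, -EIO on residue or a ZC abort).
 */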
void
ksocknal_tx_done(lnet_ni_t *ni, struct ksock_tx *tx)
{
	lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
	int rc = (!tx->tx_resid && !tx->tx_zc_aborted) ? 0 : -EIO;

	LASSERT(ni || tx->tx_conn);

	if (tx->tx_conn)
		ksocknal_conn_decref(tx->tx_conn);

	if (!ni && tx->tx_conn)
		ni = tx->tx_conn->ksnc_peer->ksnp_ni;

	ksocknal_free_tx(tx);
	if (lnetmsg) /* KSOCK_MSG_NOOP go without lnetmsg */
		lnet_finalize(ni, lnetmsg, rc);
}

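/*
 * Complete every tx still queued on 'txlist' with 'error'; used when
 * the queue's owner (conn or peer) is being torn down.
 */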
void
ksocknal_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int error)
{
	struct ksock_tx *tx;

	while (!list_empty(txlist)) {
		tx = list_entry(txlist->next, struct ksock_tx, tx_list);

		if (error && tx->tx_lnetmsg) {
			CNETERR("Deleting packet type %d len %d %s->%s\n",
				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
				le32_to_cpu(tx->tx_lnetmsg->msg_hdr.payload_length),
				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)),
				libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid)));
		} else if (error) {
			CNETERR("Deleting noop packet\n");
		}

		list_del(&tx->tx_list);

		LASSERT(atomic_read(&tx->tx_refcount) == 1);
		ksocknal_tx_done(ni, tx);
	}
}

static void
ksocknal_check_zc_req(struct ksock_tx *tx)
{
	struct ksock_conn *conn = tx->tx_conn;
	struct ksock_peer *peer = conn->ksnc_peer;

	/*
	 * Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx
	 * to ksnp_zc_req_list if some fragment of this message should be sent
	 * zero-copy. Our peer will send an ACK containing this cookie when
	 * she has received this message to tell us we can signal completion.
	 * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on
	 * ksnp_zc_req_list.
	 */
	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 1;

	if (conn->ksnc_proto == &ksocknal_protocol_v1x ||
	    !conn->ksnc_zc_capable)
		return;

	/*
	 * assign cookie and queue tx to pending list, it will be released when
	 * a matching ack is received. See ksocknal_handle_zcack()
	 */
	ksocknal_tx_addref(tx);

	spin_lock(&peer->ksnp_lock);

	/* ZC_REQ is going to be pinned to the peer */
	tx->tx_deadline =
		cfs_time_shift(*ksocknal_tunables.ksnd_timeout);

	LASSERT(!tx->tx_msg.ksm_zc_cookies[0]);

	tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++;

	if (!peer->ksnp_zc_next_cookie)
		peer->ksnp_zc_next_cookie = SOCKNAL_KEEPALIVE_PING + 1;

	list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list);

	spin_unlock(&peer->ksnp_lock);
}

static void
ksocknal_uncheck_zc_req(struct ksock_tx *tx)
{
	struct ksock_peer *peer = tx->tx_conn->ksnc_peer;

	LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP);
	LASSERT(tx->tx_zc_capable);

	tx->tx_zc_checked = 0;

	spin_lock(&peer->ksnp_lock);

	if (!tx->tx_msg.ksm_zc_cookies[0]) {
		/* Not waiting for an ACK */
		spin_unlock(&peer->ksnp_lock);
		return;
	}

	tx->tx_msg.ksm_zc_cookies[0] = 0;
	list_del(&tx->tx_zc_list);

	spin_unlock(&peer->ksnp_lock);

	ksocknal_tx_decref(tx);
}

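/*
 * Push 'tx' on 'conn'. Returns 0 when the tx completes, -EAGAIN when
 * it is partially sent, -ENOMEM after parking the conn on
 * ksnd_enomem_conns for the reaper to retry, or a fatal error after
 * closing the conn.
 */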
static int
ksocknal_process_transmit(struct ksock_conn *conn, struct ksock_tx *tx)
{
	int rc;

	if (tx->tx_zc_capable && !tx->tx_zc_checked)
		ksocknal_check_zc_req(tx);

	rc = ksocknal_transmit(conn, tx);

	CDEBUG(D_NET, "send(%d) %d\n", tx->tx_resid, rc);

	if (!tx->tx_resid) {
		/* Sent everything OK */
		LASSERT(!rc);

		return 0;
	}

	if (rc == -EAGAIN)
		return rc;

	if (rc == -ENOMEM) {
		static int counter;

		counter++; /* exponential backoff warnings */
		if ((counter & (-counter)) == counter)
			CWARN("%u ENOMEM tx %p\n", counter, conn);

		/* Queue on ksnd_enomem_conns for retry after a timeout */
		spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);

		/* enomem list takes over scheduler's ref... */
		LASSERT(conn->ksnc_tx_scheduled);
		list_add_tail(&conn->ksnc_tx_list,
			      &ksocknal_data.ksnd_enomem_conns);
		if (!cfs_time_aftereq(cfs_time_add(cfs_time_current(),
						   SOCKNAL_ENOMEM_RETRY),
				      ksocknal_data.ksnd_reaper_waketime))
			wake_up(&ksocknal_data.ksnd_reaper_waitq);

		spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
		return rc;
	}

	/* Actual error */
	LASSERT(rc < 0);

	if (!conn->ksnc_closing) {
		switch (rc) {
		case -ECONNRESET:
			LCONSOLE_WARN("Host %pI4h reset our connection while we were sending data; it may have rebooted.\n",
				      &conn->ksnc_ipaddr);
			break;
		default:
			LCONSOLE_WARN("There was an unexpected network error while writing to %pI4h: %d.\n",
				      &conn->ksnc_ipaddr, rc);
			break;
		}
		CDEBUG(D_NET, "[%p] Error %d on write to %s ip %pI4h:%d\n",
		       conn, rc,
		       libcfs_id2str(conn->ksnc_peer->ksnp_id),
		       &conn->ksnc_ipaddr,
		       conn->ksnc_port);
	}

	if (tx->tx_zc_checked)
		ksocknal_uncheck_zc_req(tx);

	/* it's not an error if conn is being closed */
	ksocknal_close_conn_and_siblings(conn, (conn->ksnc_closing) ? 0 : rc);

	return rc;
}

static void
ksocknal_launch_connection_locked(struct ksock_route *route)
{
	/* called holding write lock on ksnd_global_lock */

	LASSERT(!route->ksnr_scheduled);
	LASSERT(!route->ksnr_connecting);
	LASSERT(ksocknal_route_mask() & ~route->ksnr_connected);

	route->ksnr_scheduled = 1; /* scheduling conn for connd */
	ksocknal_route_addref(route); /* extra ref for connd */

	spin_lock_bh(&ksocknal_data.ksnd_connd_lock);

	list_add_tail(&route->ksnr_connd_list,
		      &ksocknal_data.ksnd_connd_routes);
	wake_up(&ksocknal_data.ksnd_connd_waitq);

	spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
}

void
ksocknal_launch_all_connections_locked(struct ksock_peer *peer)
{
	struct ksock_route *route;

	/* called holding write lock on ksnd_global_lock */
	for (;;) {
		/* launch any/all connections that need it */
		route = ksocknal_find_connectable_route_locked(peer);
		if (!route)
			return;

		ksocknal_launch_connection_locked(route);
	}
}

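/*
 * Choose the conn to carry 'tx'. The protocol's pro_match_tx() grades
 * each conn as a typed match or a fallback; within each grade the conn
 * with the smallest send backlog wins (round-robin on ties when the
 * tunable is set), and any typed match beats any fallback.
 */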
struct ksock_conn *
ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonblk)
{
	struct list_head *tmp;
	struct ksock_conn *conn;
	struct ksock_conn *typed = NULL;
	struct ksock_conn *fallback = NULL;
	int tnob = 0;
	int fnob = 0;

	list_for_each(tmp, &peer->ksnp_conns) {
		struct ksock_conn *c = list_entry(tmp, struct ksock_conn, ksnc_list);
		int nob = atomic_read(&c->ksnc_tx_nob) +
			  c->ksnc_sock->sk->sk_wmem_queued;
		int rc;

		LASSERT(!c->ksnc_closing);
		LASSERT(c->ksnc_proto &&
			c->ksnc_proto->pro_match_tx);

		rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk);

		switch (rc) {
		default:
			LBUG();
		case SOCKNAL_MATCH_NO: /* protocol rejected the tx */
			continue;

		case SOCKNAL_MATCH_YES: /* typed connection */
			if (!typed || tnob > nob ||
			    (tnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     cfs_time_after(typed->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
				typed = c;
				tnob = nob;
			}
			break;

		case SOCKNAL_MATCH_MAY: /* fallback connection */
			if (!fallback || fnob > nob ||
			    (fnob == nob && *ksocknal_tunables.ksnd_round_robin &&
			     cfs_time_after(fallback->ksnc_tx_last_post, c->ksnc_tx_last_post))) {
				fallback = c;
				fnob = nob;
			}
			break;
		}
	}

	/* prefer the typed selection */
	conn = (typed) ? typed : fallback;

	if (conn)
		conn->ksnc_tx_last_post = cfs_time_current();

	return conn;
}

void
ksocknal_tx_prep(struct ksock_conn *conn, struct ksock_tx *tx)
{
	conn->ksnc_proto->pro_pack(tx);

	atomic_add(tx->tx_nob, &conn->ksnc_tx_nob);
	ksocknal_conn_addref(conn); /* +1 ref for tx */
	tx->tx_conn = conn;
}

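/*
 * Queue a packed tx on 'conn' and wake its scheduler. A NOOP ZC-ACK
 * may be piggybacked on an already-queued normal packet (or vice
 * versa); the noop tx made redundant ends up on kss_zombie_noop_txs.
 */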
void
ksocknal_queue_tx_locked(struct ksock_tx *tx, struct ksock_conn *conn)
{
	struct ksock_sched *sched = conn->ksnc_scheduler;
	ksock_msg_t *msg = &tx->tx_msg;
	struct ksock_tx *ztx = NULL;
	int bufnob = 0;

	/*
	 * called holding global lock (read or irq-write) and caller may
	 * not have dropped this lock between finding conn and calling me,
	 * so we don't need the {get,put}connsock dance to deref
	 * ksnc_sock...
	 */
	LASSERT(!conn->ksnc_closing);

	CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
	       libcfs_id2str(conn->ksnc_peer->ksnp_id),
	       &conn->ksnc_ipaddr, conn->ksnc_port);

	ksocknal_tx_prep(conn, tx);

	/*
	 * Ensure the frags we've been given EXACTLY match the number of
	 * bytes we want to send. Many TCP/IP stacks disregard any total
	 * size parameters passed to them and just look at the frags.
	 *
	 * We always expect at least 1 mapped fragment containing the
	 * complete ksocknal message header.
	 */
	LASSERT(lnet_iov_nob(tx->tx_niov, tx->tx_iov) +
		lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) ==
		(unsigned int)tx->tx_nob);
	LASSERT(tx->tx_niov >= 1);
	LASSERT(tx->tx_resid == tx->tx_nob);

	CDEBUG(D_NET, "Packet %p type %d, nob %d niov %d nkiov %d\n",
	       tx, (tx->tx_lnetmsg) ? tx->tx_lnetmsg->msg_hdr.type :
	       KSOCK_MSG_NOOP,
	       tx->tx_nob, tx->tx_niov, tx->tx_nkiov);

	/*
	 * FIXME: SOCK_WMEM_QUEUED and SOCK_ERROR could block in __DARWIN8__
	 * but they're used inside spinlocks a lot.
	 */
	bufnob = conn->ksnc_sock->sk->sk_wmem_queued;
	spin_lock_bh(&sched->kss_lock);

	if (list_empty(&conn->ksnc_tx_queue) && !bufnob) {
		/* First packet starts the timeout */
		conn->ksnc_tx_deadline =
			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);
		if (conn->ksnc_tx_bufnob > 0) /* something got ACKed */
			conn->ksnc_peer->ksnp_last_alive = cfs_time_current();
		conn->ksnc_tx_bufnob = 0;
		mb(); /* order with adding to tx_queue */
	}

	if (msg->ksm_type == KSOCK_MSG_NOOP) {
		/*
		 * The packet is noop ZC ACK, try to piggyback the ack_cookie
		 * on a normal packet so I don't need to send it
		 */
		LASSERT(msg->ksm_zc_cookies[1]);
		LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);

		if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
			ztx = tx; /* ZC ACK piggybacked on ztx; release tx later */
	} else {
		/*
		 * It's a normal packet - can it piggyback a noop zc-ack that
		 * has been queued already?
		 */
		LASSERT(!msg->ksm_zc_cookies[1]);
		LASSERT(conn->ksnc_proto->pro_queue_tx_msg);

		ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx);
		/* ztx will be released later */
	}

	if (ztx) {
		atomic_sub(ztx->tx_nob, &conn->ksnc_tx_nob);
		list_add_tail(&ztx->tx_list, &sched->kss_zombie_noop_txs);
	}

	if (conn->ksnc_tx_ready && /* able to send */
	    !conn->ksnc_tx_scheduled) { /* not scheduled to send */
		/* +1 ref for scheduler */
		ksocknal_conn_addref(conn);
		list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
		conn->ksnc_tx_scheduled = 1;
		wake_up(&sched->kss_waitq);
	}

	spin_unlock_bh(&sched->kss_lock);
}

struct ksock_route *
ksocknal_find_connectable_route_locked(struct ksock_peer *peer)
{
	unsigned long now = cfs_time_current();
	struct list_head *tmp;
	struct ksock_route *route;

	list_for_each(tmp, &peer->ksnp_routes) {
		route = list_entry(tmp, struct ksock_route, ksnr_list);

		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

		if (route->ksnr_scheduled) /* connections being established */
			continue;

		/* all route types connected ? */
		if (!(ksocknal_route_mask() & ~route->ksnr_connected))
			continue;

		if (!(!route->ksnr_retry_interval || /* first attempt */
		      cfs_time_aftereq(now, route->ksnr_timeout))) {
			CDEBUG(D_NET,
			       "Too soon to retry route %pI4h (cnted %d, interval %ld, %ld secs later)\n",
			       &route->ksnr_ipaddr,
			       route->ksnr_connected,
			       route->ksnr_retry_interval,
			       cfs_duration_sec(route->ksnr_timeout - now));
			continue;
		}

		return route;
	}

	return NULL;
}

struct ksock_route *
ksocknal_find_connecting_route_locked(struct ksock_peer *peer)
{
	struct list_head *tmp;
	struct ksock_route *route;

	list_for_each(tmp, &peer->ksnp_routes) {
		route = list_entry(tmp, struct ksock_route, ksnr_list);

		LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);

		if (route->ksnr_scheduled)
			return route;
	}

	return NULL;
}

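/*
 * Bind 'tx' to a conn for peer 'id': queue it on a matching conn if
 * one exists, otherwise create the peer on demand, launch connection
 * attempts and park the tx on ksnp_tx_queue until one completes.
 * Returns 0 once queued, or a negative errno on failure.
 */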
int
ksocknal_launch_packet(lnet_ni_t *ni, struct ksock_tx *tx, lnet_process_id_t id)
{
	struct ksock_peer *peer;
	struct ksock_conn *conn;
	rwlock_t *g_lock;
	int retry;
	int rc;

	LASSERT(!tx->tx_conn);

	g_lock = &ksocknal_data.ksnd_global_lock;

	for (retry = 0;; retry = 1) {
		read_lock(g_lock);
		peer = ksocknal_find_peer_locked(ni, id);
		if (peer) {
			if (!ksocknal_find_connectable_route_locked(peer)) {
				conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
				if (conn) {
					/*
					 * I've got no routes that need to be
					 * connecting and I do have an actual
					 * connection...
					 */
					ksocknal_queue_tx_locked(tx, conn);
					read_unlock(g_lock);
					return 0;
				}
			}
		}

		/* I'll need a write lock... */
		read_unlock(g_lock);

		write_lock_bh(g_lock);

		peer = ksocknal_find_peer_locked(ni, id);
		if (peer)
			break;

		write_unlock_bh(g_lock);

		if (id.pid & LNET_PID_USERFLAG) {
			CERROR("Refusing to create a connection to userspace process %s\n",
			       libcfs_id2str(id));
			return -EHOSTUNREACH;
		}

		if (retry) {
			CERROR("Can't find peer %s\n", libcfs_id2str(id));
			return -EHOSTUNREACH;
		}

		rc = ksocknal_add_peer(ni, id,
				       LNET_NIDADDR(id.nid),
				       lnet_acceptor_port());
		if (rc) {
			CERROR("Can't add peer %s: %d\n",
			       libcfs_id2str(id), rc);
			return rc;
		}
	}

	ksocknal_launch_all_connections_locked(peer);

	conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk);
	if (conn) {
		/* Connection exists; queue message on it */
		ksocknal_queue_tx_locked(tx, conn);
		write_unlock_bh(g_lock);
		return 0;
	}

	if (peer->ksnp_accepting > 0 ||
	    ksocknal_find_connecting_route_locked(peer)) {
		/* the message is going to be pinned to the peer */
		tx->tx_deadline =
			cfs_time_shift(*ksocknal_tunables.ksnd_timeout);

		/* Queue the message until a connection is established */
		list_add_tail(&tx->tx_list, &peer->ksnp_tx_queue);
		write_unlock_bh(g_lock);
		return 0;
	}

	write_unlock_bh(g_lock);

	/* NB Routes may be ignored if connections to them failed recently */
	CNETERR("No usable routes to %s\n", libcfs_id2str(id));
	return -EHOSTUNREACH;
}

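/*
 * Entry point for outgoing LNet messages (the LND's lnd_send method):
 * wrap 'lntmsg' in a tx sized for its iov or kiov payload (never
 * both), flag large payloads as zero-copy capable, then hand the tx
 * to ksocknal_launch_packet().
 */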
int
ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
	int mpflag = 1;
	int type = lntmsg->msg_type;
	lnet_process_id_t target = lntmsg->msg_target;
	unsigned int payload_niov = lntmsg->msg_niov;
	struct kvec *payload_iov = lntmsg->msg_iov;
	lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
	unsigned int payload_offset = lntmsg->msg_offset;
	unsigned int payload_nob = lntmsg->msg_len;
	struct ksock_tx *tx;
	int desc_size;
	int rc;

	/*
	 * NB 'private' is different depending on what we're sending.
	 * Just ignore it...
	 */
	CDEBUG(D_NET, "sending %u bytes in %d frags to %s\n",
	       payload_nob, payload_niov, libcfs_id2str(target));

	LASSERT(!payload_nob || payload_niov > 0);
	LASSERT(payload_niov <= LNET_MAX_IOV);
	/* payload is either all vaddrs or all pages */
	LASSERT(!(payload_kiov && payload_iov));
	LASSERT(!in_interrupt());

	if (payload_iov)
		desc_size = offsetof(struct ksock_tx,
				     tx_frags.virt.iov[1 + payload_niov]);
	else
		desc_size = offsetof(struct ksock_tx,
				     tx_frags.paged.kiov[payload_niov]);

	if (lntmsg->msg_vmflush)
		mpflag = cfs_memory_pressure_get_and_set();
	tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size);
	if (!tx) {
		CERROR("Can't allocate tx desc type %d size %d\n",
		       type, desc_size);
		if (lntmsg->msg_vmflush)
			cfs_memory_pressure_restore(mpflag);
		return -ENOMEM;
	}

	tx->tx_conn = NULL; /* set when assigned a conn */
	tx->tx_lnetmsg = lntmsg;

	if (payload_iov) {
		tx->tx_kiov = NULL;
		tx->tx_nkiov = 0;
		tx->tx_iov = tx->tx_frags.virt.iov;
		tx->tx_niov = 1 +
			      lnet_extract_iov(payload_niov, &tx->tx_iov[1],
					       payload_niov, payload_iov,
					       payload_offset, payload_nob);
	} else {
		tx->tx_niov = 1;
		tx->tx_iov = &tx->tx_frags.paged.iov;
		tx->tx_kiov = tx->tx_frags.paged.kiov;
		tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov,
						 payload_niov, payload_kiov,
						 payload_offset, payload_nob);

		if (payload_nob >= *ksocknal_tunables.ksnd_zc_min_payload)
			tx->tx_zc_capable = 1;
	}

	socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_LNET);

	/* The first fragment will be set later in pro_pack */
	rc = ksocknal_launch_packet(ni, tx, target);
	if (!mpflag)
		cfs_memory_pressure_restore(mpflag);

	if (!rc)
		return 0;

	ksocknal_free_tx(tx);
	return -EIO;
}

int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
	struct task_struct *task = kthread_run(fn, arg, "%s", name);

	if (IS_ERR(task))
		return PTR_ERR(task);

	write_lock_bh(&ksocknal_data.ksnd_global_lock);
	ksocknal_data.ksnd_nthreads++;
	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
	return 0;
}

void
ksocknal_thread_fini(void)
{
	write_lock_bh(&ksocknal_data.ksnd_global_lock);
	ksocknal_data.ksnd_nthreads--;
	write_unlock_bh(&ksocknal_data.ksnd_global_lock);
}

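/*
 * Set up conn to receive the next packet. With nob_to_skip == 0 the
 * rx iov is aimed at the next protocol header and 1 is returned;
 * otherwise the iov is pointed at a static slop buffer to discard
 * nob_to_skip bytes and 0 is returned (we get called again if the
 * slop needs more iov entries than fit).
 */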
int
ksocknal_new_packet(struct ksock_conn *conn, int nob_to_skip)
{
	static char ksocknal_slop_buffer[4096];

	int nob;
	unsigned int niov;
	int skipped;

	LASSERT(conn->ksnc_proto);

	if (*ksocknal_tunables.ksnd_eager_ack & conn->ksnc_type) {
		/* Remind the socket to ack eagerly... */
		ksocknal_lib_eager_ack(conn);
	}

	if (!nob_to_skip) { /* right at next packet boundary now */
		conn->ksnc_rx_started = 0;
		mb(); /* racing with timeout thread */

		switch (conn->ksnc_proto->pro_version) {
		case KSOCK_PROTO_V2:
		case KSOCK_PROTO_V3:
			conn->ksnc_rx_state = SOCKNAL_RX_KSM_HEADER;
			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg;

			conn->ksnc_rx_nob_wanted = offsetof(ksock_msg_t, ksm_u);
			conn->ksnc_rx_nob_left = offsetof(ksock_msg_t, ksm_u);
			conn->ksnc_rx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u);
			break;

		case KSOCK_PROTO_V1:
			/* Receiving bare lnet_hdr_t */
			conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
			conn->ksnc_rx_nob_wanted = sizeof(lnet_hdr_t);
			conn->ksnc_rx_nob_left = sizeof(lnet_hdr_t);

			conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
			conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
			conn->ksnc_rx_iov[0].iov_len = sizeof(lnet_hdr_t);
			break;

		default:
			LBUG();
		}
		conn->ksnc_rx_niov = 1;

		conn->ksnc_rx_kiov = NULL;
		conn->ksnc_rx_nkiov = 0;
		conn->ksnc_rx_csum = ~0;
		return 1;
	}

	/*
	 * Set up to skip as much as possible now. If there's more left
	 * (ran out of iov entries) we'll get called again
	 */
	conn->ksnc_rx_state = SOCKNAL_RX_SLOP;
	conn->ksnc_rx_nob_left = nob_to_skip;
	conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
	skipped = 0;
	niov = 0;

	do {
		nob = min_t(int, nob_to_skip, sizeof(ksocknal_slop_buffer));

		conn->ksnc_rx_iov[niov].iov_base = ksocknal_slop_buffer;
		conn->ksnc_rx_iov[niov].iov_len = nob;
		niov++;
		skipped += nob;
		nob_to_skip -= nob;
	} while (nob_to_skip && /* mustn't overflow conn's rx iov */
		 niov < sizeof(conn->ksnc_rx_iov_space) / sizeof(struct iovec));

	conn->ksnc_rx_niov = niov;
	conn->ksnc_rx_kiov = NULL;
	conn->ksnc_rx_nkiov = 0;
	conn->ksnc_rx_nob_wanted = skipped;
	return 0;
}

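/*
 * The conn's receive state machine: read ksock/lnet headers, handle
 * NOOPs and ZC-ACK cookies, pass payloads to lnet_parse() /
 * lnet_finalize(), and skip inter-packet slop. Returns 0 when caught
 * up, -EAGAIN on a short read, or a fatal errno after closing the
 * conn.
 */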
Phong Tranf9cd474f2014-08-19 22:45:50 +07001122static int
James Simmonsff13fd42016-06-10 16:14:23 -04001123ksocknal_process_receive(struct ksock_conn *conn)
Peng Taod7e09d02013-05-02 16:46:55 +08001124{
Mike Shuey97d10d02015-05-19 10:14:37 -04001125 lnet_hdr_t *lhdr;
Peng Taod7e09d02013-05-02 16:46:55 +08001126 lnet_process_id_t *id;
Mike Shuey97d10d02015-05-19 10:14:37 -04001127 int rc;
Peng Taod7e09d02013-05-02 16:46:55 +08001128
James Simmonsb31e64c2016-02-12 12:06:06 -05001129 LASSERT(atomic_read(&conn->ksnc_conn_refcount) > 0);
Peng Taod7e09d02013-05-02 16:46:55 +08001130
1131 /* NB: sched lock NOT held */
Masanari Iida2b284322013-08-25 10:10:14 +09001132 /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
Mike Shuey97d10d02015-05-19 10:14:37 -04001133 LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
1134 conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
1135 conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
1136 conn->ksnc_rx_state == SOCKNAL_RX_SLOP);
Peng Taod7e09d02013-05-02 16:46:55 +08001137 again:
James Simmons5fd88332016-02-12 12:06:09 -05001138 if (conn->ksnc_rx_nob_wanted) {
Peng Taod7e09d02013-05-02 16:46:55 +08001139 rc = ksocknal_receive(conn);
1140
1141 if (rc <= 0) {
James Simmonsb31e64c2016-02-12 12:06:06 -05001142 LASSERT(rc != -EAGAIN);
Peng Taod7e09d02013-05-02 16:46:55 +08001143
James Simmons5fd88332016-02-12 12:06:09 -05001144 if (!rc)
Joe Perches2d00bd12014-11-23 11:28:50 -08001145 CDEBUG(D_NET, "[%p] EOF from %s ip %pI4h:%d\n",
1146 conn,
1147 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1148 &conn->ksnc_ipaddr,
1149 conn->ksnc_port);
Peng Taod7e09d02013-05-02 16:46:55 +08001150 else if (!conn->ksnc_closing)
Joe Perches2d00bd12014-11-23 11:28:50 -08001151 CERROR("[%p] Error %d on read from %s ip %pI4h:%d\n",
1152 conn, rc,
1153 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1154 &conn->ksnc_ipaddr,
1155 conn->ksnc_port);
Peng Taod7e09d02013-05-02 16:46:55 +08001156
1157 /* it's not an error if conn is being closed */
James Simmonsb31e64c2016-02-12 12:06:06 -05001158 ksocknal_close_conn_and_siblings(conn,
1159 (conn->ksnc_closing) ? 0 : rc);
James Simmons5fd88332016-02-12 12:06:09 -05001160 return (!rc ? -ESHUTDOWN : rc);
Peng Taod7e09d02013-05-02 16:46:55 +08001161 }
1162
James Simmons5fd88332016-02-12 12:06:09 -05001163 if (conn->ksnc_rx_nob_wanted) {
Peng Taod7e09d02013-05-02 16:46:55 +08001164 /* short read */
Masaru Nomura71397092014-05-15 18:54:05 +01001165 return -EAGAIN;
Peng Taod7e09d02013-05-02 16:46:55 +08001166 }
1167 }
1168 switch (conn->ksnc_rx_state) {
1169 case SOCKNAL_RX_KSM_HEADER:
1170 if (conn->ksnc_flip) {
1171 __swab32s(&conn->ksnc_msg.ksm_type);
1172 __swab32s(&conn->ksnc_msg.ksm_csum);
1173 __swab64s(&conn->ksnc_msg.ksm_zc_cookies[0]);
1174 __swab64s(&conn->ksnc_msg.ksm_zc_cookies[1]);
1175 }
1176
1177 if (conn->ksnc_msg.ksm_type != KSOCK_MSG_NOOP &&
1178 conn->ksnc_msg.ksm_type != KSOCK_MSG_LNET) {
1179 CERROR("%s: Unknown message type: %x\n",
1180 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1181 conn->ksnc_msg.ksm_type);
1182 ksocknal_new_packet(conn, 0);
1183 ksocknal_close_conn_and_siblings(conn, -EPROTO);
Masaru Nomura71397092014-05-15 18:54:05 +01001184 return -EPROTO;
Peng Taod7e09d02013-05-02 16:46:55 +08001185 }
1186
1187 if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP &&
James Simmons5fd88332016-02-12 12:06:09 -05001188 conn->ksnc_msg.ksm_csum && /* has checksum */
Peng Taod7e09d02013-05-02 16:46:55 +08001189 conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
1190 /* NOOP Checksum error */
1191 CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
1192 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1193 conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
1194 ksocknal_new_packet(conn, 0);
1195 ksocknal_close_conn_and_siblings(conn, -EPROTO);
Masaru Nomura71397092014-05-15 18:54:05 +01001196 return -EIO;
Peng Taod7e09d02013-05-02 16:46:55 +08001197 }
1198
James Simmons5fd88332016-02-12 12:06:09 -05001199 if (conn->ksnc_msg.ksm_zc_cookies[1]) {
Peng Taod7e09d02013-05-02 16:46:55 +08001200 __u64 cookie = 0;
1201
James Simmonsb31e64c2016-02-12 12:06:06 -05001202 LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
Peng Taod7e09d02013-05-02 16:46:55 +08001203
1204 if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP)
1205 cookie = conn->ksnc_msg.ksm_zc_cookies[0];
1206
1207 rc = conn->ksnc_proto->pro_handle_zcack(conn, cookie,
1208 conn->ksnc_msg.ksm_zc_cookies[1]);
1209
James Simmons5fd88332016-02-12 12:06:09 -05001210 if (rc) {
Greg Kroah-Hartmanb0f5aad2014-07-12 20:06:04 -07001211 CERROR("%s: Unknown ZC-ACK cookie: %llu, %llu\n",
Peng Taod7e09d02013-05-02 16:46:55 +08001212 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1213 cookie, conn->ksnc_msg.ksm_zc_cookies[1]);
1214 ksocknal_new_packet(conn, 0);
1215 ksocknal_close_conn_and_siblings(conn, -EPROTO);
Masaru Nomura71397092014-05-15 18:54:05 +01001216 return rc;
Peng Taod7e09d02013-05-02 16:46:55 +08001217 }
1218 }
1219
1220 if (conn->ksnc_msg.ksm_type == KSOCK_MSG_NOOP) {
James Simmonsb31e64c2016-02-12 12:06:06 -05001221 ksocknal_new_packet(conn, 0);
Peng Taod7e09d02013-05-02 16:46:55 +08001222 return 0; /* NOOP is done and just return */
1223 }
1224
1225 conn->ksnc_rx_state = SOCKNAL_RX_LNET_HEADER;
1226 conn->ksnc_rx_nob_wanted = sizeof(ksock_lnet_msg_t);
1227 conn->ksnc_rx_nob_left = sizeof(ksock_lnet_msg_t);
1228
Al Virof351bad2014-12-02 17:15:37 +00001229 conn->ksnc_rx_iov = (struct kvec *)&conn->ksnc_rx_iov_space;
1230 conn->ksnc_rx_iov[0].iov_base = &conn->ksnc_msg.ksm_u.lnetmsg;
Peng Taod7e09d02013-05-02 16:46:55 +08001231 conn->ksnc_rx_iov[0].iov_len = sizeof(ksock_lnet_msg_t);
1232
1233 conn->ksnc_rx_niov = 1;
1234 conn->ksnc_rx_kiov = NULL;
1235 conn->ksnc_rx_nkiov = 0;
1236
1237 goto again; /* read lnet header now */
1238
1239 case SOCKNAL_RX_LNET_HEADER:
1240 /* unpack message header */
1241 conn->ksnc_proto->pro_unpack(&conn->ksnc_msg);
1242
James Simmons5fd88332016-02-12 12:06:09 -05001243 if (conn->ksnc_peer->ksnp_id.pid & LNET_PID_USERFLAG) {
Peng Taod7e09d02013-05-02 16:46:55 +08001244 /* Userspace peer */
1245 lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
Mike Shuey97d10d02015-05-19 10:14:37 -04001246 id = &conn->ksnc_peer->ksnp_id;
Peng Taod7e09d02013-05-02 16:46:55 +08001247
1248 /* Substitute process ID assigned at connection time */
1249 lhdr->src_pid = cpu_to_le32(id->pid);
1250 lhdr->src_nid = cpu_to_le64(id->nid);
1251 }
1252
1253 conn->ksnc_rx_state = SOCKNAL_RX_PARSE;
1254 ksocknal_conn_addref(conn); /* ++ref while parsing */
1255
1256 rc = lnet_parse(conn->ksnc_peer->ksnp_ni,
1257 &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr,
1258 conn->ksnc_peer->ksnp_id.nid, conn, 0);
1259 if (rc < 0) {
1260 /* I just received garbage: give up on this conn */
1261 ksocknal_new_packet(conn, 0);
James Simmonsb31e64c2016-02-12 12:06:06 -05001262 ksocknal_close_conn_and_siblings(conn, rc);
Peng Taod7e09d02013-05-02 16:46:55 +08001263 ksocknal_conn_decref(conn);
Masaru Nomura71397092014-05-15 18:54:05 +01001264 return -EPROTO;
Peng Taod7e09d02013-05-02 16:46:55 +08001265 }
1266
1267 /* I'm racing with ksocknal_recv() */
James Simmonsb31e64c2016-02-12 12:06:06 -05001268 LASSERT(conn->ksnc_rx_state == SOCKNAL_RX_PARSE ||
1269 conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD);
Peng Taod7e09d02013-05-02 16:46:55 +08001270
1271 if (conn->ksnc_rx_state != SOCKNAL_RX_LNET_PAYLOAD)
1272 return 0;
1273
1274 /* ksocknal_recv() got called */
1275 goto again;
1276
1277 case SOCKNAL_RX_LNET_PAYLOAD:
1278 /* payload all received */
1279 rc = 0;
1280
James Simmons5fd88332016-02-12 12:06:09 -05001281 if (!conn->ksnc_rx_nob_left && /* not truncating */
1282 conn->ksnc_msg.ksm_csum && /* has checksum */
Peng Taod7e09d02013-05-02 16:46:55 +08001283 conn->ksnc_msg.ksm_csum != conn->ksnc_rx_csum) {
1284 CERROR("%s: Checksum error, wire:0x%08X data:0x%08X\n",
1285 libcfs_id2str(conn->ksnc_peer->ksnp_id),
1286 conn->ksnc_msg.ksm_csum, conn->ksnc_rx_csum);
1287 rc = -EIO;
1288 }
1289
James Simmons5fd88332016-02-12 12:06:09 -05001290 if (!rc && conn->ksnc_msg.ksm_zc_cookies[0]) {
Peng Taod7e09d02013-05-02 16:46:55 +08001291 LASSERT(conn->ksnc_proto != &ksocknal_protocol_v1x);
1292
1293 lhdr = &conn->ksnc_msg.ksm_u.lnetmsg.ksnm_hdr;
Mike Shuey97d10d02015-05-19 10:14:37 -04001294 id = &conn->ksnc_peer->ksnp_id;
Peng Taod7e09d02013-05-02 16:46:55 +08001295
1296 rc = conn->ksnc_proto->pro_handle_zcreq(conn,
1297 conn->ksnc_msg.ksm_zc_cookies[0],
1298 *ksocknal_tunables.ksnd_nonblk_zcack ||
1299 le64_to_cpu(lhdr->src_nid) != id->nid);
1300 }
1301
1302 lnet_finalize(conn->ksnc_peer->ksnp_ni, conn->ksnc_cookie, rc);
1303
James Simmons5fd88332016-02-12 12:06:09 -05001304 if (rc) {
Peng Taod7e09d02013-05-02 16:46:55 +08001305 ksocknal_new_packet(conn, 0);
James Simmonsb31e64c2016-02-12 12:06:06 -05001306 ksocknal_close_conn_and_siblings(conn, rc);
Masaru Nomura71397092014-05-15 18:54:05 +01001307 return -EPROTO;
Peng Taod7e09d02013-05-02 16:46:55 +08001308 }
1309 /* Fall through */
1310
1311 case SOCKNAL_RX_SLOP:
1312 /* starting new packet? */
James Simmonsb31e64c2016-02-12 12:06:06 -05001313 if (ksocknal_new_packet(conn, conn->ksnc_rx_nob_left))
Peng Taod7e09d02013-05-02 16:46:55 +08001314 return 0; /* come back later */
1315 goto again; /* try to finish reading slop now */
1316
1317 default:
1318 break;
1319 }
1320
1321 /* Not Reached */
Mike Shuey97d10d02015-05-19 10:14:37 -04001322 LBUG();
Masaru Nomura71397092014-05-15 18:54:05 +01001323 return -EINVAL; /* keep gcc happy */
Peng Taod7e09d02013-05-02 16:46:55 +08001324}
1325
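/*
 * Illustrative sketch (not from the original source): the receive path
 * above is a state machine keyed on ksnc_rx_state.  Assuming the states
 * behave as the switch above describes, a normal V2.x/V3.x message
 * flows roughly as:
 *
 *   ksock_msg header (handled earlier in this function: checksum
 *   verification and ZC-ACK dispatch)
 *     -> SOCKNAL_RX_LNET_HEADER   pro_unpack(), lnet_parse()
 *     -> SOCKNAL_RX_PARSE[_WAIT]  waiting for ksocknal_recv() to
 *                                 supply the payload buffers
 *     -> SOCKNAL_RX_LNET_PAYLOAD  payload checksum, ZC-REQ handling,
 *                                 lnet_finalize()
 *     -> SOCKNAL_RX_SLOP          discard any truncated remainder,
 *                                 then start on the next packet
 *
 * A KSOCK_MSG_NOOP short-circuits after the header via
 * ksocknal_new_packet().
 */
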
1326int
James Simmonsb31e64c2016-02-12 12:06:06 -05001327ksocknal_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
1328 unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
1329 unsigned int offset, unsigned int mlen, unsigned int rlen)
Peng Taod7e09d02013-05-02 16:46:55 +08001330{
James Simmonsff13fd42016-06-10 16:14:23 -04001331 struct ksock_conn *conn = private;
1332 struct ksock_sched *sched = conn->ksnc_scheduler;
Peng Taod7e09d02013-05-02 16:46:55 +08001333
Mike Shuey97d10d02015-05-19 10:14:37 -04001334 LASSERT(mlen <= rlen);
1335 LASSERT(niov <= LNET_MAX_IOV);
Peng Taod7e09d02013-05-02 16:46:55 +08001336
1337 conn->ksnc_cookie = msg;
1338 conn->ksnc_rx_nob_wanted = mlen;
Mike Shuey97d10d02015-05-19 10:14:37 -04001339 conn->ksnc_rx_nob_left = rlen;
Peng Taod7e09d02013-05-02 16:46:55 +08001340
James Simmons5fd88332016-02-12 12:06:09 -05001341 if (!mlen || iov) {
Peng Taod7e09d02013-05-02 16:46:55 +08001342 conn->ksnc_rx_nkiov = 0;
1343 conn->ksnc_rx_kiov = NULL;
1344 conn->ksnc_rx_iov = conn->ksnc_rx_iov_space.iov;
1345 conn->ksnc_rx_niov =
1346 lnet_extract_iov(LNET_MAX_IOV, conn->ksnc_rx_iov,
1347 niov, iov, offset, mlen);
1348 } else {
1349 conn->ksnc_rx_niov = 0;
Mike Shuey97d10d02015-05-19 10:14:37 -04001350 conn->ksnc_rx_iov = NULL;
Peng Taod7e09d02013-05-02 16:46:55 +08001351 conn->ksnc_rx_kiov = conn->ksnc_rx_iov_space.kiov;
1352 conn->ksnc_rx_nkiov =
1353 lnet_extract_kiov(LNET_MAX_IOV, conn->ksnc_rx_kiov,
1354 niov, kiov, offset, mlen);
1355 }
1356
Mike Shuey97d10d02015-05-19 10:14:37 -04001357 LASSERT(mlen ==
1358 lnet_iov_nob(conn->ksnc_rx_niov, conn->ksnc_rx_iov) +
1359 lnet_kiov_nob(conn->ksnc_rx_nkiov, conn->ksnc_rx_kiov));
Peng Taod7e09d02013-05-02 16:46:55 +08001360
Mike Shuey97d10d02015-05-19 10:14:37 -04001361 LASSERT(conn->ksnc_rx_scheduled);
Peng Taod7e09d02013-05-02 16:46:55 +08001362
1363 spin_lock_bh(&sched->kss_lock);
1364
1365 switch (conn->ksnc_rx_state) {
1366 case SOCKNAL_RX_PARSE_WAIT:
1367 list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
James Simmonsb31e64c2016-02-12 12:06:06 -05001368 wake_up(&sched->kss_waitq);
1369 LASSERT(conn->ksnc_rx_ready);
Peng Taod7e09d02013-05-02 16:46:55 +08001370 break;
1371
1372 case SOCKNAL_RX_PARSE:
1373 /* scheduler hasn't noticed I'm parsing yet */
1374 break;
1375 }
1376
1377 conn->ksnc_rx_state = SOCKNAL_RX_LNET_PAYLOAD;
1378
1379 spin_unlock_bh(&sched->kss_lock);
1380 ksocknal_conn_decref(conn);
1381 return 0;
1382}
1383
1384static inline int
James Simmonsff13fd42016-06-10 16:14:23 -04001385ksocknal_sched_cansleep(struct ksock_sched *sched)
Peng Taod7e09d02013-05-02 16:46:55 +08001386{
Mike Shuey97d10d02015-05-19 10:14:37 -04001387 int rc;
Peng Taod7e09d02013-05-02 16:46:55 +08001388
1389 spin_lock_bh(&sched->kss_lock);
1390
Haneen Mohammedb6ee3822015-03-13 20:48:53 +03001391 rc = !ksocknal_data.ksnd_shuttingdown &&
Peng Taod7e09d02013-05-02 16:46:55 +08001392 list_empty(&sched->kss_rx_conns) &&
Haneen Mohammedb6ee3822015-03-13 20:48:53 +03001393 list_empty(&sched->kss_tx_conns);
Peng Taod7e09d02013-05-02 16:46:55 +08001394
1395 spin_unlock_bh(&sched->kss_lock);
1396 return rc;
1397}
1398
1399int ksocknal_scheduler(void *arg)
1400{
Mike Shuey97d10d02015-05-19 10:14:37 -04001401 struct ksock_sched_info *info;
James Simmonsff13fd42016-06-10 16:14:23 -04001402 struct ksock_sched *sched;
1403 struct ksock_conn *conn;
1404 struct ksock_tx *tx;
Mike Shuey97d10d02015-05-19 10:14:37 -04001405 int rc;
1406 int nloops = 0;
1407 long id = (long)arg;
Peng Taod7e09d02013-05-02 16:46:55 +08001408
1409 info = ksocknal_data.ksnd_sched_info[KSOCK_THREAD_CPT(id)];
1410 sched = &info->ksi_scheds[KSOCK_THREAD_SID(id)];
1411
1412 cfs_block_allsigs();
1413
1414 rc = cfs_cpt_bind(lnet_cpt_table(), info->ksi_cpt);
James Simmons5fd88332016-02-12 12:06:09 -05001415 if (rc) {
Peng Taod7e09d02013-05-02 16:46:55 +08001416 CERROR("Can't set CPT affinity to %d: %d\n",
1417 info->ksi_cpt, rc);
1418 }
1419
1420 spin_lock_bh(&sched->kss_lock);
1421
1422 while (!ksocknal_data.ksnd_shuttingdown) {
1423 int did_something = 0;
1424
1425 /* Ensure I progress everything semi-fairly */
1426
James Simmonsb31e64c2016-02-12 12:06:06 -05001427 if (!list_empty(&sched->kss_rx_conns)) {
Peng Taod7e09d02013-05-02 16:46:55 +08001428 conn = list_entry(sched->kss_rx_conns.next,
James Simmonsff13fd42016-06-10 16:14:23 -04001429 struct ksock_conn, ksnc_rx_list);
Peng Taod7e09d02013-05-02 16:46:55 +08001430 list_del(&conn->ksnc_rx_list);
1431
1432 LASSERT(conn->ksnc_rx_scheduled);
1433 LASSERT(conn->ksnc_rx_ready);
1434
James Simmons4420cfd2016-02-12 12:06:00 -05001435 /*
1436 * clear rx_ready in case receive isn't complete.
Peng Taod7e09d02013-05-02 16:46:55 +08001437 * Do it BEFORE we call process_recv, since
1438 * data_ready can set it any time after we release
James Simmons4420cfd2016-02-12 12:06:00 -05001439 * kss_lock.
1440 */
Peng Taod7e09d02013-05-02 16:46:55 +08001441 conn->ksnc_rx_ready = 0;
1442 spin_unlock_bh(&sched->kss_lock);
1443
1444 rc = ksocknal_process_receive(conn);
1445
1446 spin_lock_bh(&sched->kss_lock);
1447
1448 /* I'm the only one that can clear this flag */
1449 LASSERT(conn->ksnc_rx_scheduled);
1450
1451 /* Did process_receive get everything it wanted? */
James Simmons5fd88332016-02-12 12:06:09 -05001452 if (!rc)
Peng Taod7e09d02013-05-02 16:46:55 +08001453 conn->ksnc_rx_ready = 1;
1454
1455 if (conn->ksnc_rx_state == SOCKNAL_RX_PARSE) {
James Simmons4420cfd2016-02-12 12:06:00 -05001456 /*
 1457			 * Conn blocked waiting for ksocknal_recv();
Peng Taod7e09d02013-05-02 16:46:55 +08001458 * I change its state (under lock) to signal
James Simmons4420cfd2016-02-12 12:06:00 -05001459 * it can be rescheduled
1460 */
Peng Taod7e09d02013-05-02 16:46:55 +08001461 conn->ksnc_rx_state = SOCKNAL_RX_PARSE_WAIT;
1462 } else if (conn->ksnc_rx_ready) {
1463 /* reschedule for rx */
James Simmonsc314c312016-02-12 12:06:01 -05001464 list_add_tail(&conn->ksnc_rx_list,
1465 &sched->kss_rx_conns);
Peng Taod7e09d02013-05-02 16:46:55 +08001466 } else {
1467 conn->ksnc_rx_scheduled = 0;
1468 /* drop my ref */
1469 ksocknal_conn_decref(conn);
1470 }
1471
1472 did_something = 1;
1473 }
1474
James Simmonsb31e64c2016-02-12 12:06:06 -05001475 if (!list_empty(&sched->kss_tx_conns)) {
Mike Shuey97d10d02015-05-19 10:14:37 -04001476 LIST_HEAD(zlist);
Peng Taod7e09d02013-05-02 16:46:55 +08001477
1478 if (!list_empty(&sched->kss_zombie_noop_txs)) {
James Simmonsc314c312016-02-12 12:06:01 -05001479 list_add(&zlist, &sched->kss_zombie_noop_txs);
Peng Taod7e09d02013-05-02 16:46:55 +08001480 list_del_init(&sched->kss_zombie_noop_txs);
1481 }
1482
1483 conn = list_entry(sched->kss_tx_conns.next,
James Simmonsff13fd42016-06-10 16:14:23 -04001484 struct ksock_conn, ksnc_tx_list);
James Simmonsb31e64c2016-02-12 12:06:06 -05001485 list_del(&conn->ksnc_tx_list);
Peng Taod7e09d02013-05-02 16:46:55 +08001486
1487 LASSERT(conn->ksnc_tx_scheduled);
1488 LASSERT(conn->ksnc_tx_ready);
1489 LASSERT(!list_empty(&conn->ksnc_tx_queue));
1490
1491 tx = list_entry(conn->ksnc_tx_queue.next,
James Simmonsff13fd42016-06-10 16:14:23 -04001492 struct ksock_tx, tx_list);
Peng Taod7e09d02013-05-02 16:46:55 +08001493
1494 if (conn->ksnc_tx_carrier == tx)
1495 ksocknal_next_tx_carrier(conn);
1496
1497 /* dequeue now so empty list => more to send */
1498 list_del(&tx->tx_list);
1499
James Simmons4420cfd2016-02-12 12:06:00 -05001500 /*
1501 * Clear tx_ready in case send isn't complete. Do
Peng Taod7e09d02013-05-02 16:46:55 +08001502 * it BEFORE we call process_transmit, since
1503 * write_space can set it any time after we release
James Simmons4420cfd2016-02-12 12:06:00 -05001504 * kss_lock.
1505 */
Peng Taod7e09d02013-05-02 16:46:55 +08001506 conn->ksnc_tx_ready = 0;
1507 spin_unlock_bh(&sched->kss_lock);
1508
1509 if (!list_empty(&zlist)) {
James Simmons4420cfd2016-02-12 12:06:00 -05001510 /*
 1511				 * free zombie noop txs; it's fast because
 1512				 * noop txs are just put back on the freelist
1513 */
Peng Taod7e09d02013-05-02 16:46:55 +08001514 ksocknal_txlist_done(NULL, &zlist, 0);
1515 }
1516
1517 rc = ksocknal_process_transmit(conn, tx);
1518
1519 if (rc == -ENOMEM || rc == -EAGAIN) {
1520 /* Incomplete send: replace tx on HEAD of tx_queue */
1521 spin_lock_bh(&sched->kss_lock);
James Simmonsc314c312016-02-12 12:06:01 -05001522 list_add(&tx->tx_list, &conn->ksnc_tx_queue);
Peng Taod7e09d02013-05-02 16:46:55 +08001523 } else {
1524 /* Complete send; tx -ref */
1525 ksocknal_tx_decref(tx);
1526
1527 spin_lock_bh(&sched->kss_lock);
1528 /* assume space for more */
1529 conn->ksnc_tx_ready = 1;
1530 }
1531
1532 if (rc == -ENOMEM) {
James Simmons4420cfd2016-02-12 12:06:00 -05001533 /*
1534 * Do nothing; after a short timeout, this
1535 * conn will be reposted on kss_tx_conns.
1536 */
Peng Taod7e09d02013-05-02 16:46:55 +08001537 } else if (conn->ksnc_tx_ready &&
Mike Shuey97d10d02015-05-19 10:14:37 -04001538 !list_empty(&conn->ksnc_tx_queue)) {
Peng Taod7e09d02013-05-02 16:46:55 +08001539 /* reschedule for tx */
Mike Shuey97d10d02015-05-19 10:14:37 -04001540 list_add_tail(&conn->ksnc_tx_list,
James Simmonsc314c312016-02-12 12:06:01 -05001541 &sched->kss_tx_conns);
Peng Taod7e09d02013-05-02 16:46:55 +08001542 } else {
1543 conn->ksnc_tx_scheduled = 0;
1544 /* drop my ref */
1545 ksocknal_conn_decref(conn);
1546 }
1547
1548 did_something = 1;
1549 }
1550 if (!did_something || /* nothing to do */
1551 ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
1552 spin_unlock_bh(&sched->kss_lock);
1553
1554 nloops = 0;
1555
1556 if (!did_something) { /* wait for something to do */
James Simmons46ffc932014-09-09 13:39:04 -05001557 rc = wait_event_interruptible_exclusive(
Peng Taod7e09d02013-05-02 16:46:55 +08001558 sched->kss_waitq,
James Simmons46ffc932014-09-09 13:39:04 -05001559 !ksocknal_sched_cansleep(sched));
James Simmons5fd88332016-02-12 12:06:09 -05001560 LASSERT(!rc);
Peng Taod7e09d02013-05-02 16:46:55 +08001561 } else {
1562 cond_resched();
1563 }
1564
1565 spin_lock_bh(&sched->kss_lock);
1566 }
1567 }
1568
1569 spin_unlock_bh(&sched->kss_lock);
1570 ksocknal_thread_fini();
1571 return 0;
1572}
1573
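/*
 * Scheduling sketch (illustrative only, mirroring the loop above): each
 * iteration services at most one receive and one transmit under
 * kss_lock, so neither direction can starve the other, and the thread
 * yields after SOCKNAL_RESCHED consecutive busy passes:
 *
 *   while (!shuttingdown) {
 *           if (!list_empty(kss_rx_conns)) { one receive;  busy = 1; }
 *           if (!list_empty(kss_tx_conns)) { one transmit; busy = 1; }
 *           if (!busy)
 *                   wait_event_interruptible_exclusive(kss_waitq, ...);
 *           else if (++nloops == SOCKNAL_RESCHED)
 *                   cond_resched();
 *   }
 */
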
1574/*
1575 * Add connection to kss_rx_conns of scheduler
1576 * and wakeup the scheduler.
1577 */
James Simmonsff13fd42016-06-10 16:14:23 -04001578void ksocknal_read_callback(struct ksock_conn *conn)
Peng Taod7e09d02013-05-02 16:46:55 +08001579{
James Simmonsff13fd42016-06-10 16:14:23 -04001580 struct ksock_sched *sched;
Peng Taod7e09d02013-05-02 16:46:55 +08001581
1582 sched = conn->ksnc_scheduler;
1583
1584 spin_lock_bh(&sched->kss_lock);
1585
1586 conn->ksnc_rx_ready = 1;
1587
1588 if (!conn->ksnc_rx_scheduled) { /* not being progressed */
James Simmonsc314c312016-02-12 12:06:01 -05001589 list_add_tail(&conn->ksnc_rx_list, &sched->kss_rx_conns);
Peng Taod7e09d02013-05-02 16:46:55 +08001590 conn->ksnc_rx_scheduled = 1;
1591 /* extra ref for scheduler */
1592 ksocknal_conn_addref(conn);
1593
James Simmonsb31e64c2016-02-12 12:06:06 -05001594 wake_up(&sched->kss_waitq);
Peng Taod7e09d02013-05-02 16:46:55 +08001595 }
1596 spin_unlock_bh(&sched->kss_lock);
Peng Taod7e09d02013-05-02 16:46:55 +08001597}
1598
1599/*
1600 * Add connection to kss_tx_conns of scheduler
1601 * and wakeup the scheduler.
1602 */
James Simmonsff13fd42016-06-10 16:14:23 -04001603void ksocknal_write_callback(struct ksock_conn *conn)
Peng Taod7e09d02013-05-02 16:46:55 +08001604{
James Simmonsff13fd42016-06-10 16:14:23 -04001605 struct ksock_sched *sched;
Peng Taod7e09d02013-05-02 16:46:55 +08001606
1607 sched = conn->ksnc_scheduler;
1608
1609 spin_lock_bh(&sched->kss_lock);
1610
1611 conn->ksnc_tx_ready = 1;
1612
Greg Donald995c8b42014-09-01 06:36:14 -05001613 if (!conn->ksnc_tx_scheduled && /* not being progressed */
1614 !list_empty(&conn->ksnc_tx_queue)) { /* packets to send */
James Simmonsc314c312016-02-12 12:06:01 -05001615 list_add_tail(&conn->ksnc_tx_list, &sched->kss_tx_conns);
Peng Taod7e09d02013-05-02 16:46:55 +08001616 conn->ksnc_tx_scheduled = 1;
1617 /* extra ref for scheduler */
1618 ksocknal_conn_addref(conn);
1619
James Simmonsb31e64c2016-02-12 12:06:06 -05001620 wake_up(&sched->kss_waitq);
Peng Taod7e09d02013-05-02 16:46:55 +08001621 }
1622
1623 spin_unlock_bh(&sched->kss_lock);
Peng Taod7e09d02013-05-02 16:46:55 +08001624}
1625
James Simmonsff13fd42016-06-10 16:14:23 -04001626static struct ksock_proto *
James Simmonsb31e64c2016-02-12 12:06:06 -05001627ksocknal_parse_proto_version(ksock_hello_msg_t *hello)
Peng Taod7e09d02013-05-02 16:46:55 +08001628{
Mike Shuey97d10d02015-05-19 10:14:37 -04001629 __u32 version = 0;
Peng Taod7e09d02013-05-02 16:46:55 +08001630
1631 if (hello->kshm_magic == LNET_PROTO_MAGIC)
1632 version = hello->kshm_version;
1633 else if (hello->kshm_magic == __swab32(LNET_PROTO_MAGIC))
1634 version = __swab32(hello->kshm_version);
1635
James Simmons5fd88332016-02-12 12:06:09 -05001636 if (version) {
Peng Taod7e09d02013-05-02 16:46:55 +08001637#if SOCKNAL_VERSION_DEBUG
1638 if (*ksocknal_tunables.ksnd_protocol == 1)
1639 return NULL;
1640
1641 if (*ksocknal_tunables.ksnd_protocol == 2 &&
1642 version == KSOCK_PROTO_V3)
1643 return NULL;
1644#endif
1645 if (version == KSOCK_PROTO_V2)
1646 return &ksocknal_protocol_v2x;
1647
1648 if (version == KSOCK_PROTO_V3)
1649 return &ksocknal_protocol_v3x;
1650
1651 return NULL;
1652 }
1653
1654 if (hello->kshm_magic == le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
1655 lnet_magicversion_t *hmv = (lnet_magicversion_t *)hello;
1656
James Simmonsb31e64c2016-02-12 12:06:06 -05001657 CLASSERT(sizeof(lnet_magicversion_t) ==
1658 offsetof(ksock_hello_msg_t, kshm_src_nid));
Peng Taod7e09d02013-05-02 16:46:55 +08001659
James Simmonsb31e64c2016-02-12 12:06:06 -05001660 if (hmv->version_major == cpu_to_le16(KSOCK_PROTO_V1_MAJOR) &&
1661 hmv->version_minor == cpu_to_le16(KSOCK_PROTO_V1_MINOR))
Peng Taod7e09d02013-05-02 16:46:55 +08001662 return &ksocknal_protocol_v1x;
1663 }
1664
1665 return NULL;
1666}
1667
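/*
 * Worked example (based only on the checks above): a V2/V3 peer leads
 * with LNET_PROTO_MAGIC, which arrives byte-swapped when the peer's
 * endianness differs, so both LNET_PROTO_MAGIC and
 * __swab32(LNET_PROTO_MAGIC) are accepted and kshm_version is swabbed
 * to match.  A V1.x peer instead sends the old lnet_magicversion_t
 * header, which is why that struct must alias the start of
 * ksock_hello_msg_t - the CLASSERT above pins that layout.
 */
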
1668int
James Simmonsff13fd42016-06-10 16:14:23 -04001669ksocknal_send_hello(lnet_ni_t *ni, struct ksock_conn *conn,
James Simmonsb31e64c2016-02-12 12:06:06 -05001670 lnet_nid_t peer_nid, ksock_hello_msg_t *hello)
Peng Taod7e09d02013-05-02 16:46:55 +08001671{
1672 /* CAVEAT EMPTOR: this byte flips 'ipaddrs' */
James Simmonsff13fd42016-06-10 16:14:23 -04001673 struct ksock_net *net = (struct ksock_net *)ni->ni_data;
Peng Taod7e09d02013-05-02 16:46:55 +08001674
Mike Shuey97d10d02015-05-19 10:14:37 -04001675 LASSERT(hello->kshm_nips <= LNET_MAX_INTERFACES);
Peng Taod7e09d02013-05-02 16:46:55 +08001676
1677 /* rely on caller to hold a ref on socket so it wouldn't disappear */
James Simmons06ace262016-02-12 12:06:08 -05001678 LASSERT(conn->ksnc_proto);
Peng Taod7e09d02013-05-02 16:46:55 +08001679
Mike Shuey97d10d02015-05-19 10:14:37 -04001680 hello->kshm_src_nid = ni->ni_nid;
1681 hello->kshm_dst_nid = peer_nid;
1682 hello->kshm_src_pid = the_lnet.ln_pid;
Peng Taod7e09d02013-05-02 16:46:55 +08001683
1684 hello->kshm_src_incarnation = net->ksnn_incarnation;
Mike Shuey97d10d02015-05-19 10:14:37 -04001685 hello->kshm_ctype = conn->ksnc_type;
Peng Taod7e09d02013-05-02 16:46:55 +08001686
1687 return conn->ksnc_proto->pro_send_hello(conn, hello);
1688}
1689
Phong Tranf9cd474f2014-08-19 22:45:50 +07001690static int
Peng Taod7e09d02013-05-02 16:46:55 +08001691ksocknal_invert_type(int type)
1692{
Greg Donald9d0b2b72014-08-22 09:06:15 -05001693 switch (type) {
Peng Taod7e09d02013-05-02 16:46:55 +08001694 case SOCKLND_CONN_ANY:
1695 case SOCKLND_CONN_CONTROL:
Masaru Nomura71397092014-05-15 18:54:05 +01001696 return type;
Peng Taod7e09d02013-05-02 16:46:55 +08001697 case SOCKLND_CONN_BULK_IN:
1698 return SOCKLND_CONN_BULK_OUT;
1699 case SOCKLND_CONN_BULK_OUT:
1700 return SOCKLND_CONN_BULK_IN;
1701 default:
Masaru Nomura71397092014-05-15 18:54:05 +01001702 return SOCKLND_CONN_NONE;
Peng Taod7e09d02013-05-02 16:46:55 +08001703 }
1704}
1705
1706int
James Simmonsff13fd42016-06-10 16:14:23 -04001707ksocknal_recv_hello(lnet_ni_t *ni, struct ksock_conn *conn,
James Simmonsb31e64c2016-02-12 12:06:06 -05001708 ksock_hello_msg_t *hello, lnet_process_id_t *peerid,
1709 __u64 *incarnation)
Peng Taod7e09d02013-05-02 16:46:55 +08001710{
1711 /* Return < 0 fatal error
1712 * 0 success
1713 * EALREADY lost connection race
1714 * EPROTO protocol version mismatch
1715 */
Mike Shuey97d10d02015-05-19 10:14:37 -04001716 struct socket *sock = conn->ksnc_sock;
James Simmons06ace262016-02-12 12:06:08 -05001717 int active = !!conn->ksnc_proto;
Mike Shuey97d10d02015-05-19 10:14:37 -04001718 int timeout;
1719 int proto_match;
1720 int rc;
James Simmonsff13fd42016-06-10 16:14:23 -04001721 struct ksock_proto *proto;
Mike Shuey97d10d02015-05-19 10:14:37 -04001722 lnet_process_id_t recv_id;
Peng Taod7e09d02013-05-02 16:46:55 +08001723
1724 /* socket type set on active connections - not set on passive */
Mike Shuey97d10d02015-05-19 10:14:37 -04001725 LASSERT(!active == !(conn->ksnc_type != SOCKLND_CONN_NONE));
Peng Taod7e09d02013-05-02 16:46:55 +08001726
1727 timeout = active ? *ksocknal_tunables.ksnd_timeout :
1728 lnet_acceptor_timeout();
1729
James Simmonsb31e64c2016-02-12 12:06:06 -05001730 rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout);
James Simmons5fd88332016-02-12 12:06:09 -05001731 if (rc) {
Peng Tao5e8f6922013-07-15 22:27:09 +08001732 CERROR("Error %d reading HELLO from %pI4h\n",
James Simmonsc314c312016-02-12 12:06:01 -05001733 rc, &conn->ksnc_ipaddr);
James Simmonsb31e64c2016-02-12 12:06:06 -05001734 LASSERT(rc < 0);
Peng Taod7e09d02013-05-02 16:46:55 +08001735 return rc;
1736 }
1737
1738 if (hello->kshm_magic != LNET_PROTO_MAGIC &&
1739 hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
James Simmonsb31e64c2016-02-12 12:06:06 -05001740 hello->kshm_magic != le32_to_cpu(LNET_PROTO_TCP_MAGIC)) {
Peng Taod7e09d02013-05-02 16:46:55 +08001741 /* Unexpected magic! */
Joe Perches2d00bd12014-11-23 11:28:50 -08001742 CERROR("Bad magic(1) %#08x (%#08x expected) from %pI4h\n",
James Simmonsb31e64c2016-02-12 12:06:06 -05001743 __cpu_to_le32(hello->kshm_magic),
Joe Perches2d00bd12014-11-23 11:28:50 -08001744 LNET_PROTO_TCP_MAGIC,
1745 &conn->ksnc_ipaddr);
Peng Taod7e09d02013-05-02 16:46:55 +08001746 return -EPROTO;
1747 }
1748
James Simmons1ad6a732015-06-08 22:27:10 -04001749 rc = lnet_sock_read(sock, &hello->kshm_version,
1750 sizeof(hello->kshm_version), timeout);
James Simmons5fd88332016-02-12 12:06:09 -05001751 if (rc) {
Peng Tao5e8f6922013-07-15 22:27:09 +08001752 CERROR("Error %d reading HELLO from %pI4h\n",
James Simmonsc314c312016-02-12 12:06:01 -05001753 rc, &conn->ksnc_ipaddr);
Mike Shuey97d10d02015-05-19 10:14:37 -04001754 LASSERT(rc < 0);
Peng Taod7e09d02013-05-02 16:46:55 +08001755 return rc;
1756 }
1757
1758 proto = ksocknal_parse_proto_version(hello);
James Simmons06ace262016-02-12 12:06:08 -05001759 if (!proto) {
Peng Taod7e09d02013-05-02 16:46:55 +08001760 if (!active) {
1761 /* unknown protocol from peer, tell peer my protocol */
1762 conn->ksnc_proto = &ksocknal_protocol_v3x;
1763#if SOCKNAL_VERSION_DEBUG
1764 if (*ksocknal_tunables.ksnd_protocol == 2)
1765 conn->ksnc_proto = &ksocknal_protocol_v2x;
1766 else if (*ksocknal_tunables.ksnd_protocol == 1)
1767 conn->ksnc_proto = &ksocknal_protocol_v1x;
1768#endif
1769 hello->kshm_nips = 0;
1770 ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
1771 }
1772
Joe Perches2d00bd12014-11-23 11:28:50 -08001773 CERROR("Unknown protocol version (%d.x expected) from %pI4h\n",
1774 conn->ksnc_proto->pro_version,
1775 &conn->ksnc_ipaddr);
Peng Taod7e09d02013-05-02 16:46:55 +08001776
1777 return -EPROTO;
1778 }
1779
1780 proto_match = (conn->ksnc_proto == proto);
1781 conn->ksnc_proto = proto;
1782
1783 /* receive the rest of hello message anyway */
1784 rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
James Simmons5fd88332016-02-12 12:06:09 -05001785 if (rc) {
Peng Tao5e8f6922013-07-15 22:27:09 +08001786		CERROR("Error %d reading or checking hello from %pI4h\n",
1787 rc, &conn->ksnc_ipaddr);
Mike Shuey97d10d02015-05-19 10:14:37 -04001788 LASSERT(rc < 0);
Peng Taod7e09d02013-05-02 16:46:55 +08001789 return rc;
1790 }
1791
1792 *incarnation = hello->kshm_src_incarnation;
1793
1794 if (hello->kshm_src_nid == LNET_NID_ANY) {
Joe Perches2d00bd12014-11-23 11:28:50 -08001795 CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY from %pI4h\n",
1796 &conn->ksnc_ipaddr);
Peng Taod7e09d02013-05-02 16:46:55 +08001797 return -EPROTO;
1798 }
1799
1800 if (!active &&
1801 conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
1802 /* Userspace NAL assigns peer process ID from socket */
1803 recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
1804 recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
1805 } else {
1806 recv_id.nid = hello->kshm_src_nid;
1807 recv_id.pid = hello->kshm_src_pid;
1808 }
1809
1810 if (!active) {
1811 *peerid = recv_id;
1812
1813 /* peer determines type */
1814 conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
1815 if (conn->ksnc_type == SOCKLND_CONN_NONE) {
Peng Tao5e8f6922013-07-15 22:27:09 +08001816 CERROR("Unexpected type %d from %s ip %pI4h\n",
James Simmonsc314c312016-02-12 12:06:01 -05001817 hello->kshm_ctype, libcfs_id2str(*peerid),
1818 &conn->ksnc_ipaddr);
Peng Taod7e09d02013-05-02 16:46:55 +08001819 return -EPROTO;
1820 }
1821
1822 return 0;
1823 }
1824
1825 if (peerid->pid != recv_id.pid ||
1826 peerid->nid != recv_id.nid) {
Joe Perches2d00bd12014-11-23 11:28:50 -08001827 LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host %pI4h, but they claimed they were %s; please check your Lustre configuration.\n",
Peng Taod7e09d02013-05-02 16:46:55 +08001828 libcfs_id2str(*peerid),
Peng Tao5e8f6922013-07-15 22:27:09 +08001829 &conn->ksnc_ipaddr,
Peng Taod7e09d02013-05-02 16:46:55 +08001830 libcfs_id2str(recv_id));
1831 return -EPROTO;
1832 }
1833
1834 if (hello->kshm_ctype == SOCKLND_CONN_NONE) {
1835 /* Possible protocol mismatch or I lost the connection race */
1836 return proto_match ? EALREADY : EPROTO;
1837 }
1838
1839 if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
Peng Tao5e8f6922013-07-15 22:27:09 +08001840 CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
James Simmonsc314c312016-02-12 12:06:01 -05001841 conn->ksnc_type, libcfs_id2str(*peerid),
1842 &conn->ksnc_ipaddr, hello->kshm_ctype);
Peng Taod7e09d02013-05-02 16:46:55 +08001843 return -EPROTO;
1844 }
1845
1846 return 0;
1847}
1848
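/*
 * Handshake summary (illustrative, field names as used above):
 *
 *   1. read kshm_magic    - reject anything but the known magics;
 *   2. read kshm_version  - select ksocknal_protocol_v{1,2,3}x, or, on
 *                           a passive connection, send my own hello
 *                           back before failing with -EPROTO;
 *   3. pro_recv_hello()   - protocol-specific remainder of the hello;
 *   4. derive the peer id - from the hello, or from the socket address
 *                           for userspace peers connecting from ports
 *                           above the reserved acceptor range.
 *
 * The positive EALREADY/EPROTO returns let the active side distinguish
 * a lost connection race or a version mismatch from a fatal (negative)
 * error.
 */
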
Phong Tranf9cd474f2014-08-19 22:45:50 +07001849static int
James Simmonsff13fd42016-06-10 16:14:23 -04001850ksocknal_connect(struct ksock_route *route)
Peng Taod7e09d02013-05-02 16:46:55 +08001851{
Mike Shuey97d10d02015-05-19 10:14:37 -04001852 LIST_HEAD(zombies);
James Simmonsff13fd42016-06-10 16:14:23 -04001853 struct ksock_peer *peer = route->ksnr_peer;
Mike Shuey97d10d02015-05-19 10:14:37 -04001854 int type;
1855 int wanted;
1856 struct socket *sock;
1857 unsigned long deadline;
1858 int retry_later = 0;
1859 int rc = 0;
Peng Taod7e09d02013-05-02 16:46:55 +08001860
1861 deadline = cfs_time_add(cfs_time_current(),
1862 cfs_time_seconds(*ksocknal_tunables.ksnd_timeout));
1863
1864 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1865
Mike Shuey97d10d02015-05-19 10:14:37 -04001866 LASSERT(route->ksnr_scheduled);
1867 LASSERT(!route->ksnr_connecting);
Peng Taod7e09d02013-05-02 16:46:55 +08001868
1869 route->ksnr_connecting = 1;
1870
1871 for (;;) {
1872 wanted = ksocknal_route_mask() & ~route->ksnr_connected;
1873
James Simmons4420cfd2016-02-12 12:06:00 -05001874 /*
1875 * stop connecting if peer/route got closed under me, or
1876 * route got connected while queued
1877 */
Peng Taod7e09d02013-05-02 16:46:55 +08001878 if (peer->ksnp_closing || route->ksnr_deleted ||
James Simmons5fd88332016-02-12 12:06:09 -05001879 !wanted) {
Peng Taod7e09d02013-05-02 16:46:55 +08001880 retry_later = 0;
1881 break;
1882 }
1883
1884 /* reschedule if peer is connecting to me */
1885 if (peer->ksnp_accepting > 0) {
1886 CDEBUG(D_NET,
1887 "peer %s(%d) already connecting to me, retry later.\n",
1888 libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
1889 retry_later = 1;
1890 }
1891
1892 if (retry_later) /* needs reschedule */
1893 break;
1894
James Simmons5fd88332016-02-12 12:06:09 -05001895 if (wanted & (1 << SOCKLND_CONN_ANY)) {
Peng Taod7e09d02013-05-02 16:46:55 +08001896 type = SOCKLND_CONN_ANY;
James Simmons5fd88332016-02-12 12:06:09 -05001897 } else if (wanted & (1 << SOCKLND_CONN_CONTROL)) {
Peng Taod7e09d02013-05-02 16:46:55 +08001898 type = SOCKLND_CONN_CONTROL;
James Simmons5fd88332016-02-12 12:06:09 -05001899 } else if (wanted & (1 << SOCKLND_CONN_BULK_IN)) {
Peng Taod7e09d02013-05-02 16:46:55 +08001900 type = SOCKLND_CONN_BULK_IN;
1901 } else {
James Simmons5fd88332016-02-12 12:06:09 -05001902 LASSERT(wanted & (1 << SOCKLND_CONN_BULK_OUT));
Peng Taod7e09d02013-05-02 16:46:55 +08001903 type = SOCKLND_CONN_BULK_OUT;
1904 }
1905
1906 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1907
1908 if (cfs_time_aftereq(cfs_time_current(), deadline)) {
1909 rc = -ETIMEDOUT;
1910 lnet_connect_console_error(rc, peer->ksnp_id.nid,
1911 route->ksnr_ipaddr,
1912 route->ksnr_port);
1913 goto failed;
1914 }
1915
1916 rc = lnet_connect(&sock, peer->ksnp_id.nid,
1917 route->ksnr_myipaddr,
1918 route->ksnr_ipaddr, route->ksnr_port);
James Simmons5fd88332016-02-12 12:06:09 -05001919 if (rc)
Peng Taod7e09d02013-05-02 16:46:55 +08001920 goto failed;
1921
1922 rc = ksocknal_create_conn(peer->ksnp_ni, route, sock, type);
1923 if (rc < 0) {
1924 lnet_connect_console_error(rc, peer->ksnp_id.nid,
1925 route->ksnr_ipaddr,
1926 route->ksnr_port);
1927 goto failed;
1928 }
1929
James Simmons4420cfd2016-02-12 12:06:00 -05001930 /*
1931 * A +ve RC means I have to retry because I lost the connection
1932 * race or I have to renegotiate protocol version
1933 */
James Simmons5fd88332016-02-12 12:06:09 -05001934		retry_later = !!rc;
Peng Taod7e09d02013-05-02 16:46:55 +08001935 if (retry_later)
1936 CDEBUG(D_NET, "peer %s: conn race, retry later.\n",
1937 libcfs_nid2str(peer->ksnp_id.nid));
1938
1939 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1940 }
1941
1942 route->ksnr_scheduled = 0;
1943 route->ksnr_connecting = 0;
1944
1945 if (retry_later) {
James Simmons4420cfd2016-02-12 12:06:00 -05001946 /*
1947 * re-queue for attention; this frees me up to handle
1948 * the peer's incoming connection request
1949 */
Peng Taod7e09d02013-05-02 16:46:55 +08001950 if (rc == EALREADY ||
James Simmons5fd88332016-02-12 12:06:09 -05001951 (!rc && peer->ksnp_accepting > 0)) {
James Simmons4420cfd2016-02-12 12:06:00 -05001952 /*
 1953			 * We want to introduce a delay before the next
Peng Taod7e09d02013-05-02 16:46:55 +08001954			 * attempt to connect if we lost the conn race,
 1955			 * but the race is usually resolved quickly,
James Simmons4420cfd2016-02-12 12:06:00 -05001956			 * so min_reconnectms should be a good heuristic
1957 */
Peng Taod7e09d02013-05-02 16:46:55 +08001958 route->ksnr_retry_interval =
James Simmons51078e22016-02-12 12:06:04 -05001959 cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000;
Peng Taod7e09d02013-05-02 16:46:55 +08001960 route->ksnr_timeout = cfs_time_add(cfs_time_current(),
1961 route->ksnr_retry_interval);
1962 }
1963
1964 ksocknal_launch_connection_locked(route);
1965 }
1966
1967 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
1968 return retry_later;
1969
1970 failed:
1971 write_lock_bh(&ksocknal_data.ksnd_global_lock);
1972
1973 route->ksnr_scheduled = 0;
1974 route->ksnr_connecting = 0;
1975
1976 /* This is a retry rather than a new connection */
1977 route->ksnr_retry_interval *= 2;
1978 route->ksnr_retry_interval =
Jeremiah Mahler0c575412014-12-25 16:04:41 -08001979 max(route->ksnr_retry_interval,
James Simmons51078e22016-02-12 12:06:04 -05001980 cfs_time_seconds(*ksocknal_tunables.ksnd_min_reconnectms) / 1000);
Peng Taod7e09d02013-05-02 16:46:55 +08001981 route->ksnr_retry_interval =
Jeremiah Mahler0c575412014-12-25 16:04:41 -08001982 min(route->ksnr_retry_interval,
James Simmons51078e22016-02-12 12:06:04 -05001983 cfs_time_seconds(*ksocknal_tunables.ksnd_max_reconnectms) / 1000);
Peng Taod7e09d02013-05-02 16:46:55 +08001984
James Simmons5fd88332016-02-12 12:06:09 -05001985 LASSERT(route->ksnr_retry_interval);
Peng Taod7e09d02013-05-02 16:46:55 +08001986 route->ksnr_timeout = cfs_time_add(cfs_time_current(),
1987 route->ksnr_retry_interval);
1988
1989 if (!list_empty(&peer->ksnp_tx_queue) &&
James Simmons5fd88332016-02-12 12:06:09 -05001990 !peer->ksnp_accepting &&
James Simmons06ace262016-02-12 12:06:08 -05001991 !ksocknal_find_connecting_route_locked(peer)) {
James Simmonsff13fd42016-06-10 16:14:23 -04001992 struct ksock_conn *conn;
Peng Taod7e09d02013-05-02 16:46:55 +08001993
James Simmons4420cfd2016-02-12 12:06:00 -05001994 /*
1995 * ksnp_tx_queue is queued on a conn on successful
1996 * connection for V1.x and V2.x
1997 */
James Simmonsb31e64c2016-02-12 12:06:06 -05001998 if (!list_empty(&peer->ksnp_conns)) {
Peng Taod7e09d02013-05-02 16:46:55 +08001999 conn = list_entry(peer->ksnp_conns.next,
James Simmonsff13fd42016-06-10 16:14:23 -04002000 struct ksock_conn, ksnc_list);
James Simmonsb31e64c2016-02-12 12:06:06 -05002001 LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
Peng Taod7e09d02013-05-02 16:46:55 +08002002 }
2003
James Simmons4420cfd2016-02-12 12:06:00 -05002004 /*
2005 * take all the blocked packets while I've got the lock and
2006 * complete below...
2007 */
Peng Taod7e09d02013-05-02 16:46:55 +08002008 list_splice_init(&peer->ksnp_tx_queue, &zombies);
2009 }
2010
Masanari Iida2b284322013-08-25 10:10:14 +09002011#if 0 /* irrelevant with only eager routes */
Peng Taod7e09d02013-05-02 16:46:55 +08002012 if (!route->ksnr_deleted) {
2013 /* make this route least-favourite for re-selection */
2014 list_del(&route->ksnr_list);
2015 list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
2016 }
2017#endif
2018 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2019
2020 ksocknal_peer_failed(peer);
2021 ksocknal_txlist_done(peer->ksnp_ni, &zombies, 1);
2022 return 0;
2023}
2024
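/*
 * Worked example of the reconnect back-off above (illustrative numbers,
 * assuming min/max reconnect tunables of 1s and 60s): on each failure
 * the interval doubles and is clamped to [min, max], i.e.
 *
 *   interval     = min(max(interval * 2, min_reconnect), max_reconnect);
 *   ksnr_timeout = now + interval;   // honoured by the connd route scan
 *
 * giving a schedule like 1s, 2s, 4s, ... 32s, 60s, 60s, ...  A lost
 * connection race instead resets the interval to the minimum, since
 * such races are normally resolved quickly.
 */
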
2025/*
2026 * check whether we need to create more connds.
 2027 * It will try to create a new thread if necessary; @timeout can
 2028 * be updated if thread creation fails, so the caller won't keep
 2029 * retrying while running out of resources.
2030 */
2031static int
Arnd Bergmann74ad5782015-09-27 16:45:21 -04002032ksocknal_connd_check_start(time64_t sec, long *timeout)
Peng Taod7e09d02013-05-02 16:46:55 +08002033{
2034 char name[16];
2035 int rc;
2036 int total = ksocknal_data.ksnd_connd_starting +
2037 ksocknal_data.ksnd_connd_running;
2038
2039 if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
2040 /* still in initializing */
2041 return 0;
2042 }
2043
2044 if (total >= *ksocknal_tunables.ksnd_nconnds_max ||
2045 total > ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV) {
James Simmons4420cfd2016-02-12 12:06:00 -05002046 /*
2047 * can't create more connd, or still have enough
2048 * threads to handle more connecting
2049 */
Peng Taod7e09d02013-05-02 16:46:55 +08002050 return 0;
2051 }
2052
2053 if (list_empty(&ksocknal_data.ksnd_connd_routes)) {
2054 /* no pending connecting request */
2055 return 0;
2056 }
2057
2058 if (sec - ksocknal_data.ksnd_connd_failed_stamp <= 1) {
2059 /* may run out of resource, retry later */
2060 *timeout = cfs_time_seconds(1);
2061 return 0;
2062 }
2063
2064 if (ksocknal_data.ksnd_connd_starting > 0) {
2065 /* serialize starting to avoid flood */
2066 return 0;
2067 }
2068
2069 ksocknal_data.ksnd_connd_starting_stamp = sec;
2070 ksocknal_data.ksnd_connd_starting++;
2071 spin_unlock_bh(&ksocknal_data.ksnd_connd_lock);
2072
2073 /* NB: total is the next id */
2074 snprintf(name, sizeof(name), "socknal_cd%02d", total);
2075 rc = ksocknal_thread_start(ksocknal_connd, NULL, name);
2076
2077 spin_lock_bh(&ksocknal_data.ksnd_connd_lock);
James Simmons5fd88332016-02-12 12:06:09 -05002078 if (!rc)
Peng Taod7e09d02013-05-02 16:46:55 +08002079 return 1;
2080
2081 /* we tried ... */
2082 LASSERT(ksocknal_data.ksnd_connd_starting > 0);
2083 ksocknal_data.ksnd_connd_starting--;
Arnd Bergmann74ad5782015-09-27 16:45:21 -04002084 ksocknal_data.ksnd_connd_failed_stamp = ktime_get_real_seconds();
Peng Taod7e09d02013-05-02 16:46:55 +08002085
2086 return 1;
2087}
2088
2089/*
 2090 * check whether the current thread can exit: it will return 1 if there are
 2091 * too many threads and none was created in the past 120 seconds.
 2092 * Also, this function may update @timeout to make the caller come back
 2093 * again to recheck these conditions.
2094 */
2095static int
Arnd Bergmann74ad5782015-09-27 16:45:21 -04002096ksocknal_connd_check_stop(time64_t sec, long *timeout)
Peng Taod7e09d02013-05-02 16:46:55 +08002097{
2098 int val;
2099
2100 if (unlikely(ksocknal_data.ksnd_init < SOCKNAL_INIT_ALL)) {
2101 /* still in initializing */
2102 return 0;
2103 }
2104
2105 if (ksocknal_data.ksnd_connd_starting > 0) {
2106 /* in progress of starting new thread */
2107 return 0;
2108 }
2109
2110 if (ksocknal_data.ksnd_connd_running <=
2111 *ksocknal_tunables.ksnd_nconnds) { /* can't shrink */
2112 return 0;
2113 }
2114
2115 /* created thread in past 120 seconds? */
2116 val = (int)(ksocknal_data.ksnd_connd_starting_stamp +
2117 SOCKNAL_CONND_TIMEOUT - sec);
2118
2119 *timeout = (val > 0) ? cfs_time_seconds(val) :
2120 cfs_time_seconds(SOCKNAL_CONND_TIMEOUT);
2121 if (val > 0)
2122 return 0;
2123
 2124	/* no thread created in the past 120 seconds */
2125
2126 return ksocknal_data.ksnd_connd_running >
2127 ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV;
2128}
2129
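/*
 * Pool-sizing sketch (illustrative): the two checks above give the
 * connd pool its hysteresis.  ksocknal_connd_check_start() grows it one
 * thread at a time - only while routes are pending, below
 * ksnd_nconnds_max, and backing off a second after a failed start -
 * while ksocknal_connd_check_stop() lets a surplus thread exit only
 * when nothing was started for SOCKNAL_CONND_TIMEOUT (120s), so the
 * pool shrinks slowly instead of oscillating under bursty connection
 * load.
 */
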
James Simmons4420cfd2016-02-12 12:06:00 -05002130/*
 2131 * Go through the connd_routes queue looking for a route that we can process
 2132 * right now; @timeout_p can be updated if we need to come back later
2133 */
James Simmonsff13fd42016-06-10 16:14:23 -04002134static struct ksock_route *
Peng Taod7e09d02013-05-02 16:46:55 +08002135ksocknal_connd_get_route_locked(signed long *timeout_p)
2136{
James Simmonsff13fd42016-06-10 16:14:23 -04002137 struct ksock_route *route;
Mike Shuey97d10d02015-05-19 10:14:37 -04002138 unsigned long now;
Peng Taod7e09d02013-05-02 16:46:55 +08002139
2140 now = cfs_time_current();
2141
2142 /* connd_routes can contain both pending and ordinary routes */
James Simmonsc314c312016-02-12 12:06:01 -05002143 list_for_each_entry(route, &ksocknal_data.ksnd_connd_routes,
2144 ksnr_connd_list) {
James Simmons5fd88332016-02-12 12:06:09 -05002145 if (!route->ksnr_retry_interval ||
Peng Taod7e09d02013-05-02 16:46:55 +08002146 cfs_time_aftereq(now, route->ksnr_timeout))
2147 return route;
2148
2149 if (*timeout_p == MAX_SCHEDULE_TIMEOUT ||
2150 (int)*timeout_p > (int)(route->ksnr_timeout - now))
2151 *timeout_p = (int)(route->ksnr_timeout - now);
2152 }
2153
2154 return NULL;
2155}
2156
2157int
James Simmonsb31e64c2016-02-12 12:06:06 -05002158ksocknal_connd(void *arg)
Peng Taod7e09d02013-05-02 16:46:55 +08002159{
Mike Shuey97d10d02015-05-19 10:14:37 -04002160 spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
James Simmonsff13fd42016-06-10 16:14:23 -04002161 struct ksock_connreq *cr;
Mike Shuey97d10d02015-05-19 10:14:37 -04002162 wait_queue_t wait;
2163 int nloops = 0;
2164 int cons_retry = 0;
Peng Taod7e09d02013-05-02 16:46:55 +08002165
Mike Shuey97d10d02015-05-19 10:14:37 -04002166 cfs_block_allsigs();
Peng Taod7e09d02013-05-02 16:46:55 +08002167
Peng Tao9e795d32014-03-18 21:05:52 +08002168 init_waitqueue_entry(&wait, current);
Peng Taod7e09d02013-05-02 16:46:55 +08002169
2170 spin_lock_bh(connd_lock);
2171
2172 LASSERT(ksocknal_data.ksnd_connd_starting > 0);
2173 ksocknal_data.ksnd_connd_starting--;
2174 ksocknal_data.ksnd_connd_running++;
2175
2176 while (!ksocknal_data.ksnd_shuttingdown) {
James Simmonsff13fd42016-06-10 16:14:23 -04002177 struct ksock_route *route = NULL;
Arnd Bergmann74ad5782015-09-27 16:45:21 -04002178 time64_t sec = ktime_get_real_seconds();
Peng Taod7e09d02013-05-02 16:46:55 +08002179 long timeout = MAX_SCHEDULE_TIMEOUT;
Mike Shuey97d10d02015-05-19 10:14:37 -04002180 int dropped_lock = 0;
Peng Taod7e09d02013-05-02 16:46:55 +08002181
2182 if (ksocknal_connd_check_stop(sec, &timeout)) {
2183 /* wakeup another one to check stop */
2184 wake_up(&ksocknal_data.ksnd_connd_waitq);
2185 break;
2186 }
2187
2188 if (ksocknal_connd_check_start(sec, &timeout)) {
2189 /* created new thread */
2190 dropped_lock = 1;
2191 }
2192
2193 if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
2194 /* Connection accepted by the listener */
James Simmonsff13fd42016-06-10 16:14:23 -04002195 cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
2196 struct ksock_connreq, ksncr_list);
Peng Taod7e09d02013-05-02 16:46:55 +08002197
2198 list_del(&cr->ksncr_list);
2199 spin_unlock_bh(connd_lock);
2200 dropped_lock = 1;
2201
2202 ksocknal_create_conn(cr->ksncr_ni, NULL,
2203 cr->ksncr_sock, SOCKLND_CONN_NONE);
2204 lnet_ni_decref(cr->ksncr_ni);
2205 LIBCFS_FREE(cr, sizeof(*cr));
2206
2207 spin_lock_bh(connd_lock);
2208 }
2209
James Simmons4420cfd2016-02-12 12:06:00 -05002210 /*
 2211		 * Only handle an outgoing connection request if there
Peng Taod7e09d02013-05-02 16:46:55 +08002212		 * is a thread left to handle incoming connections and
James Simmons4420cfd2016-02-12 12:06:00 -05002213		 * to create a new connd
2214 */
Peng Taod7e09d02013-05-02 16:46:55 +08002215 if (ksocknal_data.ksnd_connd_connecting + SOCKNAL_CONND_RESV <
2216 ksocknal_data.ksnd_connd_running) {
2217 route = ksocknal_connd_get_route_locked(&timeout);
2218 }
James Simmons06ace262016-02-12 12:06:08 -05002219 if (route) {
James Simmonsb31e64c2016-02-12 12:06:06 -05002220 list_del(&route->ksnr_connd_list);
Peng Taod7e09d02013-05-02 16:46:55 +08002221 ksocknal_data.ksnd_connd_connecting++;
2222 spin_unlock_bh(connd_lock);
2223 dropped_lock = 1;
2224
2225 if (ksocknal_connect(route)) {
2226 /* consecutive retry */
2227 if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
Joe Perches2d00bd12014-11-23 11:28:50 -08002228 CWARN("massive consecutive re-connecting to %pI4h\n",
Peng Tao5e8f6922013-07-15 22:27:09 +08002229 &route->ksnr_ipaddr);
Peng Taod7e09d02013-05-02 16:46:55 +08002230 cons_retry = 0;
2231 }
2232 } else {
2233 cons_retry = 0;
2234 }
2235
2236 ksocknal_route_decref(route);
2237
2238 spin_lock_bh(connd_lock);
2239 ksocknal_data.ksnd_connd_connecting--;
2240 }
2241
2242 if (dropped_lock) {
2243 if (++nloops < SOCKNAL_RESCHED)
2244 continue;
2245 spin_unlock_bh(connd_lock);
2246 nloops = 0;
2247 cond_resched();
2248 spin_lock_bh(connd_lock);
2249 continue;
2250 }
2251
2252 /* Nothing to do for 'timeout' */
2253 set_current_state(TASK_INTERRUPTIBLE);
2254 add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
2255 spin_unlock_bh(connd_lock);
2256
2257 nloops = 0;
Peng Taob7efb982014-03-18 21:05:54 +08002258 schedule_timeout(timeout);
Peng Taod7e09d02013-05-02 16:46:55 +08002259
Peng Taod7e09d02013-05-02 16:46:55 +08002260 remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
2261 spin_lock_bh(connd_lock);
2262 }
2263 ksocknal_data.ksnd_connd_running--;
2264 spin_unlock_bh(connd_lock);
2265
2266 ksocknal_thread_fini();
2267 return 0;
2268}
2269
James Simmonsff13fd42016-06-10 16:14:23 -04002270static struct ksock_conn *
2271ksocknal_find_timed_out_conn(struct ksock_peer *peer)
Peng Taod7e09d02013-05-02 16:46:55 +08002272{
2273 /* We're called with a shared lock on ksnd_global_lock */
James Simmonsff13fd42016-06-10 16:14:23 -04002274 struct ksock_conn *conn;
Mike Shuey97d10d02015-05-19 10:14:37 -04002275 struct list_head *ctmp;
Peng Taod7e09d02013-05-02 16:46:55 +08002276
James Simmonsb31e64c2016-02-12 12:06:06 -05002277 list_for_each(ctmp, &peer->ksnp_conns) {
Mike Shuey97d10d02015-05-19 10:14:37 -04002278 int error;
Mike Rapoport50ffcb72015-10-13 16:03:40 +03002279
James Simmonsff13fd42016-06-10 16:14:23 -04002280 conn = list_entry(ctmp, struct ksock_conn, ksnc_list);
Peng Taod7e09d02013-05-02 16:46:55 +08002281
2282 /* Don't need the {get,put}connsock dance to deref ksnc_sock */
Mike Shuey97d10d02015-05-19 10:14:37 -04002283 LASSERT(!conn->ksnc_closing);
Peng Taod7e09d02013-05-02 16:46:55 +08002284
James Simmons4420cfd2016-02-12 12:06:00 -05002285 /*
 2286		 * SOCK_ERROR will reset the error code of the socket on
 2287		 * some platforms (like Darwin 8.x)
2288 */
Greg Kroah-Hartmanfb4a1532014-07-12 00:01:03 -07002289 error = conn->ksnc_sock->sk->sk_err;
James Simmons5fd88332016-02-12 12:06:09 -05002290 if (error) {
Peng Taod7e09d02013-05-02 16:46:55 +08002291 ksocknal_conn_addref(conn);
2292
2293 switch (error) {
2294 case ECONNRESET:
Joe Perches2d00bd12014-11-23 11:28:50 -08002295 CNETERR("A connection with %s (%pI4h:%d) was reset; it may have rebooted.\n",
Peng Taod7e09d02013-05-02 16:46:55 +08002296 libcfs_id2str(peer->ksnp_id),
Peng Tao5e8f6922013-07-15 22:27:09 +08002297 &conn->ksnc_ipaddr,
Peng Taod7e09d02013-05-02 16:46:55 +08002298 conn->ksnc_port);
2299 break;
2300 case ETIMEDOUT:
Joe Perches2d00bd12014-11-23 11:28:50 -08002301 CNETERR("A connection with %s (%pI4h:%d) timed out; the network or node may be down.\n",
Peng Taod7e09d02013-05-02 16:46:55 +08002302 libcfs_id2str(peer->ksnp_id),
Peng Tao5e8f6922013-07-15 22:27:09 +08002303 &conn->ksnc_ipaddr,
Peng Taod7e09d02013-05-02 16:46:55 +08002304 conn->ksnc_port);
2305 break;
2306 default:
Joe Perches2d00bd12014-11-23 11:28:50 -08002307				CNETERR("An unexpected network error %d occurred with %s (%pI4h:%d)\n",
2308 error,
Peng Taod7e09d02013-05-02 16:46:55 +08002309 libcfs_id2str(peer->ksnp_id),
Peng Tao5e8f6922013-07-15 22:27:09 +08002310 &conn->ksnc_ipaddr,
Peng Taod7e09d02013-05-02 16:46:55 +08002311 conn->ksnc_port);
2312 break;
2313 }
2314
Masaru Nomura71397092014-05-15 18:54:05 +01002315 return conn;
Peng Taod7e09d02013-05-02 16:46:55 +08002316 }
2317
2318 if (conn->ksnc_rx_started &&
2319 cfs_time_aftereq(cfs_time_current(),
2320 conn->ksnc_rx_deadline)) {
2321 /* Timed out incomplete incoming message */
2322 ksocknal_conn_addref(conn);
Joe Perches2d00bd12014-11-23 11:28:50 -08002323 CNETERR("Timeout receiving from %s (%pI4h:%d), state %d wanted %d left %d\n",
Peng Taod7e09d02013-05-02 16:46:55 +08002324 libcfs_id2str(peer->ksnp_id),
Peng Tao5e8f6922013-07-15 22:27:09 +08002325 &conn->ksnc_ipaddr,
Peng Taod7e09d02013-05-02 16:46:55 +08002326 conn->ksnc_port,
2327 conn->ksnc_rx_state,
2328 conn->ksnc_rx_nob_wanted,
2329 conn->ksnc_rx_nob_left);
Masaru Nomura71397092014-05-15 18:54:05 +01002330 return conn;
Peng Taod7e09d02013-05-02 16:46:55 +08002331 }
2332
2333 if ((!list_empty(&conn->ksnc_tx_queue) ||
James Simmons5fd88332016-02-12 12:06:09 -05002334 conn->ksnc_sock->sk->sk_wmem_queued) &&
Peng Taod7e09d02013-05-02 16:46:55 +08002335 cfs_time_aftereq(cfs_time_current(),
2336 conn->ksnc_tx_deadline)) {
James Simmons4420cfd2016-02-12 12:06:00 -05002337 /*
2338 * Timed out messages queued for sending or
2339 * buffered in the socket's send buffer
2340 */
Peng Taod7e09d02013-05-02 16:46:55 +08002341 ksocknal_conn_addref(conn);
Joe Perches2d00bd12014-11-23 11:28:50 -08002342			CNETERR("Timeout sending data to %s (%pI4h:%d); the network or that node may be down.\n",
Peng Taod7e09d02013-05-02 16:46:55 +08002343 libcfs_id2str(peer->ksnp_id),
Peng Tao5e8f6922013-07-15 22:27:09 +08002344 &conn->ksnc_ipaddr,
Peng Taod7e09d02013-05-02 16:46:55 +08002345 conn->ksnc_port);
Masaru Nomura71397092014-05-15 18:54:05 +01002346 return conn;
Peng Taod7e09d02013-05-02 16:46:55 +08002347 }
2348 }
2349
Masaru Nomura71397092014-05-15 18:54:05 +01002350 return NULL;
Peng Taod7e09d02013-05-02 16:46:55 +08002351}
2352
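/*
 * Summary sketch (illustrative): the scan above flags a conn on any of
 * three conditions, in order:
 *
 *   1. sk->sk_err is set - the socket itself reported an error;
 *   2. rx started but past ksnc_rx_deadline - a partial incoming
 *      message stalled;
 *   3. queued or buffered tx past ksnc_tx_deadline - the peer stopped
 *      draining what we sent.
 *
 * The caller closes the returned conn (and its siblings) with
 * -ETIMEDOUT and rescans, since the peer list may change once the
 * shared lock is dropped.
 */
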
2353static inline void
James Simmonsff13fd42016-06-10 16:14:23 -04002354ksocknal_flush_stale_txs(struct ksock_peer *peer)
Peng Taod7e09d02013-05-02 16:46:55 +08002355{
James Simmonsff13fd42016-06-10 16:14:23 -04002356 struct ksock_tx *tx;
2357 struct ksock_tx *tmp;
Mike Shuey97d10d02015-05-19 10:14:37 -04002358 LIST_HEAD(stale_txs);
Peng Taod7e09d02013-05-02 16:46:55 +08002359
2360 write_lock_bh(&ksocknal_data.ksnd_global_lock);
2361
Bhaktipriya Shridhar0daec762016-02-28 00:08:46 +05302362 list_for_each_entry_safe(tx, tmp, &peer->ksnp_tx_queue, tx_list) {
Peng Taod7e09d02013-05-02 16:46:55 +08002363 if (!cfs_time_aftereq(cfs_time_current(),
2364 tx->tx_deadline))
2365 break;
2366
James Simmonsb31e64c2016-02-12 12:06:06 -05002367 list_del(&tx->tx_list);
2368 list_add_tail(&tx->tx_list, &stale_txs);
Peng Taod7e09d02013-05-02 16:46:55 +08002369 }
2370
2371 write_unlock_bh(&ksocknal_data.ksnd_global_lock);
2372
2373 ksocknal_txlist_done(peer->ksnp_ni, &stale_txs, 1);
2374}
2375
Phong Tranf9cd474f2014-08-19 22:45:50 +07002376static int
James Simmonsff13fd42016-06-10 16:14:23 -04002377ksocknal_send_keepalive_locked(struct ksock_peer *peer)
frank zagoa161de82015-12-23 12:32:11 -05002378 __must_hold(&ksocknal_data.ksnd_global_lock)
Peng Taod7e09d02013-05-02 16:46:55 +08002379{
James Simmonsff13fd42016-06-10 16:14:23 -04002380 struct ksock_sched *sched;
2381 struct ksock_conn *conn;
2382 struct ksock_tx *tx;
Peng Taod7e09d02013-05-02 16:46:55 +08002383
2384 if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
2385 return 0;
2386
2387 if (peer->ksnp_proto != &ksocknal_protocol_v3x)
2388 return 0;
2389
2390 if (*ksocknal_tunables.ksnd_keepalive <= 0 ||
Greg Kroah-Hartman699503b2014-07-12 01:03:41 -07002391 time_before(cfs_time_current(),
2392 cfs_time_add(peer->ksnp_last_alive,
2393 cfs_time_seconds(*ksocknal_tunables.ksnd_keepalive))))
Peng Taod7e09d02013-05-02 16:46:55 +08002394 return 0;
2395
Greg Kroah-Hartman699503b2014-07-12 01:03:41 -07002396 if (time_before(cfs_time_current(), peer->ksnp_send_keepalive))
Peng Taod7e09d02013-05-02 16:46:55 +08002397 return 0;
2398
James Simmons4420cfd2016-02-12 12:06:00 -05002399 /*
 2400	 * retry 10 secs later, so we don't put pressure
 2401	 * on this peer if we failed to send a keepalive this time
2402 */
Peng Taod7e09d02013-05-02 16:46:55 +08002403 peer->ksnp_send_keepalive = cfs_time_shift(10);
2404
2405 conn = ksocknal_find_conn_locked(peer, NULL, 1);
James Simmons06ace262016-02-12 12:06:08 -05002406 if (conn) {
Peng Taod7e09d02013-05-02 16:46:55 +08002407 sched = conn->ksnc_scheduler;
2408
2409 spin_lock_bh(&sched->kss_lock);
2410 if (!list_empty(&conn->ksnc_tx_queue)) {
2411 spin_unlock_bh(&sched->kss_lock);
 2412			/* there is a queued ACK, no need for a keepalive */
2413 return 0;
2414 }
2415
2416 spin_unlock_bh(&sched->kss_lock);
2417 }
2418
2419 read_unlock(&ksocknal_data.ksnd_global_lock);
2420
2421 /* cookie = 1 is reserved for keepalive PING */
2422 tx = ksocknal_alloc_tx_noop(1, 1);
James Simmons06ace262016-02-12 12:06:08 -05002423 if (!tx) {
Peng Taod7e09d02013-05-02 16:46:55 +08002424 read_lock(&ksocknal_data.ksnd_global_lock);
2425 return -ENOMEM;
2426 }
2427
James Simmons5fd88332016-02-12 12:06:09 -05002428 if (!ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id)) {
Peng Taod7e09d02013-05-02 16:46:55 +08002429 read_lock(&ksocknal_data.ksnd_global_lock);
2430 return 1;
2431 }
2432
2433 ksocknal_free_tx(tx);
2434 read_lock(&ksocknal_data.ksnd_global_lock);
2435
2436 return -EIO;
2437}
2438
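/*
 * Timing sketch (illustrative): with ksnd_keepalive = K seconds, the
 * function above queues a PING - a noop tx carrying the reserved
 * zero-copy cookie 1 - only once the peer has been silent for K
 * seconds and no ACK is already queued on the chosen conn.  On failure
 * the next attempt is pushed out 10s via ksnp_send_keepalive, so a
 * struggling peer isn't hammered.
 */
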
Phong Tranf9cd474f2014-08-19 22:45:50 +07002439static void
James Simmonsb31e64c2016-02-12 12:06:06 -05002440ksocknal_check_peer_timeouts(int idx)
Peng Taod7e09d02013-05-02 16:46:55 +08002441{
Mike Shuey97d10d02015-05-19 10:14:37 -04002442 struct list_head *peers = &ksocknal_data.ksnd_peers[idx];
James Simmonsff13fd42016-06-10 16:14:23 -04002443 struct ksock_peer *peer;
2444 struct ksock_conn *conn;
2445 struct ksock_tx *tx;
Peng Taod7e09d02013-05-02 16:46:55 +08002446
2447 again:
James Simmons4420cfd2016-02-12 12:06:00 -05002448 /*
2449 * NB. We expect to have a look at all the peers and not find any
Peng Taod7e09d02013-05-02 16:46:55 +08002450 * connections to time out, so we just use a shared lock while we
James Simmons4420cfd2016-02-12 12:06:00 -05002451 * take a look...
2452 */
Peng Taod7e09d02013-05-02 16:46:55 +08002453 read_lock(&ksocknal_data.ksnd_global_lock);
2454
2455 list_for_each_entry(peer, peers, ksnp_list) {
Mike Shuey97d10d02015-05-19 10:14:37 -04002456 unsigned long deadline = 0;
2457 int resid = 0;
2458 int n = 0;
Peng Taod7e09d02013-05-02 16:46:55 +08002459
James Simmons5fd88332016-02-12 12:06:09 -05002460 if (ksocknal_send_keepalive_locked(peer)) {
Peng Taod7e09d02013-05-02 16:46:55 +08002461 read_unlock(&ksocknal_data.ksnd_global_lock);
2462 goto again;
2463 }
2464
James Simmonsb31e64c2016-02-12 12:06:06 -05002465 conn = ksocknal_find_timed_out_conn(peer);
Peng Taod7e09d02013-05-02 16:46:55 +08002466
James Simmons06ace262016-02-12 12:06:08 -05002467 if (conn) {
Peng Taod7e09d02013-05-02 16:46:55 +08002468 read_unlock(&ksocknal_data.ksnd_global_lock);
2469
James Simmonsb31e64c2016-02-12 12:06:06 -05002470 ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
Peng Taod7e09d02013-05-02 16:46:55 +08002471
James Simmons4420cfd2016-02-12 12:06:00 -05002472 /*
2473 * NB we won't find this one again, but we can't
Peng Taod7e09d02013-05-02 16:46:55 +08002474 * just proceed with the next peer, since we dropped
James Simmons4420cfd2016-02-12 12:06:00 -05002475 * ksnd_global_lock and it might be dead already!
2476 */
Peng Taod7e09d02013-05-02 16:46:55 +08002477 ksocknal_conn_decref(conn);
2478 goto again;
2479 }
2480
James Simmons4420cfd2016-02-12 12:06:00 -05002481 /*
2482 * we can't process stale txs right here because we're
2483 * holding only shared lock
2484 */
James Simmonsb31e64c2016-02-12 12:06:06 -05002485 if (!list_empty(&peer->ksnp_tx_queue)) {
James Simmonsff13fd42016-06-10 16:14:23 -04002486 struct ksock_tx *tx = list_entry(peer->ksnp_tx_queue.next,
2487 struct ksock_tx, tx_list);
Peng Taod7e09d02013-05-02 16:46:55 +08002488
2489 if (cfs_time_aftereq(cfs_time_current(),
2490 tx->tx_deadline)) {
Peng Taod7e09d02013-05-02 16:46:55 +08002491 ksocknal_peer_addref(peer);
2492 read_unlock(&ksocknal_data.ksnd_global_lock);
2493
2494 ksocknal_flush_stale_txs(peer);
2495
2496 ksocknal_peer_decref(peer);
2497 goto again;
2498 }
2499 }
2500
2501 if (list_empty(&peer->ksnp_zc_req_list))
2502 continue;
2503
2504 spin_lock(&peer->ksnp_lock);
2505 list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) {
2506 if (!cfs_time_aftereq(cfs_time_current(),
2507 tx->tx_deadline))
2508 break;
2509 /* ignore the TX if connection is being closed */
2510 if (tx->tx_conn->ksnc_closing)
2511 continue;
2512 n++;
2513 }
2514
James Simmons5fd88332016-02-12 12:06:09 -05002515 if (!n) {
Peng Taod7e09d02013-05-02 16:46:55 +08002516 spin_unlock(&peer->ksnp_lock);
2517 continue;
2518 }
2519
2520 tx = list_entry(peer->ksnp_zc_req_list.next,
James Simmonsff13fd42016-06-10 16:14:23 -04002521 struct ksock_tx, tx_zc_list);
Peng Taod7e09d02013-05-02 16:46:55 +08002522 deadline = tx->tx_deadline;
Mike Shuey97d10d02015-05-19 10:14:37 -04002523 resid = tx->tx_resid;
2524 conn = tx->tx_conn;
Peng Taod7e09d02013-05-02 16:46:55 +08002525 ksocknal_conn_addref(conn);
2526
2527 spin_unlock(&peer->ksnp_lock);
2528 read_unlock(&ksocknal_data.ksnd_global_lock);
2529
Joe Perches2d00bd12014-11-23 11:28:50 -08002530		CERROR("Total %d stale ZC_REQs for peer %s detected; the oldest (%p) timed out %ld secs ago, resid: %d, wmem: %d\n",
Peng Taod7e09d02013-05-02 16:46:55 +08002531 n, libcfs_nid2str(peer->ksnp_id.nid), tx,
2532 cfs_duration_sec(cfs_time_current() - deadline),
Greg Kroah-Hartmanfb4a1532014-07-12 00:01:03 -07002533 resid, conn->ksnc_sock->sk->sk_wmem_queued);
Peng Taod7e09d02013-05-02 16:46:55 +08002534
James Simmonsb31e64c2016-02-12 12:06:06 -05002535 ksocknal_close_conn_and_siblings(conn, -ETIMEDOUT);
Peng Taod7e09d02013-05-02 16:46:55 +08002536 ksocknal_conn_decref(conn);
2537 goto again;
2538 }
2539
2540 read_unlock(&ksocknal_data.ksnd_global_lock);
2541}
2542
2543int
James Simmonsb31e64c2016-02-12 12:06:06 -05002544ksocknal_reaper(void *arg)
Peng Taod7e09d02013-05-02 16:46:55 +08002545{
Mike Shuey97d10d02015-05-19 10:14:37 -04002546 wait_queue_t wait;
James Simmonsff13fd42016-06-10 16:14:23 -04002547 struct ksock_conn *conn;
2548 struct ksock_sched *sched;
Mike Shuey97d10d02015-05-19 10:14:37 -04002549 struct list_head enomem_conns;
2550 int nenomem_conns;
2551 long timeout;
2552 int i;
2553 int peer_index = 0;
2554 unsigned long deadline = cfs_time_current();
Peng Taod7e09d02013-05-02 16:46:55 +08002555
Mike Shuey97d10d02015-05-19 10:14:37 -04002556 cfs_block_allsigs();
Peng Taod7e09d02013-05-02 16:46:55 +08002557
2558 INIT_LIST_HEAD(&enomem_conns);
Peng Tao9e795d32014-03-18 21:05:52 +08002559 init_waitqueue_entry(&wait, current);
Peng Taod7e09d02013-05-02 16:46:55 +08002560
2561 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
2562
2563 while (!ksocknal_data.ksnd_shuttingdown) {
James Simmonsb31e64c2016-02-12 12:06:06 -05002564 if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
2565 conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
James Simmonsff13fd42016-06-10 16:14:23 -04002566 struct ksock_conn, ksnc_list);
James Simmonsb31e64c2016-02-12 12:06:06 -05002567 list_del(&conn->ksnc_list);
Peng Taod7e09d02013-05-02 16:46:55 +08002568
2569 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
2570
2571 ksocknal_terminate_conn(conn);
2572 ksocknal_conn_decref(conn);
2573
2574 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
2575 continue;
2576 }
2577
James Simmonsb31e64c2016-02-12 12:06:06 -05002578 if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
2579 conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
James Simmonsff13fd42016-06-10 16:14:23 -04002580 struct ksock_conn, ksnc_list);
James Simmonsb31e64c2016-02-12 12:06:06 -05002581 list_del(&conn->ksnc_list);
Peng Taod7e09d02013-05-02 16:46:55 +08002582
2583 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
2584
2585 ksocknal_destroy_conn(conn);
2586
2587 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
2588 continue;
2589 }
2590
James Simmonsb31e64c2016-02-12 12:06:06 -05002591 if (!list_empty(&ksocknal_data.ksnd_enomem_conns)) {
Peng Taod7e09d02013-05-02 16:46:55 +08002592 list_add(&enomem_conns,
James Simmonsc314c312016-02-12 12:06:01 -05002593 &ksocknal_data.ksnd_enomem_conns);
Peng Taod7e09d02013-05-02 16:46:55 +08002594 list_del_init(&ksocknal_data.ksnd_enomem_conns);
2595 }
2596
2597 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
2598
2599 /* reschedule all the connections that stalled with ENOMEM... */
2600 nenomem_conns = 0;
James Simmonsb31e64c2016-02-12 12:06:06 -05002601 while (!list_empty(&enomem_conns)) {
James Simmonsff13fd42016-06-10 16:14:23 -04002602 conn = list_entry(enomem_conns.next, struct ksock_conn,
James Simmonsc314c312016-02-12 12:06:01 -05002603 ksnc_tx_list);
James Simmonsb31e64c2016-02-12 12:06:06 -05002604 list_del(&conn->ksnc_tx_list);
Peng Taod7e09d02013-05-02 16:46:55 +08002605
2606 sched = conn->ksnc_scheduler;
2607
2608 spin_lock_bh(&sched->kss_lock);
2609
2610 LASSERT(conn->ksnc_tx_scheduled);
2611 conn->ksnc_tx_ready = 1;
2612 list_add_tail(&conn->ksnc_tx_list,
James Simmonsc314c312016-02-12 12:06:01 -05002613 &sched->kss_tx_conns);
Peng Taod7e09d02013-05-02 16:46:55 +08002614 wake_up(&sched->kss_waitq);
2615
2616 spin_unlock_bh(&sched->kss_lock);
2617 nenomem_conns++;
2618 }
2619
2620 /* careful with the jiffy wrap... */
2621 while ((timeout = cfs_time_sub(deadline,
2622 cfs_time_current())) <= 0) {
2623 const int n = 4;
2624 const int p = 1;
Mike Shuey97d10d02015-05-19 10:14:37 -04002625 int chunk = ksocknal_data.ksnd_peer_hash_size;
Peng Taod7e09d02013-05-02 16:46:55 +08002626
James Simmons4420cfd2016-02-12 12:06:00 -05002627 /*
2628 * Time to check for timeouts on a few more peers: I do
Peng Taod7e09d02013-05-02 16:46:55 +08002629 * checks every 'p' seconds on a proportion of the peer
2630 * table and I need to check every connection 'n' times
2631 * within a timeout interval, to ensure I detect a
2632 * timeout on any connection within (n+1)/n times the
James Simmons4420cfd2016-02-12 12:06:00 -05002633 * timeout interval.
2634 */
Peng Taod7e09d02013-05-02 16:46:55 +08002635 if (*ksocknal_tunables.ksnd_timeout > n * p)
2636 chunk = (chunk * n * p) /
2637 *ksocknal_tunables.ksnd_timeout;
James Simmons5fd88332016-02-12 12:06:09 -05002638 if (!chunk)
Peng Taod7e09d02013-05-02 16:46:55 +08002639 chunk = 1;
2640
2641 for (i = 0; i < chunk; i++) {
James Simmonsb31e64c2016-02-12 12:06:06 -05002642 ksocknal_check_peer_timeouts(peer_index);
Peng Taod7e09d02013-05-02 16:46:55 +08002643 peer_index = (peer_index + 1) %
2644 ksocknal_data.ksnd_peer_hash_size;
2645 }
2646
2647 deadline = cfs_time_add(deadline, cfs_time_seconds(p));
2648 }
2649
James Simmons5fd88332016-02-12 12:06:09 -05002650 if (nenomem_conns) {
James Simmons4420cfd2016-02-12 12:06:00 -05002651 /*
2652 * Reduce my timeout if I rescheduled ENOMEM conns.
Peng Taod7e09d02013-05-02 16:46:55 +08002653			 * This also prevents me from being woken immediately
James Simmons4420cfd2016-02-12 12:06:00 -05002654 * if any go back on my enomem list.
2655 */
Peng Taod7e09d02013-05-02 16:46:55 +08002656 timeout = SOCKNAL_ENOMEM_RETRY;
2657 }
2658 ksocknal_data.ksnd_reaper_waketime =
2659 cfs_time_add(cfs_time_current(), timeout);
2660
James Simmonsb31e64c2016-02-12 12:06:06 -05002661 set_current_state(TASK_INTERRUPTIBLE);
2662 add_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
Peng Taod7e09d02013-05-02 16:46:55 +08002663
2664 if (!ksocknal_data.ksnd_shuttingdown &&
James Simmonsb31e64c2016-02-12 12:06:06 -05002665 list_empty(&ksocknal_data.ksnd_deathrow_conns) &&
2666 list_empty(&ksocknal_data.ksnd_zombie_conns))
Peng Taob7efb982014-03-18 21:05:54 +08002667 schedule_timeout(timeout);
Peng Taod7e09d02013-05-02 16:46:55 +08002668
James Simmonsb31e64c2016-02-12 12:06:06 -05002669 set_current_state(TASK_RUNNING);
2670 remove_wait_queue(&ksocknal_data.ksnd_reaper_waitq, &wait);
Peng Taod7e09d02013-05-02 16:46:55 +08002671
2672 spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
2673 }
2674
2675 spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
2676
2677 ksocknal_thread_fini();
2678 return 0;
2679}