/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include <asm/div64.h>
#include <asm/page.h>
#include "o2iblnd.h"

static lnd_t the_o2iblnd;

struct kib_data kiblnd_data;
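
/*
 * Simple 32-bit rotate-and-add checksum over the raw message bytes:
 * each step rotates the running sum left one bit and adds the next
 * byte, so both the value and the position of every byte contribute to
 * the result. This is a cheap wire-integrity check, not a cryptographic
 * digest; zero is reserved to mean "no checksum", hence the
 * substitution below.
 */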
static __u32 kiblnd_cksum(void *ptr, int nob)
{
	char *c = ptr;
	__u32 sum = 0;

	while (nob-- > 0)
		sum = ((sum << 1) | (sum >> 31)) + *c++;

	/* ensure I don't return 0 (== no checksum) */
	return !sum ? 1 : sum;
}

static char *kiblnd_msgtype2str(int type)
{
	switch (type) {
	case IBLND_MSG_CONNREQ:
		return "CONNREQ";

	case IBLND_MSG_CONNACK:
		return "CONNACK";

	case IBLND_MSG_NOOP:
		return "NOOP";

	case IBLND_MSG_IMMEDIATE:
		return "IMMEDIATE";

	case IBLND_MSG_PUT_REQ:
		return "PUT_REQ";

	case IBLND_MSG_PUT_NAK:
		return "PUT_NAK";

	case IBLND_MSG_PUT_ACK:
		return "PUT_ACK";

	case IBLND_MSG_PUT_DONE:
		return "PUT_DONE";

	case IBLND_MSG_GET_REQ:
		return "GET_REQ";

	case IBLND_MSG_GET_DONE:
		return "GET_DONE";

	default:
		return "???";
	}
}

static int kiblnd_msgtype2size(int type)
{
	const int hdr_size = offsetof(struct kib_msg, ibm_u);

	switch (type) {
	case IBLND_MSG_CONNREQ:
	case IBLND_MSG_CONNACK:
		return hdr_size + sizeof(struct kib_connparams);

	case IBLND_MSG_NOOP:
		return hdr_size;

	case IBLND_MSG_IMMEDIATE:
		return offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[0]);

	case IBLND_MSG_PUT_REQ:
		return hdr_size + sizeof(struct kib_putreq_msg);

	case IBLND_MSG_PUT_ACK:
		return hdr_size + sizeof(struct kib_putack_msg);

	case IBLND_MSG_GET_REQ:
		return hdr_size + sizeof(struct kib_get_msg);

	case IBLND_MSG_PUT_NAK:
	case IBLND_MSG_PUT_DONE:
	case IBLND_MSG_GET_DONE:
		return hdr_size + sizeof(struct kib_completion_msg);
	default:
		return -1;
	}
}
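
/*
 * Validate (and, for a peer of opposite endianness, byte-swap) the RDMA
 * descriptor carried by a GET_REQ or PUT_ACK. The key and fragment
 * count are swapped first so the size checks run on sane values; the
 * per-fragment fields are swapped only once the descriptor is known to
 * fit inside the received message and to describe a payload no larger
 * than LNET_MAX_PAYLOAD.
 */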
static int kiblnd_unpack_rd(struct kib_msg *msg, int flip)
{
	struct kib_rdma_desc *rd;
	int msg_size;
	int nob;
	int n;
	int i;

	LASSERT(msg->ibm_type == IBLND_MSG_GET_REQ ||
		msg->ibm_type == IBLND_MSG_PUT_ACK);

	rd = msg->ibm_type == IBLND_MSG_GET_REQ ?
	     &msg->ibm_u.get.ibgm_rd :
	     &msg->ibm_u.putack.ibpam_rd;

	if (flip) {
		__swab32s(&rd->rd_key);
		__swab32s(&rd->rd_nfrags);
	}

	n = rd->rd_nfrags;

	nob = offsetof(struct kib_msg, ibm_u) +
	      kiblnd_rd_msg_size(rd, msg->ibm_type, n);

	if (msg->ibm_nob < nob) {
		CERROR("Short %s: %d(%d)\n",
		       kiblnd_msgtype2str(msg->ibm_type), msg->ibm_nob, nob);
		return 1;
	}

	msg_size = kiblnd_rd_size(rd);
	if (msg_size <= 0 || msg_size > LNET_MAX_PAYLOAD) {
		CERROR("Bad msg_size: %d, should be 0 < n <= %d\n",
		       msg_size, LNET_MAX_PAYLOAD);
		return 1;
	}

	if (!flip)
		return 0;

	for (i = 0; i < n; i++) {
		__swab32s(&rd->rd_frags[i].rf_nob);
		__swab64s(&rd->rd_frags[i].rf_addr);
	}

	return 0;
}

void kiblnd_pack_msg(lnet_ni_t *ni, struct kib_msg *msg, int version,
		     int credits, lnet_nid_t dstnid, __u64 dststamp)
{
	struct kib_net *net = ni->ni_data;

	/*
	 * CAVEAT EMPTOR! all message fields not set here should have been
	 * initialised previously.
	 */
	msg->ibm_magic = IBLND_MSG_MAGIC;
	msg->ibm_version = version;
	/* ibm_type */
	msg->ibm_credits = credits;
	/* ibm_nob */
	msg->ibm_cksum = 0;
	msg->ibm_srcnid = ni->ni_nid;
	msg->ibm_srcstamp = net->ibn_incarnation;
	msg->ibm_dstnid = dstnid;
	msg->ibm_dststamp = dststamp;

	if (*kiblnd_tunables.kib_cksum) {
		/* NB ibm_cksum zero while computing cksum */
		msg->ibm_cksum = kiblnd_cksum(msg, msg->ibm_nob);
	}
}
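
/*
 * Unpack and sanity-check a received wire message in place, in strict
 * order: magic (which also reveals whether the peer has opposite
 * endianness), version, header length, total length, checksum (computed
 * with ibm_cksum zeroed and before any field is flipped, mirroring
 * kiblnd_pack_msg() above), then per-type payload validation. Any
 * failure maps to -EPROTO. The magic itself is deliberately left
 * unflipped as a record of the peer's endianness.
 */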
int kiblnd_unpack_msg(struct kib_msg *msg, int nob)
{
	const int hdr_size = offsetof(struct kib_msg, ibm_u);
	__u32 msg_cksum;
	__u16 version;
	int msg_nob;
	int flip;

	/* 6 bytes are enough to have received magic + version */
	if (nob < 6) {
		CERROR("Short message: %d\n", nob);
		return -EPROTO;
	}

	if (msg->ibm_magic == IBLND_MSG_MAGIC) {
		flip = 0;
	} else if (msg->ibm_magic == __swab32(IBLND_MSG_MAGIC)) {
		flip = 1;
	} else {
		CERROR("Bad magic: %08x\n", msg->ibm_magic);
		return -EPROTO;
	}

	version = flip ? __swab16(msg->ibm_version) : msg->ibm_version;
	if (version != IBLND_MSG_VERSION &&
	    version != IBLND_MSG_VERSION_1) {
		CERROR("Bad version: %x\n", version);
		return -EPROTO;
	}

	if (nob < hdr_size) {
		CERROR("Short message: %d\n", nob);
		return -EPROTO;
	}

	msg_nob = flip ? __swab32(msg->ibm_nob) : msg->ibm_nob;
	if (msg_nob > nob) {
		CERROR("Short message: got %d, wanted %d\n", nob, msg_nob);
		return -EPROTO;
	}

	/*
	 * checksum must be computed with ibm_cksum zero and BEFORE anything
	 * gets flipped
	 */
	msg_cksum = flip ? __swab32(msg->ibm_cksum) : msg->ibm_cksum;
	msg->ibm_cksum = 0;
	if (msg_cksum &&
	    msg_cksum != kiblnd_cksum(msg, msg_nob)) {
		CERROR("Bad checksum\n");
		return -EPROTO;
	}

	msg->ibm_cksum = msg_cksum;

	if (flip) {
		/* leave magic unflipped as a clue to peer endianness */
		msg->ibm_version = version;
		CLASSERT(sizeof(msg->ibm_type) == 1);
		CLASSERT(sizeof(msg->ibm_credits) == 1);
		msg->ibm_nob = msg_nob;
		__swab64s(&msg->ibm_srcnid);
		__swab64s(&msg->ibm_srcstamp);
		__swab64s(&msg->ibm_dstnid);
		__swab64s(&msg->ibm_dststamp);
	}

	if (msg->ibm_srcnid == LNET_NID_ANY) {
		CERROR("Bad src nid: %s\n", libcfs_nid2str(msg->ibm_srcnid));
		return -EPROTO;
	}

	if (msg_nob < kiblnd_msgtype2size(msg->ibm_type)) {
		CERROR("Short %s: %d(%d)\n", kiblnd_msgtype2str(msg->ibm_type),
		       msg_nob, kiblnd_msgtype2size(msg->ibm_type));
		return -EPROTO;
	}

	switch (msg->ibm_type) {
	default:
		CERROR("Unknown message type %x\n", msg->ibm_type);
		return -EPROTO;

	case IBLND_MSG_NOOP:
	case IBLND_MSG_IMMEDIATE:
	case IBLND_MSG_PUT_REQ:
		break;

	case IBLND_MSG_PUT_ACK:
	case IBLND_MSG_GET_REQ:
		if (kiblnd_unpack_rd(msg, flip))
			return -EPROTO;
		break;

	case IBLND_MSG_PUT_NAK:
	case IBLND_MSG_PUT_DONE:
	case IBLND_MSG_GET_DONE:
		if (flip)
			__swab32s(&msg->ibm_u.completion.ibcm_status);
		break;

	case IBLND_MSG_CONNREQ:
	case IBLND_MSG_CONNACK:
		if (flip) {
			__swab16s(&msg->ibm_u.connparams.ibcp_queue_depth);
			__swab16s(&msg->ibm_u.connparams.ibcp_max_frags);
			__swab32s(&msg->ibm_u.connparams.ibcp_max_msg_size);
		}
		break;
	}
	return 0;
}
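
/*
 * Allocate and initialise a peer on the CPT (CPU partition) that owns
 * its NID so per-peer state stays NUMA-local. The new peer holds one
 * reference for the caller and is not yet linked into the global peer
 * hash; ibn_npeers only ever changes under the global write lock.
 */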
int kiblnd_create_peer(lnet_ni_t *ni, struct kib_peer **peerp, lnet_nid_t nid)
{
	struct kib_peer *peer;
	struct kib_net *net = ni->ni_data;
	int cpt = lnet_cpt_of_nid(nid);
	unsigned long flags;

	LASSERT(net);
	LASSERT(nid != LNET_NID_ANY);

	LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
	if (!peer) {
		CERROR("Cannot allocate peer\n");
		return -ENOMEM;
	}

	peer->ibp_ni = ni;
	peer->ibp_nid = nid;
	peer->ibp_error = 0;
	peer->ibp_last_alive = 0;
	peer->ibp_max_frags = kiblnd_cfg_rdma_frags(peer->ibp_ni);
	peer->ibp_queue_depth = ni->ni_peertxcredits;
	atomic_set(&peer->ibp_refcount, 1);	/* 1 ref for caller */

	INIT_LIST_HEAD(&peer->ibp_list);	/* not in the peer table yet */
	INIT_LIST_HEAD(&peer->ibp_conns);
	INIT_LIST_HEAD(&peer->ibp_tx_queue);

	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

	/* always called with a ref on ni, which prevents ni being shutdown */
	LASSERT(!net->ibn_shutdown);

	/* npeers only grows with the global lock held */
	atomic_inc(&net->ibn_npeers);

	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

	*peerp = peer;
	return 0;
}

void kiblnd_destroy_peer(struct kib_peer *peer)
{
	struct kib_net *net = peer->ibp_ni->ni_data;

	LASSERT(net);
	LASSERT(!atomic_read(&peer->ibp_refcount));
	LASSERT(!kiblnd_peer_active(peer));
	LASSERT(kiblnd_peer_idle(peer));
	LASSERT(list_empty(&peer->ibp_tx_queue));

	LIBCFS_FREE(peer, sizeof(*peer));

	/*
	 * NB a peer's connections keep a reference on their peer until
	 * they are destroyed, so we can be assured that _all_ state to do
	 * with this peer has been cleaned up when its refcount drops to
	 * zero.
	 */
	atomic_dec(&net->ibn_npeers);
}

struct kib_peer *kiblnd_find_peer_locked(lnet_nid_t nid)
{
	/*
	 * the caller is responsible for accounting the additional reference
	 * that this creates
	 */
	struct list_head *peer_list = kiblnd_nid2peerlist(nid);
	struct list_head *tmp;
	struct kib_peer *peer;

	list_for_each(tmp, peer_list) {
		peer = list_entry(tmp, struct kib_peer, ibp_list);
		LASSERT(!kiblnd_peer_idle(peer));

		if (peer->ibp_nid != nid)
			continue;

		CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
		       peer, libcfs_nid2str(nid),
		       atomic_read(&peer->ibp_refcount),
		       peer->ibp_version);
		return peer;
	}
	return NULL;
}

void kiblnd_unlink_peer_locked(struct kib_peer *peer)
{
	LASSERT(list_empty(&peer->ibp_conns));

	LASSERT(kiblnd_peer_active(peer));
	list_del_init(&peer->ibp_list);
	/* lose peerlist's ref */
	kiblnd_peer_decref(peer);
}

static int kiblnd_get_peer_info(lnet_ni_t *ni, int index,
				lnet_nid_t *nidp, int *count)
{
	struct kib_peer *peer;
	struct list_head *ptmp;
	int i;
	unsigned long flags;

	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

	for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
		list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
			peer = list_entry(ptmp, struct kib_peer, ibp_list);
			LASSERT(!kiblnd_peer_idle(peer));

			if (peer->ibp_ni != ni)
				continue;

			if (index-- > 0)
				continue;

			*nidp = peer->ibp_nid;
			*count = atomic_read(&peer->ibp_refcount);

			read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
					       flags);
			return 0;
		}
	}

	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
	return -ENOENT;
}
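
/*
 * Peer deletion: a peer with no connections is unlinked directly;
 * otherwise each connection is closed, and closing the last one unlinks
 * the peer. kiblnd_del_peer() below also collects any not-yet-sent
 * transmits onto a local "zombies" list so they can be completed with
 * -EIO after the global lock is dropped.
 */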
static void kiblnd_del_peer_locked(struct kib_peer *peer)
{
	struct list_head *ctmp;
	struct list_head *cnxt;
	struct kib_conn *conn;

	if (list_empty(&peer->ibp_conns)) {
		kiblnd_unlink_peer_locked(peer);
	} else {
		list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
			conn = list_entry(ctmp, struct kib_conn, ibc_list);

			kiblnd_close_conn_locked(conn, 0);
		}
		/* NB closing peer's last conn unlinked it. */
	}
	/*
	 * NB peer now unlinked; might even be freed if the peer table had the
	 * last ref on it.
	 */
}

static int kiblnd_del_peer(lnet_ni_t *ni, lnet_nid_t nid)
{
	LIST_HEAD(zombies);
	struct list_head *ptmp;
	struct list_head *pnxt;
	struct kib_peer *peer;
	int lo;
	int hi;
	int i;
	unsigned long flags;
	int rc = -ENOENT;

	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

	if (nid != LNET_NID_ANY) {
		lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
		hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
	} else {
		lo = 0;
		hi = kiblnd_data.kib_peer_hash_size - 1;
	}

	for (i = lo; i <= hi; i++) {
		list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
			peer = list_entry(ptmp, struct kib_peer, ibp_list);
			LASSERT(!kiblnd_peer_idle(peer));

			if (peer->ibp_ni != ni)
				continue;

			if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
				continue;

			if (!list_empty(&peer->ibp_tx_queue)) {
				LASSERT(list_empty(&peer->ibp_conns));

				list_splice_init(&peer->ibp_tx_queue,
						 &zombies);
			}

			kiblnd_del_peer_locked(peer);
			rc = 0;	/* matched something */
		}
	}

	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

	kiblnd_txlist_done(ni, &zombies, -EIO);

	return rc;
}

static struct kib_conn *kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
{
	struct kib_peer *peer;
	struct list_head *ptmp;
	struct kib_conn *conn;
	struct list_head *ctmp;
	int i;
	unsigned long flags;

	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

	for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
		list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
			peer = list_entry(ptmp, struct kib_peer, ibp_list);
			LASSERT(!kiblnd_peer_idle(peer));

			if (peer->ibp_ni != ni)
				continue;

			list_for_each(ctmp, &peer->ibp_conns) {
				if (index-- > 0)
					continue;

				conn = list_entry(ctmp, struct kib_conn,
						  ibc_list);
				kiblnd_conn_addref(conn);
				read_unlock_irqrestore(
					&kiblnd_data.kib_global_lock,
					flags);
				return conn;
			}
		}
	}

	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
	return NULL;
}
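
/*
 * Map the kib_ib_mtu tunable (a byte count) onto the IB_MTU_* enum the
 * verbs layer expects. Zero means "leave the path-record MTU alone";
 * any unsupported value yields -1, which kiblnd_setup_mtu_locked()
 * below treats as a fatal configuration error via LASSERT.
 */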
int kiblnd_translate_mtu(int value)
{
	switch (value) {
	default:
		return -1;
	case 0:
		return 0;
	case 256:
		return IB_MTU_256;
	case 512:
		return IB_MTU_512;
	case 1024:
		return IB_MTU_1024;
	case 2048:
		return IB_MTU_2048;
	case 4096:
		return IB_MTU_4096;
	}
}

static void kiblnd_setup_mtu_locked(struct rdma_cm_id *cmid)
{
	int mtu;

	/* XXX There is no path record for iWARP, set by netdev->change_mtu? */
	if (!cmid->route.path_rec)
		return;

	mtu = kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu);
	LASSERT(mtu >= 0);
	if (mtu)
		cmid->route.path_rec->mtu = mtu;
}

static int kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
{
	cpumask_t *mask;
	int vectors;
	int off;
	int i;
	lnet_nid_t nid = conn->ibc_peer->ibp_nid;

	vectors = conn->ibc_cmid->device->num_comp_vectors;
	if (vectors <= 1)
		return 0;

	mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
	if (!mask)
		return 0;

	/* hash NID to CPU id in this partition... */
	off = do_div(nid, cpumask_weight(mask));
	for_each_cpu(i, mask) {
		if (!off--)
			return i % vectors;
	}

	LBUG();
	return 1;
}
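
/*
 * Connection setup runs in a fixed order: allocate the conn and its
 * in-progress state, verify the device is not mid-failover, pin the
 * current HCA, allocate and DMA-map the receive buffers, create a
 * completion queue sized for every possible send and receive
 * completion, create the RC queue pair, and finally pre-post the whole
 * receive ring. Failures unwind through kiblnd_destroy_conn(); note
 * the CAVEAT EMPTOR on ownership of 'peer' and 'cmid' inside.
 */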
struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cmid,
				    int state, int version)
{
	/*
	 * CAVEAT EMPTOR:
	 * If the new conn is created successfully it takes over the caller's
	 * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself
	 * is destroyed. On failure, the caller's ref on 'peer' remains and
	 * she must dispose of 'cmid'. (Actually I'd block forever if I tried
	 * to destroy 'cmid' here since I'm called from the CM which still has
	 * its ref on 'cmid').
	 */
	rwlock_t *glock = &kiblnd_data.kib_global_lock;
	struct kib_net *net = peer->ibp_ni->ni_data;
	struct kib_dev *dev;
	struct ib_qp_init_attr *init_qp_attr;
	struct kib_sched_info *sched;
	struct ib_cq_init_attr cq_attr = {};
	struct kib_conn *conn;
	struct ib_cq *cq;
	unsigned long flags;
	int cpt;
	int rc;
	int i;

	LASSERT(net);
	LASSERT(!in_interrupt());

	dev = net->ibn_dev;

	cpt = lnet_cpt_of_nid(peer->ibp_nid);
	sched = kiblnd_data.kib_scheds[cpt];

	LASSERT(sched->ibs_nthreads > 0);

	LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
			 sizeof(*init_qp_attr));
	if (!init_qp_attr) {
		CERROR("Can't allocate qp_attr for %s\n",
		       libcfs_nid2str(peer->ibp_nid));
		goto failed_0;
	}

	LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
	if (!conn) {
		CERROR("Can't allocate connection for %s\n",
		       libcfs_nid2str(peer->ibp_nid));
		goto failed_1;
	}

	conn->ibc_state = IBLND_CONN_INIT;
	conn->ibc_version = version;
	conn->ibc_peer = peer;	/* I take the caller's ref */
	cmid->context = conn;	/* for future CM callbacks */
	conn->ibc_cmid = cmid;
	conn->ibc_max_frags = peer->ibp_max_frags;
	conn->ibc_queue_depth = peer->ibp_queue_depth;

	INIT_LIST_HEAD(&conn->ibc_early_rxs);
	INIT_LIST_HEAD(&conn->ibc_tx_noops);
	INIT_LIST_HEAD(&conn->ibc_tx_queue);
	INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
	INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
	INIT_LIST_HEAD(&conn->ibc_active_txs);
	spin_lock_init(&conn->ibc_lock);

	LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
			 sizeof(*conn->ibc_connvars));
	if (!conn->ibc_connvars) {
		CERROR("Can't allocate in-progress connection state\n");
		goto failed_2;
	}

	write_lock_irqsave(glock, flags);
	if (dev->ibd_failover) {
		write_unlock_irqrestore(glock, flags);
		CERROR("%s: failover in progress\n", dev->ibd_ifname);
		goto failed_2;
	}

	if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
		/* wakeup failover thread and teardown connection */
		if (kiblnd_dev_can_failover(dev)) {
			list_add_tail(&dev->ibd_fail_list,
				      &kiblnd_data.kib_failed_devs);
			wake_up(&kiblnd_data.kib_failover_waitq);
		}

		write_unlock_irqrestore(glock, flags);
		CERROR("cmid HCA(%s), kib_dev(%s) need failover\n",
		       cmid->device->name, dev->ibd_ifname);
		goto failed_2;
	}

	kiblnd_hdev_addref_locked(dev->ibd_hdev);
	conn->ibc_hdev = dev->ibd_hdev;

	kiblnd_setup_mtu_locked(cmid);

	write_unlock_irqrestore(glock, flags);

	LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
			 IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
	if (!conn->ibc_rxs) {
		CERROR("Cannot allocate RX buffers\n");
		goto failed_2;
	}

	rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
				IBLND_RX_MSG_PAGES(conn));
	if (rc)
		goto failed_2;

	kiblnd_map_rx_descs(conn);

	cq_attr.cqe = IBLND_CQ_ENTRIES(conn);
	cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt);
	cq = ib_create_cq(cmid->device,
			  kiblnd_cq_completion, kiblnd_cq_event, conn,
			  &cq_attr);
	if (IS_ERR(cq)) {
		CERROR("Failed to create CQ with %d CQEs: %ld\n",
		       IBLND_CQ_ENTRIES(conn), PTR_ERR(cq));
		goto failed_2;
	}

	conn->ibc_cq = cq;

	rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (rc) {
		CERROR("Can't request completion notification: %d\n", rc);
		goto failed_2;
	}

	init_qp_attr->event_handler = kiblnd_qp_event;
	init_qp_attr->qp_context = conn;
	init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(conn);
	init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn);
	init_qp_attr->cap.max_send_sge = 1;
	init_qp_attr->cap.max_recv_sge = 1;
	init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	init_qp_attr->qp_type = IB_QPT_RC;
	init_qp_attr->send_cq = cq;
	init_qp_attr->recv_cq = cq;

	conn->ibc_sched = sched;

	rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
	if (rc) {
		CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
		       rc, init_qp_attr->cap.max_send_wr,
		       init_qp_attr->cap.max_recv_wr);
		goto failed_2;
	}

	LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));

	/* 1 ref for caller and each rxmsg */
	atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(conn));
	conn->ibc_nrx = IBLND_RX_MSGS(conn);

	/* post receives */
	for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
		rc = kiblnd_post_rx(&conn->ibc_rxs[i],
				    IBLND_POSTRX_NO_CREDIT);
		if (rc) {
			CERROR("Can't post rxmsg: %d\n", rc);

			/* Make posted receives complete */
			kiblnd_abort_receives(conn);

			/*
			 * correct # of posted buffers
			 * NB locking needed now I'm racing with completion
			 */
			spin_lock_irqsave(&sched->ibs_lock, flags);
			conn->ibc_nrx -= IBLND_RX_MSGS(conn) - i;
			spin_unlock_irqrestore(&sched->ibs_lock, flags);

			/*
			 * cmid will be destroyed by CM(ofed) after cm_callback
			 * returned, so we can't refer it anymore
			 * (by kiblnd_connd()->kiblnd_destroy_conn)
			 */
			rdma_destroy_qp(conn->ibc_cmid);
			conn->ibc_cmid = NULL;

			/* Drop my own and unused rxbuffer refcounts */
			while (i++ <= IBLND_RX_MSGS(conn))
				kiblnd_conn_decref(conn);

			return NULL;
		}
	}

	/* Init successful! */
	LASSERT(state == IBLND_CONN_ACTIVE_CONNECT ||
		state == IBLND_CONN_PASSIVE_WAIT);
	conn->ibc_state = state;

	/* 1 more conn */
	atomic_inc(&net->ibn_nconns);
	return conn;

 failed_2:
	kiblnd_destroy_conn(conn);
	LIBCFS_FREE(conn, sizeof(*conn));
 failed_1:
	LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
 failed_0:
	return NULL;
}
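
/*
 * Teardown is the reverse of kiblnd_create_conn(): QP, then CQ, then
 * the RX mappings and pages, then connvars and the HCA reference. The
 * conn must already be fully disengaged (refcount zero, nothing queued
 * or posted). Only a conn that progressed past IBLND_CONN_INIT owns
 * the caller's peer ref and the cmid, hence the conditional decref and
 * rdma_destroy_id() at the end.
 */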
void kiblnd_destroy_conn(struct kib_conn *conn)
{
	struct rdma_cm_id *cmid = conn->ibc_cmid;
	struct kib_peer *peer = conn->ibc_peer;
	int rc;

	LASSERT(!in_interrupt());
	LASSERT(!atomic_read(&conn->ibc_refcount));
	LASSERT(list_empty(&conn->ibc_early_rxs));
	LASSERT(list_empty(&conn->ibc_tx_noops));
	LASSERT(list_empty(&conn->ibc_tx_queue));
	LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
	LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
	LASSERT(list_empty(&conn->ibc_active_txs));
	LASSERT(!conn->ibc_noops_posted);
	LASSERT(!conn->ibc_nsends_posted);

	switch (conn->ibc_state) {
	default:
		/* conn must be completely disengaged from the network */
		LBUG();

	case IBLND_CONN_DISCONNECTED:
		/* connvars should have been freed already */
		LASSERT(!conn->ibc_connvars);
		break;

	case IBLND_CONN_INIT:
		break;
	}

	/* conn->ibc_cmid might be destroyed by CM already */
	if (cmid && cmid->qp)
		rdma_destroy_qp(cmid);

	if (conn->ibc_cq) {
		rc = ib_destroy_cq(conn->ibc_cq);
		if (rc)
			CWARN("Error destroying CQ: %d\n", rc);
	}

	if (conn->ibc_rx_pages)
		kiblnd_unmap_rx_descs(conn);

	if (conn->ibc_rxs) {
		LIBCFS_FREE(conn->ibc_rxs,
			    IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
	}

	if (conn->ibc_connvars)
		LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));

	if (conn->ibc_hdev)
		kiblnd_hdev_decref(conn->ibc_hdev);

	/* See CAVEAT EMPTOR above in kiblnd_create_conn */
	if (conn->ibc_state != IBLND_CONN_INIT) {
		struct kib_net *net = peer->ibp_ni->ni_data;

		kiblnd_peer_decref(peer);
		rdma_destroy_id(cmid);
		atomic_dec(&net->ibn_nconns);
	}
}

int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why)
{
	struct kib_conn *conn;
	struct list_head *ctmp;
	struct list_head *cnxt;
	int count = 0;

	list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
		conn = list_entry(ctmp, struct kib_conn, ibc_list);

		CDEBUG(D_NET, "Closing conn -> %s, version: %x, reason: %d\n",
		       libcfs_nid2str(peer->ibp_nid),
		       conn->ibc_version, why);

		kiblnd_close_conn_locked(conn, why);
		count++;
	}

	return count;
}

int kiblnd_close_stale_conns_locked(struct kib_peer *peer,
				    int version, __u64 incarnation)
{
	struct kib_conn *conn;
	struct list_head *ctmp;
	struct list_head *cnxt;
	int count = 0;

	list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
		conn = list_entry(ctmp, struct kib_conn, ibc_list);

		if (conn->ibc_version == version &&
		    conn->ibc_incarnation == incarnation)
			continue;

		CDEBUG(D_NET,
		       "Closing stale conn -> %s version: %x, incarnation:%#llx(%x, %#llx)\n",
		       libcfs_nid2str(peer->ibp_nid),
		       conn->ibc_version, conn->ibc_incarnation,
		       version, incarnation);

		kiblnd_close_conn_locked(conn, -ESTALE);
		count++;
	}

	return count;
}
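
/*
 * When a specific NID is given, kiblnd_nid2peerlist() names the single
 * hash bucket that can hold it, so lo == hi and only that chain is
 * scanned; LNET_NID_ANY walks every bucket (the same pattern as
 * kiblnd_del_peer() above).
 */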
static int kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
{
	struct kib_peer *peer;
	struct list_head *ptmp;
	struct list_head *pnxt;
	int lo;
	int hi;
	int i;
	unsigned long flags;
	int count = 0;

	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

	if (nid != LNET_NID_ANY) {
		lo = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
		hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
	} else {
		lo = 0;
		hi = kiblnd_data.kib_peer_hash_size - 1;
	}

	for (i = lo; i <= hi; i++) {
		list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
			peer = list_entry(ptmp, struct kib_peer, ibp_list);
			LASSERT(!kiblnd_peer_idle(peer));

			if (peer->ibp_ni != ni)
				continue;

			if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
				continue;

			count += kiblnd_close_peer_conns_locked(peer, 0);
		}
	}

	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

	/* wildcards always succeed */
	if (nid == LNET_NID_ANY)
		return 0;

	return !count ? -ENOENT : 0;
}

static int kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
{
	struct libcfs_ioctl_data *data = arg;
	int rc = -EINVAL;

	switch (cmd) {
	case IOC_LIBCFS_GET_PEER: {
		lnet_nid_t nid = 0;
		int count = 0;

		rc = kiblnd_get_peer_info(ni, data->ioc_count,
					  &nid, &count);
		data->ioc_nid = nid;
		data->ioc_count = count;
		break;
	}

	case IOC_LIBCFS_DEL_PEER: {
		rc = kiblnd_del_peer(ni, data->ioc_nid);
		break;
	}
	case IOC_LIBCFS_GET_CONN: {
		struct kib_conn *conn;

		rc = 0;
		conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
		if (!conn) {
			rc = -ENOENT;
			break;
		}

		LASSERT(conn->ibc_cmid);
		data->ioc_nid = conn->ibc_peer->ibp_nid;
		if (!conn->ibc_cmid->route.path_rec)
			data->ioc_u32[0] = 0;	/* iWarp has no path MTU */
		else
			data->ioc_u32[0] =
				ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu);
		kiblnd_conn_decref(conn);
		break;
	}
	case IOC_LIBCFS_CLOSE_CONNECTION: {
		rc = kiblnd_close_matching_conns(ni, data->ioc_nid);
		break;
	}

	default:
		break;
	}

	return rc;
}
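
/*
 * LNet "query" hook: report when this peer was last known alive. A
 * peer absent from the hash is not persistent, so a NULL tx is
 * launched purely to trigger peer creation and connection
 * establishment; the connection attempt updates liveness later.
 */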
static void kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, unsigned long *when)
{
	unsigned long last_alive = 0;
	unsigned long now = cfs_time_current();
	rwlock_t *glock = &kiblnd_data.kib_global_lock;
	struct kib_peer *peer;
	unsigned long flags;

	read_lock_irqsave(glock, flags);

	peer = kiblnd_find_peer_locked(nid);
	if (peer)
		last_alive = peer->ibp_last_alive;

	read_unlock_irqrestore(glock, flags);

	if (last_alive)
		*when = last_alive;

	/*
	 * peer is not persistent in hash, trigger peer creation
	 * and connection establishment with a NULL tx
	 */
	if (!peer)
		kiblnd_launch_tx(ni, NULL, nid);

	CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
	       libcfs_nid2str(nid), peer,
	       last_alive ? cfs_duration_sec(now - last_alive) : -1);
}

static void kiblnd_free_pages(struct kib_pages *p)
{
	int npages = p->ibp_npages;
	int i;

	for (i = 0; i < npages; i++) {
		if (p->ibp_pages[i])
			__free_page(p->ibp_pages[i]);
	}

	LIBCFS_FREE(p, offsetof(struct kib_pages, ibp_pages[npages]));
}

int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages)
{
	struct kib_pages *p;
	int i;

	LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
			 offsetof(struct kib_pages, ibp_pages[npages]));
	if (!p) {
		CERROR("Can't allocate descriptor for %d pages\n", npages);
		return -ENOMEM;
	}

	memset(p, 0, offsetof(struct kib_pages, ibp_pages[npages]));
	p->ibp_npages = npages;

	for (i = 0; i < npages; i++) {
		p->ibp_pages[i] = alloc_pages_node(
					cfs_cpt_spread_node(lnet_cpt_table(), cpt),
					GFP_NOFS, 0);
		if (!p->ibp_pages[i]) {
			CERROR("Can't allocate page %d of %d\n", i, npages);
			kiblnd_free_pages(p);
			return -ENOMEM;
		}
	}

	*pp = p;
	return 0;
}
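
/*
 * RX descriptors are carved out of the pre-allocated page set:
 * IBLND_MSG_SIZE divides PAGE_SIZE evenly, so message buffers are
 * packed back-to-back and never straddle a page boundary. Each buffer
 * is DMA-mapped individually for the device to deposit incoming
 * messages into.
 */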
void kiblnd_unmap_rx_descs(struct kib_conn *conn)
{
	struct kib_rx *rx;
	int i;

	LASSERT(conn->ibc_rxs);
	LASSERT(conn->ibc_hdev);

	for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
		rx = &conn->ibc_rxs[i];

		LASSERT(rx->rx_nob >= 0);	/* not posted */

		kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev,
					KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
							  rx->rx_msgaddr),
					IBLND_MSG_SIZE, DMA_FROM_DEVICE);
	}

	kiblnd_free_pages(conn->ibc_rx_pages);

	conn->ibc_rx_pages = NULL;
}

void kiblnd_map_rx_descs(struct kib_conn *conn)
{
	struct kib_rx *rx;
	struct page *pg;
	int pg_off;
	int ipg;
	int i;

	for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn); i++) {
		pg = conn->ibc_rx_pages->ibp_pages[ipg];
		rx = &conn->ibc_rxs[i];

		rx->rx_conn = conn;
		rx->rx_msg = (struct kib_msg *)(((char *)page_address(pg)) + pg_off);

		rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
						       rx->rx_msg,
						       IBLND_MSG_SIZE,
						       DMA_FROM_DEVICE);
		LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
						  rx->rx_msgaddr));
		KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);

		CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n",
		       i, rx->rx_msg, rx->rx_msgaddr,
		       (__u64)(page_to_phys(pg) + pg_off));

		pg_off += IBLND_MSG_SIZE;
		LASSERT(pg_off <= PAGE_SIZE);

		if (pg_off == PAGE_SIZE) {
			pg_off = 0;
			ipg++;
			LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn));
		}
	}
}

static void kiblnd_unmap_tx_pool(struct kib_tx_pool *tpo)
{
	struct kib_hca_dev *hdev = tpo->tpo_hdev;
	struct kib_tx *tx;
	int i;

	LASSERT(!tpo->tpo_pool.po_allocated);

	if (!hdev)
		return;

	for (i = 0; i < tpo->tpo_pool.po_size; i++) {
		tx = &tpo->tpo_tx_descs[i];
		kiblnd_dma_unmap_single(hdev->ibh_ibdev,
					KIBLND_UNMAP_ADDR(tx, tx_msgunmap,
							  tx->tx_msgaddr),
					IBLND_MSG_SIZE, DMA_TO_DEVICE);
	}

	kiblnd_hdev_decref(hdev);
	tpo->tpo_hdev = NULL;
}

static struct kib_hca_dev *kiblnd_current_hdev(struct kib_dev *dev)
{
	struct kib_hca_dev *hdev;
	unsigned long flags;
	int i = 0;

	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
	while (dev->ibd_failover) {
		read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
		if (!(i++ % 50))
			CDEBUG(D_NET, "%s: Wait for failover\n",
			       dev->ibd_ifname);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(cfs_time_seconds(1) / 100);

		read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
	}

	kiblnd_hdev_addref_locked(dev->ibd_hdev);
	hdev = dev->ibd_hdev;

	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

	return hdev;
}

static void kiblnd_map_tx_pool(struct kib_tx_pool *tpo)
{
	struct kib_pages *txpgs = tpo->tpo_tx_pages;
	struct kib_pool *pool = &tpo->tpo_pool;
	struct kib_net *net = pool->po_owner->ps_net;
	struct kib_dev *dev;
	struct page *page;
	struct kib_tx *tx;
	int page_offset;
	int ipage;
	int i;

	LASSERT(net);

	dev = net->ibn_dev;

	/* pre-mapped messages are not bigger than 1 page */
	CLASSERT(IBLND_MSG_SIZE <= PAGE_SIZE);

	/* No fancy arithmetic when we do the buffer calculations */
	CLASSERT(!(PAGE_SIZE % IBLND_MSG_SIZE));

	tpo->tpo_hdev = kiblnd_current_hdev(dev);

	for (ipage = page_offset = i = 0; i < pool->po_size; i++) {
		page = txpgs->ibp_pages[ipage];
		tx = &tpo->tpo_tx_descs[i];

		tx->tx_msg = (struct kib_msg *)(((char *)page_address(page)) +
				page_offset);

		tx->tx_msgaddr = kiblnd_dma_map_single(
			tpo->tpo_hdev->ibh_ibdev, tx->tx_msg,
			IBLND_MSG_SIZE, DMA_TO_DEVICE);
		LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
						  tx->tx_msgaddr));
		KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);

		list_add(&tx->tx_list, &pool->po_free_list);

		page_offset += IBLND_MSG_SIZE;
		LASSERT(page_offset <= PAGE_SIZE);

		if (page_offset == PAGE_SIZE) {
			page_offset = 0;
			ipage++;
			LASSERT(ipage <= txpgs->ibp_npages);
		}
	}
}
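
/*
 * Pick the memory region for an RDMA descriptor: return the device's
 * global MR, or NULL when the descriptor should instead be mapped on
 * demand. That happens when lnd_map_on_demand is configured (mod > 0)
 * and the descriptor has at least as many fragments as the negotiated
 * (or configured) threshold; the caller then falls back to FMR or
 * FastReg mapping.
 */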
James Simmons8d9de3f2016-06-10 16:13:39 -04001283struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd,
Jeremy Filizetti2fb44f22016-03-02 18:53:24 -05001284 int negotiated_nfrags)
Peng Taod7e09d02013-05-02 16:46:55 +08001285{
James Simmons8d9de3f2016-06-10 16:13:39 -04001286 struct kib_net *net = ni->ni_data;
1287 struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
Amir Shehata32c8deb82016-05-06 21:30:28 -04001288 struct lnet_ioctl_config_o2iblnd_tunables *tunables;
1289 __u16 nfrags;
1290 int mod;
1291
1292 tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
1293 mod = tunables->lnd_map_on_demand;
1294 nfrags = (negotiated_nfrags != -1) ? negotiated_nfrags : mod;
Jeremy Filizetti2fb44f22016-03-02 18:53:24 -05001295
Amir Shehata7cadcc72016-03-02 17:02:03 -05001296 LASSERT(hdev->ibh_mrs);
Peng Taod7e09d02013-05-02 16:46:55 +08001297
Amir Shehata32c8deb82016-05-06 21:30:28 -04001298 if (mod > 0 && nfrags <= rd->rd_nfrags)
Peng Taod7e09d02013-05-02 16:46:55 +08001299 return NULL;
1300
Amir Shehata7cadcc72016-03-02 17:02:03 -05001301 return hdev->ibh_mrs;
Peng Taod7e09d02013-05-02 16:46:55 +08001302}
1303
James Simmons8d9de3f2016-06-10 16:13:39 -04001304static void kiblnd_destroy_fmr_pool(struct kib_fmr_pool *fpo)
Peng Taod7e09d02013-05-02 16:46:55 +08001305{
Dmitry Eremin8daab0a2016-05-05 14:53:00 -04001306 LASSERT(!fpo->fpo_map_count);
Peng Taod7e09d02013-05-02 16:46:55 +08001307
Dmitry Eremin80e05b32016-05-05 14:53:07 -04001308 if (fpo->fpo_is_fmr) {
1309 if (fpo->fmr.fpo_fmr_pool)
1310 ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool);
1311 } else {
1312 struct kib_fast_reg_descriptor *frd, *tmp;
1313 int i = 0;
Peng Taod7e09d02013-05-02 16:46:55 +08001314
Dmitry Eremin80e05b32016-05-05 14:53:07 -04001315 list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
1316 frd_list) {
1317 list_del(&frd->frd_list);
1318 ib_dereg_mr(frd->frd_mr);
1319 LIBCFS_FREE(frd, sizeof(*frd));
1320 i++;
1321 }
1322 if (i < fpo->fast_reg.fpo_pool_size)
1323 CERROR("FastReg pool still has %d regions registered\n",
1324 fpo->fast_reg.fpo_pool_size - i);
1325 }
Peng Taod7e09d02013-05-02 16:46:55 +08001326
Dmitry Eremin8daab0a2016-05-05 14:53:00 -04001327 if (fpo->fpo_hdev)
1328 kiblnd_hdev_decref(fpo->fpo_hdev);
Peng Taod7e09d02013-05-02 16:46:55 +08001329
Dmitry Eremin8daab0a2016-05-05 14:53:00 -04001330 LIBCFS_FREE(fpo, sizeof(*fpo));
Peng Taod7e09d02013-05-02 16:46:55 +08001331}
1332
Guillaume Matheronfebe73b2015-04-02 19:35:45 +02001333static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
Peng Taod7e09d02013-05-02 16:46:55 +08001334{
James Simmons8d9de3f2016-06-10 16:13:39 -04001335 struct kib_fmr_pool *fpo, *tmp;
Peng Taod7e09d02013-05-02 16:46:55 +08001336
Dmitry Eremin0d33ec52016-05-05 14:53:01 -04001337 list_for_each_entry_safe(fpo, tmp, head, fpo_list) {
Dmitry Eremin8daab0a2016-05-05 14:53:00 -04001338 list_del(&fpo->fpo_list);
1339 kiblnd_destroy_fmr_pool(fpo);
Peng Taod7e09d02013-05-02 16:46:55 +08001340 }
1341}
1342
Amir Shehata32c8deb82016-05-06 21:30:28 -04001343static int
1344kiblnd_fmr_pool_size(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
1345 int ncpts)
Peng Taod7e09d02013-05-02 16:46:55 +08001346{
Amir Shehata32c8deb82016-05-06 21:30:28 -04001347 int size = tunables->lnd_fmr_pool_size / ncpts;
Peng Taod7e09d02013-05-02 16:46:55 +08001348
1349 return max(IBLND_FMR_POOL, size);
1350}
1351
Amir Shehata32c8deb82016-05-06 21:30:28 -04001352static int
1353kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
1354 int ncpts)
Peng Taod7e09d02013-05-02 16:46:55 +08001355{
Amir Shehata32c8deb82016-05-06 21:30:28 -04001356 int size = tunables->lnd_fmr_flush_trigger / ncpts;
Peng Taod7e09d02013-05-02 16:46:55 +08001357
1358 return max(IBLND_FMR_POOL_FLUSH, size);
1359}
1360
static int kiblnd_alloc_fmr_pool(struct kib_fmr_poolset *fps, struct kib_fmr_pool *fpo)
{
	struct ib_fmr_pool_param param = {
		.max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE,
		.page_shift = PAGE_SHIFT,
		.access = (IB_ACCESS_LOCAL_WRITE |
			   IB_ACCESS_REMOTE_WRITE),
		.pool_size = fps->fps_pool_size,
		.dirty_watermark = fps->fps_flush_trigger,
		.flush_function = NULL,
		.flush_arg = NULL,
		.cache = !!fps->fps_cache };
	int rc = 0;

	fpo->fmr.fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd,
						   &param);
	if (IS_ERR(fpo->fmr.fpo_fmr_pool)) {
		rc = PTR_ERR(fpo->fmr.fpo_fmr_pool);
		if (rc != -ENOSYS)
			CERROR("Failed to create FMR pool: %d\n", rc);
		else
			CERROR("FMRs are not supported\n");
	}

	return rc;
}

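/*
 * Populate a FastReg-mode pool with fps_pool_size descriptors, each
 * owning an IB_MR_TYPE_MEM_REG MR big enough for a maximal LNet
 * payload; unwind all allocations on failure.
 */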
static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps, struct kib_fmr_pool *fpo)
{
	struct kib_fast_reg_descriptor *frd, *tmp;
	int i, rc;

	INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
	fpo->fast_reg.fpo_pool_size = 0;
	for (i = 0; i < fps->fps_pool_size; i++) {
		LIBCFS_CPT_ALLOC(frd, lnet_cpt_table(), fps->fps_cpt,
				 sizeof(*frd));
		if (!frd) {
			CERROR("Failed to allocate a new fast_reg descriptor\n");
			rc = -ENOMEM;
			goto out;
		}

		frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd,
					  IB_MR_TYPE_MEM_REG,
					  LNET_MAX_PAYLOAD / PAGE_SIZE);
		if (IS_ERR(frd->frd_mr)) {
			rc = PTR_ERR(frd->frd_mr);
			CERROR("Failed to allocate ib_alloc_mr: %d\n", rc);
			frd->frd_mr = NULL;
			goto out_middle;
		}

		frd->frd_valid = true;

		list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
		fpo->fast_reg.fpo_pool_size++;
	}

	return 0;

out_middle:
	if (frd->frd_mr)
		ib_dereg_mr(frd->frd_mr);
	LIBCFS_FREE(frd, sizeof(*frd));

out:
	list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
				 frd_list) {
		list_del(&frd->frd_list);
		ib_dereg_mr(frd->frd_mr);
		LIBCFS_FREE(frd, sizeof(*frd));
	}

	return rc;
}

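/*
 * Create one pool for the poolset: probe the device for native FMR
 * verbs first and fall back to FastReg when only the memory
 * management extensions are advertised.
 */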
static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps,
				  struct kib_fmr_pool **pp_fpo)
{
	struct kib_dev *dev = fps->fps_net->ibn_dev;
	struct ib_device_attr *dev_attr;
	struct kib_fmr_pool *fpo;
	int rc;

	LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
	if (!fpo)
		return -ENOMEM;

	fpo->fpo_hdev = kiblnd_current_hdev(dev);
	dev_attr = &fpo->fpo_hdev->ibh_ibdev->attrs;

	/* Check for FMR or FastReg support */
	fpo->fpo_is_fmr = 0;
	if (fpo->fpo_hdev->ibh_ibdev->alloc_fmr &&
	    fpo->fpo_hdev->ibh_ibdev->dealloc_fmr &&
	    fpo->fpo_hdev->ibh_ibdev->map_phys_fmr &&
	    fpo->fpo_hdev->ibh_ibdev->unmap_fmr) {
		LCONSOLE_INFO("Using FMR for registration\n");
		fpo->fpo_is_fmr = 1;
	} else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
		LCONSOLE_INFO("Using FastReg for registration\n");
	} else {
		rc = -ENOSYS;
		LCONSOLE_ERROR_MSG(rc, "IB device does not support FMRs or FastRegs, can't register memory\n");
		goto out_fpo;
	}

	if (fpo->fpo_is_fmr)
		rc = kiblnd_alloc_fmr_pool(fps, fpo);
	else
		rc = kiblnd_alloc_freg_pool(fps, fpo);
	if (rc)
		goto out_fpo;

	fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
	fpo->fpo_owner = fps;
	*pp_fpo = fpo;

	return 0;

out_fpo:
	kiblnd_hdev_decref(fpo->fpo_hdev);
	LIBCFS_FREE(fpo, sizeof(*fpo));
	return rc;
}

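/*
 * Fail every pool in the poolset: idle pools go on the caller's
 * zombie list for immediate destruction, busy ones park on the failed
 * list until their last mapping is released.
 */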
static void kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps,
				    struct list_head *zombies)
{
	if (!fps->fps_net) /* initialized? */
		return;

	spin_lock(&fps->fps_lock);

	while (!list_empty(&fps->fps_pool_list)) {
		struct kib_fmr_pool *fpo = list_entry(fps->fps_pool_list.next,
						      struct kib_fmr_pool, fpo_list);
		fpo->fpo_failed = 1;
		list_del(&fpo->fpo_list);
		if (!fpo->fpo_map_count)
			list_add(&fpo->fpo_list, zombies);
		else
			list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
	}

	spin_unlock(&fps->fps_lock);
}

static void kiblnd_fini_fmr_poolset(struct kib_fmr_poolset *fps)
{
	if (fps->fps_net) { /* initialized? */
		kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
		kiblnd_destroy_fmr_pool_list(&fps->fps_pool_list);
	}
}

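/*
 * Set up a per-CPT FMR poolset from the tunables and create its first
 * (persistent) pool.
 */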
static int
kiblnd_init_fmr_poolset(struct kib_fmr_poolset *fps, int cpt, int ncpts,
			struct kib_net *net,
			struct lnet_ioctl_config_o2iblnd_tunables *tunables)
{
	struct kib_fmr_pool *fpo;
	int rc;

	memset(fps, 0, sizeof(*fps));

	fps->fps_net = net;
	fps->fps_cpt = cpt;

	fps->fps_pool_size = kiblnd_fmr_pool_size(tunables, ncpts);
	fps->fps_flush_trigger = kiblnd_fmr_flush_trigger(tunables, ncpts);
	fps->fps_cache = tunables->lnd_fmr_cache;

	spin_lock_init(&fps->fps_lock);
	INIT_LIST_HEAD(&fps->fps_pool_list);
	INIT_LIST_HEAD(&fps->fps_failed_pool_list);

	rc = kiblnd_create_fmr_pool(fps, &fpo);
	if (!rc)
		list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);

	return rc;
}

static int kiblnd_fmr_pool_is_idle(struct kib_fmr_pool *fpo, unsigned long now)
{
	if (fpo->fpo_map_count) /* still in use */
		return 0;
	if (fpo->fpo_failed)
		return 1;
	return cfs_time_aftereq(now, fpo->fpo_deadline);
}

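/*
 * Flatten the RDMA descriptor's fragments into tx_pages as HCA-page
 * aligned addresses; returns the number of pages filled in.
 */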
static int
kiblnd_map_tx_pages(struct kib_tx *tx, struct kib_rdma_desc *rd)
{
	__u64 *pages = tx->tx_pages;
	struct kib_hca_dev *hdev;
	int npages;
	int size;
	int i;

	hdev = tx->tx_pool->tpo_hdev;

	for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
		for (size = 0; size < rd->rd_frags[i].rf_nob;
		     size += hdev->ibh_page_size) {
			pages[npages++] = (rd->rd_frags[i].rf_addr &
					   hdev->ibh_page_mask) + size;
		}
	}

	return npages;
}

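/*
 * Release a mapping back to its pool: unmap the FMR (flushing the
 * pool if the tx failed), or mark the FastReg descriptor invalid and
 * return it to the free list; finally retire any non-persistent pools
 * that have gone idle.
 */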
void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status)
{
	LIST_HEAD(zombies);
	struct kib_fmr_pool *fpo = fmr->fmr_pool;
	struct kib_fmr_poolset *fps;
	unsigned long now = cfs_time_current();
	struct kib_fmr_pool *tmp;
	int rc;

	if (!fpo)
		return;

	fps = fpo->fpo_owner;
	if (fpo->fpo_is_fmr) {
		if (fmr->fmr_pfmr) {
			rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
			LASSERT(!rc);
			fmr->fmr_pfmr = NULL;
		}

		if (status) {
			rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool);
			LASSERT(!rc);
		}
	} else {
		struct kib_fast_reg_descriptor *frd = fmr->fmr_frd;

		if (frd) {
			frd->frd_valid = false;
			spin_lock(&fps->fps_lock);
			list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
			spin_unlock(&fps->fps_lock);
			fmr->fmr_frd = NULL;
		}
	}
	fmr->fmr_pool = NULL;

	spin_lock(&fps->fps_lock);
	fpo->fpo_map_count--; /* decref the pool */

	list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
		/* the first pool is persistent */
		if (fps->fps_pool_list.next == &fpo->fpo_list)
			continue;

		if (kiblnd_fmr_pool_is_idle(fpo, now)) {
			list_move(&fpo->fpo_list, &zombies);
			fps->fps_version++;
		}
	}
	spin_unlock(&fps->fps_lock);

	if (!list_empty(&zombies))
		kiblnd_destroy_fmr_pool_list(&zombies);
}

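/*
 * Map an RDMA descriptor through the poolset.  Pools are tried in
 * order: FMR pools map the flattened page list, FastReg pools take a
 * free descriptor and build its (re)registration work requests.  When
 * every pool is busy, the poolset is grown and the mapping retried.
 */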
int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
			struct kib_rdma_desc *rd, __u32 nob, __u64 iov,
			struct kib_fmr *fmr)
{
	__u64 *pages = tx->tx_pages;
	bool is_rx = (rd != tx->tx_rd);
	bool tx_pages_mapped = false;
	struct kib_fmr_pool *fpo;
	int npages = 0;
	__u64 version;
	int rc;

 again:
	spin_lock(&fps->fps_lock);
	version = fps->fps_version;
	list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
		fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
		fpo->fpo_map_count++;

		if (fpo->fpo_is_fmr) {
			struct ib_pool_fmr *pfmr;

			spin_unlock(&fps->fps_lock);

			if (!tx_pages_mapped) {
				npages = kiblnd_map_tx_pages(tx, rd);
				tx_pages_mapped = true;
			}

			pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool,
						    pages, npages, iov);
			if (likely(!IS_ERR(pfmr))) {
				fmr->fmr_key = is_rx ? pfmr->fmr->rkey :
						       pfmr->fmr->lkey;
				fmr->fmr_frd = NULL;
				fmr->fmr_pfmr = pfmr;
				fmr->fmr_pool = fpo;
				return 0;
			}
			rc = PTR_ERR(pfmr);
		} else {
			if (!list_empty(&fpo->fast_reg.fpo_pool_list)) {
				struct kib_fast_reg_descriptor *frd;
				struct ib_reg_wr *wr;
				struct ib_mr *mr;
				int n;

				frd = list_first_entry(&fpo->fast_reg.fpo_pool_list,
						       struct kib_fast_reg_descriptor,
						       frd_list);
				list_del(&frd->frd_list);
				spin_unlock(&fps->fps_lock);

				mr = frd->frd_mr;

				if (!frd->frd_valid) {
					__u32 key = is_rx ? mr->rkey : mr->lkey;
					struct ib_send_wr *inv_wr;

					inv_wr = &frd->frd_inv_wr;
					memset(inv_wr, 0, sizeof(*inv_wr));
					inv_wr->opcode = IB_WR_LOCAL_INV;
					inv_wr->wr_id = IBLND_WID_MR;
					inv_wr->ex.invalidate_rkey = key;

					/* Bump the key */
					key = ib_inc_rkey(key);
					ib_update_fast_reg_key(mr, key);
				}

				n = ib_map_mr_sg(mr, tx->tx_frags,
						 tx->tx_nfrags, NULL, PAGE_SIZE);
				if (unlikely(n != tx->tx_nfrags)) {
					CERROR("Failed to map mr %d/%d elements\n",
					       n, tx->tx_nfrags);
					return n < 0 ? n : -EINVAL;
				}

				mr->iova = iov;

				/* Prepare FastReg WR */
				wr = &frd->frd_fastreg_wr;
				memset(wr, 0, sizeof(*wr));
				wr->wr.opcode = IB_WR_REG_MR;
				wr->wr.wr_id = IBLND_WID_MR;
				wr->wr.num_sge = 0;
				wr->wr.send_flags = 0;
				wr->mr = mr;
				wr->key = is_rx ? mr->rkey : mr->lkey;
				wr->access = (IB_ACCESS_LOCAL_WRITE |
					      IB_ACCESS_REMOTE_WRITE);

				fmr->fmr_key = is_rx ? mr->rkey : mr->lkey;
				fmr->fmr_frd = frd;
				fmr->fmr_pfmr = NULL;
				fmr->fmr_pool = fpo;
				return 0;
			}
			spin_unlock(&fps->fps_lock);
			rc = -EBUSY;
		}

		spin_lock(&fps->fps_lock);
		fpo->fpo_map_count--;
		if (rc != -EAGAIN) {
			spin_unlock(&fps->fps_lock);
			return rc;
		}

		/* EAGAIN and ... */
		if (version != fps->fps_version) {
			spin_unlock(&fps->fps_lock);
			goto again;
		}
	}

	if (fps->fps_increasing) {
		spin_unlock(&fps->fps_lock);
		CDEBUG(D_NET, "Another thread is allocating new FMR pool, waiting for it to complete\n");
		schedule();
		goto again;
	}

	if (time_before(cfs_time_current(), fps->fps_next_retry)) {
		/* someone failed recently */
		spin_unlock(&fps->fps_lock);
		return -EAGAIN;
	}

	fps->fps_increasing = 1;
	spin_unlock(&fps->fps_lock);

	CDEBUG(D_NET, "Allocate new FMR pool\n");
	rc = kiblnd_create_fmr_pool(fps, &fpo);
	spin_lock(&fps->fps_lock);
	fps->fps_increasing = 0;
	if (!rc) {
		fps->fps_version++;
		list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
	} else {
		fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
	}
	spin_unlock(&fps->fps_lock);

	goto again;
}

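/*
 * Generic pool/poolset machinery, used below for the TX pools: a
 * poolset owns a list of pools that grows on demand and sheds idle or
 * failed pools.
 */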
static void kiblnd_fini_pool(struct kib_pool *pool)
{
	LASSERT(list_empty(&pool->po_free_list));
	LASSERT(!pool->po_allocated);

	CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
}

static void kiblnd_init_pool(struct kib_poolset *ps, struct kib_pool *pool, int size)
{
	CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);

	memset(pool, 0, sizeof(*pool));
	INIT_LIST_HEAD(&pool->po_free_list);
	pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
	pool->po_owner = ps;
	pool->po_size = size;
}

static void kiblnd_destroy_pool_list(struct list_head *head)
{
	struct kib_pool *pool;

	while (!list_empty(head)) {
		pool = list_entry(head->next, struct kib_pool, po_list);
		list_del(&pool->po_list);

		LASSERT(pool->po_owner);
		pool->po_owner->ps_pool_destroy(pool);
	}
}

static void kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies)
{
	if (!ps->ps_net) /* initialized? */
		return;

	spin_lock(&ps->ps_lock);
	while (!list_empty(&ps->ps_pool_list)) {
		struct kib_pool *po = list_entry(ps->ps_pool_list.next,
						 struct kib_pool, po_list);
		po->po_failed = 1;
		list_del(&po->po_list);
		if (!po->po_allocated)
			list_add(&po->po_list, zombies);
		else
			list_add(&po->po_list, &ps->ps_failed_pool_list);
	}
	spin_unlock(&ps->ps_lock);
}

static void kiblnd_fini_poolset(struct kib_poolset *ps)
{
	if (ps->ps_net) { /* initialized? */
		kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
		kiblnd_destroy_pool_list(&ps->ps_pool_list);
	}
}

static int kiblnd_init_poolset(struct kib_poolset *ps, int cpt,
			       struct kib_net *net, char *name, int size,
			       kib_ps_pool_create_t po_create,
			       kib_ps_pool_destroy_t po_destroy,
			       kib_ps_node_init_t nd_init,
			       kib_ps_node_fini_t nd_fini)
{
	struct kib_pool *pool;
	int rc;

	memset(ps, 0, sizeof(*ps));

	ps->ps_cpt = cpt;
	ps->ps_net = net;
	ps->ps_pool_create = po_create;
	ps->ps_pool_destroy = po_destroy;
	ps->ps_node_init = nd_init;
	ps->ps_node_fini = nd_fini;
	ps->ps_pool_size = size;
	if (strlcpy(ps->ps_name, name, sizeof(ps->ps_name))
	    >= sizeof(ps->ps_name))
		return -E2BIG;
	spin_lock_init(&ps->ps_lock);
	INIT_LIST_HEAD(&ps->ps_pool_list);
	INIT_LIST_HEAD(&ps->ps_failed_pool_list);

	rc = ps->ps_pool_create(ps, size, &pool);
	if (!rc)
		list_add(&pool->po_list, &ps->ps_pool_list);
	else
		CERROR("Failed to create the first pool for %s\n", ps->ps_name);

	return rc;
}

static int kiblnd_pool_is_idle(struct kib_pool *pool, unsigned long now)
{
	if (pool->po_allocated) /* still in use */
		return 0;
	if (pool->po_failed)
		return 1;
	return cfs_time_aftereq(now, pool->po_deadline);
}

void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node)
{
	LIST_HEAD(zombies);
	struct kib_poolset *ps = pool->po_owner;
	struct kib_pool *tmp;
	unsigned long now = cfs_time_current();

	spin_lock(&ps->ps_lock);

	if (ps->ps_node_fini)
		ps->ps_node_fini(pool, node);

	LASSERT(pool->po_allocated > 0);
	list_add(node, &pool->po_free_list);
	pool->po_allocated--;

	list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
		/* the first pool is persistent */
		if (ps->ps_pool_list.next == &pool->po_list)
			continue;

		if (kiblnd_pool_is_idle(pool, now))
			list_move(&pool->po_list, &zombies);
	}
	spin_unlock(&ps->ps_lock);

	if (!list_empty(&zombies))
		kiblnd_destroy_pool_list(&zombies);
}

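/*
 * Take a free node from the poolset, creating a new pool when all
 * existing ones are exhausted.  Only one thread grows the poolset at
 * a time; the rest back off with an exponentially increasing sleep.
 */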
struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps)
{
	struct list_head *node;
	struct kib_pool *pool;
	unsigned int interval = 1;
	unsigned long time_before;
	unsigned int trips = 0;
	int rc;

 again:
	spin_lock(&ps->ps_lock);
	list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
		if (list_empty(&pool->po_free_list))
			continue;

		pool->po_allocated++;
		pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
		node = pool->po_free_list.next;
		list_del(node);

		if (ps->ps_node_init) {
			/* still hold the lock */
			ps->ps_node_init(pool, node);
		}
		spin_unlock(&ps->ps_lock);
		return node;
	}

	/* no available tx pool and ... */
	if (ps->ps_increasing) {
		/* another thread is allocating a new pool */
		spin_unlock(&ps->ps_lock);
		trips++;
		CDEBUG(D_NET, "Another thread is allocating new %s pool, waiting %d HZs for it to complete. trips = %d\n",
		       ps->ps_name, interval, trips);

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(interval);
		if (interval < cfs_time_seconds(1))
			interval *= 2;

		goto again;
	}

	if (time_before(cfs_time_current(), ps->ps_next_retry)) {
		/* someone failed recently */
		spin_unlock(&ps->ps_lock);
		return NULL;
	}

	ps->ps_increasing = 1;
	spin_unlock(&ps->ps_lock);

	CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
	time_before = cfs_time_current();
	rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
	CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete\n",
	       cfs_time_current() - time_before);

	spin_lock(&ps->ps_lock);
	ps->ps_increasing = 0;
	if (!rc) {
		list_add_tail(&pool->po_list, &ps->ps_pool_list);
	} else {
		ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
		CERROR("Can't allocate new %s pool: out of memory\n",
		       ps->ps_name);
	}
	spin_unlock(&ps->ps_lock);

	goto again;
}

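/*
 * TX pool destructor: unmap and free the premapped message pages,
 * then every per-descriptor allocation (page list, frags, work
 * requests, sges, RDMA descriptor).
 */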
static void kiblnd_destroy_tx_pool(struct kib_pool *pool)
{
	struct kib_tx_pool *tpo = container_of(pool, struct kib_tx_pool, tpo_pool);
	int i;

	LASSERT(!pool->po_allocated);

	if (tpo->tpo_tx_pages) {
		kiblnd_unmap_tx_pool(tpo);
		kiblnd_free_pages(tpo->tpo_tx_pages);
	}

	if (!tpo->tpo_tx_descs)
		goto out;

	for (i = 0; i < pool->po_size; i++) {
		struct kib_tx *tx = &tpo->tpo_tx_descs[i];

		list_del(&tx->tx_list);
		if (tx->tx_pages)
			LIBCFS_FREE(tx->tx_pages,
				    LNET_MAX_IOV *
				    sizeof(*tx->tx_pages));
		if (tx->tx_frags)
			LIBCFS_FREE(tx->tx_frags,
				    (1 + IBLND_MAX_RDMA_FRAGS) *
				    sizeof(*tx->tx_frags));
		if (tx->tx_wrq)
			LIBCFS_FREE(tx->tx_wrq,
				    (1 + IBLND_MAX_RDMA_FRAGS) *
				    sizeof(*tx->tx_wrq));
		if (tx->tx_sge)
			LIBCFS_FREE(tx->tx_sge,
				    (1 + IBLND_MAX_RDMA_FRAGS) *
				    sizeof(*tx->tx_sge));
		if (tx->tx_rd)
			LIBCFS_FREE(tx->tx_rd,
				    offsetof(struct kib_rdma_desc,
					     rd_frags[IBLND_MAX_RDMA_FRAGS]));
	}

	LIBCFS_FREE(tpo->tpo_tx_descs,
		    pool->po_size * sizeof(struct kib_tx));
out:
	kiblnd_fini_pool(pool);
	LIBCFS_FREE(tpo, sizeof(*tpo));
}

static int kiblnd_tx_pool_size(int ncpts)
{
	int ntx = *kiblnd_tunables.kib_ntx / ncpts;

	return max(IBLND_TX_POOL, ntx);
}

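/*
 * TX pool constructor: premap the message buffers and allocate each
 * descriptor's page list (FMR nets only), scatterlist, work requests,
 * sges and RDMA descriptor; any failure destroys the partial pool.
 */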
static int kiblnd_create_tx_pool(struct kib_poolset *ps, int size,
				 struct kib_pool **pp_po)
{
	int i;
	int npg;
	struct kib_pool *pool;
	struct kib_tx_pool *tpo;

	LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
	if (!tpo) {
		CERROR("Failed to allocate TX pool\n");
		return -ENOMEM;
	}

	pool = &tpo->tpo_pool;
	kiblnd_init_pool(ps, pool, size);
	tpo->tpo_tx_descs = NULL;
	tpo->tpo_tx_pages = NULL;

	npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
	if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg)) {
		CERROR("Can't allocate tx pages: %d\n", npg);
		LIBCFS_FREE(tpo, sizeof(*tpo));
		return -ENOMEM;
	}

	LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
			 size * sizeof(struct kib_tx));
	if (!tpo->tpo_tx_descs) {
		CERROR("Can't allocate %d tx descriptors\n", size);
		ps->ps_pool_destroy(pool);
		return -ENOMEM;
	}

	memset(tpo->tpo_tx_descs, 0, size * sizeof(struct kib_tx));

	for (i = 0; i < size; i++) {
		struct kib_tx *tx = &tpo->tpo_tx_descs[i];

		tx->tx_pool = tpo;
		if (ps->ps_net->ibn_fmr_ps) {
			LIBCFS_CPT_ALLOC(tx->tx_pages,
					 lnet_cpt_table(), ps->ps_cpt,
					 LNET_MAX_IOV * sizeof(*tx->tx_pages));
			if (!tx->tx_pages)
				break;
		}

		LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
				 (1 + IBLND_MAX_RDMA_FRAGS) *
				 sizeof(*tx->tx_frags));
		if (!tx->tx_frags)
			break;

		sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1);

		LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
				 (1 + IBLND_MAX_RDMA_FRAGS) *
				 sizeof(*tx->tx_wrq));
		if (!tx->tx_wrq)
			break;

		LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
				 (1 + IBLND_MAX_RDMA_FRAGS) *
				 sizeof(*tx->tx_sge));
		if (!tx->tx_sge)
			break;

		LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
				 offsetof(struct kib_rdma_desc,
					  rd_frags[IBLND_MAX_RDMA_FRAGS]));
		if (!tx->tx_rd)
			break;
	}

	if (i == size) {
		kiblnd_map_tx_pool(tpo);
		*pp_po = pool;
		return 0;
	}

	ps->ps_pool_destroy(pool);
	return -ENOMEM;
}

static void kiblnd_tx_init(struct kib_pool *pool, struct list_head *node)
{
	struct kib_tx_poolset *tps = container_of(pool->po_owner,
						  struct kib_tx_poolset,
						  tps_poolset);
	struct kib_tx *tx = list_entry(node, struct kib_tx, tx_list);

	tx->tx_cookie = tps->tps_next_tx_cookie++;
}

static void kiblnd_net_fini_pools(struct kib_net *net)
{
	int i;

	cfs_cpt_for_each(i, lnet_cpt_table()) {
		struct kib_tx_poolset *tps;
		struct kib_fmr_poolset *fps;

		if (net->ibn_tx_ps) {
			tps = net->ibn_tx_ps[i];
			kiblnd_fini_poolset(&tps->tps_poolset);
		}

		if (net->ibn_fmr_ps) {
			fps = net->ibn_fmr_ps[i];
			kiblnd_fini_fmr_poolset(fps);
		}
	}

	if (net->ibn_tx_ps) {
		cfs_percpt_free(net->ibn_tx_ps);
		net->ibn_tx_ps = NULL;
	}

	if (net->ibn_fmr_ps) {
		cfs_percpt_free(net->ibn_fmr_ps);
		net->ibn_fmr_ps = NULL;
	}
}

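/*
 * Build the per-CPT poolsets for a net: FMR poolsets first when
 * map-on-demand is enabled, then the TX poolsets that depend on them
 * (see LU-2268).
 */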
static int kiblnd_net_init_pools(struct kib_net *net, lnet_ni_t *ni, __u32 *cpts,
				 int ncpts)
{
	struct lnet_ioctl_config_o2iblnd_tunables *tunables;
	unsigned long flags;
	int cpt;
	int rc;
	int i;

	tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;

	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
	if (!tunables->lnd_map_on_demand) {
		read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
		goto create_tx_pool;
	}

	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

	if (tunables->lnd_fmr_pool_size < *kiblnd_tunables.kib_ntx / 4) {
		CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
		       tunables->lnd_fmr_pool_size,
		       *kiblnd_tunables.kib_ntx / 4);
		rc = -EINVAL;
		goto failed;
	}

	/*
	 * TX pool must be created later than FMR, see LU-2268
	 * for details
	 */
	LASSERT(!net->ibn_tx_ps);

	/*
	 * premapping can fail if ibd_nmr > 1, so we always create
	 * FMR pool and map-on-demand if premapping failed
	 *
	 * cfs_percpt_alloc is creating an array of struct kib_fmr_poolset
	 * The number of struct kib_fmr_poolsets created is equal to the
	 * number of CPTs that exist, i.e. net->ibn_fmr_ps[cpt].
	 */
	net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
					   sizeof(struct kib_fmr_poolset));
	if (!net->ibn_fmr_ps) {
		CERROR("Failed to allocate FMR pool array\n");
		rc = -ENOMEM;
		goto failed;
	}

	for (i = 0; i < ncpts; i++) {
		cpt = !cpts ? i : cpts[i];
		rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, ncpts,
					     net, tunables);
		if (rc) {
			CERROR("Can't initialize FMR pool for CPT %d: %d\n",
			       cpt, rc);
			goto failed;
		}
	}

	if (i > 0)
		LASSERT(i == ncpts);

 create_tx_pool:
	/*
	 * cfs_percpt_alloc is creating an array of struct kib_tx_poolset
	 * The number of struct kib_tx_poolsets created is equal to the
	 * number of CPTs that exist, i.e. net->ibn_tx_ps[cpt].
	 */
	net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
					  sizeof(struct kib_tx_poolset));
	if (!net->ibn_tx_ps) {
		CERROR("Failed to allocate tx pool array\n");
		rc = -ENOMEM;
		goto failed;
	}

	for (i = 0; i < ncpts; i++) {
		cpt = !cpts ? i : cpts[i];
		rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset,
					 cpt, net, "TX",
					 kiblnd_tx_pool_size(ncpts),
					 kiblnd_create_tx_pool,
					 kiblnd_destroy_tx_pool,
					 kiblnd_tx_init, NULL);
		if (rc) {
			CERROR("Can't initialize TX pool for CPT %d: %d\n",
			       cpt, rc);
			goto failed;
		}
	}

	return 0;
 failed:
	kiblnd_net_fini_pools(net);
	LASSERT(rc);
	return rc;
}

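/*
 * Cache the HCA's page geometry and MR limit; only a device whose
 * single MR can cover the full 64-bit address space is accepted.
 */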
static int kiblnd_hdev_get_attr(struct kib_hca_dev *hdev)
{
	/*
	 * It's safe to assume a HCA can handle a page size
	 * matching that of the native system
	 */
	hdev->ibh_page_shift = PAGE_SHIFT;
	hdev->ibh_page_size = 1 << PAGE_SHIFT;
	hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1);

	hdev->ibh_mr_size = hdev->ibh_ibdev->attrs.max_mr_size;
	if (hdev->ibh_mr_size == ~0ULL) {
		hdev->ibh_mr_shift = 64;
		return 0;
	}

	CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size);
	return -EINVAL;
}

static void kiblnd_hdev_cleanup_mrs(struct kib_hca_dev *hdev)
{
	if (!hdev->ibh_mrs)
		return;

	ib_dereg_mr(hdev->ibh_mrs);

	hdev->ibh_mrs = NULL;
}

void kiblnd_hdev_destroy(struct kib_hca_dev *hdev)
{
	kiblnd_hdev_cleanup_mrs(hdev);

	if (hdev->ibh_pd)
		ib_dealloc_pd(hdev->ibh_pd);

	if (hdev->ibh_cmid)
		rdma_destroy_id(hdev->ibh_cmid);

	LIBCFS_FREE(hdev, sizeof(*hdev));
}

static int kiblnd_hdev_setup_mrs(struct kib_hca_dev *hdev)
{
	struct ib_mr *mr;
	int rc;
	int acflags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;

	rc = kiblnd_hdev_get_attr(hdev);
	if (rc)
		return rc;

	mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
	if (IS_ERR(mr)) {
		CERROR("Failed ib_get_dma_mr : %ld\n", PTR_ERR(mr));
		kiblnd_hdev_cleanup_mrs(hdev);
		return PTR_ERR(mr);
	}

	hdev->ibh_mrs = mr;

	return 0;
}

/* DUMMY */
static int kiblnd_dummy_callback(struct rdma_cm_id *cmid,
				 struct rdma_cm_event *event)
{
	return 0;
}

static int kiblnd_dev_need_failover(struct kib_dev *dev)
{
	struct rdma_cm_id *cmid;
	struct sockaddr_in srcaddr;
	struct sockaddr_in dstaddr;
	int rc;

	if (!dev->ibd_hdev || /* initializing */
	    !dev->ibd_hdev->ibh_cmid || /* listener is dead */
	    *kiblnd_tunables.kib_dev_failover > 1) /* debugging */
		return 1;

	/*
	 * XXX: it's UGLY, but I don't have better way to find
	 * ib-bonding HCA failover because:
	 *
	 * a. no reliable CM event for HCA failover...
	 * b. no OFED API to get ib_device for current net_device...
	 *
	 * We have only two choices at this point:
	 *
	 * a. rdma_bind_addr(), it will conflict with listener cmid
	 * b. rdma_resolve_addr() to zero addr
	 */
	cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP,
				     IB_QPT_RC);
	if (IS_ERR(cmid)) {
		rc = PTR_ERR(cmid);
		CERROR("Failed to create cmid for failover: %d\n", rc);
		return rc;
	}

	memset(&srcaddr, 0, sizeof(srcaddr));
	srcaddr.sin_family = AF_INET;
	srcaddr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);

	memset(&dstaddr, 0, sizeof(dstaddr));
	dstaddr.sin_family = AF_INET;
	rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
			       (struct sockaddr *)&dstaddr, 1);
	if (rc || !cmid->device) {
		CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
		       dev->ibd_ifname, &dev->ibd_ifip,
		       cmid->device, rc);
		rdma_destroy_id(cmid);
		return rc;
	}

	rc = dev->ibd_hdev->ibh_ibdev != cmid->device; /* true for failover */
	rdma_destroy_id(cmid);

	return rc;
}

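/*
 * Fail over to whichever HCA now backs the IPoIB interface: drop the
 * old listener, bind a new cmid/PD/MR set to the same address, swap
 * it in under the global lock, and fail the pools built on the old
 * HCA so they get recreated against the new one.
 */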
int kiblnd_dev_failover(struct kib_dev *dev)
{
	LIST_HEAD(zombie_tpo);
	LIST_HEAD(zombie_ppo);
	LIST_HEAD(zombie_fpo);
	struct rdma_cm_id *cmid = NULL;
	struct kib_hca_dev *hdev = NULL;
	struct ib_pd *pd;
	struct kib_net *net;
	struct sockaddr_in addr;
	unsigned long flags;
	int rc = 0;
	int i;

	LASSERT(*kiblnd_tunables.kib_dev_failover > 1 ||
		dev->ibd_can_failover || !dev->ibd_hdev);

	rc = kiblnd_dev_need_failover(dev);
	if (rc <= 0)
		goto out;

	if (dev->ibd_hdev &&
	    dev->ibd_hdev->ibh_cmid) {
		/*
		 * XXX it's not good to close old listener at here,
		 * because we can fail to create new listener.
		 * But we have to close it now, otherwise rdma_bind_addr
		 * will return EADDRINUSE... How crap!
		 */
		write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

		cmid = dev->ibd_hdev->ibh_cmid;
		/*
		 * make next schedule of kiblnd_dev_need_failover()
		 * return 1 for me
		 */
		dev->ibd_hdev->ibh_cmid = NULL;
		write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

		rdma_destroy_id(cmid);
	}

	cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP,
				     IB_QPT_RC);
	if (IS_ERR(cmid)) {
		rc = PTR_ERR(cmid);
		CERROR("Failed to create cmid for failover: %d\n", rc);
		goto out;
	}

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = (__force u32)htonl(dev->ibd_ifip);
	addr.sin_port = htons(*kiblnd_tunables.kib_service);

	/* Bind to failover device or port */
	rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
	if (rc || !cmid->device) {
		CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
		       dev->ibd_ifname, &dev->ibd_ifip,
		       cmid->device, rc);
		rdma_destroy_id(cmid);
		goto out;
	}

	LIBCFS_ALLOC(hdev, sizeof(*hdev));
	if (!hdev) {
		CERROR("Failed to allocate kib_hca_dev\n");
		rdma_destroy_id(cmid);
		rc = -ENOMEM;
		goto out;
	}

	atomic_set(&hdev->ibh_ref, 1);
	hdev->ibh_dev = dev;
	hdev->ibh_cmid = cmid;
	hdev->ibh_ibdev = cmid->device;

	pd = ib_alloc_pd(cmid->device, 0);
	if (IS_ERR(pd)) {
		rc = PTR_ERR(pd);
		CERROR("Can't allocate PD: %d\n", rc);
		goto out;
	}

	hdev->ibh_pd = pd;

	rc = rdma_listen(cmid, 0);
	if (rc) {
		CERROR("Can't start new listener: %d\n", rc);
		goto out;
	}

	rc = kiblnd_hdev_setup_mrs(hdev);
	if (rc) {
		CERROR("Can't setup device: %d\n", rc);
		goto out;
	}

	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

	swap(dev->ibd_hdev, hdev); /* take over the refcount */

	list_for_each_entry(net, &dev->ibd_nets, ibn_list) {
		cfs_cpt_for_each(i, lnet_cpt_table()) {
			kiblnd_fail_poolset(&net->ibn_tx_ps[i]->tps_poolset,
					    &zombie_tpo);

			if (net->ibn_fmr_ps)
				kiblnd_fail_fmr_poolset(net->ibn_fmr_ps[i],
							&zombie_fpo);
		}
	}

	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 out:
	if (!list_empty(&zombie_tpo))
		kiblnd_destroy_pool_list(&zombie_tpo);
	if (!list_empty(&zombie_ppo))
		kiblnd_destroy_pool_list(&zombie_ppo);
	if (!list_empty(&zombie_fpo))
		kiblnd_destroy_fmr_pool_list(&zombie_fpo);
	if (hdev)
		kiblnd_hdev_decref(hdev);

	if (rc)
		dev->ibd_failed_failover++;
	else
		dev->ibd_failed_failover = 0;

	return rc;
}

void kiblnd_destroy_dev(struct kib_dev *dev)
{
	LASSERT(!dev->ibd_nnets);
	LASSERT(list_empty(&dev->ibd_nets));

	list_del(&dev->ibd_fail_list);
	list_del(&dev->ibd_list);

	if (dev->ibd_hdev)
		kiblnd_hdev_decref(dev->ibd_hdev);

	LIBCFS_FREE(dev, sizeof(*dev));
}

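/*
 * Create a kib_dev for the named IPoIB interface: record its address,
 * note whether it is a bonding master (and hence can fail over), and
 * run an initial "failover" to bind the listener.
 */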
static struct kib_dev *kiblnd_create_dev(char *ifname)
{
	struct net_device *netdev;
	struct kib_dev *dev;
	__u32 netmask;
	__u32 ip;
	int up;
	int rc;

	rc = lnet_ipif_query(ifname, &up, &ip, &netmask);
	if (rc) {
		CERROR("Can't query IPoIB interface %s: %d\n",
		       ifname, rc);
		return NULL;
	}

	if (!up) {
		CERROR("Can't query IPoIB interface %s: it's down\n", ifname);
		return NULL;
	}

	LIBCFS_ALLOC(dev, sizeof(*dev));
	if (!dev)
		return NULL;

	netdev = dev_get_by_name(&init_net, ifname);
	if (!netdev) {
		dev->ibd_can_failover = 0;
	} else {
		dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER);
		dev_put(netdev);
	}

	INIT_LIST_HEAD(&dev->ibd_nets);
	INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
	INIT_LIST_HEAD(&dev->ibd_fail_list);
	dev->ibd_ifip = ip;
	strcpy(&dev->ibd_ifname[0], ifname);

	/* initialize the device */
	rc = kiblnd_dev_failover(dev);
	if (rc) {
		CERROR("Can't initialize device: %d\n", rc);
		LIBCFS_FREE(dev, sizeof(*dev));
		return NULL;
	}

	list_add_tail(&dev->ibd_list, &kiblnd_data.kib_devs);
	return dev;
}

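/*
 * Module-wide teardown: flag all threads to exit, wake the scheduler,
 * connd and failover waitqueues, wait for the thread count to reach
 * zero, then free the peer hash table and scheduler data.
 */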
static void kiblnd_base_shutdown(void)
{
	struct kib_sched_info *sched;
	int i;

	LASSERT(list_empty(&kiblnd_data.kib_devs));

	switch (kiblnd_data.kib_init) {
	default:
		LBUG();

	case IBLND_INIT_ALL:
	case IBLND_INIT_DATA:
		LASSERT(kiblnd_data.kib_peers);
		for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
			LASSERT(list_empty(&kiblnd_data.kib_peers[i]));
		LASSERT(list_empty(&kiblnd_data.kib_connd_zombies));
		LASSERT(list_empty(&kiblnd_data.kib_connd_conns));
		LASSERT(list_empty(&kiblnd_data.kib_reconn_list));
		LASSERT(list_empty(&kiblnd_data.kib_reconn_wait));

		/* flag threads to terminate; wake and wait for them to die */
		kiblnd_data.kib_shutdown = 1;

		/*
		 * NB: we really want to stop scheduler threads net by net
		 * instead of the whole module, this should be improved
		 * with dynamic configuration LNet
		 */
		cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
			wake_up_all(&sched->ibs_waitq);

		wake_up_all(&kiblnd_data.kib_connd_waitq);
		wake_up_all(&kiblnd_data.kib_failover_waitq);

		i = 2;
		while (atomic_read(&kiblnd_data.kib_nthreads)) {
			i++;
			/* power of 2 ? */
			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
			       "Waiting for %d threads to terminate\n",
			       atomic_read(&kiblnd_data.kib_nthreads));
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(cfs_time_seconds(1));
		}

		/* fall through */

	case IBLND_INIT_NOTHING:
		break;
	}

	if (kiblnd_data.kib_peers) {
		LIBCFS_FREE(kiblnd_data.kib_peers,
			    sizeof(struct list_head) *
			    kiblnd_data.kib_peer_hash_size);
	}

	if (kiblnd_data.kib_scheds)
		cfs_percpt_free(kiblnd_data.kib_scheds);

	kiblnd_data.kib_init = IBLND_INIT_NOTHING;
	module_put(THIS_MODULE);
}

Frank Zago439b4d42016-03-02 17:02:00 -05002654static void kiblnd_shutdown(lnet_ni_t *ni)
Peng Taod7e09d02013-05-02 16:46:55 +08002655{
James Simmons8d9de3f2016-06-10 16:13:39 -04002656 struct kib_net *net = ni->ni_data;
Mike Shueyec3d17c2015-05-19 10:14:36 -04002657 rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
2658 int i;
2659 unsigned long flags;
Peng Taod7e09d02013-05-02 16:46:55 +08002660
2661 LASSERT(kiblnd_data.kib_init == IBLND_INIT_ALL);
2662
James Simmons06ace262016-02-12 12:06:08 -05002663 if (!net)
Peng Taod7e09d02013-05-02 16:46:55 +08002664 goto out;
2665
Peng Taod7e09d02013-05-02 16:46:55 +08002666 write_lock_irqsave(g_lock, flags);
2667 net->ibn_shutdown = 1;
2668 write_unlock_irqrestore(g_lock, flags);
2669
2670 switch (net->ibn_init) {
2671 default:
2672 LBUG();
2673
2674 case IBLND_INIT_ALL:
2675 /* nuke all existing peers within this net */
2676 kiblnd_del_peer(ni, LNET_NID_ANY);
2677
2678 /* Wait for all peer state to clean up */
2679 i = 2;
James Simmons5fd88332016-02-12 12:06:09 -05002680 while (atomic_read(&net->ibn_npeers)) {
Peng Taod7e09d02013-05-02 16:46:55 +08002681 i++;
2682 CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
2683 "%s: waiting for %d peers to disconnect\n",
2684 libcfs_nid2str(ni->ni_nid),
2685 atomic_read(&net->ibn_npeers));
Peng Taod3caf4d2014-03-18 21:05:56 +08002686 set_current_state(TASK_UNINTERRUPTIBLE);
2687 schedule_timeout(cfs_time_seconds(1));
Peng Taod7e09d02013-05-02 16:46:55 +08002688 }
2689
2690 kiblnd_net_fini_pools(net);
2691
2692 write_lock_irqsave(g_lock, flags);
2693 LASSERT(net->ibn_dev->ibd_nnets > 0);
2694 net->ibn_dev->ibd_nnets--;
2695 list_del(&net->ibn_list);
2696 write_unlock_irqrestore(g_lock, flags);
2697
2698 /* fall through */
2699
2700 case IBLND_INIT_NOTHING:
James Simmons5fd88332016-02-12 12:06:09 -05002701 LASSERT(!atomic_read(&net->ibn_nconns));
Peng Taod7e09d02013-05-02 16:46:55 +08002702
James Simmons5fd88332016-02-12 12:06:09 -05002703 if (net->ibn_dev && !net->ibn_dev->ibd_nnets)
Peng Taod7e09d02013-05-02 16:46:55 +08002704 kiblnd_destroy_dev(net->ibn_dev);
2705
2706 break;
2707 }
2708
Peng Taod7e09d02013-05-02 16:46:55 +08002709 net->ibn_init = IBLND_INIT_NOTHING;
2710 ni->ni_data = NULL;
2711
2712 LIBCFS_FREE(net, sizeof(*net));
2713
2714out:
2715 if (list_empty(&kiblnd_data.kib_devs))
2716 kiblnd_base_shutdown();
Peng Taod7e09d02013-05-02 16:46:55 +08002717}
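
/*
 * A note on the teardown idiom used above and in kiblnd_base_shutdown():
 * kib_init/ibn_init record how far initialisation got, and the switch
 * statements fall through from the most advanced state down to
 * IBLND_INIT_NOTHING, so a partially set-up module/net is unwound by
 * exactly the steps that completed.  A minimal sketch of the pattern
 * (illustrative only):
 *
 *	switch (state) {
 *	case INIT_ALL:
 *		undo_late_setup();
 *		// fall through
 *	case INIT_DATA:
 *		undo_early_setup();
 *		// fall through
 *	case INIT_NOTHING:
 *		break;
 *	}
 */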

static int kiblnd_base_startup(void)
{
	struct kib_sched_info *sched;
	int rc;
	int i;

	LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING);

	try_module_get(THIS_MODULE);
	/* zero pointers, flags etc */
	memset(&kiblnd_data, 0, sizeof(kiblnd_data));

	rwlock_init(&kiblnd_data.kib_global_lock);

	INIT_LIST_HEAD(&kiblnd_data.kib_devs);
	INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);

	kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
	LIBCFS_ALLOC(kiblnd_data.kib_peers,
		     sizeof(struct list_head) * kiblnd_data.kib_peer_hash_size);
	if (!kiblnd_data.kib_peers)
		goto failed;
	for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
		INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);

	spin_lock_init(&kiblnd_data.kib_connd_lock);
	INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
	INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
	INIT_LIST_HEAD(&kiblnd_data.kib_reconn_list);
	INIT_LIST_HEAD(&kiblnd_data.kib_reconn_wait);

	init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
	init_waitqueue_head(&kiblnd_data.kib_failover_waitq);

	kiblnd_data.kib_scheds = cfs_percpt_alloc(lnet_cpt_table(),
						  sizeof(*sched));
	if (!kiblnd_data.kib_scheds)
		goto failed;

	cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
		int nthrs;

		spin_lock_init(&sched->ibs_lock);
		INIT_LIST_HEAD(&sched->ibs_conns);
		init_waitqueue_head(&sched->ibs_waitq);

		nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
		if (*kiblnd_tunables.kib_nscheds > 0) {
			nthrs = min(nthrs, *kiblnd_tunables.kib_nscheds);
		} else {
			/*
			 * Cap at half of the CPUs; the other half is
			 * reserved for upper-layer modules.
			 */
			nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
		}

		sched->ibs_nthreads_max = nthrs;
		sched->ibs_cpt = i;
	}
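
	/*
	 * Worked example of the sizing above (illustrative; assumes
	 * the usual IBLND_N_SCHED == 2 from o2iblnd.h): on a CPT
	 * spanning 8 CPUs with kib_nscheds unset,
	 *
	 *	nthrs = min(max(2, 8 >> 1), 8) = 4
	 *
	 * i.e. half of the CPUs in the partition, but never fewer
	 * than IBLND_N_SCHED and never more than the CPU count.
	 */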

	kiblnd_data.kib_error_qpa.qp_state = IB_QPS_ERR;

	/* lists/ptrs/locks initialised */
	kiblnd_data.kib_init = IBLND_INIT_DATA;
	/*****************************************************/

	rc = kiblnd_thread_start(kiblnd_connd, NULL, "kiblnd_connd");
	if (rc) {
		CERROR("Can't spawn o2iblnd connd: %d\n", rc);
		goto failed;
	}

	if (*kiblnd_tunables.kib_dev_failover)
		rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
					 "kiblnd_failover");

	if (rc) {
		CERROR("Can't spawn o2iblnd failover thread: %d\n", rc);
		goto failed;
	}

	/* flag everything initialised */
	kiblnd_data.kib_init = IBLND_INIT_ALL;
	/*****************************************************/

	return 0;

 failed:
	kiblnd_base_shutdown();
	return -ENETDOWN;
}

static int kiblnd_start_schedulers(struct kib_sched_info *sched)
{
	int rc = 0;
	int nthrs;
	int i;

	if (!sched->ibs_nthreads) {
		if (*kiblnd_tunables.kib_nscheds > 0) {
			nthrs = sched->ibs_nthreads_max;
		} else {
			nthrs = cfs_cpt_weight(lnet_cpt_table(),
					       sched->ibs_cpt);
			nthrs = min(max(IBLND_N_SCHED, nthrs >> 1), nthrs);
			nthrs = min(IBLND_N_SCHED_HIGH, nthrs);
		}
	} else {
		LASSERT(sched->ibs_nthreads <= sched->ibs_nthreads_max);
		/*
		 * Add at most one thread for a new interface: the
		 * comparison below yields 1 if there is still headroom
		 * under ibs_nthreads_max and 0 otherwise.
		 */
		nthrs = sched->ibs_nthreads < sched->ibs_nthreads_max;
	}

	for (i = 0; i < nthrs; i++) {
		long id;
		char name[20];

		id = KIB_THREAD_ID(sched->ibs_cpt, sched->ibs_nthreads + i);
		snprintf(name, sizeof(name), "kiblnd_sd_%02ld_%02ld",
			 KIB_THREAD_CPT(id), KIB_THREAD_TID(id));
		rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
		if (!rc)
			continue;

		CERROR("Can't spawn thread %d for scheduler[%d]: %d\n",
		       sched->ibs_cpt, sched->ibs_nthreads + i, rc);
		break;
	}

	sched->ibs_nthreads += i;
	return rc;
}
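
/*
 * Scheduler thread ids pack the CPT and the per-CPT thread index into
 * a single long.  Assuming the usual helpers from o2iblnd.h (a 16-bit
 * shift), roughly:
 *
 *	KIB_THREAD_ID(cpt, tid)  ->  (cpt) << 16 | (tid)
 *	KIB_THREAD_CPT(id)       ->  (id) >> 16
 *	KIB_THREAD_TID(id)       ->  (id) & 0xffff
 *
 * so, e.g., the third scheduler started on CPT 1 gets tid 2 and is
 * named "kiblnd_sd_01_02".
 */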

static int kiblnd_dev_start_threads(struct kib_dev *dev, int newdev,
				    __u32 *cpts, int ncpts)
{
	int cpt;
	int rc;
	int i;

	for (i = 0; i < ncpts; i++) {
		struct kib_sched_info *sched;

		cpt = !cpts ? i : cpts[i];
		sched = kiblnd_data.kib_scheds[cpt];

		if (!newdev && sched->ibs_nthreads > 0)
			continue;

		rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]);
		if (rc) {
			CERROR("Failed to start scheduler threads for %s\n",
			       dev->ibd_ifname);
			return rc;
		}
	}
	return 0;
}

static struct kib_dev *kiblnd_dev_search(char *ifname)
{
	struct kib_dev *alias = NULL;
	struct kib_dev *dev;
	char *colon;
	char *colon2;

	colon = strchr(ifname, ':');
	list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
		if (!strcmp(&dev->ibd_ifname[0], ifname))
			return dev;

		if (alias)
			continue;

		colon2 = strchr(dev->ibd_ifname, ':');
		if (colon)
			*colon = 0;
		if (colon2)
			*colon2 = 0;

		if (!strcmp(&dev->ibd_ifname[0], ifname))
			alias = dev;

		if (colon)
			*colon = ':';
		if (colon2)
			*colon2 = ':';
	}
	return alias;
}
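
/*
 * kiblnd_dev_search() matches aliases by temporarily NUL-terminating
 * both names at the first ':', comparing, then restoring the colons.
 * An exact match wins immediately; an alias match is only a fallback.
 * Illustrative behaviour, with a hypothetical device list:
 *
 *	devs = { "ib0", "ib1:1" }
 *	kiblnd_dev_search("ib0")   -> "ib0"   (exact match)
 *	kiblnd_dev_search("ib1:2") -> "ib1:1" (both reduce to "ib1")
 *	kiblnd_dev_search("ib2")   -> NULL
 */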

static int kiblnd_startup(lnet_ni_t *ni)
{
	char *ifname;
	struct kib_dev *ibdev = NULL;
	struct kib_net *net;
	struct timespec64 tv;
	unsigned long flags;
	int rc;
	int newdev;

	LASSERT(ni->ni_lnd == &the_o2iblnd);

	if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
		rc = kiblnd_base_startup();
		if (rc)
			return rc;
	}

	LIBCFS_ALLOC(net, sizeof(*net));
	ni->ni_data = net;
	if (!net)
		goto net_failed;

	ktime_get_real_ts64(&tv);
	net->ibn_incarnation = tv.tv_sec * USEC_PER_SEC +
			       tv.tv_nsec / NSEC_PER_USEC;
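
	/*
	 * Illustrative note: the incarnation is the net's creation
	 * time in microseconds since the epoch.  It is carried in the
	 * o2iblnd handshake (source/destination stamps), presumably so
	 * that a peer can tell a restarted net from the instance it
	 * was previously connected to.
	 */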

	rc = kiblnd_tunables_setup(ni);
	if (rc)
		goto net_failed;

	if (ni->ni_interfaces[0]) {
		/* Use the IPoIB interface specified in 'networks=' */

		CLASSERT(LNET_MAX_INTERFACES > 1);
		if (ni->ni_interfaces[1]) {
			CERROR("Multiple interfaces not supported\n");
			goto failed;
		}

		ifname = ni->ni_interfaces[0];
	} else {
		ifname = *kiblnd_tunables.kib_default_ipif;
	}

	if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
		CERROR("IPoIB interface name too long: %s\n", ifname);
		goto failed;
	}

	ibdev = kiblnd_dev_search(ifname);

	newdev = !ibdev;
	/* create a new kib_dev even for an alias (e.g. "ib0:1") */
	if (!ibdev || strcmp(&ibdev->ibd_ifname[0], ifname))
		ibdev = kiblnd_create_dev(ifname);

	if (!ibdev)
		goto failed;

	net->ibn_dev = ibdev;
	ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);

	rc = kiblnd_dev_start_threads(ibdev, newdev,
				      ni->ni_cpts, ni->ni_ncpts);
	if (rc)
		goto failed;

	rc = kiblnd_net_init_pools(net, ni, ni->ni_cpts, ni->ni_ncpts);
	if (rc) {
		CERROR("Failed to initialize NI pools: %d\n", rc);
		goto failed;
	}

	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
	ibdev->ibd_nnets++;
	list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

	net->ibn_init = IBLND_INIT_ALL;

	return 0;

failed:
	if (!net->ibn_dev && ibdev)
		kiblnd_destroy_dev(ibdev);

net_failed:
	kiblnd_shutdown(ni);

	CDEBUG(D_NET, "kiblnd_startup failed\n");
	return -ENETDOWN;
}

static lnd_t the_o2iblnd = {
	.lnd_type = O2IBLND,
	.lnd_startup = kiblnd_startup,
	.lnd_shutdown = kiblnd_shutdown,
	.lnd_ctl = kiblnd_ctl,
	.lnd_query = kiblnd_query,
	.lnd_send = kiblnd_send,
	.lnd_recv = kiblnd_recv,
};

static void __exit ko2iblnd_exit(void)
{
	lnet_unregister_lnd(&the_o2iblnd);
}

static int __init ko2iblnd_init(void)
{
	CLASSERT(sizeof(struct kib_msg) <= IBLND_MSG_SIZE);
	CLASSERT(offsetof(struct kib_msg,
			  ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
		 <= IBLND_MSG_SIZE);
	CLASSERT(offsetof(struct kib_msg,
			  ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
		 <= IBLND_MSG_SIZE);
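
	/*
	 * The CLASSERT()s above are compile-time checks that the
	 * largest on-wire variants of struct kib_msg (a GET request or
	 * PUT ack carrying IBLND_MAX_RDMA_FRAGS fragment descriptors)
	 * fit in the IBLND_MSG_SIZE receive buffers.  CLASSERT is
	 * libcfs's static assertion; a rough sketch of the idiom:
	 *
	 *	#define CLASSERT(cond) \
	 *		do { switch (42) { case (cond): case 0: break; } } while (0)
	 *
	 * which fails to compile (duplicate case labels) when cond
	 * evaluates to 0.
	 */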

	kiblnd_tunables_init();

	lnet_register_lnd(&the_o2iblnd);

	return 0;
}

MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("OpenIB gen2 LNet Network Driver");
MODULE_VERSION("2.7.0");
MODULE_LICENSE("GPL");

module_init(ko2iblnd_init);
module_exit(ko2iblnd_exit);