/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/idr.h>
#include <linux/workqueue.h>
#include <uapi/linux/if_ether.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
#include <uapi/rdma/ib_user_sa.h>
#include <rdma/ib_marshall.h>
#include <rdma/ib_addr.h>
#include "sa.h"
#include "core_priv.h"

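/*
 * Bounds (in milliseconds) for the timeout applied to path queries that
 * are handed to the RDMA netlink "local service" (a userspace resolver
 * such as ibacm) before falling back to a MAD sent to the SA.  The
 * default can be adjusted at runtime through an RDMA_NL_LS netlink
 * request; see ib_nl_handle_set_timeout() below.
 */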
#define IB_SA_LOCAL_SVC_TIMEOUT_MIN		100
#define IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT		2000
#define IB_SA_LOCAL_SVC_TIMEOUT_MAX		200000
static int sa_local_svc_timeout_ms = IB_SA_LOCAL_SVC_TIMEOUT_DEFAULT;

struct ib_sa_sm_ah {
	struct ib_ah        *ah;
	struct kref          ref;
	u16		     pkey_index;
	u8		     src_path_mask;
};

struct ib_sa_classport_cache {
	bool valid;
	struct ib_class_port_info data;
};

struct ib_sa_port {
	struct ib_mad_agent *agent;
	struct ib_sa_sm_ah  *sm_ah;
	struct work_struct   update_task;
	struct ib_sa_classport_cache classport_info;
	spinlock_t                   classport_lock; /* protects class port info set */
	spinlock_t           ah_lock;
	u8                   port_num;
};

struct ib_sa_device {
	int                     start_port, end_port;
	struct ib_event_handler event_handler;
	struct ib_sa_port port[0];
};

struct ib_sa_query {
	void (*callback)(struct ib_sa_query *, int, struct ib_sa_mad *);
	void (*release)(struct ib_sa_query *);
	struct ib_sa_client    *client;
	struct ib_sa_port      *port;
	struct ib_mad_send_buf *mad_buf;
	struct ib_sa_sm_ah     *sm_ah;
	int			id;
	u32			flags;
	struct list_head	list; /* Local svc request list */
	u32			seq; /* Local svc request sequence number */
	unsigned long		timeout; /* Local svc timeout */
	u8			path_use; /* How will the path record be used */
};

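/*
 * Flag bits for ib_sa_query.flags.  IB_SA_ENABLE_LOCAL_SERVICE marks a
 * query that should first be offered to the netlink local service;
 * IB_SA_CANCEL marks a queued netlink request that has been cancelled
 * and must be completed by the timeout worker.
 */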
#define IB_SA_ENABLE_LOCAL_SERVICE	0x00000001
#define IB_SA_CANCEL			0x00000002

struct ib_sa_service_query {
	void (*callback)(int, struct ib_sa_service_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_path_query {
	void (*callback)(int, struct ib_sa_path_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_guidinfo_query {
	void (*callback)(int, struct ib_sa_guidinfo_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_classport_info_query {
	void (*callback)(int, struct ib_class_port_info *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

struct ib_sa_mcmember_query {
	void (*callback)(int, struct ib_sa_mcmember_rec *, void *);
	void *context;
	struct ib_sa_query sa_query;
};

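/*
 * Bookkeeping for outstanding netlink resolution requests: a global
 * list ordered by timeout, a sequence counter used to match replies to
 * requests, and a dedicated workqueue that runs the timeout handler.
 */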
static LIST_HEAD(ib_nl_request_list);
static DEFINE_SPINLOCK(ib_nl_request_lock);
static atomic_t ib_nl_sa_request_seq;
static struct workqueue_struct *ib_nl_wq;
static struct delayed_work ib_nl_timed_work;
static const struct nla_policy ib_nl_policy[LS_NLA_TYPE_MAX] = {
	[LS_NLA_TYPE_PATH_RECORD]	= {.type = NLA_BINARY,
		.len = sizeof(struct ib_path_rec_data)},
	[LS_NLA_TYPE_TIMEOUT]		= {.type = NLA_U32},
	[LS_NLA_TYPE_SERVICE_ID]	= {.type = NLA_U64},
	[LS_NLA_TYPE_DGID]		= {.type = NLA_BINARY,
		.len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_SGID]		= {.type = NLA_BINARY,
		.len = sizeof(struct rdma_nla_ls_gid)},
	[LS_NLA_TYPE_TCLASS]		= {.type = NLA_U8},
	[LS_NLA_TYPE_PKEY]		= {.type = NLA_U16},
	[LS_NLA_TYPE_QOS_CLASS]		= {.type = NLA_U16},
};

static void ib_sa_add_one(struct ib_device *device);
static void ib_sa_remove_one(struct ib_device *device, void *client_data);

static struct ib_client sa_client = {
	.name   = "sa",
	.add    = ib_sa_add_one,
	.remove = ib_sa_remove_one
};

static DEFINE_SPINLOCK(idr_lock);
static DEFINE_IDR(query_idr);

static DEFINE_SPINLOCK(tid_lock);
static u32 tid;

#define PATH_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_path_rec, field),		\
	.struct_size_bytes   = sizeof ((struct ib_sa_path_rec *) 0)->field,	\
	.field_name          = "sa_path_rec:" #field

static const struct ib_field path_rec_table[] = {
	{ PATH_REC_FIELD(service_id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ PATH_REC_FIELD(dgid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(sgid),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ PATH_REC_FIELD(dlid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(slid),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(raw_traffic),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 11,
	  .offset_bits  = 1,
	  .size_bits    = 3 },
	{ PATH_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ PATH_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(traffic_class),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ PATH_REC_FIELD(reversible),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ PATH_REC_FIELD(numb_path),
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 7 },
	{ PATH_REC_FIELD(pkey),
	  .offset_words = 12,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ PATH_REC_FIELD(qos_class),
	  .offset_words = 13,
	  .offset_bits  = 0,
	  .size_bits    = 12 },
	{ PATH_REC_FIELD(sl),
	  .offset_words = 13,
	  .offset_bits  = 12,
	  .size_bits    = 4 },
	{ PATH_REC_FIELD(mtu_selector),
	  .offset_words = 13,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(mtu),
	  .offset_words = 13,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(rate_selector),
	  .offset_words = 13,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(rate),
	  .offset_words = 13,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(packet_life_time_selector),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 2 },
	{ PATH_REC_FIELD(packet_life_time),
	  .offset_words = 14,
	  .offset_bits  = 2,
	  .size_bits    = 6 },
	{ PATH_REC_FIELD(preference),
	  .offset_words = 14,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ RESERVED,
	  .offset_words = 14,
	  .offset_bits  = 16,
	  .size_bits    = 48 },
};

#define MCMEMBER_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_mcmember_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_mcmember_rec *) 0)->field,	\
	.field_name          = "sa_mcmember_rec:" #field

static const struct ib_field mcmember_rec_table[] = {
	{ MCMEMBER_REC_FIELD(mgid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(port_gid),
	  .offset_words = 4,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ MCMEMBER_REC_FIELD(qkey),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ MCMEMBER_REC_FIELD(mlid),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(mtu_selector),
	  .offset_words = 9,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(mtu),
	  .offset_words = 9,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(traffic_class),
	  .offset_words = 9,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(pkey),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ MCMEMBER_REC_FIELD(rate_selector),
	  .offset_words = 10,
	  .offset_bits  = 16,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(rate),
	  .offset_words = 10,
	  .offset_bits  = 18,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(packet_life_time_selector),
	  .offset_words = 10,
	  .offset_bits  = 24,
	  .size_bits    = 2 },
	{ MCMEMBER_REC_FIELD(packet_life_time),
	  .offset_words = 10,
	  .offset_bits  = 26,
	  .size_bits    = 6 },
	{ MCMEMBER_REC_FIELD(sl),
	  .offset_words = 11,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(flow_label),
	  .offset_words = 11,
	  .offset_bits  = 4,
	  .size_bits    = 20 },
	{ MCMEMBER_REC_FIELD(hop_limit),
	  .offset_words = 11,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ MCMEMBER_REC_FIELD(scope),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(join_state),
	  .offset_words = 12,
	  .offset_bits  = 4,
	  .size_bits    = 4 },
	{ MCMEMBER_REC_FIELD(proxy_join),
	  .offset_words = 12,
	  .offset_bits  = 8,
	  .size_bits    = 1 },
	{ RESERVED,
	  .offset_words = 12,
	  .offset_bits  = 9,
	  .size_bits    = 23 },
};

#define SERVICE_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
	.field_name          = "sa_service_rec:" #field

static const struct ib_field service_rec_table[] = {
	{ SERVICE_REC_FIELD(id),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 64 },
	{ SERVICE_REC_FIELD(gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(pkey),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ SERVICE_REC_FIELD(lease),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ SERVICE_REC_FIELD(key),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ SERVICE_REC_FIELD(name),
	  .offset_words = 12,
	  .offset_bits  = 0,
	  .size_bits    = 64*8 },
	{ SERVICE_REC_FIELD(data8),
	  .offset_words = 28,
	  .offset_bits  = 0,
	  .size_bits    = 16*8 },
	{ SERVICE_REC_FIELD(data16),
	  .offset_words = 32,
	  .offset_bits  = 0,
	  .size_bits    = 8*16 },
	{ SERVICE_REC_FIELD(data32),
	  .offset_words = 36,
	  .offset_bits  = 0,
	  .size_bits    = 4*32 },
	{ SERVICE_REC_FIELD(data64),
	  .offset_words = 40,
	  .offset_bits  = 0,
	  .size_bits    = 2*64 },
};

#define CLASSPORTINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_class_port_info, field),	\
	.struct_size_bytes   = sizeof((struct ib_class_port_info *)0)->field,	\
	.field_name          = "ib_class_port_info:" #field

static const struct ib_field classport_info_rec_table[] = {
	{ CLASSPORTINFO_REC_FIELD(base_version),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 8 },
	{ CLASSPORTINFO_REC_FIELD(class_version),
	  .offset_words = 0,
	  .offset_bits  = 8,
	  .size_bits    = 8 },
	{ CLASSPORTINFO_REC_FIELD(capability_mask),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(cap_mask2_resp_time),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_gid),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ CLASSPORTINFO_REC_FIELD(redirect_tcslfl),
	  .offset_words = 6,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_lid),
	  .offset_words = 7,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(redirect_pkey),
	  .offset_words = 7,
	  .offset_bits  = 16,
	  .size_bits    = 16 },

	{ CLASSPORTINFO_REC_FIELD(redirect_qp),
	  .offset_words = 8,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(redirect_qkey),
	  .offset_words = 9,
	  .offset_bits  = 0,
	  .size_bits    = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_gid),
	  .offset_words = 10,
	  .offset_bits  = 0,
	  .size_bits    = 128 },
	{ CLASSPORTINFO_REC_FIELD(trap_tcslfl),
	  .offset_words = 14,
	  .offset_bits  = 0,
	  .size_bits    = 32 },

	{ CLASSPORTINFO_REC_FIELD(trap_lid),
	  .offset_words = 15,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ CLASSPORTINFO_REC_FIELD(trap_pkey),
	  .offset_words = 15,
	  .offset_bits  = 16,
	  .size_bits    = 16 },

	{ CLASSPORTINFO_REC_FIELD(trap_hlqp),
	  .offset_words = 16,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ CLASSPORTINFO_REC_FIELD(trap_qkey),
	  .offset_words = 17,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
};

#define GUIDINFO_REC_FIELD(field) \
	.struct_offset_bytes = offsetof(struct ib_sa_guidinfo_rec, field),	\
	.struct_size_bytes   = sizeof((struct ib_sa_guidinfo_rec *) 0)->field,	\
	.field_name          = "sa_guidinfo_rec:" #field

static const struct ib_field guidinfo_rec_table[] = {
	{ GUIDINFO_REC_FIELD(lid),
	  .offset_words = 0,
	  .offset_bits  = 0,
	  .size_bits    = 16 },
	{ GUIDINFO_REC_FIELD(block_num),
	  .offset_words = 0,
	  .offset_bits  = 16,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res1),
	  .offset_words = 0,
	  .offset_bits  = 24,
	  .size_bits    = 8 },
	{ GUIDINFO_REC_FIELD(res2),
	  .offset_words = 1,
	  .offset_bits  = 0,
	  .size_bits    = 32 },
	{ GUIDINFO_REC_FIELD(guid_info_list),
	  .offset_words = 2,
	  .offset_bits  = 0,
	  .size_bits    = 512 },
};

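/*
 * Netlink "local service" support: path record queries flagged with
 * IB_SA_ENABLE_LOCAL_SERVICE are first multicast to the RDMA_NL_GROUP_LS
 * netlink group so that a userspace resolver can answer them from its
 * cache.  If no listener exists, the answer is bad, or the request times
 * out, the query falls back to an ordinary MAD exchange with the SA.
 */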
static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
	query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
}

static inline int ib_sa_query_cancelled(struct ib_sa_query *query)
{
	return (query->flags & IB_SA_CANCEL);
}

static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
				     struct ib_sa_query *query)
{
	struct ib_sa_path_rec *sa_rec = query->mad_buf->context[1];
	struct ib_sa_mad *mad = query->mad_buf->mad;
	ib_sa_comp_mask comp_mask = mad->sa_hdr.comp_mask;
	u16 val16;
	u64 val64;
	struct rdma_ls_resolve_header *header;

	query->mad_buf->context[1] = NULL;

	/* Construct the family header first */
	header = (struct rdma_ls_resolve_header *)
		skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
	memcpy(header->device_name, query->port->agent->device->name,
	       LS_DEVICE_NAME_MAX);
	header->port_num = query->port->port_num;

	if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
	    sa_rec->reversible != 0)
		query->path_use = LS_RESOLVE_PATH_USE_GMP;
	else
		query->path_use = LS_RESOLVE_PATH_USE_UNIDIRECTIONAL;
	header->path_use = query->path_use;

	/* Now build the attributes */
	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
		val64 = be64_to_cpu(sa_rec->service_id);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
			sizeof(val64), &val64);
	}
	if (comp_mask & IB_SA_PATH_REC_DGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_DGID,
			sizeof(sa_rec->dgid), &sa_rec->dgid);
	if (comp_mask & IB_SA_PATH_REC_SGID)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SGID,
			sizeof(sa_rec->sgid), &sa_rec->sgid);
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_TCLASS,
			sizeof(sa_rec->traffic_class), &sa_rec->traffic_class);

	if (comp_mask & IB_SA_PATH_REC_PKEY) {
		val16 = be16_to_cpu(sa_rec->pkey);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_PKEY,
			sizeof(val16), &val16);
	}
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS) {
		val16 = be16_to_cpu(sa_rec->qos_class);
		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_QOS_CLASS,
			sizeof(val16), &val16);
	}
}

static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
{
	int len = 0;

	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID)
		len += nla_total_size(sizeof(u64));
	if (comp_mask & IB_SA_PATH_REC_DGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_SGID)
		len += nla_total_size(sizeof(struct rdma_nla_ls_gid));
	if (comp_mask & IB_SA_PATH_REC_TRAFFIC_CLASS)
		len += nla_total_size(sizeof(u8));
	if (comp_mask & IB_SA_PATH_REC_PKEY)
		len += nla_total_size(sizeof(u16));
	if (comp_mask & IB_SA_PATH_REC_QOS_CLASS)
		len += nla_total_size(sizeof(u16));

	/*
	 * Make sure that at least some of the required comp_mask bits are
	 * set.
	 */
	if (WARN_ON(len == 0))
		return len;

	/* Add the family header */
	len += NLMSG_ALIGN(sizeof(struct rdma_ls_resolve_header));

	return len;
}

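/*
 * Build and multicast a single RDMA_NL_LS_OP_RESOLVE request.  Returns
 * the attribute length on success, -ENOMEM/-EMSGSIZE on allocation
 * failure, and 0 if the netlink multicast itself failed.
 */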
static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	void *data;
	int ret = 0;
	struct ib_sa_mad *mad;
	int len;

	mad = query->mad_buf->mad;
	len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
	if (len <= 0)
		return -EMSGSIZE;

	skb = nlmsg_new(len, gfp_mask);
	if (!skb)
		return -ENOMEM;

	/* Put nlmsg header only for now */
	data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS,
			    RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST);
	if (!data) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	/* Add attributes */
	ib_nl_set_path_rec_attrs(skb, query);

	/* Repair the nlmsg header length */
	nlmsg_end(skb, nlh);

	ret = ibnl_multicast(skb, nlh, RDMA_NL_GROUP_LS, gfp_mask);
	if (!ret)
		ret = len;
	else
		ret = 0;

	return ret;
}

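/*
 * Queue a query on the netlink request list and send it out.  The entry
 * is added (and the timeout armed) before the send so that a fast reply
 * can always find it; on send failure the entry is removed again and
 * -EIO is returned so the caller falls back to a MAD.
 */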
static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;
	unsigned long delay;
	int ret;

	INIT_LIST_HEAD(&query->list);
	query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);

	/* Put the request on the list first. */
	spin_lock_irqsave(&ib_nl_request_lock, flags);
	delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
	query->timeout = delay + jiffies;
	list_add_tail(&query->list, &ib_nl_request_list);
	/* Start the timeout if this is the only request */
	if (ib_nl_request_list.next == &query->list)
		queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	ret = ib_nl_send_msg(query, gfp_mask);
	if (ret <= 0) {
		ret = -EIO;
		/* Remove the request */
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		list_del(&query->list);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	} else {
		ret = 0;
	}

	return ret;
}

static int ib_nl_cancel_request(struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_sa_query *wait_query;
	int found = 0;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(wait_query, &ib_nl_request_list, list) {
		/* Let the timeout routine take care of the callback */
		if (query == wait_query) {
			query->flags |= IB_SA_CANCEL;
			query->timeout = jiffies;
			list_move(&query->list, &ib_nl_request_list);
			found = 1;
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work, 1);
			break;
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);

	return found;
}

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc);

static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
					   const struct nlmsghdr *nlh)
{
	struct ib_mad_send_wc mad_send_wc;
	struct ib_sa_mad *mad = NULL;
	const struct nlattr *head, *curr;
	struct ib_path_rec_data *rec;
	int len, rem;
	u32 mask = 0;
	int status = -EIO;

	if (query->callback) {
		head = (const struct nlattr *) nlmsg_data(nlh);
		len = nlmsg_len(nlh);
		switch (query->path_use) {
		case LS_RESOLVE_PATH_USE_UNIDIRECTIONAL:
			mask = IB_PATH_PRIMARY | IB_PATH_OUTBOUND;
			break;

		case LS_RESOLVE_PATH_USE_ALL:
		case LS_RESOLVE_PATH_USE_GMP:
		default:
			mask = IB_PATH_PRIMARY | IB_PATH_GMP |
				IB_PATH_BIDIRECTIONAL;
			break;
		}
		nla_for_each_attr(curr, head, len, rem) {
			if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) {
				rec = nla_data(curr);
				/*
				 * Get the first one.  In the future, we may
				 * need to get up to 6 path records.
				 */
				if ((rec->flags & mask) == mask) {
					mad = query->mad_buf->mad;
					mad->mad_hdr.method |=
						IB_MGMT_METHOD_RESP;
					memcpy(mad->data, rec->path_rec,
					       sizeof(rec->path_rec));
					status = 0;
					break;
				}
			}
		}
		query->callback(query, status, mad);
	}

	mad_send_wc.send_buf = query->mad_buf;
	mad_send_wc.status = IB_WC_SUCCESS;
	send_handler(query->mad_buf->mad_agent, &mad_send_wc);
}

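/*
 * Delayed-work handler: expire netlink requests whose timeout has
 * passed.  Cancelled entries complete through send_handler() with a
 * flush error; everything else is retried as a regular SA MAD.
 */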
static void ib_nl_request_timeout(struct work_struct *work)
{
	unsigned long flags;
	struct ib_sa_query *query;
	unsigned long delay;
	struct ib_mad_send_wc mad_send_wc;
	int ret;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	while (!list_empty(&ib_nl_request_list)) {
		query = list_entry(ib_nl_request_list.next,
				   struct ib_sa_query, list);

		if (time_after(query->timeout, jiffies)) {
			delay = query->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
			break;
		}

		list_del(&query->list);
		ib_sa_disable_local_svc(query);
		/* Hold the lock to protect against query cancellation */
		if (ib_sa_query_cancelled(query))
			ret = -1;
		else
			ret = ib_post_send_mad(query->mad_buf, NULL);
		if (ret) {
			mad_send_wc.send_buf = query->mad_buf;
			mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
			spin_unlock_irqrestore(&ib_nl_request_lock, flags);
			send_handler(query->port->agent, &mad_send_wc);
			spin_lock_irqsave(&ib_nl_request_lock, flags);
		}
	}
	spin_unlock_irqrestore(&ib_nl_request_lock, flags);
}

int ib_nl_handle_set_timeout(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	int timeout, delta, abs_delta;
	const struct nlattr *attr;
	unsigned long flags;
	struct ib_sa_query *query;
	long delay = 0;
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (!(nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk) ||
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	attr = (const struct nlattr *)tb[LS_NLA_TYPE_TIMEOUT];
	if (ret || !attr)
		goto settimeout_out;

	timeout = *(int *) nla_data(attr);
	if (timeout < IB_SA_LOCAL_SVC_TIMEOUT_MIN)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MIN;
	if (timeout > IB_SA_LOCAL_SVC_TIMEOUT_MAX)
		timeout = IB_SA_LOCAL_SVC_TIMEOUT_MAX;

	delta = timeout - sa_local_svc_timeout_ms;
	if (delta < 0)
		abs_delta = -delta;
	else
		abs_delta = delta;

	if (delta != 0) {
		spin_lock_irqsave(&ib_nl_request_lock, flags);
		sa_local_svc_timeout_ms = timeout;
		list_for_each_entry(query, &ib_nl_request_list, list) {
			if (delta < 0 && abs_delta > query->timeout)
				query->timeout = 0;
			else
				query->timeout += delta;

			/* Get the new delay from the first entry */
			if (!delay) {
				delay = query->timeout - jiffies;
				if (delay <= 0)
					delay = 1;
			}
		}
		if (delay)
			mod_delayed_work(ib_nl_wq, &ib_nl_timed_work,
					 (unsigned long)delay);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
	}

settimeout_out:
	return skb->len;
}

static inline int ib_nl_is_good_resolve_resp(const struct nlmsghdr *nlh)
{
	struct nlattr *tb[LS_NLA_TYPE_MAX];
	int ret;

	if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
		return 0;

	ret = nla_parse(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
			nlmsg_len(nlh), ib_nl_policy);
	if (ret)
		return 0;

	return 1;
}

int ib_nl_handle_resolve_resp(struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = (struct nlmsghdr *)cb->nlh;
	unsigned long flags;
	struct ib_sa_query *query;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_send_wc mad_send_wc;
	int found = 0;
	int ret;

	if ((nlh->nlmsg_flags & NLM_F_REQUEST) ||
	    !(NETLINK_CB(skb).sk) ||
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	spin_lock_irqsave(&ib_nl_request_lock, flags);
	list_for_each_entry(query, &ib_nl_request_list, list) {
		/*
		 * If the query is cancelled, let the timeout routine
		 * take care of it.
		 */
		if (nlh->nlmsg_seq == query->seq) {
			found = !ib_sa_query_cancelled(query);
			if (found)
				list_del(&query->list);
			break;
		}
	}

	if (!found) {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		goto resp_out;
	}

	send_buf = query->mad_buf;

	if (!ib_nl_is_good_resolve_resp(nlh)) {
		/* if the result is a failure, send out the packet via IB */
		ib_sa_disable_local_svc(query);
		ret = ib_post_send_mad(query->mad_buf, NULL);
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		if (ret) {
			mad_send_wc.send_buf = send_buf;
			mad_send_wc.status = IB_WC_GENERAL_ERR;
			send_handler(query->port->agent, &mad_send_wc);
		}
	} else {
		spin_unlock_irqrestore(&ib_nl_request_lock, flags);
		ib_nl_process_good_resolve_rsp(query, nlh);
	}

resp_out:
	return skb->len;
}

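/*
 * Each port caches a single address handle for the current SM so that a
 * new AH does not have to be created for every query.  The cache entry
 * is reference counted; update_sm_ah() swaps it out whenever a port
 * event indicates that the SM (or its LID, P_Key index, or GRH
 * requirements) may have changed.
 */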
static void free_sm_ah(struct kref *kref)
{
	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);

	ib_destroy_ah(sm_ah->ah);
	kfree(sm_ah);
}

static void update_sm_ah(struct work_struct *work)
{
	struct ib_sa_port *port =
		container_of(work, struct ib_sa_port, update_task);
	struct ib_sa_sm_ah *new_ah;
	struct ib_port_attr port_attr;
	struct ib_ah_attr   ah_attr;

	if (ib_query_port(port->agent->device, port->port_num, &port_attr)) {
		pr_warn("Couldn't query port\n");
		return;
	}

	new_ah = kmalloc(sizeof *new_ah, GFP_KERNEL);
	if (!new_ah) {
		return;
	}

	kref_init(&new_ah->ref);
	new_ah->src_path_mask = (1 << port_attr.lmc) - 1;

	new_ah->pkey_index = 0;
	if (ib_find_pkey(port->agent->device, port->port_num,
			 IB_DEFAULT_PKEY_FULL, &new_ah->pkey_index))
		pr_err("Couldn't find index for default PKey\n");

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.dlid     = port_attr.sm_lid;
	ah_attr.sl       = port_attr.sm_sl;
	ah_attr.port_num = port->port_num;
	if (port_attr.grh_required) {
		ah_attr.ah_flags = IB_AH_GRH;
		ah_attr.grh.dgid.global.subnet_prefix = cpu_to_be64(port_attr.subnet_prefix);
		ah_attr.grh.dgid.global.interface_id = cpu_to_be64(IB_SA_WELL_KNOWN_GUID);
	}

	new_ah->ah = ib_create_ah(port->agent->qp->pd, &ah_attr);
	if (IS_ERR(new_ah->ah)) {
		pr_warn("Couldn't create new SM AH\n");
		kfree(new_ah);
		return;
	}

	spin_lock_irq(&port->ah_lock);
	if (port->sm_ah)
		kref_put(&port->sm_ah->ref, free_sm_ah);
	port->sm_ah = new_ah;
	spin_unlock_irq(&port->ah_lock);
}

static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
{
	if (event->event == IB_EVENT_PORT_ERR    ||
	    event->event == IB_EVENT_PORT_ACTIVE ||
	    event->event == IB_EVENT_LID_CHANGE  ||
	    event->event == IB_EVENT_PKEY_CHANGE ||
	    event->event == IB_EVENT_SM_CHANGE   ||
	    event->event == IB_EVENT_CLIENT_REREGISTER) {
		unsigned long flags;
		struct ib_sa_device *sa_dev =
			container_of(handler, typeof(*sa_dev), event_handler);
		struct ib_sa_port *port =
			&sa_dev->port[event->element.port_num - sa_dev->start_port];

		if (!rdma_cap_ib_sa(handler->device, port->port_num))
			return;

		spin_lock_irqsave(&port->ah_lock, flags);
		if (port->sm_ah)
			kref_put(&port->sm_ah->ref, free_sm_ah);
		port->sm_ah = NULL;
		spin_unlock_irqrestore(&port->ah_lock, flags);

		if (event->event == IB_EVENT_SM_CHANGE ||
		    event->event == IB_EVENT_CLIENT_REREGISTER ||
		    event->event == IB_EVENT_LID_CHANGE) {
			spin_lock_irqsave(&port->classport_lock, flags);
			port->classport_info.valid = false;
			spin_unlock_irqrestore(&port->classport_lock, flags);
		}
		queue_work(ib_wq, &sa_dev->port[event->element.port_num -
						sa_dev->start_port].update_task);
	}
}

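/*
 * ib_sa_register_client() / ib_sa_unregister_client() bracket all use
 * of the SA query API.  A minimal usage sketch (error handling elided;
 * "my_sa_client" is a hypothetical caller-owned object):
 *
 *	static struct ib_sa_client my_sa_client;
 *
 *	ib_sa_register_client(&my_sa_client);
 *	... issue ib_sa_path_rec_get() etc. on behalf of my_sa_client ...
 *	ib_sa_unregister_client(&my_sa_client);
 *
 * Unregistering waits until every query issued on behalf of the client
 * has completed.
 */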
void ib_sa_register_client(struct ib_sa_client *client)
{
	atomic_set(&client->users, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_register_client);

void ib_sa_unregister_client(struct ib_sa_client *client)
{
	ib_sa_client_put(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(ib_sa_unregister_client);

/**
 * ib_sa_cancel_query - try to cancel an SA query
 * @id:ID of query to cancel
 * @query:query pointer to cancel
 *
 * Try to cancel an SA query.  If the id and query don't match up or
 * the query has already completed, nothing is done.  Otherwise the
 * query is canceled and will complete with a status of -EINTR.
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query)
{
	unsigned long flags;
	struct ib_mad_agent *agent;
	struct ib_mad_send_buf *mad_buf;

	spin_lock_irqsave(&idr_lock, flags);
	if (idr_find(&query_idr, id) != query) {
		spin_unlock_irqrestore(&idr_lock, flags);
		return;
	}
	agent = query->port->agent;
	mad_buf = query->mad_buf;
	spin_unlock_irqrestore(&idr_lock, flags);

	/*
	 * If the query is still on the netlink request list, schedule
	 * it to be cancelled by the timeout routine.  Otherwise, it has been
	 * sent to the MAD layer and has to be cancelled from there.
	 */
	if (!ib_nl_cancel_request(query))
		ib_cancel_mad(agent, mad_buf);
}
EXPORT_SYMBOL(ib_sa_cancel_query);

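/*
 * src_path_bits in an AH may only use the low-order LID bits covered by
 * the port's LMC; the mask is cached in the SM AH and defaults to 0x7f
 * when no SM AH is available.
 */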
static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
	struct ib_sa_device *sa_dev;
	struct ib_sa_port   *port;
	unsigned long flags;
	u8 src_path_mask;

	sa_dev = ib_get_client_data(device, &sa_client);
	if (!sa_dev)
		return 0x7f;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	spin_lock_irqsave(&port->ah_lock, flags);
	src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
	spin_unlock_irqrestore(&port->ah_lock, flags);

	return src_path_mask;
}

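/*
 * Translate a resolved path record into address handle attributes.  For
 * RoCE ports the route is validated against the netdev stack and the
 * destination MAC is taken from the record; for IB, a GRH is only set
 * up when the path leaves the local subnet (hop_limit > 0).
 */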
int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
			 struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
	int ret;
	u16 gid_index;
	int use_roce;
	struct net_device *ndev = NULL;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = be16_to_cpu(rec->dlid);
	ah_attr->sl = rec->sl;
	ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
				 get_src_path_mask(device, port_num);
	ah_attr->port_num = port_num;
	ah_attr->static_rate = rec->rate;

	use_roce = rdma_cap_eth_ah(device, port_num);

	if (use_roce) {
		struct net_device *idev;
		struct net_device *resolved_dev;
		struct rdma_dev_addr dev_addr = {.bound_dev_if = rec->ifindex,
						 .net = rec->net ? rec->net :
							&init_net};
		union {
			struct sockaddr_in  _sockaddr_in;
			struct sockaddr_in6 _sockaddr_in6;
		} sgid_addr, dgid_addr;

		if (!device->get_netdev)
			return -EOPNOTSUPP;

		rdma_gid2ip((struct sockaddr *)&sgid_addr, &rec->sgid);
		rdma_gid2ip((struct sockaddr *)&dgid_addr, &rec->dgid);

		/* validate the route */
		ret = rdma_resolve_ip_route((struct sockaddr *)&sgid_addr,
					    (struct sockaddr *)&dgid_addr,
					    &dev_addr);
		if (ret)
			return ret;

		if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
		     dev_addr.network == RDMA_NETWORK_IPV6) &&
		    rec->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
			return -EINVAL;

		idev = device->get_netdev(device, port_num);
		if (!idev)
			return -ENODEV;

		resolved_dev = dev_get_by_index(dev_addr.net,
						dev_addr.bound_dev_if);
		if (!resolved_dev) {
			dev_put(idev);
			return -ENODEV;
		}
		ndev = ib_get_ndev_from_path(rec);
		rcu_read_lock();
		if ((ndev && ndev != resolved_dev) ||
		    (resolved_dev != idev &&
		     !rdma_is_upper_dev_rcu(idev, resolved_dev)))
			ret = -EHOSTUNREACH;
		rcu_read_unlock();
		dev_put(idev);
		dev_put(resolved_dev);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}
	}

	if (rec->hop_limit > 0 || use_roce) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = rec->dgid;

		ret = ib_find_cached_gid_by_port(device, &rec->sgid,
						 rec->gid_type, port_num, ndev,
						 &gid_index);
		if (ret) {
			if (ndev)
				dev_put(ndev);
			return ret;
		}

		ah_attr->grh.sgid_index    = gid_index;
		ah_attr->grh.flow_label    = be32_to_cpu(rec->flow_label);
		ah_attr->grh.hop_limit     = rec->hop_limit;
		ah_attr->grh.traffic_class = rec->traffic_class;
		if (ndev)
			dev_put(ndev);
	}

	if (use_roce)
		memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);

	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_path);

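/*
 * alloc_mad()/free_mad() pair a MAD send buffer with a reference on the
 * port's cached SM address handle; -EAGAIN is returned while no SM AH
 * is available (e.g. right after a port event, before update_sm_ah()
 * has run).
 */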
static int alloc_mad(struct ib_sa_query *query, gfp_t gfp_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&query->port->ah_lock, flags);
	if (!query->port->sm_ah) {
		spin_unlock_irqrestore(&query->port->ah_lock, flags);
		return -EAGAIN;
	}
	kref_get(&query->port->sm_ah->ref);
	query->sm_ah = query->port->sm_ah;
	spin_unlock_irqrestore(&query->port->ah_lock, flags);

	query->mad_buf = ib_create_send_mad(query->port->agent, 1,
					    query->sm_ah->pkey_index,
					    0, IB_MGMT_SA_HDR, IB_MGMT_SA_DATA,
					    gfp_mask,
					    IB_MGMT_BASE_VERSION);
	if (IS_ERR(query->mad_buf)) {
		kref_put(&query->sm_ah->ref, free_sm_ah);
		return -ENOMEM;
	}

	query->mad_buf->ah = query->sm_ah->ah;

	return 0;
}

static void free_mad(struct ib_sa_query *query)
{
	ib_free_send_mad(query->mad_buf);
	kref_put(&query->sm_ah->ref, free_sm_ah);
}

static void init_mad(struct ib_sa_mad *mad, struct ib_mad_agent *agent)
{
	unsigned long flags;

	memset(mad, 0, sizeof *mad);

	mad->mad_hdr.base_version  = IB_MGMT_BASE_VERSION;
	mad->mad_hdr.mgmt_class    = IB_MGMT_CLASS_SUBN_ADM;
	mad->mad_hdr.class_version = IB_SA_CLASS_VERSION;

	spin_lock_irqsave(&tid_lock, flags);
	mad->mad_hdr.tid =
		cpu_to_be64(((u64) agent->hi_tid) << 32 | tid++);
	spin_unlock_irqrestore(&tid_lock, flags);
}

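/*
 * Common transmit path for all query types: allocate an IDR id (used by
 * ib_sa_cancel_query()), try the netlink local service first when it is
 * enabled and a listener is registered, and otherwise post the MAD.
 */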
static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret, id;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&idr_lock, flags);

	id = idr_alloc(&query_idr, query, 0, 0, GFP_NOWAIT);

	spin_unlock_irqrestore(&idr_lock, flags);
	if (preload)
		idr_preload_end();
	if (id < 0)
		return id;

	query->mad_buf->timeout_ms  = timeout_ms;
	query->mad_buf->context[0] = query;
	query->id = id;

	if (query->flags & IB_SA_ENABLE_LOCAL_SERVICE) {
		if (!ibnl_chk_listeners(RDMA_NL_GROUP_LS)) {
			if (!ib_nl_make_request(query, gfp_mask))
				return id;
		}
		ib_sa_disable_local_svc(query);
	}

	ret = ib_post_send_mad(query->mad_buf, NULL);
	if (ret) {
		spin_lock_irqsave(&idr_lock, flags);
		idr_remove(&query_idr, id);
		spin_unlock_irqrestore(&idr_lock, flags);
	}

	/*
	 * It's not safe to dereference query any more, because the
	 * send may already have completed and freed the query in
	 * another context.
	 */
	return ret ? ret : id;
}

void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec)
{
	ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table), attribute, rec);
}
EXPORT_SYMBOL(ib_sa_unpack_path);

void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute)
{
	ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, attribute);
}
EXPORT_SYMBOL(ib_sa_pack_path);

static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
				    int status,
				    struct ib_sa_mad *mad)
{
	struct ib_sa_path_query *query =
		container_of(sa_query, struct ib_sa_path_query, sa_query);

	if (mad) {
		struct ib_sa_path_rec rec;

		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
			  mad->data, &rec);
		rec.net = NULL;
		rec.ifindex = 0;
		rec.gid_type = IB_GID_TYPE_IB;
		eth_zero_addr(rec.dmac);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_path_query, sa_query));
}

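/*
 * A minimal caller sketch for ib_sa_path_rec_get() below (assumes an
 * already-registered SA client; "my_sa_client", "my_path_handler" and
 * "my_ctx" are hypothetical):
 *
 *	static void my_path_handler(int status, struct ib_sa_path_rec *resp,
 *				    void *context)
 *	{
 *		if (!status)
 *			pr_info("dlid 0x%x\n", be16_to_cpu(resp->dlid));
 *	}
 *
 *	struct ib_sa_path_rec rec = {
 *		.sgid      = sgid,
 *		.dgid      = dgid,
 *		.numb_path = 1,
 *	};
 *	struct ib_sa_query *sa_query;
 *	int id = ib_sa_path_rec_get(&my_sa_client, device, port_num, &rec,
 *				    IB_SA_PATH_REC_SGID | IB_SA_PATH_REC_DGID |
 *				    IB_SA_PATH_REC_NUMB_PATH,
 *				    1000, GFP_KERNEL,
 *				    my_path_handler, my_ctx, &sa_query);
 *
 * A negative id is an error; otherwise (id, sa_query) can later be
 * passed to ib_sa_cancel_query().
 */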
1321/**
1322 * ib_sa_path_rec_get - Start a Path get query
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07001323 * @client:SA client
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324 * @device:device to send query on
1325 * @port_num: port number to send query on
1326 * @rec:Path Record to send in query
1327 * @comp_mask:component mask to send in query
1328 * @timeout_ms:time to wait for response
1329 * @gfp_mask:GFP mask to use for internal allocations
1330 * @callback:function called when query completes, times out or is
1331 * canceled
1332 * @context:opaque user context passed to callback
1333 * @sa_query:query context, used to cancel query
1334 *
1335 * Send a Path Record Get query to the SA to look up a path. The
1336 * callback function will be called when the query completes (or
1337 * fails); status is 0 for a successful response, -EINTR if the query
1338 * is canceled, -ETIMEDOUT is the query timed out, or -EIO if an error
1339 * occurred sending the query. The resp parameter of the callback is
1340 * only valid if status is 0.
1341 *
1342 * If the return value of ib_sa_path_rec_get() is negative, it is an
1343 * error code. Otherwise it is a query ID that can be used to cancel
1344 * the query.
1345 */
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07001346int ib_sa_path_rec_get(struct ib_sa_client *client,
1347 struct ib_device *device, u8 port_num,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 struct ib_sa_path_rec *rec,
1349 ib_sa_comp_mask comp_mask,
Al Virodd0fc662005-10-07 07:46:04 +01001350 int timeout_ms, gfp_t gfp_mask,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351 void (*callback)(int status,
1352 struct ib_sa_path_rec *resp,
1353 void *context),
1354 void *context,
1355 struct ib_sa_query **sa_query)
1356{
1357 struct ib_sa_path_query *query;
1358 struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
Roland Dreier56c202d2005-10-13 10:45:02 -07001359 struct ib_sa_port *port;
1360 struct ib_mad_agent *agent;
Sean Hefty34816ad2005-10-25 10:51:39 -07001361 struct ib_sa_mad *mad;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362 int ret;
1363
Roland Dreier56c202d2005-10-13 10:45:02 -07001364 if (!sa_dev)
1365 return -ENODEV;
1366
1367 port = &sa_dev->port[port_num - sa_dev->start_port];
1368 agent = port->agent;
1369
Kaike Wan5d265772015-08-14 08:52:08 -04001370 query = kzalloc(sizeof(*query), gfp_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 if (!query)
1372 return -ENOMEM;
Sean Hefty34816ad2005-10-25 10:51:39 -07001373
Sean Hefty2aec5c62007-06-18 11:03:58 -07001374 query->sa_query.port = port;
1375 ret = alloc_mad(&query->sa_query, gfp_mask);
1376 if (ret)
Sean Hefty34816ad2005-10-25 10:51:39 -07001377 goto err1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07001379 ib_sa_client_get(client);
1380 query->sa_query.client = client;
1381 query->callback = callback;
1382 query->context = context;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383
Sean Hefty34816ad2005-10-25 10:51:39 -07001384 mad = query->sa_query.mad_buf->mad;
1385 init_mad(mad, agent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386
Sean Hefty34816ad2005-10-25 10:51:39 -07001387 query->sa_query.callback = callback ? ib_sa_path_rec_callback : NULL;
1388 query->sa_query.release = ib_sa_path_rec_release;
Sean Hefty34816ad2005-10-25 10:51:39 -07001389 mad->mad_hdr.method = IB_MGMT_METHOD_GET;
1390 mad->mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_PATH_REC);
1391 mad->sa_hdr.comp_mask = comp_mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392
Sean Hefty34816ad2005-10-25 10:51:39 -07001393 ib_pack(path_rec_table, ARRAY_SIZE(path_rec_table), rec, mad->data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394
1395 *sa_query = &query->sa_query;
Roland Dreierdae4c1d2005-06-27 14:36:46 -07001396
Kaike Wan2ca546b2015-08-14 08:52:09 -04001397 query->sa_query.flags |= IB_SA_ENABLE_LOCAL_SERVICE;
1398 query->sa_query.mad_buf->context[1] = rec;
1399
Michael S. Tsirkine322fed2006-07-14 00:23:56 -07001400 ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
Sean Hefty34816ad2005-10-25 10:51:39 -07001401 if (ret < 0)
1402 goto err2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001403
Roland Dreierdae4c1d2005-06-27 14:36:46 -07001404 return ret;
Sean Hefty34816ad2005-10-25 10:51:39 -07001405
1406err2:
1407 *sa_query = NULL;
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07001408 ib_sa_client_put(query->sa_query.client);
Sean Hefty2aec5c62007-06-18 11:03:58 -07001409 free_mad(&query->sa_query);
Sean Hefty34816ad2005-10-25 10:51:39 -07001410
1411err1:
1412 kfree(query);
1413 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414}
1415EXPORT_SYMBOL(ib_sa_path_rec_get);
1416
Hal Rosenstockcbae32c2005-07-27 11:45:38 -07001417static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
1418 int status,
1419 struct ib_sa_mad *mad)
1420{
1421 struct ib_sa_service_query *query =
1422 container_of(sa_query, struct ib_sa_service_query, sa_query);
1423
1424 if (mad) {
1425 struct ib_sa_service_rec rec;
1426
1427 ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
1428 mad->data, &rec);
1429 query->callback(status, &rec, query->context);
1430 } else
1431 query->callback(status, NULL, query->context);
1432}
1433
1434static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
1435{
Hal Rosenstockcbae32c2005-07-27 11:45:38 -07001436 kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
1437}
1438
1439/**
1440 * ib_sa_service_rec_query - Start Service Record operation
Michael S. Tsirkinc1a0b232006-08-21 16:40:12 -07001441 * @client:SA client
Hal Rosenstockcbae32c2005-07-27 11:45:38 -07001442 * @device:device to send request on
1443 * @port_num: port number to send request on
1444 * @method:SA method - should be get, set, or delete
1445 * @rec:Service Record to send in request
1446 * @comp_mask:component mask to send in request
1447 * @timeout_ms:time to wait for response
1448 * @gfp_mask:GFP mask to use for internal allocations
1449 * @callback:function called when request completes, times out or is
1450 * canceled
1451 * @context:opaque user context passed to callback
1452 * @sa_query:request context, used to cancel request
1453 *
1454 * Send a Service Record set/get/delete to the SA to register,
1455 * unregister or query a service record.
1456 * The callback function will be called when the request completes (or
1457 * fails); status is 0 for a successful response, -EINTR if the query
1458 * is canceled, -ETIMEDOUT is the query timed out, or -EIO if an error
1459 * occurred sending the query. The resp parameter of the callback is
1460 * only valid if status is 0.
1461 *
1462 * If the return value of ib_sa_service_rec_query() is negative, it is an
1463 * error code. Otherwise it is a request ID that can be used to cancel
1464 * the query.
1465 */
int ib_sa_service_rec_query(struct ib_sa_client *client,
			    struct ib_device *device, u8 port_num, u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, gfp_t gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query)
{
	struct ib_sa_service_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE)
		return -EINVAL;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
	query->sa_query.release  = ib_sa_service_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_service_rec_query);
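
/*
 * Hedged usage sketch (not part of the original file): one way a kernel
 * consumer *might* drive ib_sa_service_rec_query() and the cancel path
 * described in the comment above.  The client, callback, and
 * component-mask choice are illustrative assumptions rather than an
 * existing in-tree caller, so the block is compiled out.
 */
#if 0
static struct ib_sa_client example_sa_client;
static struct ib_sa_query *example_query;

static void example_service_rec_cb(int status, struct ib_sa_service_rec *resp,
				   void *context)
{
	/* resp is only valid when status == 0 */
	pr_info("service rec query completed, status %d\n", status);
}

/* Returns the request ID (>= 0) or a negative errno. */
static int example_register_service(struct ib_device *device, u8 port_num,
				    struct ib_sa_service_rec *rec)
{
	ib_sa_register_client(&example_sa_client);

	return ib_sa_service_rec_query(&example_sa_client, device, port_num,
				       IB_MGMT_METHOD_SET, rec,
				       IB_SA_SERVICE_REC_SERVICE_ID |
				       IB_SA_SERVICE_REC_SERVICE_NAME,
				       2000, GFP_KERNEL,
				       example_service_rec_cb, NULL,
				       &example_query);
}

static void example_teardown(int id)
{
	/* id is the non-negative request ID returned above */
	ib_sa_cancel_query(id, example_query);
	ib_sa_unregister_client(&example_sa_client);
}
#endif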

static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_mcmember_query *query =
		container_of(sa_query, struct ib_sa_mcmember_query, sa_query);

	if (mad) {
		struct ib_sa_mcmember_rec rec;

		ib_unpack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_mcmember_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_mcmember_query, sa_query));
}

int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
			     struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, gfp_t gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **sa_query)
{
	struct ib_sa_mcmember_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port   *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	port  = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_mcmember_rec_callback : NULL;
	query->sa_query.release  = ib_sa_mcmember_rec_release;
	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(mcmember_rec_table, ARRAY_SIZE(mcmember_rec_table),
		rec, mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
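
/*
 * Hedged sketch (not part of the original file): ib_sa_mcmember_rec_query()
 * is core-private (declared in sa.h and driven by the ib_core multicast
 * code), but a caller inside ib_core could issue a join-style SET along
 * these lines.  The callback and mask choice are illustrative
 * assumptions, so the block is compiled out.
 */
#if 0
static void example_mcmember_cb(int status, struct ib_sa_mcmember_rec *resp,
				void *context)
{
	pr_info("mcmember query completed, status %d\n", status);
}

static int example_mcmember_set(struct ib_sa_client *client,
				struct ib_device *device, u8 port_num,
				struct ib_sa_mcmember_rec *rec,
				struct ib_sa_query **query)
{
	/* MGID, port GID, and join state must be present for a SET */
	ib_sa_comp_mask mask = IB_SA_MCMEMBER_REC_MGID |
			       IB_SA_MCMEMBER_REC_PORT_GID |
			       IB_SA_MCMEMBER_REC_JOIN_STATE;

	return ib_sa_mcmember_rec_query(client, device, port_num,
					IB_MGMT_METHOD_SET, rec, mask,
					2000, GFP_KERNEL,
					example_mcmember_cb, NULL, query);
}
#endif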

/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
					int status,
					struct ib_sa_mad *mad)
{
	struct ib_sa_guidinfo_query *query =
		container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);

	if (mad) {
		struct ib_sa_guidinfo_rec rec;

		ib_unpack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table),
			  mad->data, &rec);
		query->callback(status, &rec, query->context);
	} else
		query->callback(status, NULL, query->context);
}

static void ib_sa_guidinfo_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_guidinfo_query, sa_query));
}

int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
			      struct ib_device *device, u8 port_num,
			      struct ib_sa_guidinfo_rec *rec,
			      ib_sa_comp_mask comp_mask, u8 method,
			      int timeout_ms, gfp_t gfp_mask,
			      void (*callback)(int status,
					       struct ib_sa_guidinfo_rec *resp,
					       void *context),
			      void *context,
			      struct ib_sa_query **sa_query)
{
	struct ib_sa_guidinfo_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	int ret;

	if (!sa_dev)
		return -ENODEV;

	if (method != IB_MGMT_METHOD_GET &&
	    method != IB_MGMT_METHOD_SET &&
	    method != IB_SA_METHOD_DELETE) {
		return -EINVAL;
	}

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_guidinfo_rec_callback : NULL;
	query->sa_query.release  = ib_sa_guidinfo_rec_release;

	mad->mad_hdr.method	 = method;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_GUID_INFO_REC);
	mad->sa_hdr.comp_mask	 = comp_mask;

	ib_pack(guidinfo_rec_table, ARRAY_SIZE(guidinfo_rec_table), rec,
		mad->data);

	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_guid_info_rec_query);
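
/*
 * Hedged sketch (not part of the original file): how a caller managing
 * alias GUIDs *might* fetch one GuidInfoRecord block.  The LID/block
 * parameters and mask are illustrative assumptions, so the block is
 * compiled out.
 */
#if 0
static void example_guidinfo_cb(int status, struct ib_sa_guidinfo_rec *resp,
				void *context)
{
	pr_info("guidinfo query completed, status %d\n", status);
}

static int example_get_guid_block(struct ib_sa_client *client,
				  struct ib_device *device, u8 port_num,
				  __be16 lid, u8 block_num,
				  struct ib_sa_query **query)
{
	struct ib_sa_guidinfo_rec rec = {
		.lid	   = lid,	/* LID of the port being queried */
		.block_num = block_num,	/* each block holds 8 GUIDs */
	};

	return ib_sa_guid_info_rec_query(client, device, port_num, &rec,
					 IB_SA_GUIDINFO_REC_LID |
					 IB_SA_GUIDINFO_REC_BLOCK_NUM,
					 IB_MGMT_METHOD_GET,
					 2000, GFP_KERNEL,
					 example_guidinfo_cb, NULL, query);
}
#endif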

/* Support get SA ClassPortInfo */
static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
					      int status,
					      struct ib_sa_mad *mad)
{
	unsigned long flags;
	struct ib_sa_classport_info_query *query =
		container_of(sa_query, struct ib_sa_classport_info_query, sa_query);

	if (mad) {
		struct ib_class_port_info rec;

		ib_unpack(classport_info_rec_table,
			  ARRAY_SIZE(classport_info_rec_table),
			  mad->data, &rec);

		spin_lock_irqsave(&sa_query->port->classport_lock, flags);
		if (!status && !sa_query->port->classport_info.valid) {
			memcpy(&sa_query->port->classport_info.data, &rec,
			       sizeof(sa_query->port->classport_info.data));

			sa_query->port->classport_info.valid = true;
		}
		spin_unlock_irqrestore(&sa_query->port->classport_lock, flags);

		query->callback(status, &rec, query->context);
	} else {
		query->callback(status, NULL, query->context);
	}
}

static void ib_sa_portclass_info_rec_release(struct ib_sa_query *sa_query)
{
	kfree(container_of(sa_query, struct ib_sa_classport_info_query,
			   sa_query));
}

int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
				   struct ib_device *device, u8 port_num,
				   int timeout_ms, gfp_t gfp_mask,
				   void (*callback)(int status,
						    struct ib_class_port_info *resp,
						    void *context),
				   void *context,
				   struct ib_sa_query **sa_query)
{
	struct ib_sa_classport_info_query *query;
	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
	struct ib_sa_port *port;
	struct ib_mad_agent *agent;
	struct ib_sa_mad *mad;
	struct ib_class_port_info cached_class_port_info;
	int ret;
	unsigned long flags;

	if (!sa_dev)
		return -ENODEV;

	port = &sa_dev->port[port_num - sa_dev->start_port];
	agent = port->agent;

	/* Use cached ClassPortInfo attribute if valid instead of sending mad */
	spin_lock_irqsave(&port->classport_lock, flags);
	if (port->classport_info.valid && callback) {
		memcpy(&cached_class_port_info, &port->classport_info.data,
		       sizeof(cached_class_port_info));
		spin_unlock_irqrestore(&port->classport_lock, flags);
		callback(0, &cached_class_port_info, context);
		return 0;
	}
	spin_unlock_irqrestore(&port->classport_lock, flags);

	query = kzalloc(sizeof(*query), gfp_mask);
	if (!query)
		return -ENOMEM;

	query->sa_query.port = port;
	ret = alloc_mad(&query->sa_query, gfp_mask);
	if (ret)
		goto err1;

	ib_sa_client_get(client);
	query->sa_query.client = client;
	query->callback        = callback;
	query->context         = context;

	mad = query->sa_query.mad_buf->mad;
	init_mad(mad, agent);

	query->sa_query.callback = callback ? ib_sa_classport_info_rec_callback : NULL;

	query->sa_query.release  = ib_sa_portclass_info_rec_release;
	/* support GET only */
	mad->mad_hdr.method	 = IB_MGMT_METHOD_GET;
	mad->mad_hdr.attr_id	 = cpu_to_be16(IB_SA_ATTR_CLASS_PORTINFO);
	mad->sa_hdr.comp_mask	 = 0;
	*sa_query = &query->sa_query;

	ret = send_mad(&query->sa_query, timeout_ms, gfp_mask);
	if (ret < 0)
		goto err2;

	return ret;

err2:
	*sa_query = NULL;
	ib_sa_client_put(query->sa_query.client);
	free_mad(&query->sa_query);

err1:
	kfree(query);
	return ret;
}
EXPORT_SYMBOL(ib_sa_classport_info_rec_query);
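
/*
 * Hedged sketch (not part of the original file): a GET of the SA's
 * ClassPortInfo.  Note that after the first successful response the
 * result is served from the per-port cache above, so the callback may
 * run synchronously and the function may return 0 rather than a
 * request ID.  Names here are illustrative assumptions, so the block
 * is compiled out.
 */
#if 0
static void example_classport_cb(int status, struct ib_class_port_info *resp,
				 void *context)
{
	if (!status)
		pr_info("SA class version %u\n", resp->class_version);
}

static int example_get_classport_info(struct ib_sa_client *client,
				      struct ib_device *device, u8 port_num,
				      struct ib_sa_query **query)
{
	return ib_sa_classport_info_rec_query(client, device, port_num,
					      2000, GFP_KERNEL,
					      example_classport_cb, NULL,
					      query);
}
#endif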

static void send_handler(struct ib_mad_agent *agent,
			 struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_sa_query *query = mad_send_wc->send_buf->context[0];
	unsigned long flags;

	if (query->callback)
		switch (mad_send_wc->status) {
		case IB_WC_SUCCESS:
			/* No callback -- already got recv */
			break;
		case IB_WC_RESP_TIMEOUT_ERR:
			query->callback(query, -ETIMEDOUT, NULL);
			break;
		case IB_WC_WR_FLUSH_ERR:
			query->callback(query, -EINTR, NULL);
			break;
		default:
			query->callback(query, -EIO, NULL);
			break;
		}

	spin_lock_irqsave(&idr_lock, flags);
	idr_remove(&query_idr, query->id);
	spin_unlock_irqrestore(&idr_lock, flags);

	free_mad(query);
	ib_sa_client_put(query->client);
	query->release(query);
}

static void recv_handler(struct ib_mad_agent *mad_agent,
			 struct ib_mad_send_buf *send_buf,
			 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_sa_query *query;

	if (!send_buf)
		return;

	query = send_buf->context[0];
	if (query->callback) {
		if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
			query->callback(query,
					mad_recv_wc->recv_buf.mad->mad_hdr.status ?
					-EINVAL : 0,
					(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
		else
			query->callback(query, -EIO, NULL);
	}

	ib_free_recv_mad(mad_recv_wc);
}

static void ib_sa_add_one(struct ib_device *device)
{
	struct ib_sa_device *sa_dev;
	int s, e, i;
	int count = 0;

	s = rdma_start_port(device);
	e = rdma_end_port(device);

	sa_dev = kzalloc(sizeof *sa_dev +
			 (e - s + 1) * sizeof (struct ib_sa_port),
			 GFP_KERNEL);
	if (!sa_dev)
		return;

	sa_dev->start_port = s;
	sa_dev->end_port   = e;

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);
		if (!rdma_cap_ib_sa(device, i + 1))
			continue;

		sa_dev->port[i].sm_ah    = NULL;
		sa_dev->port[i].port_num = i + s;

		spin_lock_init(&sa_dev->port[i].classport_lock);
		sa_dev->port[i].classport_info.valid = false;

		sa_dev->port[i].agent =
			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
					      NULL, 0, send_handler,
					      recv_handler, sa_dev, 0);
		if (IS_ERR(sa_dev->port[i].agent))
			goto err;

		INIT_WORK(&sa_dev->port[i].update_task, update_sm_ah);

		count++;
	}

	if (!count)
		goto free;

	ib_set_client_data(device, &sa_client, sa_dev);

	/*
	 * We register our event handler after everything is set up,
	 * and then update our cached info after the event handler is
	 * registered to avoid any problems if a port changes state
	 * during our initialization.
	 */

	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
	if (ib_register_event_handler(&sa_dev->event_handler))
		goto err;

	for (i = 0; i <= e - s; ++i) {
		if (rdma_cap_ib_sa(device, i + 1))
			update_sm_ah(&sa_dev->port[i].update_task);
	}

	return;

err:
	while (--i >= 0) {
		if (rdma_cap_ib_sa(device, i + 1))
			ib_unregister_mad_agent(sa_dev->port[i].agent);
	}
free:
	kfree(sa_dev);
	return;
}

static void ib_sa_remove_one(struct ib_device *device, void *client_data)
{
	struct ib_sa_device *sa_dev = client_data;
	int i;

	if (!sa_dev)
		return;

	ib_unregister_event_handler(&sa_dev->event_handler);

	flush_workqueue(ib_wq);

	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
		if (rdma_cap_ib_sa(device, i + 1)) {
			ib_unregister_mad_agent(sa_dev->port[i].agent);
			if (sa_dev->port[i].sm_ah)
				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
		}
	}

	kfree(sa_dev);
}

int ib_sa_init(void)
{
	int ret;

	get_random_bytes(&tid, sizeof tid);

	atomic_set(&ib_nl_sa_request_seq, 0);

	ret = ib_register_client(&sa_client);
	if (ret) {
		pr_err("Couldn't register ib_sa client\n");
		goto err1;
	}

	ret = mcast_init();
	if (ret) {
		pr_err("Couldn't initialize multicast handling\n");
		goto err2;
	}

	ib_nl_wq = alloc_ordered_workqueue("ib_nl_sa_wq", WQ_MEM_RECLAIM);
	if (!ib_nl_wq) {
		ret = -ENOMEM;
		goto err3;
	}

	INIT_DELAYED_WORK(&ib_nl_timed_work, ib_nl_request_timeout);

	return 0;

err3:
	mcast_cleanup();
err2:
	ib_unregister_client(&sa_client);
err1:
	return ret;
}

void ib_sa_cleanup(void)
{
	cancel_delayed_work(&ib_nl_timed_work);
	flush_workqueue(ib_nl_wq);
	destroy_workqueue(ib_nl_wq);
	mcast_cleanup();
	ib_unregister_client(&sa_client);
	idr_destroy(&query_idr);
}