/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
	struct list_head  list;
	struct ib_client *client;
	void             *data;
	/* The device or client is going down. Do not call client or device
	 * callbacks other than remove(). */
	bool		  going_down;
};

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list.  device_mutex protects writer access by device and client
 * registration / de-registration.  lists_rwsem protects reader access to
 * these lists.  Iterators of these lists must lock it for read, while updates
 * to the lists must be done with a write lock.  A special case is when the
 * device_mutex is locked.  In this case locking the lists for read access is
 * not necessary as the device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data);
static void ib_policy_change_task(struct work_struct *work);
static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);

static struct notifier_block ibdev_lsm_nb = {
	.notifier_call = ib_security_change,
};

static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(query_gid),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_ah),
		IB_MANDATORY_FUNC(destroy_ah),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
			pr_warn("Device %s is missing mandatory function %s\n",
				device->name, mandatory_table[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

static struct ib_device *__ib_device_get_by_index(u32 index)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (device->index == index)
			return device;

	return NULL;
}

/*
 * The caller is responsible for dropping the reference by calling
 * put_device().
 */
struct ib_device *ib_device_get_by_index(u32 index)
{
	struct ib_device *device;

	down_read(&lists_rwsem);
	device = __ib_device_get_by_index(index);
	if (device)
		get_device(&device->dev);

	up_read(&lists_rwsem);
	return device;
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
			return device;

	return NULL;
}

static int alloc_name(char *name)
{
	unsigned long *inuse;
	char buf[IB_DEVICE_NAME_MAX];
	struct ib_device *device;
	int i;

	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		if (!sscanf(device->name, name, &i))
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);
	snprintf(buf, sizeof buf, name, i);

	if (__ib_device_get_by_name(buf))
		return -ENFILE;

	strlcpy(name, buf, IB_DEVICE_NAME_MAX);
	return 0;
}

static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	WARN_ON(dev->reg_state == IB_DEV_REGISTERED);
	if (dev->reg_state == IB_DEV_UNREGISTERED) {
		/*
		 * In IB_DEV_UNINITIALIZED state, cache or port table
		 * is not even created. Free cache and port table only when
		 * device reaches UNREGISTERED state.
		 */
		ib_cache_release_one(dev);
		kfree(dev->port_immutable);
	}
	kfree(dev);
}

static int ib_device_uevent(struct device *device,
			    struct kobj_uevent_env *env)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	if (add_uevent_var(env, "NAME=%s", dev->name))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}

static struct class ib_class = {
	.name        = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent  = ib_device_uevent,
};

/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	device->dev.class = &ib_class;
	device_initialize(&device->dev);

	dev_set_drvdata(&device->dev, device);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->event_handler_lock);
	spin_lock_init(&device->client_data_lock);
	INIT_LIST_HEAD(&device->client_data_list);
	INIT_LIST_HEAD(&device->port_list);

	return device;
}
EXPORT_SYMBOL(ib_alloc_device);
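
/*
 * Usage sketch (illustrative, not part of this file): a low-level driver
 * typically embeds struct ib_device at the start of its own device
 * structure and passes the total size here.  The "struct my_hca" name and
 * its fields are hypothetical.
 *
 *	struct my_hca {
 *		struct ib_device ibdev;	// must be first for the cast below
 *		void            *priv;
 *	};
 *
 *	struct my_hca *hca =
 *		(struct my_hca *)ib_alloc_device(sizeof(struct my_hca));
 *	if (!hca)
 *		return -ENOMEM;
 *	// ... fill in hca->ibdev verbs, then ib_register_device() ...
 *	// on teardown: ib_dealloc_device(&hca->ibdev);
 */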

/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
		device->reg_state != IB_DEV_UNINITIALIZED);
	put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	unsigned long flags;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->client = client;
	context->data   = NULL;
	context->going_down = false;

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_add(&context->list, &device->client_data_list);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
			    rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
	int ret;
	u8 start_port = rdma_start_port(device);
	u8 end_port = rdma_end_port(device);
	u8 port;

	/**
	 * device->port_immutable is indexed directly by the port number to make
	 * access to this data as efficient as possible.
	 *
	 * Therefore port_immutable is declared as a 1 based array with
	 * potential empty slots at the beginning.
	 */
	device->port_immutable = kzalloc(sizeof(*device->port_immutable)
					 * (end_port + 1),
					 GFP_KERNEL);
	if (!device->port_immutable)
		return -ENOMEM;

	for (port = start_port; port <= end_port; ++port) {
		ret = device->get_port_immutable(device, port,
						 &device->port_immutable[port]);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}
	return 0;
}

void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
	if (dev->get_dev_fw_str)
		dev->get_dev_fw_str(dev, str);
	else
		str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);

static int setup_port_pkey_list(struct ib_device *device)
{
	int i;

	/**
	 * device->port_pkey_list is indexed directly by the port number.
	 * Therefore it is declared as a 1 based array with potential empty
	 * slots at the beginning.
	 */
	device->port_pkey_list = kcalloc(rdma_end_port(device) + 1,
					 sizeof(*device->port_pkey_list),
					 GFP_KERNEL);

	if (!device->port_pkey_list)
		return -ENOMEM;

	for (i = 0; i < (rdma_end_port(device) + 1); i++) {
		spin_lock_init(&device->port_pkey_list[i].list_lock);
		INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list);
	}

	return 0;
}

static void ib_policy_change_task(struct work_struct *work)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list) {
		int i;

		for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) {
			u64 sp;
			int ret = ib_get_cached_subnet_prefix(dev,
							      i,
							      &sp);

			WARN_ONCE(ret,
				  "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
				  ret);
			if (!ret)
				ib_security_cache_change(dev, i, sp);
		}
	}
	up_read(&lists_rwsem);
}

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data)
{
	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	schedule_work(&ib_policy_change_work);

	return NOTIFY_OK;
}

/**
 * __dev_new_index - allocate a device index
 *
 * Returns a suitable unique value for a new device interface
 * number.  It assumes that fewer than 2^32-1 IB devices will be
 * present in the system.
 */
static u32 __dev_new_index(void)
{
	/*
	 * The device index to allow stable naming.
	 * Similar to struct net -> ifindex.
	 */
	static u32 index;

	for (;;) {
		if (!(++index))
			index = 1;

		if (!__ib_device_get_by_index(index))
			return index;
	}
}

/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core. All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *))
{
	int ret;
	struct ib_client *client;
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
	struct device *parent = device->dev.parent;

	WARN_ON_ONCE(device->dma_device);
	if (device->dev.dma_ops) {
		/*
		 * The caller provided custom DMA operations. Copy the
		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
		 * into device->dev.
		 */
		device->dma_device = &device->dev;
		if (!device->dev.dma_mask) {
			if (parent)
				device->dev.dma_mask = parent->dma_mask;
			else
				WARN_ON_ONCE(true);
		}
		if (!device->dev.coherent_dma_mask) {
			if (parent)
				device->dev.coherent_dma_mask =
					parent->coherent_dma_mask;
			else
				WARN_ON_ONCE(true);
		}
	} else {
		/*
		 * The caller did not provide custom DMA operations. Use the
		 * DMA mapping operations of the parent device.
		 */
		WARN_ON_ONCE(!parent);
		device->dma_device = parent;
	}

	mutex_lock(&device_mutex);

	if (strchr(device->name, '%')) {
		ret = alloc_name(device->name);
		if (ret)
			goto out;
	}

	if (ib_device_check_mandatory(device)) {
		ret = -EINVAL;
		goto out;
	}

	ret = read_port_immutable(device);
	if (ret) {
		pr_warn("Couldn't create per port immutable data %s\n",
			device->name);
		goto out;
	}

	ret = setup_port_pkey_list(device);
	if (ret) {
		pr_warn("Couldn't create per port_pkey_list\n");
		goto out;
	}

	ret = ib_cache_setup_one(device);
	if (ret) {
		pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
		goto port_cleanup;
	}

	ret = ib_device_register_rdmacg(device);
	if (ret) {
		pr_warn("Couldn't register device with rdma cgroup\n");
		goto cache_cleanup;
	}

	memset(&device->attrs, 0, sizeof(device->attrs));
	ret = device->query_device(device, &device->attrs, &uhw);
	if (ret) {
		pr_warn("Couldn't query the device attributes\n");
		goto cache_cleanup;
	}

	ret = ib_device_register_sysfs(device, port_callback);
	if (ret) {
		pr_warn("Couldn't register device %s with driver model\n",
			device->name);
		goto cache_cleanup;
	}

	device->reg_state = IB_DEV_REGISTERED;

	list_for_each_entry(client, &client_list, list)
		if (!add_client_context(device, client) && client->add)
			client->add(device);

	device->index = __dev_new_index();
	down_write(&lists_rwsem);
	list_add_tail(&device->core_list, &device_list);
	up_write(&lists_rwsem);
	mutex_unlock(&device_mutex);
	return 0;

cache_cleanup:
	ib_cache_cleanup_one(device);
	ib_cache_release_one(device);
port_cleanup:
	kfree(device->port_immutable);
out:
	mutex_unlock(&device_mutex);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);
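
/*
 * Registration sketch (illustrative): after ib_alloc_device() and after
 * filling in the mandatory verbs checked by ib_device_check_mandatory(),
 * a driver registers roughly like this.  The hca variable and
 * my_setup_verbs() helper are hypothetical.
 *
 *	strlcpy(hca->ibdev.name, "myhca%d", IB_DEVICE_NAME_MAX);
 *	my_setup_verbs(&hca->ibdev);	// query_device, query_port, ...
 *	ret = ib_register_device(&hca->ibdev, NULL);
 *	if (ret)
 *		ib_dealloc_device(&hca->ibdev);
 *
 * The "%d" in the name is filled in by alloc_name() above; passing a NULL
 * port_callback skips the optional per-port sysfs hook.
 */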

/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device. All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&device->core_list);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		context->going_down = true;
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	downgrade_write(&lists_rwsem);

	list_for_each_entry_safe(context, tmp, &device->client_data_list,
				 list) {
		if (context->client->remove)
			context->client->remove(device, context->data);
	}
	up_read(&lists_rwsem);

	ib_device_unregister_rdmacg(device);
	ib_device_unregister_sysfs(device);

	mutex_unlock(&device_mutex);

	ib_cache_cleanup_one(device);

	ib_security_destroy_port_pkey_list(device);
	kfree(device->port_pkey_list);

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		kfree(context);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal. When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered). In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;

	mutex_lock(&device_mutex);

	list_for_each_entry(device, &device_list, core_list)
		if (!add_client_context(device, client) && client->add)
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&client->list, &client_list);
	up_write(&lists_rwsem);

	mutex_unlock(&device_mutex);

	return 0;
}
EXPORT_SYMBOL(ib_register_client);
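
/*
 * Client skeleton (illustrative): an upper-level module such as a ULP
 * declares a static struct ib_client and registers it from its module
 * init.  The my_* names and struct my_ctx are hypothetical.
 *
 *	static void my_add_one(struct ib_device *device)
 *	{
 *		struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *		if (ctx)
 *			ib_set_client_data(device, &my_client, ctx);
 *	}
 *
 *	static void my_remove_one(struct ib_device *device, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add_one,
 *		.remove = my_remove_one,
 *	};
 *
 *	// module init: ib_register_client(&my_client);
 *	// module exit: ib_unregister_client(&my_client);
 */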

/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration. When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context, *tmp;
	struct ib_device *device;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&client->list);
	up_write(&lists_rwsem);

	list_for_each_entry(device, &device_list, core_list) {
		struct ib_client_data *found_context = NULL;

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
			if (context->client == client) {
				context->going_down = true;
				found_context = context;
				break;
			}
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);

		if (client->remove)
			client->remove(device, found_context ?
					       found_context->data : NULL);

		if (!found_context) {
			pr_warn("No client context found for %s/%s\n",
				device->name, client->name);
			continue;
		}

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_del(&found_context->list);
		kfree(found_context);
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);
	}

	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	void *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			ret = context->data;
			break;
		}
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	struct ib_client_data *context;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			context->data = data;
			goto out;
		}

	pr_warn("No client context found for %s/%s\n",
		device->name, client->name);

out:
	spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
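
/*
 * Pairing sketch (illustrative): a client stores its per-device state from
 * its add callback and reads it back wherever it holds a valid device
 * pointer.  my_client and struct my_ctx are the hypothetical names from
 * the skeleton above.
 *
 *	ib_set_client_data(device, &my_client, ctx);	// in .add
 *	...
 *	struct my_ctx *ctx = ib_get_client_data(device, &my_client);
 *
 * Only the list walk itself is protected by client_data_lock; once the
 * pointer is returned, the client must ensure the device is not being
 * torn down while it uses the data.
 */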

/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
void ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_register_event_handler);
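
/*
 * Usage sketch (illustrative): consumers normally initialize the handler
 * with the INIT_IB_EVENT_HANDLER() macro from <rdma/ib_verbs.h>.  The
 * my_event_handler() callback is hypothetical and, per the comment above,
 * must be safe to run in interrupt context.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			pr_info("port %u active\n", event->element.port_num);
 *	}
 *
 *	struct ib_event_handler handler;
 *
 *	INIT_IB_EVENT_HANDLER(&handler, device, my_event_handler);
 *	ib_register_event_handler(&handler);
 */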

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
void ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
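
/*
 * Dispatch sketch (illustrative): a low-level driver reporting a port
 * state change from its interrupt path would do something like the
 * following; hca is the hypothetical driver structure from the earlier
 * sketches.
 *
 *	struct ib_event event;
 *
 *	event.device		= &hca->ibdev;
 *	event.event		= IB_EVENT_PORT_ACTIVE;
 *	event.element.port_num	= port;
 *	ib_dispatch_event(&event);
 */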

/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	union ib_gid gid;
	int err;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	memset(port_attr, 0, sizeof(*port_attr));
	err = device->query_port(device, port_num, port_attr);
	if (err || port_attr->subnet_prefix)
		return err;

	if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
		return 0;

	err = ib_query_gid(device, port_num, 0, &gid, NULL);
	if (err)
		return err;

	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
	return 0;
}
EXPORT_SYMBOL(ib_query_port);
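
/*
 * Query sketch (illustrative): callers pass a stack-allocated
 * ib_port_attr; the function zeroes it before filling it in, and for IB
 * links derives the subnet prefix from GID index 0 when the driver does
 * not report one itself.
 *
 *	struct ib_port_attr attr;
 *
 *	if (!ib_query_port(device, port_num, &attr) &&
 *	    attr.state == IB_PORT_ACTIVE)
 *		pr_info("port active, lid 0x%x\n", attr.lid);
 */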

/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 * @attr: Returned GID attributes related to this GID index (only in RoCE).
 *   NULL means ignore.
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid,
		 struct ib_gid_attr *attr)
{
	if (rdma_cap_roce_gid_table(device, port_num))
		return ib_get_cached_gid(device, port_num, index, gid, attr);

	if (attr)
		return -EINVAL;

	return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev: IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev
 * that are related to a netdevice and calls the callback on each
 * port for which filter() returns non-zero.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie)
{
	u8 port;

	for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
	     port++)
		if (rdma_protocol_roce(ib_dev, port)) {
			struct net_device *idev = NULL;

			if (ib_dev->get_netdev)
				idev = ib_dev->get_netdev(ib_dev, port);

			if (idev &&
			    idev->reg_state >= NETREG_UNREGISTERED) {
				dev_put(idev);
				idev = NULL;
			}

			if (filter(ib_dev, port, idev, filter_cookie))
				cb(ib_dev, port, idev, cookie);

			if (idev)
				dev_put(idev);
		}
}

/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates the physical ports of all RoCE devices that are related
 * to netdevices and calls the callback on each port for which
 * filter() returns non-zero.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list)
		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
	up_read(&lists_rwsem);
}

/**
 * ib_enum_all_devs - enumerate all ib_devices
 * @nldev_cb: Callback to call for each found ib_device
 * @skb: The skb to fill while dumping
 * @cb: The netlink callback that triggered the dump
 *
 * Enumerates all ib_devices and calls the callback on each device.
 */
int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
		     struct netlink_callback *cb)
{
	struct ib_device *dev;
	unsigned int idx = 0;
	int ret = 0;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list) {
		ret = nldev_cb(dev, skb, cb, idx);
		if (ret)
			break;
		idx++;
	}

	up_read(&lists_rwsem);
	return ret;
}

/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->modify_device)
		return -ENOSYS;

	return device->modify_device(device, device_modify_mask,
				     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	int rc;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	if (device->modify_port)
		rc = device->modify_port(device, port_num, port_modify_mask,
					 port_modify);
	else
		rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
	return rc;
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.  It searches only the IB link layer.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @ndev: The ndev related to the GID to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		struct net_device *ndev, u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	int ret, port, i;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
		if (rdma_cap_roce_gid_table(device, port))
			continue;

		for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
			ret = ib_query_gid(device, port, i, &tmp_gid, NULL);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* If there is a full-member pkey, take it. */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* No full-member pkey found; if a limited one exists, take it. */
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
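
/*
 * Lookup sketch (illustrative): the top bit of a P_Key marks full
 * membership, so a search for the default P_Key 0xffff also matches a
 * limited-member 0x7fff entry if that is all the table holds.
 *
 *	u16 pkey_index;
 *
 *	if (!ib_find_pkey(device, port_num, 0xffff, &pkey_index))
 *		pr_info("default pkey at index %u\n", pkey_index);
 */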

/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev:	An RDMA device on which the request has been received.
 * @port:	Port number on the RDMA device.
 * @pkey:	The Pkey the request came on.
 * @gid:	A GID that the net_dev uses to communicate.
 * @addr:	Contains the IP address that the request specified as its
 *		destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	struct net_device *net_dev = NULL;
	struct ib_client_data *context;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	down_read(&lists_rwsem);

	list_for_each_entry(context, &dev->client_data_list, list) {
		struct ib_client *client = context->client;

		if (context->going_down)
			continue;

		if (client->get_net_dev_by_params) {
			net_dev = client->get_net_dev_by_params(dev, port, pkey,
								gid, addr,
								context->data);
			if (net_dev)
				break;
		}
	}

	up_read(&lists_rwsem);

	return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.doit = ib_nl_handle_resolve_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.doit = ib_nl_handle_set_timeout,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_IP_RESOLVE] = {
		.doit = ib_nl_handle_ip_res_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
};

static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ib_comp_wq = alloc_workqueue("ib-comp-wq",
			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!ib_comp_wq) {
		ret = -ENOMEM;
		goto err;
	}

	ret = class_register(&ib_class);
	if (ret) {
		pr_warn("Couldn't create InfiniBand device class\n");
		goto err_comp;
	}

	ret = rdma_nl_init();
	if (ret) {
		pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
		goto err_sysfs;
	}

	ret = addr_init();
	if (ret) {
		pr_warn("Couldn't init IB address resolution\n");
		goto err_ibnl;
	}

	ret = ib_mad_init();
	if (ret) {
		pr_warn("Couldn't init IB MAD\n");
		goto err_addr;
	}

	ret = ib_sa_init();
	if (ret) {
		pr_warn("Couldn't init SA\n");
		goto err_mad;
	}

	ret = register_lsm_notifier(&ibdev_lsm_nb);
	if (ret) {
		pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
		goto err_sa;
	}

	nldev_init();
	rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
	ib_cache_setup();

	return 0;

err_sa:
	ib_sa_cleanup();
err_mad:
	ib_mad_cleanup();
err_addr:
	addr_cleanup();
err_ibnl:
	rdma_nl_exit();
err_sysfs:
	class_unregister(&ib_class);
err_comp:
	destroy_workqueue(ib_comp_wq);
err:
	destroy_workqueue(ib_wq);
	return ret;
}

static void __exit ib_core_cleanup(void)
{
	ib_cache_cleanup();
	nldev_exit();
	rdma_nl_unregister(RDMA_NL_LS);
	unregister_lsm_notifier(&ibdev_lsm_nb);
	ib_sa_cleanup();
	ib_mad_cleanup();
	addr_cleanup();
	rdma_nl_exit();
	class_unregister(&ib_class);
	destroy_workqueue(ib_comp_wq);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);

subsys_initcall(ib_core_init);
module_exit(ib_core_cleanup);