/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
	struct list_head  list;
	struct ib_client *client;
	void *            data;
	/* The device or client is going down. Do not call client or device
	 * callbacks other than remove(). */
	bool		  going_down;
};

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list.  device_mutex protects writer access by device and client
 * registration / de-registration.  lists_rwsem protects reader access to
 * these lists.  Iterators of these lists must lock it for read, while updates
 * to the lists must be done with a write lock. A special case is when the
 * device_mutex is locked. In this case locking the lists for read access is
 * not necessary as the device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data);
static void ib_policy_change_task(struct work_struct *work);
static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);

static struct notifier_block ibdev_lsm_nb = {
	.notifier_call = ib_security_change,
};

static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(query_gid),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_ah),
		IB_MANDATORY_FUNC(destroy_ah),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
			pr_warn("Device %s is missing mandatory function %s\n",
				device->name, mandatory_table[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

static struct ib_device *__ib_device_get_by_index(u32 index)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (device->index == index)
			return device;

	return NULL;
}

/*
 * The caller is responsible for releasing the reference by calling
 * put_device().
 */
struct ib_device *ib_device_get_by_index(u32 index)
{
	struct ib_device *device;

	down_read(&lists_rwsem);
	device = __ib_device_get_by_index(index);
	if (device)
		get_device(&device->dev);

	up_read(&lists_rwsem);
	return device;
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
			return device;

	return NULL;
}

static int alloc_name(char *name)
{
	unsigned long *inuse;
	char buf[IB_DEVICE_NAME_MAX];
	struct ib_device *device;
	int i;

	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		if (!sscanf(device->name, name, &i))
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);
	snprintf(buf, sizeof buf, name, i);

	if (__ib_device_get_by_name(buf))
		return -ENFILE;

	strlcpy(name, buf, IB_DEVICE_NAME_MAX);
	return 0;
}
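
/*
 * Usage sketch: @name arrives as a printf-style template containing a
 * single "%d" (for example "mlx4_%d", as drivers pass via the name field
 * before ib_register_device()) and is rewritten in place with the first
 * free index.  The caller below is hypothetical:
 *
 *	char name[IB_DEVICE_NAME_MAX] = "sampledev_%d";
 *
 *	if (!alloc_name(name))
 *		pr_info("allocated device name %s\n", name);
 */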

static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	WARN_ON(dev->reg_state == IB_DEV_REGISTERED);
	if (dev->reg_state == IB_DEV_UNREGISTERED) {
		/*
		 * In the IB_DEV_UNINITIALIZED state, the cache and port
		 * table have not been created yet, so free them only when
		 * the device reaches the UNREGISTERED state.
		 */
		ib_cache_release_one(dev);
		kfree(dev->port_immutable);
	}
	kfree(dev);
}

static int ib_device_uevent(struct device *device,
			    struct kobj_uevent_env *env)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	if (add_uevent_var(env, "NAME=%s", dev->name))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}

static struct class ib_class = {
	.name    = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent = ib_device_uevent,
};

/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device. @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	rdma_restrack_init(&device->res);

	device->dev.class = &ib_class;
	device_initialize(&device->dev);

	dev_set_drvdata(&device->dev, device);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->event_handler_lock);
	spin_lock_init(&device->client_data_lock);
	INIT_LIST_HEAD(&device->client_data_list);
	INIT_LIST_HEAD(&device->port_list);

	return device;
}
EXPORT_SYMBOL(ib_alloc_device);
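
/*
 * Usage sketch (hypothetical driver, illustrative only): a low-level
 * driver typically embeds struct ib_device at the start of its own
 * per-device structure and sizes the allocation accordingly:
 *
 *	struct sample_hca {
 *		struct ib_device ibdev;	// first member, so the cast is valid
 *		int		 sample_private;
 *	};
 *
 *	struct sample_hca *hca =
 *		(struct sample_hca *)ib_alloc_device(sizeof(*hca));
 *
 *	if (!hca)
 *		return -ENOMEM;
 *	hca->sample_private = 42;
 *	...
 *	ib_dealloc_device(&hca->ibdev);	// on the error/teardown path
 */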

/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
		device->reg_state != IB_DEV_UNINITIALIZED);
	rdma_restrack_clean(&device->res);
	put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	unsigned long flags;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->client = client;
	context->data   = NULL;
	context->going_down = false;

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_add(&context->list, &device->client_data_list);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
		       rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
	int ret;
	u8 start_port = rdma_start_port(device);
	u8 end_port = rdma_end_port(device);
	u8 port;

	/**
	 * device->port_immutable is indexed directly by the port number to make
	 * access to this data as efficient as possible.
	 *
	 * Therefore port_immutable is declared as a 1-based array with
	 * potential empty slots at the beginning.
	 */
	device->port_immutable = kzalloc(sizeof(*device->port_immutable)
					 * (end_port + 1),
					 GFP_KERNEL);
	if (!device->port_immutable)
		return -ENOMEM;

	for (port = start_port; port <= end_port; ++port) {
		ret = device->get_port_immutable(device, port,
						 &device->port_immutable[port]);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}
	return 0;
}

void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
	if (dev->get_dev_fw_str)
		dev->get_dev_fw_str(dev, str);
	else
		str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);

static int setup_port_pkey_list(struct ib_device *device)
{
	int i;

	/**
	 * device->port_pkey_list is indexed directly by the port number.
	 * Therefore it is declared as a 1-based array with potential empty
	 * slots at the beginning.
	 */
	device->port_pkey_list = kcalloc(rdma_end_port(device) + 1,
					 sizeof(*device->port_pkey_list),
					 GFP_KERNEL);

	if (!device->port_pkey_list)
		return -ENOMEM;

	for (i = 0; i < (rdma_end_port(device) + 1); i++) {
		spin_lock_init(&device->port_pkey_list[i].list_lock);
		INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list);
	}

	return 0;
}

static void ib_policy_change_task(struct work_struct *work)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list) {
		int i;

		for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) {
			u64 sp;
			int ret = ib_get_cached_subnet_prefix(dev,
							      i,
							      &sp);

			WARN_ONCE(ret,
				  "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
				  ret);
			if (!ret)
				ib_security_cache_change(dev, i, sp);
		}
	}
	up_read(&lists_rwsem);
}

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data)
{
	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	schedule_work(&ib_policy_change_work);

	return NOTIFY_OK;
}

/**
 * __dev_new_index - allocate a device index
 *
 * Returns a suitable unique value for a new device interface
 * number.  It assumes that fewer than 2^32 - 1 ib devices will be
 * present in the system.
 */
static u32 __dev_new_index(void)
{
	/*
	 * The device index to allow stable naming.
	 * Similar to struct net -> ifindex.
	 */
	static u32 index;

	for (;;) {
		if (!(++index))
			index = 1;

		if (!__ib_device_get_by_index(index))
			return index;
	}
}

/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core. All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *))
{
	int ret;
	struct ib_client *client;
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
	struct device *parent = device->dev.parent;

	WARN_ON_ONCE(device->dma_device);
	if (device->dev.dma_ops) {
		/*
		 * The caller provided custom DMA operations. Copy the
		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
		 * into device->dev.
		 */
		device->dma_device = &device->dev;
		if (!device->dev.dma_mask) {
			if (parent)
				device->dev.dma_mask = parent->dma_mask;
			else
				WARN_ON_ONCE(true);
		}
		if (!device->dev.coherent_dma_mask) {
			if (parent)
				device->dev.coherent_dma_mask =
					parent->coherent_dma_mask;
			else
				WARN_ON_ONCE(true);
		}
	} else {
		/*
		 * The caller did not provide custom DMA operations. Use the
		 * DMA mapping operations of the parent device.
		 */
		WARN_ON_ONCE(!parent);
		device->dma_device = parent;
	}

	mutex_lock(&device_mutex);

	if (strchr(device->name, '%')) {
		ret = alloc_name(device->name);
		if (ret)
			goto out;
	}

	if (ib_device_check_mandatory(device)) {
		ret = -EINVAL;
		goto out;
	}

	ret = read_port_immutable(device);
	if (ret) {
		pr_warn("Couldn't create per port immutable data %s\n",
			device->name);
		goto out;
	}

	ret = setup_port_pkey_list(device);
	if (ret) {
		pr_warn("Couldn't create per port_pkey_list\n");
		goto out;
	}

	ret = ib_cache_setup_one(device);
	if (ret) {
		pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
		goto port_cleanup;
	}

	ret = ib_device_register_rdmacg(device);
	if (ret) {
		pr_warn("Couldn't register device with rdma cgroup\n");
		goto cache_cleanup;
	}

	memset(&device->attrs, 0, sizeof(device->attrs));
	ret = device->query_device(device, &device->attrs, &uhw);
	if (ret) {
		pr_warn("Couldn't query the device attributes\n");
		goto cg_cleanup;
	}

	ret = ib_device_register_sysfs(device, port_callback);
	if (ret) {
		pr_warn("Couldn't register device %s with driver model\n",
			device->name);
		goto cg_cleanup;
	}

	device->reg_state = IB_DEV_REGISTERED;

	list_for_each_entry(client, &client_list, list)
		if (!add_client_context(device, client) && client->add)
			client->add(device);

	device->index = __dev_new_index();
	down_write(&lists_rwsem);
	list_add_tail(&device->core_list, &device_list);
	up_write(&lists_rwsem);
	mutex_unlock(&device_mutex);
	return 0;

cg_cleanup:
	ib_device_unregister_rdmacg(device);
cache_cleanup:
	ib_cache_cleanup_one(device);
	ib_cache_release_one(device);
port_cleanup:
	kfree(device->port_immutable);
out:
	mutex_unlock(&device_mutex);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);
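
/*
 * Usage sketch (hypothetical driver, illustrative only): registration
 * usually follows ib_alloc_device() once the verb callbacks are wired up.
 * A "%d" in the name is filled in by alloc_name():
 *
 *	struct ib_device *ibdev = &hca->ibdev;	// from the earlier sketch
 *
 *	strlcpy(ibdev->name, "sampledev_%d", IB_DEVICE_NAME_MAX);
 *	ibdev->query_device = sample_query_device;
 *	// ... plus every other verb ib_device_check_mandatory() requires ...
 *
 *	ret = ib_register_device(ibdev, NULL);
 *	if (ret)
 *		ib_dealloc_device(ibdev);
 */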

/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device. All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&device->core_list);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		context->going_down = true;
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	downgrade_write(&lists_rwsem);

	list_for_each_entry_safe(context, tmp, &device->client_data_list,
				 list) {
		if (context->client->remove)
			context->client->remove(device, context->data);
	}
	up_read(&lists_rwsem);

	ib_device_unregister_rdmacg(device);
	ib_device_unregister_sysfs(device);

	mutex_unlock(&device_mutex);

	ib_cache_cleanup_one(device);

	ib_security_destroy_port_pkey_list(device);
	kfree(device->port_pkey_list);

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		kfree(context);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal. When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered). In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;

	mutex_lock(&device_mutex);

	list_for_each_entry(device, &device_list, core_list)
		if (!add_client_context(device, client) && client->add)
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&client->list, &client_list);
	up_write(&lists_rwsem);

	mutex_unlock(&device_mutex);

	return 0;
}
EXPORT_SYMBOL(ib_register_client);
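
/*
 * Usage sketch (hypothetical client, illustrative only): the "sample_"
 * names below are not part of this file.  A minimal consumer looks like:
 *
 *	static void sample_add_one(struct ib_device *device)
 *	{
 *		pr_info("sample client: device %s added\n", device->name);
 *	}
 *
 *	static void sample_remove_one(struct ib_device *device,
 *				      void *client_data)
 *	{
 *		pr_info("sample client: device %s removed\n", device->name);
 *	}
 *
 *	static struct ib_client sample_client = {
 *		.name   = "sample",
 *		.add    = sample_add_one,
 *		.remove = sample_remove_one,
 *	};
 *
 *	// in module init/exit:
 *	ret = ib_register_client(&sample_client);
 *	...
 *	ib_unregister_client(&sample_client);
 */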

/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration. When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context, *tmp;
	struct ib_device *device;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&client->list);
	up_write(&lists_rwsem);

	list_for_each_entry(device, &device_list, core_list) {
		struct ib_client_data *found_context = NULL;

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
			if (context->client == client) {
				context->going_down = true;
				found_context = context;
				break;
			}
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);

		if (client->remove)
			client->remove(device, found_context ?
					       found_context->data : NULL);

		if (!found_context) {
			pr_warn("No client context found for %s/%s\n",
				device->name, client->name);
			continue;
		}

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_del(&found_context->list);
		kfree(found_context);
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);
	}

	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	void *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			ret = context->data;
			break;
		}
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	struct ib_client_data *context;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			context->data = data;
			goto out;
		}

	pr_warn("No client context found for %s/%s\n",
		device->name, client->name);

out:
	spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
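
/*
 * Usage sketch (continuing the hypothetical client above): an add
 * callback could allocate per-device state and stash it with
 * ib_set_client_data(); the remove callback receives the same pointer
 * back.  struct sample_state is illustrative:
 *
 *	static void sample_add_one(struct ib_device *device)
 *	{
 *		struct sample_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *		if (!st)
 *			return;
 *		ib_set_client_data(device, &sample_client, st);
 *	}
 *
 *	static void sample_remove_one(struct ib_device *device,
 *				      void *client_data)
 *	{
 *		kfree(client_data);	// the pointer set above
 *	}
 */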

/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification). This
 * callback may occur in interrupt context.
 */
void ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_register_event_handler);
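
/*
 * Usage sketch (hypothetical handler, illustrative only), using the
 * INIT_IB_EVENT_HANDLER() helper from <rdma/ib_verbs.h>.  The callback
 * may run in interrupt context, so it must not sleep:
 *
 *	static void sample_event_cb(struct ib_event_handler *handler,
 *				    struct ib_event *event)
 *	{
 *		pr_info("async event %d on %s\n",
 *			event->event, event->device->name);
 *	}
 *
 *	static struct ib_event_handler sample_handler;
 *
 *	INIT_IB_EVENT_HANDLER(&sample_handler, device, sample_event_cb);
 *	ib_register_event_handler(&sample_handler);
 */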

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
void ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
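
/*
 * Usage sketch (driver side, illustrative only): a low-level driver
 * reporting that a port came up fills in a struct ib_event and
 * dispatches it to every registered handler:
 *
 *	struct ib_event event;
 *
 *	event.device		= ibdev;
 *	event.event		= IB_EVENT_PORT_ACTIVE;
 *	event.element.port_num	= 1;
 *	ib_dispatch_event(&event);
 */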

/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	union ib_gid gid;
	int err;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	memset(port_attr, 0, sizeof(*port_attr));
	err = device->query_port(device, port_num, port_attr);
	if (err || port_attr->subnet_prefix)
		return err;

	if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
		return 0;

	err = ib_query_gid(device, port_num, 0, &gid, NULL);
	if (err)
		return err;

	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
	return 0;
}
EXPORT_SYMBOL(ib_query_port);
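
/*
 * Usage sketch (illustrative only): checking whether port 1 of a device
 * is active.  struct ib_port_attr and IB_PORT_ACTIVE come from
 * <rdma/ib_verbs.h>:
 *
 *	struct ib_port_attr attr;
 *	int err = ib_query_port(device, 1, &attr);
 *
 *	if (!err && attr.state == IB_PORT_ACTIVE)
 *		pr_info("%s port 1 active, lid 0x%x\n",
 *			device->name, attr.lid);
 */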

/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 * @attr: Returned GID attributes related to this GID index (only in RoCE).
 *   NULL means ignore.
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid,
		 struct ib_gid_attr *attr)
{
	if (rdma_cap_roce_gid_table(device, port_num))
		return ib_get_cached_gid(device, port_num, index, gid, attr);

	if (attr)
		return -EINVAL;

	return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev : IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev which are
 * related to a netdevice and calls the callback on each port for
 * which filter() returns a non-zero value.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie)
{
	u8 port;

	for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
	     port++)
		if (rdma_protocol_roce(ib_dev, port)) {
			struct net_device *idev = NULL;

			if (ib_dev->get_netdev)
				idev = ib_dev->get_netdev(ib_dev, port);

			if (idev &&
			    idev->reg_state >= NETREG_UNREGISTERED) {
				dev_put(idev);
				idev = NULL;
			}

			if (filter(ib_dev, port, idev, filter_cookie))
				cb(ib_dev, port, idev, cookie);

			if (idev)
				dev_put(idev);
		}
}

/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates the physical ports of all RoCE devices which are related
 * to netdevices and calls the callback on each port for which filter()
 * returns a non-zero value.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list)
		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
	up_read(&lists_rwsem);
}

/**
 * ib_enum_all_devs - enumerate all ib_devices
 * @nldev_cb: Callback to call for each found ib_device
 *
 * Enumerates all ib_devices and calls the callback on each device.
 */
int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
		     struct netlink_callback *cb)
{
	struct ib_device *dev;
	unsigned int idx = 0;
	int ret = 0;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list) {
		ret = nldev_cb(dev, skb, cb, idx);
		if (ret)
			break;
		idx++;
	}

	up_read(&lists_rwsem);
	return ret;
}

/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->modify_device)
		return -ENOSYS;

	return device->modify_device(device, device_modify_mask,
				     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	int rc;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	if (device->modify_port)
		rc = device->modify_port(device, port_num, port_modify_mask,
					 port_modify);
	else
		rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
	return rc;
}
EXPORT_SYMBOL(ib_modify_port);
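
/*
 * Usage sketch (illustrative only): a consumer advertising device
 * management support on port 1 sets the corresponding port capability
 * bit.  IB_PORT_DEVICE_MGMT_SUP comes from <rdma/ib_verbs.h>:
 *
 *	struct ib_port_modify port_modify = {
 *		.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
 *	};
 *
 *	int err = ib_modify_port(device, 1, 0, &port_modify);
 */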

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs. It searches only the IB link layer.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @ndev: The ndev related to the GID to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found. This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		struct net_device *ndev, u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	int ret, port, i;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
		if (rdma_cap_roce_gid_table(device, port))
			continue;

		for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
			ret = ib_query_gid(device, port, i, &tmp_gid, NULL);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* if there is a full-member pkey, take it */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* no full-member pkey found; take the limited-member one if it exists */
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
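
/*
 * Usage sketch (illustrative only): resolving the table index of the
 * default partition key on port 1.  Bit 15 of a PKey is the membership
 * bit, which is why the lookup above masks with 0x7fff.
 * IB_DEFAULT_PKEY_FULL comes from <rdma/ib_mad.h>:
 *
 *	u16 index;
 *	int err = ib_find_pkey(device, 1, IB_DEFAULT_PKEY_FULL, &index);
 *
 *	if (!err)
 *		pr_info("default pkey at index %u\n", index);
 */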

/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev: An RDMA device on which the request has been received.
 * @port: Port number on the RDMA device.
 * @pkey: The Pkey the request came on.
 * @gid: A GID that the net_dev uses to communicate.
 * @addr: Contains the IP address that the request specified as its
 *   destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	struct net_device *net_dev = NULL;
	struct ib_client_data *context;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	down_read(&lists_rwsem);

	list_for_each_entry(context, &dev->client_data_list, list) {
		struct ib_client *client = context->client;

		if (context->going_down)
			continue;

		if (client->get_net_dev_by_params) {
			net_dev = client->get_net_dev_by_params(dev, port, pkey,
								gid, addr,
								context->data);
			if (net_dev)
				break;
		}
	}

	up_read(&lists_rwsem);

	return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.doit = ib_nl_handle_resolve_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.doit = ib_nl_handle_set_timeout,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_IP_RESOLVE] = {
		.doit = ib_nl_handle_ip_res_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
};
static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ib_comp_wq = alloc_workqueue("ib-comp-wq",
			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!ib_comp_wq) {
		ret = -ENOMEM;
		goto err;
	}

	ret = class_register(&ib_class);
	if (ret) {
		pr_warn("Couldn't create InfiniBand device class\n");
		goto err_comp;
	}

	ret = rdma_nl_init();
	if (ret) {
		pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
		goto err_sysfs;
	}

	ret = addr_init();
	if (ret) {
		pr_warn("Couldn't init IB address resolution\n");
		goto err_ibnl;
	}

	ret = ib_mad_init();
	if (ret) {
		pr_warn("Couldn't init IB MAD\n");
		goto err_addr;
	}

	ret = ib_sa_init();
	if (ret) {
		pr_warn("Couldn't init SA\n");
		goto err_mad;
	}

	ret = register_lsm_notifier(&ibdev_lsm_nb);
	if (ret) {
		pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
		goto err_sa;
	}

	nldev_init();
	rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
	ib_cache_setup();

	return 0;

err_sa:
	ib_sa_cleanup();
err_mad:
	ib_mad_cleanup();
err_addr:
	addr_cleanup();
err_ibnl:
	rdma_nl_exit();
err_sysfs:
	class_unregister(&ib_class);
err_comp:
	destroy_workqueue(ib_comp_wq);
err:
	destroy_workqueue(ib_wq);
	return ret;
}

static void __exit ib_core_cleanup(void)
{
	ib_cache_cleanup();
	nldev_exit();
	rdma_nl_unregister(RDMA_NL_LS);
	unregister_lsm_notifier(&ibdev_lsm_nb);
	ib_sa_cleanup();
	ib_mad_cleanup();
	addr_cleanup();
	rdma_nl_exit();
	class_unregister(&ib_class);
	destroy_workqueue(ib_comp_wq);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);

subsys_initcall(ib_core_init);
module_exit(ib_core_cleanup);