blob: 0f16dd4d9bb45f34dce3d55976552913c40229ec [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
Roland Dreier2a1d9b72005-08-10 23:03:10 -07003 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
Linus Torvalds1da177e2005-04-16 15:20:36 -070032 */
33
34#include <linux/module.h>
35#include <linux/string.h>
36#include <linux/errno.h>
Ahmed S. Darwish9a6b0902007-02-06 18:07:25 +020037#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070038#include <linux/slab.h>
39#include <linux/init.h>
Ingo Molnar95ed6442006-01-13 14:51:39 -080040#include <linux/mutex.h>
Roland Dreierb2cbae22011-05-20 11:46:11 -070041#include <rdma/rdma_netlink.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042
43#include "core_priv.h"
44
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

/*
 * Per-(device, client) binding: one entry lives on a device's
 * client_data_list for each client attached to that device.
 * Entries are protected by ib_device.client_data_lock.
 */
struct ib_client_data {
	struct list_head  list;		/* link on ib_device.client_data_list */
	struct ib_client *client;	/* client this context belongs to */
	void *            data;		/* opaque context set via ib_set_client_data() */
};
54
/* Shared workqueue for IB core deferred work (flushed on module exit). */
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

static LIST_HEAD(device_list);	/* all registered struct ib_device */
static LIST_HEAD(client_list);	/* all registered struct ib_client */

/*
 * device_mutex protects access to both device_list and client_list.
 * There's no real point to using multiple locks or something fancier
 * like an rwsem: we always access both lists, and we're always
 * modifying one list or the other list.  In any case this is not a
 * hot path so there's no point in trying to optimize.
 */
static DEFINE_MUTEX(device_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -070069
70static int ib_device_check_mandatory(struct ib_device *device)
71{
72#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
73 static const struct {
74 size_t offset;
75 char *name;
76 } mandatory_table[] = {
77 IB_MANDATORY_FUNC(query_device),
78 IB_MANDATORY_FUNC(query_port),
Michael Wang6b90a6d2015-05-05 14:50:18 +020079 IB_MANDATORY_FUNC(query_protocol),
Linus Torvalds1da177e2005-04-16 15:20:36 -070080 IB_MANDATORY_FUNC(query_pkey),
81 IB_MANDATORY_FUNC(query_gid),
82 IB_MANDATORY_FUNC(alloc_pd),
83 IB_MANDATORY_FUNC(dealloc_pd),
84 IB_MANDATORY_FUNC(create_ah),
85 IB_MANDATORY_FUNC(destroy_ah),
86 IB_MANDATORY_FUNC(create_qp),
87 IB_MANDATORY_FUNC(modify_qp),
88 IB_MANDATORY_FUNC(destroy_qp),
89 IB_MANDATORY_FUNC(post_send),
90 IB_MANDATORY_FUNC(post_recv),
91 IB_MANDATORY_FUNC(create_cq),
92 IB_MANDATORY_FUNC(destroy_cq),
93 IB_MANDATORY_FUNC(poll_cq),
94 IB_MANDATORY_FUNC(req_notify_cq),
95 IB_MANDATORY_FUNC(get_dma_mr),
96 IB_MANDATORY_FUNC(dereg_mr)
97 };
98 int i;
99
Ahmed S. Darwish9a6b0902007-02-06 18:07:25 +0200100 for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101 if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
102 printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
103 device->name, mandatory_table[i].name);
104 return -EINVAL;
105 }
106 }
107
108 return 0;
109}
110
/*
 * Look up a registered device by exact name.  Returns the device or
 * NULL if no match.  Caller must hold device_mutex (double-underscore
 * prefix marks the "lock already held" convention).
 */
static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
			return device;

	return NULL;
}
121
122
/*
 * Resolve a printf-style name template (e.g. "mlx4_%d") into a unique
 * device name by picking the lowest index not already taken.  @name is
 * both input (the template) and output (the final name).  Caller must
 * hold device_mutex.  Returns 0 on success, -ENOMEM if the index
 * bitmap can't be allocated, -ENFILE if the chosen name is taken.
 */
static int alloc_name(char *name)
{
	unsigned long *inuse;	/* one zeroed page used as a bitmap of taken indices */
	char buf[IB_DEVICE_NAME_MAX];
	struct ib_device *device;
	int i;

	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		/* NOTE(review): the caller-supplied template is used as the
		 * sscanf format; it is trusted to contain exactly one %d. */
		if (!sscanf(device->name, name, &i))
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		/* re-render and compare so "foo1bar" doesn't claim index 1 of "foo%d" */
		snprintf(buf, sizeof buf, name, i);
		if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);
	snprintf(buf, sizeof buf, name, i);

	if (__ib_device_get_by_name(buf))
		return -ENFILE;

	strlcpy(name, buf, IB_DEVICE_NAME_MAX);
	return 0;
}
154
155/**
156 * ib_alloc_device - allocate an IB device struct
157 * @size:size of structure to allocate
158 *
159 * Low-level drivers should use ib_alloc_device() to allocate &struct
160 * ib_device. @size is the size of the structure to be allocated,
161 * including any private data used by the low-level driver.
162 * ib_dealloc_device() must be used to free structures allocated with
163 * ib_alloc_device().
164 */
165struct ib_device *ib_alloc_device(size_t size)
166{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700167 BUG_ON(size < sizeof (struct ib_device));
168
Roland Dreierde6eb662005-11-02 07:23:14 -0800169 return kzalloc(size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700170}
171EXPORT_SYMBOL(ib_alloc_device);
172
/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	/* Never registered: plain kfree is enough, no sysfs state exists. */
	if (device->reg_state == IB_DEV_UNINITIALIZED) {
		kfree(device);
		return;
	}

	/* Freeing a device that is still registered is a driver bug. */
	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);

	/* Registered devices are freed via the sysfs kobject release. */
	kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);
191
/*
 * Allocate an ib_client_data entry binding @client to @device and put
 * it on the device's client_data_list.  The context pointer starts out
 * NULL until the client calls ib_set_client_data().  Returns 0 or
 * -ENOMEM.
 */
static int add_client_context(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	unsigned long flags;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
		       device->name, client->name);
		return -ENOMEM;
	}

	context->client = client;
	context->data   = NULL;

	/* irqsave: the list is also walked from ib_get/set_client_data() */
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_add(&context->list, &device->client_data_list);
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return 0;
}
213
Yosef Etigin5eb620c2007-05-14 07:26:51 +0300214static int read_port_table_lengths(struct ib_device *device)
215{
216 struct ib_port_attr *tprops = NULL;
217 int num_ports, ret = -ENOMEM;
218 u8 port_index;
219
220 tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
221 if (!tprops)
222 goto out;
223
Ira Weiny0cf18d72015-05-13 20:02:55 -0400224 num_ports = rdma_end_port(device) - rdma_start_port(device) + 1;
Yosef Etigin5eb620c2007-05-14 07:26:51 +0300225
226 device->pkey_tbl_len = kmalloc(sizeof *device->pkey_tbl_len * num_ports,
227 GFP_KERNEL);
228 device->gid_tbl_len = kmalloc(sizeof *device->gid_tbl_len * num_ports,
229 GFP_KERNEL);
230 if (!device->pkey_tbl_len || !device->gid_tbl_len)
231 goto err;
232
233 for (port_index = 0; port_index < num_ports; ++port_index) {
Ira Weiny0cf18d72015-05-13 20:02:55 -0400234 ret = ib_query_port(device, port_index + rdma_start_port(device),
Yosef Etigin5eb620c2007-05-14 07:26:51 +0300235 tprops);
236 if (ret)
237 goto err;
238 device->pkey_tbl_len[port_index] = tprops->pkey_tbl_len;
239 device->gid_tbl_len[port_index] = tprops->gid_tbl_len;
240 }
241
242 ret = 0;
243 goto out;
244
245err:
246 kfree(device->gid_tbl_len);
247 kfree(device->pkey_tbl_len);
248out:
249 kfree(tprops);
250 return ret;
251}
252
/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 * @port_callback:Optional hook passed to ib_device_register_sysfs()
 *	to create per-port sysfs attributes (may be NULL)
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core. All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *))
{
	int ret;

	/* device_mutex also serializes against client (un)registration */
	mutex_lock(&device_mutex);

	/* A '%' in the name (e.g. "mlx4_%d") requests automatic numbering. */
	if (strchr(device->name, '%')) {
		ret = alloc_name(device->name);
		if (ret)
			goto out;
	}

	if (ib_device_check_mandatory(device)) {
		ret = -EINVAL;
		goto out;
	}

	INIT_LIST_HEAD(&device->event_handler_list);
	INIT_LIST_HEAD(&device->client_data_list);
	spin_lock_init(&device->event_handler_lock);
	spin_lock_init(&device->client_data_lock);

	ret = read_port_table_lengths(device);
	if (ret) {
		printk(KERN_WARNING "Couldn't create table lengths cache for device %s\n",
		       device->name);
		goto out;
	}

	ret = ib_device_register_sysfs(device, port_callback);
	if (ret) {
		printk(KERN_WARNING "Couldn't register device %s with driver model\n",
		       device->name);
		/* undo read_port_table_lengths() */
		kfree(device->gid_tbl_len);
		kfree(device->pkey_tbl_len);
		goto out;
	}

	list_add_tail(&device->core_list, &device_list);

	device->reg_state = IB_DEV_REGISTERED;

	/* Give every already-registered client a chance to bind. */
	{
		struct ib_client *client;

		list_for_each_entry(client, &client_list, list)
			if (client->add && !add_client_context(device, client))
				client->add(device);
	}

 out:
	mutex_unlock(&device_mutex);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);
319
/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device. All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client *client;
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	mutex_lock(&device_mutex);

	/* Remove clients in reverse registration order. */
	list_for_each_entry_reverse(client, &client_list, list)
		if (client->remove)
			client->remove(device);

	list_del(&device->core_list);

	/* free the port-table-length caches built at registration time */
	kfree(device->gid_tbl_len);
	kfree(device->pkey_tbl_len);

	mutex_unlock(&device_mutex);

	ib_device_unregister_sysfs(device);

	/* All clients are gone; drop their (now orphaned) contexts. */
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		kfree(context);
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);
355
/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal. When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered). In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;

	mutex_lock(&device_mutex);

	list_add_tail(&client->list, &client_list);
	/* Replay "add" for every device that registered before us. */
	list_for_each_entry(device, &device_list, core_list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	mutex_unlock(&device_mutex);

	return 0;
}
EXPORT_SYMBOL(ib_register_client);
385
/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration. When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context, *tmp;
	struct ib_device *device;
	unsigned long flags;

	mutex_lock(&device_mutex);

	list_for_each_entry(device, &device_list, core_list) {
		if (client->remove)
			client->remove(device);

		/* drop this client's context entry on each device */
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
			if (context->client == client) {
				list_del(&context->list);
				kfree(context);
			}
		spin_unlock_irqrestore(&device->client_data_lock, flags);
	}
	list_del(&client->list);

	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);
419
420/**
421 * ib_get_client_data - Get IB client context
422 * @device:Device to get context for
423 * @client:Client to get context for
424 *
425 * ib_get_client_data() returns client context set with
426 * ib_set_client_data().
427 */
428void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
429{
430 struct ib_client_data *context;
431 void *ret = NULL;
432 unsigned long flags;
433
434 spin_lock_irqsave(&device->client_data_lock, flags);
435 list_for_each_entry(context, &device->client_data_list, list)
436 if (context->client == client) {
437 ret = context->data;
438 break;
439 }
440 spin_unlock_irqrestore(&device->client_data_lock, flags);
441
442 return ret;
443}
444EXPORT_SYMBOL(ib_get_client_data);
445
446/**
Krishna Kumar9cd330d2006-09-22 15:22:58 -0700447 * ib_set_client_data - Set IB client context
Linus Torvalds1da177e2005-04-16 15:20:36 -0700448 * @device:Device to set context for
449 * @client:Client to set context for
450 * @data:Context to set
451 *
452 * ib_set_client_data() sets client context that can be retrieved with
453 * ib_get_client_data().
454 */
455void ib_set_client_data(struct ib_device *device, struct ib_client *client,
456 void *data)
457{
458 struct ib_client_data *context;
459 unsigned long flags;
460
461 spin_lock_irqsave(&device->client_data_lock, flags);
462 list_for_each_entry(context, &device->client_data_list, list)
463 if (context->client == client) {
464 context->data = data;
465 goto out;
466 }
467
468 printk(KERN_WARNING "No client context found for %s/%s\n",
469 device->name, client->name);
470
471out:
472 spin_unlock_irqrestore(&device->client_data_lock, flags);
473}
474EXPORT_SYMBOL(ib_set_client_data);
475
/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification). This
 * callback may occur in interrupt context.
 *
 * Always returns 0.
 */
int ib_register_event_handler (struct ib_event_handler *event_handler)
{
	unsigned long flags;

	/* irqsave: ib_dispatch_event() may run in interrupt context */
	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);
497
/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 *
 * Always returns 0.
 */
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);
516
/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 *
 * Handlers run with event_handler_lock held and IRQs disabled, so
 * they must not sleep.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
538
/**
 * ib_query_device - Query IB device attributes
 * @device:Device to query
 * @device_attr:Device attributes
 *
 * ib_query_device() returns the attributes of a device through the
 * @device_attr pointer.  Thin wrapper over the driver's mandatory
 * query_device method.
 */
int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr)
{
	return device->query_device(device, device_attr);
}
EXPORT_SYMBOL(ib_query_device);
553
/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.  Returns -EINVAL if @port_num is outside the
 * device's valid port range.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	/* valid ports run from rdma_start_port() to rdma_end_port() inclusive */
	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	return device->query_port(device, port_num, port_attr);
}
EXPORT_SYMBOL(ib_query_port);
573
/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 *
 * ib_query_gid() fetches the specified GID table entry.  Thin wrapper
 * over the driver's mandatory query_gid method; no port validation is
 * done here.
 */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid)
{
	return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);
589
/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.  Thin
 * wrapper over the driver's mandatory query_pkey method; no port
 * validation is done here.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);
605
/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.  Returns
 * -ENOSYS if the driver does not implement modify_device (it is
 * optional, unlike the mandatory query methods).
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->modify_device)
		return -ENOSYS;

	return device->modify_device(device, device_modify_mask,
				     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);
626
/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.  Returns -ENOSYS if
 * the driver does not implement the optional modify_port method, or
 * -EINVAL for an out-of-range port number.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	if (!device->modify_port)
		return -ENOSYS;

	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
		return -EINVAL;

	return device->modify_port(device, port_num, port_modify_mask,
				   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);
652
/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 *
 * Linear scan over every port and every GID table entry (table sizes
 * come from the gid_tbl_len cache built at registration).  Returns 0
 * on the first match, -ENOENT if the GID is not found, or the error
 * from ib_query_gid().
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	int ret, port, i;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
		/* gid_tbl_len is indexed from 0, ports from rdma_start_port() */
		for (i = 0; i < device->gid_tbl_len[port - rdma_start_port(device)]; ++i) {
			ret = ib_query_gid(device, port, i, &tmp_gid);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);
685
/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 *
 * Matches on the 15-bit base P_Key, preferring a full-member entry
 * (bit 15 set) over a limited-member one.  Returns 0 on success,
 * -ENOENT if no entry matches, or the error from ib_query_pkey().
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;	/* first limited-member match, if any */

	for (i = 0; i < device->pkey_tbl_len[port_num - rdma_start_port(device)]; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		/* compare base P_Key, ignoring the membership bit (0x8000) */
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* if there is full-member pkey take it.*/
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/*no full-member, if exists take the limited*/
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
724
/*
 * Module init: bring up the shared workqueue, the sysfs class, the
 * RDMA netlink interface and the P_Key/GID cache, unwinding in
 * reverse order on any failure.
 */
static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ret = ib_sysfs_setup();
	if (ret) {
		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
		goto err;
	}

	ret = ibnl_init();
	if (ret) {
		printk(KERN_WARNING "Couldn't init IB netlink interface\n");
		goto err_sysfs;
	}

	ret = ib_cache_setup();
	if (ret) {
		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
		goto err_nl;
	}

	return 0;

	/* error unwinding, in reverse order of setup */
err_nl:
	ibnl_cleanup();

err_sysfs:
	ib_sysfs_cleanup();

err:
	destroy_workqueue(ib_wq);
	return ret;
}
763
/* Module exit: tear down in reverse order of ib_core_init(). */
static void __exit ib_core_cleanup(void)
{
	ib_cache_cleanup();
	ibnl_cleanup();
	ib_sysfs_cleanup();
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
}
772
/* Register module entry/exit points with the kernel module loader. */
module_init(ib_core_init);
module_exit(ib_core_cleanup);