/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
	struct list_head  list;
	struct ib_client *client;
	void *            data;
};

struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex protects access to both device_list and client_list.
 * There's no real point to using multiple locks or something fancier
 * like an rwsem: we always access both lists, and we're always
 * modifying one list or the other.  In any case this is not a
 * hot path, so there's no point in trying to optimize.
 */
static DEFINE_MUTEX(device_mutex);

static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(query_gid),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_ah),
		IB_MANDATORY_FUNC(destroy_ah),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr)
	};
	int i;

	/*
	 * Each IB_MANDATORY_FUNC(x) entry records the offset of method
	 * pointer x within struct ib_device, so a single generic loop
	 * can verify that the low-level driver filled in every
	 * mandatory method.
	 */
	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
			printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
			       device->name, mandatory_table[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
			return device;

	return NULL;
}

/*
 * Pick the lowest free index for a printf-style device name template
 * (a name containing a single "%d") and write the expanded name back
 * into the caller's buffer.
 */
static int alloc_name(char *name)
{
	unsigned long *inuse;
	char buf[IB_DEVICE_NAME_MAX];
	struct ib_device *device;
	int i;

	/* One bit per candidate index; a page's worth of bits is plenty. */
	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		if (!sscanf(device->name, name, &i))
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);
	snprintf(buf, sizeof buf, name, i);

	if (__ib_device_get_by_name(buf))
		return -ENFILE;

	strlcpy(name, buf, IB_DEVICE_NAME_MAX);
	return 0;
}
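
/*
 * For example, if devices named "mthca0" and "mthca2" are already
 * registered, alloc_name() expands the template "mthca%d" to "mthca1",
 * the lowest unused index.
 */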

/*
 * Port numbering: a switch is managed through port 0, while CAs and
 * routers number their physical ports starting at 1.
 */
static int start_port(struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1;
}

static int end_port(struct ib_device *device)
{
	return (device->node_type == RDMA_NODE_IB_SWITCH) ?
		0 : device->phys_port_cnt;
}

/**
 * ib_alloc_device - allocate an IB device struct
 * @size: size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
	BUG_ON(size < sizeof (struct ib_device));

	return kzalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(ib_alloc_device);
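
/*
 * Illustrative sketch (hypothetical driver names): a low-level driver
 * typically embeds struct ib_device at the start of its own device
 * structure and allocates both together:
 *
 *	struct mydrv_dev {
 *		struct ib_device ibdev;		// must come first
 *		// ...driver-private state...
 *	};
 *
 *	struct mydrv_dev *dev =
 *		(struct mydrv_dev *) ib_alloc_device(sizeof *dev);
 */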

/**
 * ib_dealloc_device - free an IB device struct
 * @device: structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	if (device->reg_state == IB_DEV_UNINITIALIZED) {
		kfree(device);
		return;
	}

	BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);

	kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	unsigned long flags;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
		       device->name, client->name);
		return -ENOMEM;
	}

	context->client = client;
	context->data   = NULL;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_add(&context->list, &device->client_data_list);
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return 0;
}

/*
 * Cache each port's P_Key and GID table lengths so later lookups
 * (e.g. ib_find_gid() and ib_find_pkey()) can bound their table walks
 * without querying the device again.
 */
static int read_port_table_lengths(struct ib_device *device)
{
	struct ib_port_attr *tprops = NULL;
	int num_ports, ret = -ENOMEM;
	u8 port_index;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		goto out;

	num_ports = end_port(device) - start_port(device) + 1;

	device->pkey_tbl_len = kmalloc(sizeof *device->pkey_tbl_len * num_ports,
				       GFP_KERNEL);
	device->gid_tbl_len = kmalloc(sizeof *device->gid_tbl_len * num_ports,
				      GFP_KERNEL);
	if (!device->pkey_tbl_len || !device->gid_tbl_len)
		goto err;

	for (port_index = 0; port_index < num_ports; ++port_index) {
		ret = ib_query_port(device, port_index + start_port(device),
				    tprops);
		if (ret)
			goto err;
		device->pkey_tbl_len[port_index] = tprops->pkey_tbl_len;
		device->gid_tbl_len[port_index]  = tprops->gid_tbl_len;
	}

	ret = 0;
	goto out;

err:
	kfree(device->gid_tbl_len);
	kfree(device->pkey_tbl_len);
out:
	kfree(tprops);
	return ret;
}

/**
 * ib_register_device - Register an IB device with IB core
 * @device: Device to register
 * @port_callback: Optional callback invoked for each port as its sysfs
 *   entries are created
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core.  All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *))
{
	int ret;

	mutex_lock(&device_mutex);

	if (strchr(device->name, '%')) {
		ret = alloc_name(device->name);
		if (ret)
			goto out;
	}

	if (ib_device_check_mandatory(device)) {
		ret = -EINVAL;
		goto out;
	}

	INIT_LIST_HEAD(&device->event_handler_list);
	INIT_LIST_HEAD(&device->client_data_list);
	spin_lock_init(&device->event_handler_lock);
	spin_lock_init(&device->client_data_lock);

	ret = read_port_table_lengths(device);
	if (ret) {
		printk(KERN_WARNING "Couldn't create table lengths cache for device %s\n",
		       device->name);
		goto out;
	}

	ret = ib_device_register_sysfs(device, port_callback);
	if (ret) {
		printk(KERN_WARNING "Couldn't register device %s with driver model\n",
		       device->name);
		kfree(device->gid_tbl_len);
		kfree(device->pkey_tbl_len);
		goto out;
	}

	list_add_tail(&device->core_list, &device_list);

	device->reg_state = IB_DEV_REGISTERED;

	{
		struct ib_client *client;

		list_for_each_entry(client, &client_list, list)
			if (client->add && !add_client_context(device, client))
				client->add(device);
	}

 out:
	mutex_unlock(&device_mutex);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);
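
/*
 * Illustrative sketch (hypothetical names): typical use in a driver's
 * probe path, after ib_alloc_device():
 *
 *	strlcpy(dev->ibdev.name, "mydrv%d", IB_DEVICE_NAME_MAX);
 *	dev->ibdev.query_device = mydrv_query_device;
 *	// ...fill in the remaining mandatory methods...
 *	ret = ib_register_device(&dev->ibdev, NULL);
 *	if (ret)
 *		goto err_free;
 */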

/**
 * ib_unregister_device - Unregister an IB device
 * @device: Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client *client;
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	mutex_lock(&device_mutex);

	list_for_each_entry_reverse(client, &client_list, list)
		if (client->remove)
			client->remove(device);

	list_del(&device->core_list);

	kfree(device->gid_tbl_len);
	kfree(device->pkey_tbl_len);

	mutex_unlock(&device_mutex);

	ib_device_unregister_sysfs(device);

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		kfree(context);
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_register_client - Register an IB client
 * @client: Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;

	mutex_lock(&device_mutex);

	list_add_tail(&client->list, &client_list);
	list_for_each_entry(device, &device_list, core_list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	mutex_unlock(&device_mutex);

	return 0;
}
EXPORT_SYMBOL(ib_register_client);
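
/*
 * Illustrative sketch (hypothetical names): a minimal client.
 *
 *	static void myclient_add(struct ib_device *device) { ... }
 *	static void myclient_remove(struct ib_device *device) { ... }
 *
 *	static struct ib_client myclient = {
 *		.name   = "myclient",
 *		.add    = myclient_add,
 *		.remove = myclient_remove,
 *	};
 *
 *	ret = ib_register_client(&myclient);
 */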

/**
 * ib_unregister_client - Unregister an IB client
 * @client: Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context, *tmp;
	struct ib_device *device;
	unsigned long flags;

	mutex_lock(&device_mutex);

	list_for_each_entry(device, &device_list, core_list) {
		if (client->remove)
			client->remove(device);

		spin_lock_irqsave(&device->client_data_lock, flags);
		list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
			if (context->client == client) {
				list_del(&context->list);
				kfree(context);
			}
		spin_unlock_irqrestore(&device->client_data_lock, flags);
	}
	list_del(&client->list);

	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device: Device to get context for
 * @client: Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	void *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			ret = context->data;
			break;
		}
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device: Device to set context for
 * @client: Client to set context for
 * @data: Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	struct ib_client_data *context;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			context->data = data;
			goto out;
		}

	printk(KERN_WARNING "No client context found for %s/%s\n",
	       device->name, client->name);

out:
	spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
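
/*
 * Illustrative sketch (hypothetical names): a client usually allocates
 * per-device state in its add callback, stores it with
 * ib_set_client_data(), and looks it up again later:
 *
 *	static void myclient_add(struct ib_device *device)
 *	{
 *		struct myclient_state *st = kzalloc(sizeof *st, GFP_KERNEL);
 *
 *		if (!st)
 *			return;
 *		ib_set_client_data(device, &myclient, st);
 *	}
 *
 *	// elsewhere:
 *	struct myclient_state *st = ib_get_client_data(device, &myclient);
 */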

/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler: Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
int ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);
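
/*
 * Illustrative sketch: handlers are normally initialized with the
 * INIT_IB_EVENT_HANDLER() macro from <rdma/ib_verbs.h>.  Because
 * events may be dispatched in interrupt context, the handler must not
 * sleep.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event) { ... }
 *
 *	INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_handler);
 *	ib_register_event_handler(&my_handler);
 */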

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler: Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event: Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
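
/*
 * Illustrative sketch (hypothetical names): a low-level driver
 * reporting that a port has come up.
 *
 *	struct ib_event event;
 *
 *	event.device           = &dev->ibdev;
 *	event.event            = IB_EVENT_PORT_ACTIVE;
 *	event.element.port_num = port;
 *	ib_dispatch_event(&event);
 */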

/**
 * ib_query_device - Query IB device attributes
 * @device: Device to query
 * @device_attr: Device attributes
 *
 * ib_query_device() returns the attributes of a device through the
 * @device_attr pointer.
 */
int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr)
{
	return device->query_device(device, device_attr);
}
EXPORT_SYMBOL(ib_query_device);

/**
 * ib_query_port - Query IB port attributes
 * @device: Device to query
 * @port_num: Port number to query
 * @port_attr: Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	return device->query_port(device, port_num, port_attr);
}
EXPORT_SYMBOL(ib_query_port);

/**
 * ib_query_gid - Get GID table entry
 * @device: Device to query
 * @port_num: Port number to query
 * @index: GID table index to query
 * @gid: Returned GID
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid)
{
	return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);

/**
 * ib_query_pkey - Get P_Key table entry
 * @device: Device to query
 * @port_num: Port number to query
 * @index: P_Key table index to query
 * @pkey: Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device: Device to modify
 * @device_modify_mask: Mask of attributes to change
 * @device_modify: New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	/*
	 * modify_device is not a mandatory method, so guard against
	 * drivers that don't provide it.
	 */
	if (!device->modify_device)
		return -ENOSYS;

	return device->modify_device(device, device_modify_mask,
				     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	/*
	 * modify_port is not a mandatory method, so guard against
	 * drivers that don't provide it.
	 */
	if (!device->modify_port)
		return -ENOSYS;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	return device->modify_port(device, port_num, port_modify_mask,
				   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	int ret, port, i;

	for (port = start_port(device); port <= end_port(device); ++port) {
		for (i = 0; i < device->gid_tbl_len[port - start_port(device)]; ++i) {
			ret = ib_query_gid(device, port, i, &tmp_gid);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);
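
/*
 * Illustrative sketch: resolving which port owns a given GID when the
 * table index itself is not needed.
 *
 *	u8 port;
 *	int ret = ib_find_gid(device, &gid, &port, NULL);
 *	if (ret)
 *		// -ENOENT: GID not present on any port
 */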

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;

	for (i = 0; i < device->pkey_tbl_len[port_num - start_port(device)]; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;

		/*
		 * The top bit of a P_Key encodes full vs. limited
		 * membership, so compare only the low 15 bits.
		 */
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			*index = i;
			return 0;
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
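
/*
 * Illustrative sketch: looking up the index of the default P_Key
 * (0xffff) on a port.
 *
 *	u16 index;
 *	int ret = ib_find_pkey(device, port_num, 0xffff, &index);
 */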

static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ret = ib_sysfs_setup();
	if (ret)
		printk(KERN_WARNING "Couldn't create InfiniBand device class\n");

	ret = ib_cache_setup();
	if (ret) {
		printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
		ib_sysfs_cleanup();
		destroy_workqueue(ib_wq);
	}

	return ret;
}

static void __exit ib_core_cleanup(void)
{
	ib_cache_cleanup();
	ib_sysfs_cleanup();
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
}

module_init(ib_core_init);
module_exit(ib_core_cleanup);