/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/rculist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "ipath_verbs.h"

/*
 * Global table of GID to attached QPs.
 * The table is global to all ipath devices since a send from one QP/device
 * needs to be locally routed to any locally attached QPs on the same
 * or different device.
 */
static struct rb_root mcast_tree;
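/*
 * mcast_lock protects mcast_tree as well as additions to and removals
 * from each group's QP list.
 */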
static DEFINE_SPINLOCK(mcast_lock);

/**
 * ipath_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
 * @qp: the QP to link
 */
static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)
{
	struct ipath_mcast_qp *mqp;

	mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
	if (!mqp)
		goto bail;

	mqp->qp = qp;
	atomic_inc(&qp->refcount);

bail:
	return mqp;
}

static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)
{
	struct ipath_qp *qp = mqp->qp;

	/* Notify ipath_destroy_qp() if it is waiting. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);

	kfree(mqp);
}

/**
 * ipath_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 *
 * A list of QPs will be attached to this structure.
 */
static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
{
	struct ipath_mcast *mcast;

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast)
		goto bail;

	mcast->mgid = *mgid;
	INIT_LIST_HEAD(&mcast->qp_list);
	init_waitqueue_head(&mcast->wait);
	atomic_set(&mcast->refcount, 0);
	mcast->n_attached = 0;

bail:
	return mcast;
}

static void ipath_mcast_free(struct ipath_mcast *mcast)
{
	struct ipath_mcast_qp *p, *tmp;

	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
		ipath_mcast_qp_free(p);

	kfree(mcast);
}

/**
 * ipath_mcast_find - search the global table for the given multicast GID
 * @mgid: the multicast GID to search for
 *
 * Returns NULL if not found.
 *
 * The caller is responsible for decrementing the reference count if found.
 */
struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid)
{
	struct rb_node *n;
	unsigned long flags;
	struct ipath_mcast *mcast;

	spin_lock_irqsave(&mcast_lock, flags);
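	/*
	 * Walk the tree under the lock; bump the group's reference count on
	 * a match so the group cannot be freed after the lock is dropped.
	 * The caller drops the reference when it is done with the group.
	 */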
	n = mcast_tree.rb_node;
	while (n) {
		int ret;

		mcast = rb_entry(n, struct ipath_mcast, rb_node);

		ret = memcmp(mgid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else {
			atomic_inc(&mcast->refcount);
			spin_unlock_irqrestore(&mcast_lock, flags);
			goto bail;
		}
	}
	spin_unlock_irqrestore(&mcast_lock, flags);

	mcast = NULL;

bail:
	return mcast;
}

/**
 * ipath_mcast_add - insert mcast GID into table and attach QP struct
 * @dev: the ipath device on which the multicast group count is tracked
 * @mcast: the new mcast GID structure to insert if needed
 * @mqp: the QP to attach
 *
 * Return zero if both were added.  Return EEXIST if the GID was already in
 * the table but the QP was added.  Return ESRCH if the QP was already
 * attached and neither structure was added.  Return ENOMEM if either the
 * group's QP attach limit or the device's multicast group limit has been
 * reached.
 */
static int ipath_mcast_add(struct ipath_ibdev *dev,
			   struct ipath_mcast *mcast,
			   struct ipath_mcast_qp *mqp)
{
	struct rb_node **n = &mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	int ret;

	spin_lock_irq(&mcast_lock);

	while (*n) {
		struct ipath_mcast *tmcast;
		struct ipath_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct ipath_mcast, rb_node);

		ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}

		/* Search the QP list to see if this is already there. */
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				ret = ESRCH;
				goto bail;
			}
		}
		if (tmcast->n_attached == ib_ipath_max_mcast_qp_attached) {
			ret = ENOMEM;
			goto bail;
		}

		tmcast->n_attached++;

		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
		ret = EEXIST;
		goto bail;
	}

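	/*
	 * The GID is not in the table yet; make sure the device has not
	 * already reached its limit of multicast groups before inserting
	 * a new one.
	 */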
	spin_lock(&dev->n_mcast_grps_lock);
	if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) {
		spin_unlock(&dev->n_mcast_grps_lock);
		ret = ENOMEM;
		goto bail;
	}

	dev->n_mcast_grps_allocated++;
	spin_unlock(&dev->n_mcast_grps_lock);

	mcast->n_attached++;

	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

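	/*
	 * The tree holds a reference on the new group; it is dropped in
	 * ipath_multicast_detach() when the last QP is detached and the
	 * group is erased from the tree.
	 */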
	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &mcast_tree);

	ret = 0;

bail:
	spin_unlock_irq(&mcast_lock);

	return ret;
}

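/*
 * Attach a QP to the multicast group with the given GID: allocate the
 * group and linkage structures up front, then let ipath_mcast_add()
 * decide which of them are actually needed.
 */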
int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_mcast *mcast;
	struct ipath_mcast_qp *mqp;
	int ret;

	/*
	 * Allocate the data structures now since it is better to do this
	 * outside of spin locks and they will most likely be needed.
	 */
	mcast = ipath_mcast_alloc(gid);
	if (mcast == NULL) {
		ret = -ENOMEM;
		goto bail;
	}
	mqp = ipath_mcast_qp_alloc(qp);
	if (mqp == NULL) {
		ipath_mcast_free(mcast);
		ret = -ENOMEM;
		goto bail;
	}
	switch (ipath_mcast_add(dev, mcast, mqp)) {
	case ESRCH:
		/* Neither was used: can't attach the same QP twice. */
		ipath_mcast_qp_free(mqp);
		ipath_mcast_free(mcast);
		ret = -EINVAL;
		goto bail;
	case EEXIST:		/* The mcast wasn't used */
		ipath_mcast_free(mcast);
		break;
	case ENOMEM:
		/* Exceeded the maximum number of mcast groups. */
		ipath_mcast_qp_free(mqp);
		ipath_mcast_free(mcast);
		ret = -ENOMEM;
		goto bail;
	default:
		break;
	}

	ret = 0;

bail:
	return ret;
}

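/*
 * Detach a QP from the multicast group with the given GID.  If this was
 * the last attached QP, the group itself is removed from the table and
 * freed once all readers have dropped their references.
 */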
int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_mcast *mcast = NULL;
	struct ipath_mcast_qp *p, *tmp, *delp = NULL;
	struct rb_node *n;
	int last = 0;
	int ret;

	spin_lock_irq(&mcast_lock);

	/* Find the GID in the mcast table. */
	n = mcast_tree.rb_node;
	while (1) {
		if (n == NULL) {
			spin_unlock_irq(&mcast_lock);
			ret = -EINVAL;
			goto bail;
		}

		mcast = rb_entry(n, struct ipath_mcast, rb_node);
		ret = memcmp(gid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			break;
	}

	/* Search the QP list. */
	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
		if (p->qp != qp)
			continue;
		/*
		 * We found it, so remove it, but don't poison the forward
		 * link until we are sure there are no list walkers.
		 */
		list_del_rcu(&p->list);
		mcast->n_attached--;
		delp = p;

		/* If this was the last attached QP, remove the GID too. */
		if (list_empty(&mcast->qp_list)) {
			rb_erase(&mcast->rb_node, &mcast_tree);
			last = 1;
		}
		break;
	}

	spin_unlock_irq(&mcast_lock);

	/* The QP was never attached to this group. */
	if (!delp) {
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Wait for any list walkers to finish before freeing the
	 * list element.
	 */
	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
	ipath_mcast_qp_free(delp);

	if (last) {
		atomic_dec(&mcast->refcount);
		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
		ipath_mcast_free(mcast);
		spin_lock_irq(&dev->n_mcast_grps_lock);
		dev->n_mcast_grps_allocated--;
		spin_unlock_irq(&dev->n_mcast_grps_lock);
	}

	ret = 0;

bail:
	return ret;
}

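/*
 * Return true if the global GID-to-QP table is empty, i.e. no multicast
 * groups remain attached on any device.
 */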
int ipath_mcast_tree_empty(void)
{
	return mcast_tree.rb_node == NULL;
}