/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <net/neighbour.h>
#include "common.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

#define VLAN_NONE 0xfff

/*
 * Module locking notes:  There is a RW lock protecting the L2 table as a
 * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
 * under the protection of the table lock; individual entry changes happen
 * while holding that entry's spinlock.  The table lock nests outside the
 * entry locks.  Allocations of new entries take the table lock as writers so
 * no other lookups can happen while allocating new entries.  Entry updates
 * take the table lock as readers so multiple entries can be updated in
 * parallel.  Dropping an L2T entry only decrements its reference count and
 * can therefore happen in parallel with entry allocation, but no entry can
 * change state or increment its ref count during allocation as both of
 * these perform lookups.
 */
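
/*
 * Illustrative lock-ordering sketch (not compiled code): the table lock
 * is always acquired before any per-entry lock, never the other way
 * around.
 *
 *	write_lock_bh(&d->lock);	allocation path (t3_l2t_get)
 *	read_lock_bh(&d->lock);		update path (t3_l2t_update)
 *	    spin_lock(&e->lock);	per-entry state changes
 */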
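/*
 * The 802.1Q TCI stores the 3-bit priority in bits 15:13 and the 12-bit
 * VLAN ID in bits 11:0; e.g., a TCI of 0x6005 carries priority 3 and
 * VLAN ID 5.
 */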
static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
	return e->vlan >> 13;
}

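/*
 * Hash an (address, ifindex) pair into the L2 table.  d->nentries is
 * assumed to be a power of 2 so the mask acts as the modulus.
 */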
static inline unsigned int arp_hash(u32 key, int ifindex,
				    const struct l2t_data *d)
{
	return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
}

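/*
 * Swap in a new neighbour, taking a reference on it before dropping the
 * old one.  This ordering is safe even if n == e->neigh.
 */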
static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
	neigh_hold(n);
	if (e->neigh)
		neigh_release(e->neigh);
	e->neigh = n;
}

/*
 * Set up an L2T entry and send any packets waiting in the ARP queue.  The
 * supplied skb is used for the CPL_L2T_WRITE_REQ.  Must be called with the
 * entry locked.
 */
static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
				  struct l2t_entry *e)
{
	struct cpl_l2t_write_req *req;
	struct sk_buff *tmp;

	if (!skb) {
		skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;
	}

	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
	req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
			    V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
			    V_L2T_W_PRIO(vlan_prio(e)));
	memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
	skb->priority = CPL_PRIORITY_CONTROL;
	cxgb3_ofld_send(dev, skb);

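	/*
	 * The write request skb has been handed off above; from here on
	 * 'skb' is only reused as the cursor for draining the ARP queue.
	 */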
	skb_queue_walk_safe(&e->arpq, skb, tmp) {
		__skb_unlink(skb, &e->arpq);
		cxgb3_ofld_send(dev, skb);
	}
	e->state = L2T_STATE_VALID;

	return 0;
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
	__skb_queue_tail(&e->arpq, skb);
}

int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
		     struct l2t_entry *e)
{
again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
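		/* fall through - the packet can now take the fast path */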
	case L2T_STATE_VALID:	/* fast-path, send the packet on */
		return cxgb3_ofld_send(dev, skb);
	case L2T_STATE_RESOLVING:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_RESOLVING) {
			/* ARP already completed */
			spin_unlock_bh(&e->lock);
			goto again;
		}
		arpq_enqueue(e, skb);
		spin_unlock_bh(&e->lock);

		/*
		 * Only the first packet added to the arpq should kick off
		 * resolution.  However, because the alloc_skb below can fail,
		 * we allow each packet added to the arpq to retry resolution
		 * as a way of recovering from transient memory exhaustion.
		 * A better way would be to use a work request to retry L2T
		 * entries when there's no memory.
		 */
		if (!neigh_event_send(e->neigh, NULL)) {
			skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
					GFP_ATOMIC);
			if (!skb)
				break;

			spin_lock_bh(&e->lock);
			if (!skb_queue_empty(&e->arpq))
				setup_l2e_send_pending(dev, skb, e);
			else	/* we lost the race */
				__kfree_skb(skb);
			spin_unlock_bh(&e->lock);
		}
	}
	return 0;
}

EXPORT_SYMBOL(t3_l2t_send_slow);

void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
{
again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		return;
	case L2T_STATE_VALID:	/* fast path, nothing to send */
		return;
	case L2T_STATE_RESOLVING:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_RESOLVING) {
			/* ARP already completed */
			spin_unlock_bh(&e->lock);
			goto again;
		}
		spin_unlock_bh(&e->lock);

		/*
		 * There is no packet to queue here; simply re-prod the
		 * neighbour so that a stuck RESOLVING entry keeps making
		 * progress toward resolution.
		 */
		neigh_event_send(e->neigh, NULL);
	}
}

EXPORT_SYMBOL(t3_l2t_send_event);

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held
 * for writing.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	if (!atomic_read(&d->nfree))
		return NULL;

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;

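	/* the rover reached the end without a hit; wrap and rescan from entry 1 */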
	for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e)
		;
found:
	d->rover = e + 1;
	atomic_dec(&d->nfree);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state != L2T_STATE_UNUSED) {
		int hash = arp_hash(e->addr, e->ifindex, d);

		for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				break;
			}
		e->state = L2T_STATE_UNUSED;
	}
	return e;
}

/*
 * Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbor.  We also drop
 * the existing neighbor reference in case the neighbor is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbors once their ref count
 * drops to 0 we need to take the entry's lock to avoid races with a new
 * incarnation.
 */
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
{
	spin_lock_bh(&e->lock);
	if (atomic_read(&e->refcnt) == 0) {	/* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
	}
	spin_unlock_bh(&e->lock);
	atomic_inc(&d->nfree);
}

EXPORT_SYMBOL(t3_l2e_free);

/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
	unsigned int nud_state;

	spin_lock(&e->lock);	/* avoid race with t3_l2e_free */

	if (neigh != e->neigh)
		neigh_replace(e, neigh);
	nud_state = neigh->nud_state;
	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
	    !(nud_state & NUD_VALID))
		e->state = L2T_STATE_RESOLVING;
	else if (nud_state & NUD_CONNECTED)
		e->state = L2T_STATE_VALID;
	else
		e->state = L2T_STATE_STALE;
	spin_unlock(&e->lock);
}

struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
			     struct net_device *dev)
{
	struct l2t_entry *e;
	struct l2t_data *d = L2DATA(cdev);
	u32 addr = *(u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = arp_hash(addr, ifidx, d);
	struct port_info *p = netdev_priv(dev);
	int smt_idx = p->port_id;

	write_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifindex == ifidx &&
		    e->smt_idx == smt_idx) {
			l2t_hold(d, e);
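			/*
			 * If ours is the only reference, the entry may have
			 * gone stale while unused; resync it with the
			 * current neighbour state.
			 */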
			if (atomic_read(&e->refcnt) == 1)
				reuse_entry(e, neigh);
			goto done;
		}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);	/* avoid race with t3_l2e_free */
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		e->state = L2T_STATE_RESOLVING;
		e->addr = addr;
		e->ifindex = ifidx;
		e->smt_idx = smt_idx;
		atomic_set(&e->refcnt, 1);
		neigh_replace(e, neigh);
		if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
			e->vlan = vlan_dev_vlan_id(neigh->dev);
		else
			e->vlan = VLAN_NONE;
		spin_unlock(&e->lock);
	}
done:
	write_unlock_bh(&d->lock);
	return e;
}

EXPORT_SYMBOL(t3_l2t_get);
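
/*
 * Minimal illustrative caller flow (a sketch, not driver-ready code):
 * look up or create an entry, transmit through it, then drop the
 * reference taken by t3_l2t_get (the l2t_release() helper declared in
 * l2t.h calls t3_l2e_free() once the refcount reaches zero).
 *
 *	struct l2t_entry *e = t3_l2t_get(tdev, neigh, netdev);
 *	if (e) {
 *		t3_l2t_send_slow(tdev, skb, e);
 *		...
 *		l2t_release(L2DATA(tdev), e);
 *	}
 */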

/*
 * Called when address resolution fails for an L2T entry to handle packets
 * queued on the entry's arpq.  If a packet specifies a failure handler it
 * is invoked, otherwise the packet is sent to the offload device.
 *
 * XXX: maybe we should abandon the latter behavior and just require a
 * failure handler.
 */
static void handle_failed_resolution(struct t3cdev *dev,
				     struct sk_buff_head *arpq)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(arpq, skb, tmp) {
		struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

		__skb_unlink(skb, arpq);
		if (cb->arp_failure_handler)
			cb->arp_failure_handler(dev, skb);
		else
			cxgb3_ofld_send(dev, skb);
	}
}

/*
 * Called when the host's ARP layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
{
	struct sk_buff_head arpq;
	struct l2t_entry *e;
	struct l2t_data *d = L2DATA(dev);
	u32 addr = *(u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = arp_hash(addr, ifidx, d);

	read_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			goto found;
		}
	read_unlock_bh(&d->lock);
	return;

found:
	__skb_queue_head_init(&arpq);

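	/*
	 * Drop the table lock without re-enabling BHs: they were disabled
	 * by read_lock_bh() above and must stay off until the
	 * spin_unlock_bh(&e->lock) below.
	 */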
	read_unlock(&d->lock);
	if (atomic_read(&e->refcnt)) {
		if (neigh != e->neigh)
			neigh_replace(e, neigh);

		if (e->state == L2T_STATE_RESOLVING) {
			if (neigh->nud_state & NUD_FAILED)
				skb_queue_splice_init(&e->arpq, &arpq);
			else if (neigh->nud_state & (NUD_CONNECTED | NUD_STALE))
				setup_l2e_send_pending(dev, NULL, e);
		} else {
			e->state = neigh->nud_state & NUD_CONNECTED ?
			    L2T_STATE_VALID : L2T_STATE_STALE;
			if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
				setup_l2e_send_pending(dev, NULL, e);
		}
	}
	spin_unlock_bh(&e->lock);

	if (!skb_queue_empty(&arpq))
		handle_failed_resolution(dev, &arpq);
}

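/*
 * Allocate and initialize an L2 table with l2t_capacity entries.  Entry 0
 * is never handed out, so only l2t_capacity - 1 entries are usable.
 */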
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
{
	struct l2t_data *d;
	int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);

	d = cxgb_alloc_mem(size);
	if (!d)
		return NULL;

	d->nentries = l2t_capacity;
	d->rover = &d->l2tab[1];	/* entry 0 is not used */
	atomic_set(&d->nfree, l2t_capacity - 1);
	rwlock_init(&d->lock);

	for (i = 0; i < l2t_capacity; ++i) {
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		__skb_queue_head_init(&d->l2tab[i].arpq);
		spin_lock_init(&d->l2tab[i].lock);
		atomic_set(&d->l2tab[i].refcnt, 0);
	}
	return d;
}

void t3_free_l2t(struct l2t_data *d)
{
	cxgb_free_mem(d);
}