1 /* -----------------------------------------------------------------------------
2  * Copyright (c) 2011 Ozmo Inc
3  * Released under the GNU General Public License Version 2 (GPLv2).
4  * -----------------------------------------------------------------------------
5  */
6 #include <linux/init.h>
7 #include <linux/module.h>
8 #include <linux/timer.h>
9 #include <linux/sched.h>
10 #include <linux/netdevice.h>
11 #include <linux/errno.h>
12 #include "ozconfig.h"
13 #include "ozprotocol.h"
14 #include "ozeltbuf.h"
15 #include "ozpd.h"
16 #include "ozproto.h"
17 #include "oztrace.h"
18 #include "ozevent.h"
19 #include "ozcdev.h"
20 #include "ozusbsvc.h"
21 #include <asm/unaligned.h>
22 #include <linux/uaccess.h>
23 #include <net/psnap.h>
24 /*------------------------------------------------------------------------------
25  */
26 #define OZ_MAX_TX_POOL_SIZE	6
27 /* Maximum number of uncompleted isoc frames that can be pending.
28  */
29 #define OZ_MAX_SUBMITTED_ISOC	16
30 /*------------------------------------------------------------------------------
31  */
32 static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
33 static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
34 static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
35 static int oz_send_isoc_frame(struct oz_pd *pd);
36 static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
37 static void oz_isoc_stream_free(struct oz_isoc_stream *st);
38 static int oz_send_next_queued_frame(struct oz_pd *pd, int *more_data);
39 static void oz_isoc_destructor(struct sk_buff *skb);
40 static int oz_def_app_init(void);
41 static void oz_def_app_term(void);
42 static int oz_def_app_start(struct oz_pd *pd, int resume);
43 static void oz_def_app_stop(struct oz_pd *pd, int pause);
44 static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
/*------------------------------------------------------------------------------
 * Counts the uncompleted isoc frames submitted to netcard.
 * Incremented in oz_send_isoc_unit() before handing the skb to
 * dev_queue_xmit() and decremented by the skb destructor
 * (oz_isoc_destructor); submission is throttled at OZ_MAX_SUBMITTED_ISOC.
 */
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
49 /* Application handler functions.
50  */
51 static struct oz_app_if g_app_if[OZ_APPID_MAX] = {
52 	{oz_usb_init,
53 	oz_usb_term,
54 	oz_usb_start,
55 	oz_usb_stop,
56 	oz_usb_rx,
57 	oz_usb_heartbeat,
58 	oz_usb_farewell,
59 	OZ_APPID_USB},
60 
61 	{oz_def_app_init,
62 	oz_def_app_term,
63 	oz_def_app_start,
64 	oz_def_app_stop,
65 	oz_def_app_rx,
66 	0,
67 	0,
68 	OZ_APPID_UNUSED1},
69 
70 	{oz_def_app_init,
71 	oz_def_app_term,
72 	oz_def_app_start,
73 	oz_def_app_stop,
74 	oz_def_app_rx,
75 	0,
76 	0,
77 	OZ_APPID_UNUSED2},
78 
79 	{oz_cdev_init,
80 	oz_cdev_term,
81 	oz_cdev_start,
82 	oz_cdev_stop,
83 	oz_cdev_rx,
84 	0,
85 	0,
86 	OZ_APPID_SERIAL},
87 };
/*------------------------------------------------------------------------------
 * Default app init handler for unused application slots: nothing to set up.
 * Always succeeds.
 * Context: process
 */
static int oz_def_app_init(void)
{
	return 0;
}
/*------------------------------------------------------------------------------
 * Default app termination handler for unused application slots: no-op.
 * Context: process
 */
static void oz_def_app_term(void)
{
}
/*------------------------------------------------------------------------------
 * Default app start handler for unused application slots: reports success
 * without doing anything.
 * Context: softirq
 */
static int oz_def_app_start(struct oz_pd *pd, int resume)
{
	return 0;
}
/*------------------------------------------------------------------------------
 * Default app stop handler for unused application slots: no-op.
 * Context: softirq
 */
static void oz_def_app_stop(struct oz_pd *pd, int pause)
{
}
/*------------------------------------------------------------------------------
 * Default app receive handler for unused application slots: discards the
 * element silently.
 * Context: softirq
 */
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
{
}
/*------------------------------------------------------------------------------
 * Record the PD's new state, log it to the event log and (when tracing is
 * compiled in) emit a readable trace line.
 * NOTE(review): no locking here; callers appear to serialise state changes
 * (several hold oz_polling_lock_bh() around the call) - confirm at call
 * sites.
 * Context: softirq or process
 */
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
	pd->state = state;
	oz_event_log(OZ_EVT_PD_STATE, 0, 0, 0, state);
#ifdef WANT_TRACE
	switch (state) {
	case OZ_PD_S_IDLE:
		oz_trace("PD State: OZ_PD_S_IDLE\n");
		break;
	case OZ_PD_S_CONNECTED:
		oz_trace("PD State: OZ_PD_S_CONNECTED\n");
		break;
	case OZ_PD_S_STOPPED:
		oz_trace("PD State: OZ_PD_S_STOPPED\n");
		break;
	case OZ_PD_S_SLEEP:
		oz_trace("PD State: OZ_PD_S_SLEEP\n");
		break;
	}
#endif /* WANT_TRACE */
}
/*------------------------------------------------------------------------------
 * Take a reference on the PD; paired with oz_pd_put().
 * Context: softirq or process
 */
void oz_pd_get(struct oz_pd *pd)
{
	atomic_inc(&pd->ref_count);
}
/*------------------------------------------------------------------------------
 * Drop a reference on the PD; the last reference destroys it.
 * Context: softirq or process
 */
void oz_pd_put(struct oz_pd *pd)
{
	if (atomic_dec_and_test(&pd->ref_count))
		oz_pd_destroy(pd);
}
159 /*------------------------------------------------------------------------------
160  * Context: softirq-serialized
161  */
oz_pd_alloc(u8 * mac_addr)162 struct oz_pd *oz_pd_alloc(u8 *mac_addr)
163 {
164 	struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
165 	if (pd) {
166 		int i;
167 		atomic_set(&pd->ref_count, 2);
168 		for (i = 0; i < OZ_APPID_MAX; i++)
169 			spin_lock_init(&pd->app_lock[i]);
170 		pd->last_rx_pkt_num = 0xffffffff;
171 		oz_pd_set_state(pd, OZ_PD_S_IDLE);
172 		pd->max_tx_size = OZ_MAX_TX_SIZE;
173 		memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
174 		if (0 != oz_elt_buf_init(&pd->elt_buff)) {
175 			kfree(pd);
176 			pd = 0;
177 		}
178 		spin_lock_init(&pd->tx_frame_lock);
179 		INIT_LIST_HEAD(&pd->tx_queue);
180 		INIT_LIST_HEAD(&pd->farewell_list);
181 		pd->last_sent_frame = &pd->tx_queue;
182 		spin_lock_init(&pd->stream_lock);
183 		INIT_LIST_HEAD(&pd->stream_list);
184 	}
185 	return pd;
186 }
/*------------------------------------------------------------------------------
 * Free a PD and everything it owns: isoc streams, queued tx frames, pending
 * farewell reports, the element buffer, the tx frame pool and the net device
 * reference. Reached only from oz_pd_put() when the last reference drops,
 * so no other context should still be touching the PD.
 * Context: softirq or process
 */
void oz_pd_destroy(struct oz_pd *pd)
{
	struct list_head *e;
	struct oz_tx_frame *f;
	struct oz_isoc_stream *st;
	struct oz_farewell *fwell;
	oz_trace("Destroying PD\n");
	/* Delete any streams. Advance the cursor before freeing each node.
	 */
	e = pd->stream_list.next;
	while (e != &pd->stream_list) {
		st = container_of(e, struct oz_isoc_stream, link);
		e = e->next;
		oz_isoc_stream_free(st);
	}
	/* Free any queued tx frames, running element completion callbacks.
	 */
	e = pd->tx_queue.next;
	while (e != &pd->tx_queue) {
		f = container_of(e, struct oz_tx_frame, link);
		e = e->next;
		oz_retire_frame(pd, f);
	}
	oz_elt_buf_term(&pd->elt_buff);
	/* Free any farewells.
	 */
	e = pd->farewell_list.next;
	while (e != &pd->farewell_list) {
		fwell = container_of(e, struct oz_farewell, link);
		e = e->next;
		kfree(fwell);
	}
	/* Deallocate all frames in tx pool. The pool is a singly linked
	 * list chained through link.next (see oz_tx_frame_free()).
	 */
	while (pd->tx_pool) {
		e = pd->tx_pool;
		pd->tx_pool = e->next;
		kfree(container_of(e, struct oz_tx_frame, link));
	}
	if (pd->net_dev)
		dev_put(pd->net_dev);
	kfree(pd);
}
233 /*------------------------------------------------------------------------------
234  * Context: softirq-serialized
235  */
oz_services_start(struct oz_pd * pd,u16 apps,int resume)236 int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
237 {
238 	struct oz_app_if *ai;
239 	int rc = 0;
240 	oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
241 	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
242 		if (apps & (1<<ai->app_id)) {
243 			if (ai->start(pd, resume)) {
244 				rc = -1;
245 				oz_trace("Unabled to start service %d\n",
246 					ai->app_id);
247 				break;
248 			}
249 			oz_polling_lock_bh();
250 			pd->total_apps |= (1<<ai->app_id);
251 			if (resume)
252 				pd->paused_apps &= ~(1<<ai->app_id);
253 			oz_polling_unlock_bh();
254 		}
255 	}
256 	return rc;
257 }
258 /*------------------------------------------------------------------------------
259  * Context: softirq or process
260  */
oz_services_stop(struct oz_pd * pd,u16 apps,int pause)261 void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
262 {
263 	struct oz_app_if *ai;
264 	oz_trace("oz_stop_services(0x%x) pause(%d)\n", apps, pause);
265 	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
266 		if (apps & (1<<ai->app_id)) {
267 			oz_polling_lock_bh();
268 			if (pause) {
269 				pd->paused_apps |= (1<<ai->app_id);
270 			} else {
271 				pd->total_apps &= ~(1<<ai->app_id);
272 				pd->paused_apps &= ~(1<<ai->app_id);
273 			}
274 			oz_polling_unlock_bh();
275 			ai->stop(pd, pause);
276 		}
277 	}
278 }
279 /*------------------------------------------------------------------------------
280  * Context: softirq
281  */
oz_pd_heartbeat(struct oz_pd * pd,u16 apps)282 void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
283 {
284 	struct oz_app_if *ai;
285 	int more = 0;
286 	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
287 		if (ai->heartbeat && (apps & (1<<ai->app_id))) {
288 			if (ai->heartbeat(pd))
289 				more = 1;
290 		}
291 	}
292 	if (more)
293 		oz_pd_request_heartbeat(pd);
294 	if (pd->mode & OZ_F_ISOC_ANYTIME) {
295 		int count = 8;
296 		while (count-- && (oz_send_isoc_frame(pd) >= 0))
297 			;
298 	}
299 }
/*------------------------------------------------------------------------------
 * Fully stop a PD: deliver pending farewell reports, stop every running
 * service, mark the PD stopped, unlink it from the protocol PD list, cancel
 * its timers and drop a reference. The app masks are snapshotted and cleared
 * under the polling lock so services are stopped exactly once.
 * Context: softirq or process
 */
void oz_pd_stop(struct oz_pd *pd)
{
	u16 stop_apps = 0;
	oz_trace("oz_pd_stop() State = 0x%x\n", pd->state);
	oz_pd_indicate_farewells(pd);
	oz_polling_lock_bh();
	stop_apps = pd->total_apps;
	pd->total_apps = 0;
	pd->paused_apps = 0;
	oz_polling_unlock_bh();
	/* pause == 0: full stop, not a pause. */
	oz_services_stop(pd, stop_apps, 0);
	oz_polling_lock_bh();
	oz_pd_set_state(pd, OZ_PD_S_STOPPED);
	/* Remove from PD list.*/
	list_del(&pd->link);
	oz_polling_unlock_bh();
	oz_trace("pd ref count = %d\n", atomic_read(&pd->ref_count));
	/* Second argument 0 presumably selects all timer types for this pd
	 * - TODO confirm against oz_timer_delete() in ozproto.
	 */
	oz_timer_delete(pd, 0);
	/* Drops the reference the PD list held (oz_pd_alloc starts at 2). */
	oz_pd_put(pd);
}
/*------------------------------------------------------------------------------
 * Put the PD to sleep, or stop it outright if it cannot sleep. Sleeping
 * requires both a keep-alive interval and a session id; otherwise the PD is
 * stopped. When sleeping, services are paused and a stop timer is armed for
 * the keep-alive deadline.
 * Returns non-zero if the PD was stopped rather than put to sleep.
 * Context: softirq
 */
int oz_pd_sleep(struct oz_pd *pd)
{
	int do_stop = 0;
	u16 stop_apps = 0;
	oz_polling_lock_bh();
	/* Already asleep or stopped: nothing to do. */
	if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
		oz_polling_unlock_bh();
		return 0;
	}
	if (pd->keep_alive_j && pd->session_id) {
		oz_pd_set_state(pd, OZ_PD_S_SLEEP);
		pd->pulse_time_j = jiffies + pd->keep_alive_j;
		oz_trace("Sleep Now %lu until %lu\n",
			jiffies, pd->pulse_time_j);
	} else {
		do_stop = 1;
	}
	/* Snapshot the running apps while still under the lock. */
	stop_apps = pd->total_apps;
	oz_polling_unlock_bh();
	if (do_stop) {
		oz_pd_stop(pd);
	} else {
		/* pause == 1: services are paused, not stopped. */
		oz_services_stop(pd, stop_apps, 1);
		oz_timer_add(pd, OZ_TIMER_STOP, jiffies + pd->keep_alive_j, 1);
	}
	return do_stop;
}
353 /*------------------------------------------------------------------------------
354  * Context: softirq
355  */
oz_tx_frame_alloc(struct oz_pd * pd)356 static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
357 {
358 	struct oz_tx_frame *f = 0;
359 	spin_lock_bh(&pd->tx_frame_lock);
360 	if (pd->tx_pool) {
361 		f = container_of(pd->tx_pool, struct oz_tx_frame, link);
362 		pd->tx_pool = pd->tx_pool->next;
363 		pd->tx_pool_count--;
364 	}
365 	spin_unlock_bh(&pd->tx_frame_lock);
366 	if (f == 0)
367 		f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
368 	if (f) {
369 		f->total_size = sizeof(struct oz_hdr);
370 		INIT_LIST_HEAD(&f->link);
371 		INIT_LIST_HEAD(&f->elt_list);
372 	}
373 	return f;
374 }
375 /*------------------------------------------------------------------------------
376  * Context: softirq or process
377  */
oz_tx_frame_free(struct oz_pd * pd,struct oz_tx_frame * f)378 static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
379 {
380 	spin_lock_bh(&pd->tx_frame_lock);
381 	if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
382 		f->link.next = pd->tx_pool;
383 		pd->tx_pool = &f->link;
384 		pd->tx_pool_count++;
385 		f = 0;
386 	}
387 	spin_unlock_bh(&pd->tx_frame_lock);
388 	if (f)
389 		kfree(f);
390 }
/*------------------------------------------------------------------------------
 * Build a new tx frame, fill it with waiting elements from the element
 * buffer (unless @empty is set, in which case the frame carries only the oz
 * header) and append it to the PD's tx queue.
 * Returns 0 on success, -1 if the PD is in the wrong mode, the queue is
 * full, there is nothing to send, or allocation fails.
 * Context: softirq
 */
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
	struct oz_tx_frame *f;
	if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
		return -1;
	if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
		return -1;
	if (!empty && !oz_are_elts_available(&pd->elt_buff))
		return -1;
	f = oz_tx_frame_alloc(pd);
	if (f == 0)
		return -1;
	f->hdr.control =
		(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
	++pd->last_tx_pkt_num;
	/* Packet number is little-endian on the wire and may be unaligned
	 * in the header.
	 */
	put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
	if (empty == 0) {
		oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
			pd->max_tx_size, &f->elt_list);
	}
	spin_lock(&pd->tx_frame_lock);
	list_add_tail(&f->link, &pd->tx_queue);
	pd->nb_queued_frames++;
	spin_unlock(&pd->tx_frame_lock);
	return 0;
}
/*------------------------------------------------------------------------------
 * Convert a queued tx frame into an sk_buff ready for transmission: allocate
 * the skb with link-layer headroom, build the hardware header, then copy in
 * the oz header followed by each element's payload.
 * Returns the skb, or NULL on allocation/header failure.
 * Context: softirq-serialized
 */
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct sk_buff *skb = 0;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct list_head *e;
	/* Allocate skb with enough space for the lower layers as well
	 * as the space we need.
	 */
	skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == 0)
		return 0;
	/* Reserve the head room for lower layers.
	 */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
		dev->dev_addr, skb->len) < 0)
		goto fail;
	/* Push the tail to the end of the area we are going to copy to.
	 */
	oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
	/* Stamp the frame with the low bits of the peer's trigger packet
	 * number at send time.
	 */
	f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
	/* Copy the elements into the frame body.
	 */
	elt = (struct oz_elt *)(oz_hdr+1);
	for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
		struct oz_elt_info *ei;
		ei = container_of(e, struct oz_elt_info, link);
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	return skb;
fail:
	kfree_skb(skb);
	return 0;
}
/*------------------------------------------------------------------------------
 * Release a tx frame that is no longer needed: run each element's completion
 * callback, return the element buffers to the pool, recycle the frame itself
 * and trim the element pool if it has grown past its maximum.
 * Context: softirq or process
 */
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct list_head *e;
	struct oz_elt_info *ei;
	e = f->elt_list.next;
	while (e != &f->elt_list) {
		ei = container_of(e, struct oz_elt_info, link);
		e = e->next;	/* advance before the node is recycled */
		list_del_init(&ei->link);
		/* Completion callback runs outside the elt_buff lock. */
		if (ei->callback)
			ei->callback(pd, ei->context);
		spin_lock_bh(&pd->elt_buff.lock);
		oz_elt_info_free(&pd->elt_buff, ei);
		spin_unlock_bh(&pd->elt_buff.lock);
	}
	oz_tx_frame_free(pd, f);
	if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
		oz_trim_elt_pool(&pd->elt_buff);
}
/*------------------------------------------------------------------------------
 * Transmit the frame following last_sent_frame in the tx queue, advancing
 * the last-sent cursor. *more_data is set when further frames remain after
 * it. Returns -1 when the queue is exhausted or dev_queue_xmit() failed,
 * 0 otherwise (including when skb construction failed - the cursor has
 * still advanced).
 * NOTE(review): f is dereferenced after tx_frame_lock is dropped; frames
 * appear to be retired only via oz_retire_tx_frames() - confirm that cannot
 * race with this path.
 * Context: softirq-serialized
 */
static int oz_send_next_queued_frame(struct oz_pd *pd, int *more_data)
{
	struct sk_buff *skb;
	struct oz_tx_frame *f;
	struct list_head *e;
	*more_data = 0;
	spin_lock(&pd->tx_frame_lock);
	e = pd->last_sent_frame->next;
	if (e == &pd->tx_queue) {
		spin_unlock(&pd->tx_frame_lock);
		return -1;
	}
	pd->last_sent_frame = e;
	if (e->next != &pd->tx_queue)
		*more_data = 1;
	f = container_of(e, struct oz_tx_frame, link);
	skb = oz_build_frame(pd, f);
	spin_unlock(&pd->tx_frame_lock);
	oz_trace2(OZ_TRACE_TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
	if (skb) {
		oz_event_log(OZ_EVT_TX_FRAME,
			0,
			(((u16)f->hdr.control)<<8)|f->hdr.last_pkt_num,
			0, f->hdr.pkt_num);
		if (dev_queue_xmit(skb) < 0)
			return -1;
	}
	return 0;
}
/*------------------------------------------------------------------------------
 * Drive transmission of queued frames. When the hardware backlog is below
 * the queue limit, send queued frames; if nothing was queued, an empty frame
 * may be prepared and sent (unless isoc data already went out in
 * ISOC_ANYTIME mode). Above the limit, send at most one frame.
 * NOTE(review): the inner loop condition treats the -1 failure return of
 * oz_send_next_queued_frame() as "keep looping", so after one *successful*
 * send it exits even when more_data is set. This matches the code as
 * shipped but looks suspicious - confirm intent before changing.
 * Context: softirq-serialized
 */
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
{
	int more;
	if (backlog <  OZ_MAX_QUEUED_FRAMES) {
		if (oz_send_next_queued_frame(pd, &more) >= 0) {
			while (more && oz_send_next_queued_frame(pd, &more))
				;
		} else {
			/* Nothing queued: send an empty frame (it still
			 * carries an acknowledgement) unless isoc traffic
			 * covers that in ISOC_ANYTIME mode.
			 */
			if (((pd->mode & OZ_F_ISOC_ANYTIME) == 0)
				|| (pd->isoc_sent == 0)) {
				if (oz_prepare_frame(pd, 1) >= 0)
					oz_send_next_queued_frame(pd, &more);
			}
		}
	} else {
		oz_send_next_queued_frame(pd, &more);
	}
}
/*------------------------------------------------------------------------------
 * Build and transmit one isoc frame from the elements marked for isoc
 * transmission in the element buffer.
 * Returns 0 when there was nothing to send or the frame went out, -1 on
 * allocation or hardware-header failure.
 * Context: softirq
 */
static int oz_send_isoc_frame(struct oz_pd *pd)
{
	struct sk_buff *skb = 0;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct list_head *e;
	struct list_head list;
	int total_size = sizeof(struct oz_hdr);
	INIT_LIST_HEAD(&list);

	/* Second argument 1 selects isoc elements (0 in oz_prepare_frame). */
	oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
		pd->max_tx_size, &list);
	if (list.next == &list)
		return 0;	/* nothing selected */
	skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == 0) {
		oz_trace("Cannot alloc skb\n");
		/* Return the selected elements to the pool on failure. */
		oz_elt_info_free_chain(&pd->elt_buff, &list);
		return -1;
	}
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
		dev->dev_addr, skb->len) < 0) {
		/* NOTE(review): the selected elements are not freed on this
		 * path, unlike the alloc_skb failure above - possible leak,
		 * confirm.
		 */
		kfree_skb(skb);
		return -1;
	}
	oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
	oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	elt = (struct oz_elt *)(oz_hdr+1);

	/* Copy each selected element into the frame body. */
	for (e = list.next; e != &list; e = e->next) {
		struct oz_elt_info *ei;
		ei = container_of(e, struct oz_elt_info, link);
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	oz_event_log(OZ_EVT_TX_ISOC, 0, 0, 0, 0);
	dev_queue_xmit(skb);
	oz_elt_info_free_chain(&pd->elt_buff, &list);
	return 0;
}
/*------------------------------------------------------------------------------
 * Retire every queued tx frame acknowledged by the peer, i.e. all frames
 * whose packet number is at or before @lpn in modulo (OZ_LAST_PN_MASK)
 * arithmetic. The acknowledged span [first..last] is unlinked from the
 * queue while holding tx_frame_lock, then each frame is retired (callbacks
 * run, memory recycled) outside the lock.
 * Context: softirq-serialized
 */
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
	struct list_head *e;
	struct oz_tx_frame *f;
	struct list_head *first = 0;
	struct list_head *last = 0;
	u8 diff;
	u32 pkt_num;

	spin_lock(&pd->tx_frame_lock);
	e = pd->tx_queue.next;
	while (e != &pd->tx_queue) {
		f = container_of(e, struct oz_tx_frame, link);
		pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
		/* Wrap-around distance from this frame to lpn; more than
		 * half a cycle means the frame is "after" lpn and not yet
		 * acknowledged.
		 */
		diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
		if (diff > OZ_LAST_PN_HALF_CYCLE)
			break;
		if (first == 0)
			first = e;
		last = e;
		e = e->next;
		pd->nb_queued_frames--;
	}
	/* Splice the acknowledged span out of the queue in one operation. */
	if (first) {
		last->next->prev = &pd->tx_queue;
		pd->tx_queue.next = last->next;
		last->next = 0;	/* terminate the detached chain */
	}
	pd->last_sent_frame = &pd->tx_queue;
	spin_unlock(&pd->tx_frame_lock);
	/* Walk the detached, NULL-terminated chain and retire each frame. */
	while (first) {
		f = container_of(first, struct oz_tx_frame, link);
		first = first->next;
		oz_retire_frame(pd, f);
	}
}
627 /*------------------------------------------------------------------------------
628  * Precondition: stream_lock must be held.
629  * Context: softirq
630  */
pd_stream_find(struct oz_pd * pd,u8 ep_num)631 static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
632 {
633 	struct list_head *e;
634 	struct oz_isoc_stream *st;
635 	list_for_each(e, &pd->stream_list) {
636 		st = container_of(e, struct oz_isoc_stream, link);
637 		if (st->ep_num == ep_num)
638 			return st;
639 	}
640 	return 0;
641 }
642 /*------------------------------------------------------------------------------
643  * Context: softirq
644  */
oz_isoc_stream_create(struct oz_pd * pd,u8 ep_num)645 int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
646 {
647 	struct oz_isoc_stream *st =
648 		kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
649 	if (!st)
650 		return -ENOMEM;
651 	st->ep_num = ep_num;
652 	spin_lock_bh(&pd->stream_lock);
653 	if (!pd_stream_find(pd, ep_num)) {
654 		list_add(&st->link, &pd->stream_list);
655 		st = 0;
656 	}
657 	spin_unlock_bh(&pd->stream_lock);
658 	if (st)
659 		kfree(st);
660 	return 0;
661 }
662 /*------------------------------------------------------------------------------
663  * Context: softirq or process
664  */
oz_isoc_stream_free(struct oz_isoc_stream * st)665 static void oz_isoc_stream_free(struct oz_isoc_stream *st)
666 {
667 	if (st->skb)
668 		kfree_skb(st->skb);
669 	kfree(st);
670 }
671 /*------------------------------------------------------------------------------
672  * Context: softirq
673  */
oz_isoc_stream_delete(struct oz_pd * pd,u8 ep_num)674 int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
675 {
676 	struct oz_isoc_stream *st;
677 	spin_lock_bh(&pd->stream_lock);
678 	st = pd_stream_find(pd, ep_num);
679 	if (st)
680 		list_del(&st->link);
681 	spin_unlock_bh(&pd->stream_lock);
682 	if (st)
683 		oz_isoc_stream_free(st);
684 	return 0;
685 }
/*------------------------------------------------------------------------------
 * skb destructor for submitted isoc frames: decrements the global count of
 * in-flight isoc frames (see g_submitted_isoc) when the network layer
 * releases the skb.
 * Context: any
 */
static void oz_isoc_destructor(struct sk_buff *skb)
{
	atomic_dec(&g_submitted_isoc);
	oz_event_log(OZ_EVT_TX_ISOC_DONE, atomic_read(&g_submitted_isoc),
		0, skb, 0);
}
/*------------------------------------------------------------------------------
 * Append one isoc data unit (@data, @len) to the stream's accumulating skb.
 * Once pd->ms_per_isoc units have been collected, the oz and isoc-large
 * headers are finalised and the frame is transmitted - or dropped when more
 * than OZ_MAX_SUBMITTED_ISOC frames are already in flight.
 * Returns 0 normally; -1 if the hardware header or transmit step failed.
 * NOTE(review): st is dereferenced again after stream_lock is dropped and
 * re-taken; a concurrent oz_isoc_stream_delete() would free it - confirm
 * callers serialise against stream deletion.
 * Context: softirq
 */
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, u8 *data, int len)
{
	struct net_device *dev = pd->net_dev;
	struct oz_isoc_stream *st;
	u8 nb_units = 0;
	struct sk_buff *skb = 0;
	struct oz_hdr *oz_hdr = 0;
	int size = 0;
	/* Take over the stream's partially built frame (if any) under the
	 * lock, leaving the stream empty.
	 */
	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st) {
		skb = st->skb;
		st->skb = 0;
		nb_units = st->nb_units;
		st->nb_units = 0;
		oz_hdr = st->oz_hdr;
		size = st->size;
	}
	spin_unlock_bh(&pd->stream_lock);
	if (!st)
		return 0;	/* no stream for this endpoint */
	if (!skb) {
		/* Allocate enough space for max size frame. */
		skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
				GFP_ATOMIC);
		if (skb == 0)
			return 0;
		/* Reserve the head room for lower layers. */
		skb_reserve(skb, LL_RESERVED_SPACE(dev));
		skb_reset_network_header(skb);
		skb->dev = dev;
		skb->protocol = htons(OZ_ETHERTYPE);
		/* Reserve room for the oz and isoc-large headers; they are
		 * filled in just before transmission below.
		 */
		size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
		oz_hdr = (struct oz_hdr *)skb_put(skb, size);
	}
	memcpy(skb_put(skb, len), data, len);
	size += len;
	if (++nb_units < pd->ms_per_isoc) {
		/* Not enough units yet: park the frame back on the stream. */
		spin_lock_bh(&pd->stream_lock);
		st->skb = skb;
		st->nb_units = nb_units;
		st->oz_hdr = oz_hdr;
		st->size = size;
		spin_unlock_bh(&pd->stream_lock);
	} else {
		struct oz_hdr oz;
		struct oz_isoc_large iso;
		spin_lock_bh(&pd->stream_lock);
		iso.frame_number = st->frame_num;
		st->frame_num += nb_units;
		spin_unlock_bh(&pd->stream_lock);
		oz.control =
			(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
		oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
		oz.pkt_num = 0;
		iso.endpoint = ep_num;
		iso.format = OZ_DATA_F_ISOC_LARGE;
		iso.ms_data = nb_units;
		/* Write the headers into the space reserved at the front. */
		memcpy(oz_hdr, &oz, sizeof(oz));
		memcpy(oz_hdr+1, &iso, sizeof(iso));
		if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
				dev->dev_addr, skb->len) < 0) {
			kfree_skb(skb);
			return -1;
		}
		if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
			/* The destructor decrements g_submitted_isoc when
			 * the netcard releases the skb.
			 */
			skb->destructor = oz_isoc_destructor;
			atomic_inc(&g_submitted_isoc);
			oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
				skb, atomic_read(&g_submitted_isoc));
			if (dev_queue_xmit(skb) < 0)
				return -1;
		} else {
			/* Too many frames in flight: drop this one. */
			oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, 0, 0);
			kfree_skb(skb);
		}
	}
	return 0;
}
777 /*------------------------------------------------------------------------------
778  * Context: process
779  */
oz_apps_init(void)780 void oz_apps_init(void)
781 {
782 	int i;
783 	for (i = 0; i < OZ_APPID_MAX; i++)
784 		if (g_app_if[i].init)
785 			g_app_if[i].init();
786 }
787 /*------------------------------------------------------------------------------
788  * Context: process
789  */
oz_apps_term(void)790 void oz_apps_term(void)
791 {
792 	int i;
793 	/* Terminate all the apps. */
794 	for (i = 0; i < OZ_APPID_MAX; i++)
795 		if (g_app_if[i].term)
796 			g_app_if[i].term();
797 }
798 /*------------------------------------------------------------------------------
799  * Context: softirq-serialized
800  */
oz_handle_app_elt(struct oz_pd * pd,u8 app_id,struct oz_elt * elt)801 void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
802 {
803 	struct oz_app_if *ai;
804 	if (app_id == 0 || app_id > OZ_APPID_MAX)
805 		return;
806 	ai = &g_app_if[app_id-1];
807 	ai->rx(pd, elt);
808 }
/*------------------------------------------------------------------------------
 * Drain the PD's farewell list, reporting each pending farewell to the USB
 * service's farewell handler and freeing it. Entries are detached one at a
 * time under the polling lock so the handler itself runs unlocked.
 * Context: softirq or process
 */
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
	struct oz_farewell *f;
	struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
	while (1) {
		oz_polling_lock_bh();
		if (list_empty(&pd->farewell_list)) {
			oz_polling_unlock_bh();
			break;
		}
		f = list_first_entry(&pd->farewell_list,
				struct oz_farewell, link);
		list_del(&f->link);
		oz_polling_unlock_bh();
		if (ai->farewell)
			ai->farewell(pd, f->ep_num, f->report, f->len);
		kfree(f);
	}
}
831