/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de)
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

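/*
 * ax25_frag_lock is held around the per-fragment skb allocation in
 * ax25_output() below.
 */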
static DEFINE_SPINLOCK(ax25_frag_lock);

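/*
 * Send a frame on an AX.25 connection. If a matching connection already
 * exists the frame is simply queued on it; otherwise a new control block
 * is created, link establishment is started and the frame is queued for
 * transmission once the link comes up. Returns the control block used,
 * with a reference held for the caller (release with ax25_cb_put()), or
 * NULL on failure.
 */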
ax25_cb *ax25_send_frame(struct sk_buff *skb, int paclen, ax25_address *src, ax25_address *dest, ax25_digi *digi, struct net_device *dev)
{
	ax25_dev *ax25_dev;
	ax25_cb *ax25;

	/*
	 * Take the default packet length for the device if zero is
	 * specified.
	 */
	if (paclen == 0) {
		if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
			return NULL;

		paclen = ax25_dev->values[AX25_VALUES_PACLEN];
	}

	/*
	 * Look for an existing connection.
	 */
	if ((ax25 = ax25_find_cb(src, dest, digi, dev)) != NULL) {
		ax25_output(ax25, paclen, skb);
		return ax25;		/* It already existed */
	}

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
		return NULL;

	if ((ax25 = ax25_create_cb()) == NULL)
		return NULL;

	ax25_fillin_cb(ax25, ax25_dev);

	ax25->source_addr = *src;
	ax25->dest_addr   = *dest;

	if (digi != NULL) {
		ax25->digipeat = kmemdup(digi, sizeof(*digi), GFP_ATOMIC);
		if (ax25->digipeat == NULL) {
			ax25_cb_put(ax25);
			return NULL;
		}
	}

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_std_establish_data_link(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	case AX25_PROTO_DAMA_SLAVE:
		if (ax25_dev->dama.slave)
			ax25_ds_establish_data_link(ax25);
		else
			ax25_std_establish_data_link(ax25);
		break;
#endif
	}

	/*
	 * The state machine holds one reference; take another for the
	 * caller to put back, just as with an existing control block
	 * returned above.
	 */
	ax25_cb_hold(ax25);

	ax25_cb_add(ax25);

	ax25->state = AX25_STATE_1;

	ax25_start_heartbeat(ax25);

	ax25_output(ax25, paclen, skb);

	return ax25;			/* We had to create it */
}

EXPORT_SYMBOL(ax25_send_frame);

/*
 *	All outgoing AX.25 I frames pass through this routine, so this is
 *	where the fragmentation of frames takes place. Frames longer than
 *	paclen are split; a paclen below the minimum of 16 is rejected and
 *	the frame is dropped.
 */
void ax25_output(ax25_cb *ax25, int paclen, struct sk_buff *skb)
{
	struct sk_buff *skbn;
	unsigned char *p;
	int frontlen, len, fragno, ka9qfrag, first = 1;

	if (paclen < 16) {
		WARN_ON_ONCE(1);
		kfree_skb(skb);
		return;
	}

	if ((skb->len - 1) > paclen) {
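		/*
		 * Two ways to split an oversized frame: plain text (PID
		 * AX25_P_TEXT) is simply chopped into paclen-sized pieces,
		 * each sent as its own AX25_P_TEXT frame with no
		 * segmentation header, while every other PID uses KA9Q
		 * style segmentation: each piece carries a two byte header
		 * of the AX25_P_SEGMENT PID and a count of the fragments
		 * still to follow, with AX25_SEG_FIRST set on the first.
		 */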
		if (*skb->data == AX25_P_TEXT) {
			skb_pull(skb, 1); /* skip PID */
			ka9qfrag = 0;
		} else {
			paclen -= 2;	/* Allow for fragment control info */
			ka9qfrag = 1;
		}

		fragno = skb->len / paclen;
		if (skb->len % paclen == 0) fragno--;

		frontlen = skb_headroom(skb);	/* Address space + CTRL */

		while (skb->len > 0) {
			spin_lock_bh(&ax25_frag_lock);
			if ((skbn = alloc_skb(paclen + 2 + frontlen, GFP_ATOMIC)) == NULL) {
				spin_unlock_bh(&ax25_frag_lock);
				printk(KERN_CRIT "AX.25: ax25_output - out of memory\n");
				return;
			}

			if (skb->sk != NULL)
				skb_set_owner_w(skbn, skb->sk);

			spin_unlock_bh(&ax25_frag_lock);

			len = (paclen > skb->len) ? skb->len : paclen;

			if (ka9qfrag == 1) {
				skb_reserve(skbn, frontlen + 2);
				skb_set_network_header(skbn,
						      skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 2);

				*p++ = AX25_P_SEGMENT;

				*p = fragno--;
				if (first) {
					*p |= AX25_SEG_FIRST;
					first = 0;
				}
			} else {
				skb_reserve(skbn, frontlen + 1);
				skb_set_network_header(skbn,
						      skb_network_offset(skb));
				skb_copy_from_linear_data(skb, skb_put(skbn, len), len);
				p = skb_push(skbn, 1);
				*p = AX25_P_TEXT;
			}

			skb_pull(skb, len);
			skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
		}

		kfree_skb(skb);
	} else {
		skb_queue_tail(&ax25->write_queue, skb);	  /* Throw it on the queue */
	}

	switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
	case AX25_PROTO_STD_SIMPLEX:
	case AX25_PROTO_STD_DUPLEX:
		ax25_kick(ax25);
		break;

#ifdef CONFIG_AX25_DAMA_SLAVE
	/*
	 * A DAMA slave is _required_ to work as normal AX.25L2V2
	 * if no DAMA master is available.
	 */
	case AX25_PROTO_DAMA_SLAVE:
		if (!ax25->ax25_dev->dama.slave) ax25_kick(ax25);
		break;
#endif
	}
}

/*
 *  This procedure is passed a buffer descriptor for an I frame. It builds
 *  the rest of the control part of the frame and then transmits it.
 */
static void ax25_send_iframe(ax25_cb *ax25, struct sk_buff *skb, int poll_bit)
{
	unsigned char *frame;

	if (skb == NULL)
		return;

	skb_reset_network_header(skb);

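	/*
	 * Build the I frame control field. With modulus 8 this is a single
	 * byte: N(S) in bits 1-3, P/F in bit 4 and N(R) in bits 5-7. With
	 * modulus 128 (extended mode) it is two bytes: N(S) in bits 1-7 of
	 * the first byte, P/F in bit 0 and N(R) in bits 1-7 of the second.
	 */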
	if (ax25->modulus == AX25_MODULUS) {
		frame = skb_push(skb, 1);

		*frame = AX25_I;
		*frame |= (poll_bit) ? AX25_PF : 0;
		*frame |= (ax25->vr << 5);
		*frame |= (ax25->vs << 1);
	} else {
		frame = skb_push(skb, 2);

		frame[0] = AX25_I;
		frame[0] |= (ax25->vs << 1);
		frame[1] = (poll_bit) ? AX25_EPF : 0;
		frame[1] |= (ax25->vr << 1);
	}

	ax25_start_idletimer(ax25);

	ax25_transmit_buffer(ax25, skb, AX25_COMMAND);
}

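/*
 * Transmit as many queued I frames as the send window allows. Nothing is
 * sent unless the link is connected (state 3 or 4) and the peer is not
 * busy. The poll bit is set on the frame that fills the window, except in
 * DAMA slave mode.
 */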
void ax25_kick(ax25_cb *ax25)
{
	struct sk_buff *skb, *skbn;
	int last = 1;
	unsigned short start, end, next;

	if (ax25->state != AX25_STATE_3 && ax25->state != AX25_STATE_4)
		return;

	if (ax25->condition & AX25_COND_PEER_RX_BUSY)
		return;

	if (skb_peek(&ax25->write_queue) == NULL)
		return;

	start = (skb_peek(&ax25->ack_queue) == NULL) ? ax25->va : ax25->vs;
	end   = (ax25->va + ax25->window) % ax25->modulus;

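	/*
	 * start is the next sequence number to send, end is the first
	 * sequence number outside the window; if they are equal the
	 * window is already full.
	 */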
	if (start == end)
		return;

	/*
	 * Transmit data until either we're out of data to send or
	 * the window is full. Send a poll on the final I frame if
	 * the window is filled.
	 */

	/*
	 * Dequeue the frame and copy it.
	 * Check for race with ax25_clear_queues().
	 */
	skb  = skb_dequeue(&ax25->write_queue);
	if (!skb)
		return;

	ax25->vs = start;

	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			skb_queue_head(&ax25->write_queue, skb);
			break;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		next = (ax25->vs + 1) % ax25->modulus;
		last = (next == end);

		/*
		 * Transmit the frame copy.
		 * bke 960114: do not set the Poll bit on the last frame
		 * in DAMA mode.
		 */
		switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) {
		case AX25_PROTO_STD_SIMPLEX:
		case AX25_PROTO_STD_DUPLEX:
			ax25_send_iframe(ax25, skbn, (last) ? AX25_POLLON : AX25_POLLOFF);
			break;

#ifdef CONFIG_AX25_DAMA_SLAVE
		case AX25_PROTO_DAMA_SLAVE:
			ax25_send_iframe(ax25, skbn, AX25_POLLOFF);
			break;
#endif
		}

		ax25->vs = next;

		/*
		 * Requeue the original data frame.
		 */
		skb_queue_tail(&ax25->ack_queue, skb);

	} while (!last && (skb = skb_dequeue(&ax25->write_queue)) != NULL);

	ax25->condition &= ~AX25_COND_ACK_PENDING;

	if (!ax25_t1timer_running(ax25)) {
		ax25_stop_t3timer(ax25);
		ax25_calculate_t1(ax25);
		ax25_start_t1timer(ax25);
	}
}

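/*
 * Prepend the AX.25 address field (source, destination and any digipeater
 * path) to the frame and pass it to ax25_queue_xmit(), reallocating
 * headroom first if the skb does not have enough.
 */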
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
	struct sk_buff *skbn;
	unsigned char *ptr;
	int headroom;

	if (ax25->ax25_dev == NULL) {
		ax25_disconnect(ax25, ENETUNREACH);
		return;
	}

	headroom = ax25_addr_size(ax25->digipeat);

	if (skb_headroom(skb) < headroom) {
		if ((skbn = skb_realloc_headroom(skb, headroom)) == NULL) {
			printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
			kfree_skb(skb);
			return;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		kfree_skb(skb);
		skb = skbn;
	}

	ptr = skb_push(skb, headroom);

	ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);

	ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}

/*
 *	A small shim around dev_queue_xmit that adds the KISS control byte
 *	and honours any packet forwarding that is in operation.
 */
void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned char *ptr;

	skb->protocol = ax25_type_trans(skb, ax25_fwd_dev(dev));

	ptr  = skb_push(skb, 1);
	*ptr = 0x00;			/* KISS */

	dev_queue_xmit(skb);
}

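/*
 * Handle an incoming N(R). If it acknowledges everything outstanding,
 * T1 is stopped and T3 started; if it acknowledges only some frames,
 * T1 is restarted. Returns 1 in either case, 0 if nothing new was
 * acknowledged.
 */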
int ax25_check_iframes_acked(ax25_cb *ax25, unsigned short nr)
{
	if (ax25->vs == nr) {
		ax25_frames_acked(ax25, nr);
		ax25_calculate_rtt(ax25);
		ax25_stop_t1timer(ax25);
		ax25_start_t3timer(ax25);
		return 1;
	} else {
		if (ax25->va != nr) {
			ax25_frames_acked(ax25, nr);
			ax25_calculate_t1(ax25);
			ax25_start_t1timer(ax25);
			return 1;
		}
	}
	return 0;
}