/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/dccp.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <net/inet_sock.h>
#include <net/sock.h>

#include "ackvec.h"
#include "ccid.h"
#include "dccp.h"
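
/* An Ack (pure or piggybacked on data) has just been sent; any delayed
 * Ack still pending is now superfluous, so stop the delayed-Ack timer.
 */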
static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
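
/* Enqueue the packet that may have to be retransmitted: DCCP keeps at most
 * one such packet (a Request, Close, or CloseReq) in sk_send_head, and
 * dccp_retransmit_skb() clones it from there.
 */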
static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
}

/*
 * All SKBs seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		const struct inet_sock *inet = inet_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const u32 dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					  dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;
		/*
		 * Increment GSS here already in case the option code needs it.
		 * Update GSS for real only if option processing below succeeds.
		 */
		dcb->dccpd_seq = ADD48(dp->dccps_gss, 1);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			/* fall through */
		case DCCP_PKT_DATAACK:
		case DCCP_PKT_RESET:
			break;

		case DCCP_PKT_REQUEST:
			set_ack = 0;
			/* Use ISS on the first (non-retransmitted) Request. */
			if (icsk->icsk_retransmits == 0)
				dcb->dccpd_seq = dp->dccps_iss;
			/* fall through */

		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_ack_seq;
			/* fall through */
		default:
			/*
			 * Set owner/destructor: some skbs are allocated via
			 * alloc_skb (e.g. when retransmission may happen).
			 * Only Data, DataAck, and Reset packets should come
			 * through here with skb->sk set.
			 */
			WARN_ON(skb->sk);
			skb_set_owner_w(skb, sk);
			break;
		}

		if (dccp_insert_options(sk, skb)) {
			kfree_skb(skb);
			return -EPROTO;
		}

		/* Build DCCP header and checksum it. */
		dh = dccp_zeroed_hdr(skb, dccp_header_size);
		dh->dccph_type	= dcb->dccpd_type;
		dh->dccph_sport	= inet->inet_sport;
		dh->dccph_dport	= inet->inet_dport;
		dh->dccph_doff	= (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval	= dcb->dccpd_ccval;
		dh->dccph_cscov = dp->dccps_pcslen;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x	= 1;

		dccp_update_gss(sk, dcb->dccpd_seq);
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dp->dccps_service;
			/*
			 * Limit Ack window to ISS <= P.ackno <= GSS, so that
			 * only Responses to Requests we sent are considered.
			 */
			dp->dccps_awl = dp->dccps_iss;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

		icsk->icsk_af_ops->send_check(sk, skb);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		err = icsk->icsk_af_ops->queue_xmit(skb);
		return net_xmit_eval(err);
	}
	return -ENOBUFS;
}

/**
 * dccp_determine_ccmps  -  Find out about CCID-specific packet-size limits
 * @dp: socket to find packet-size limits of
 * We only consider the HC-sender CCID for setting the CCMPS (RFC 4340, 14.),
 * since the RX CCID is restricted to feedback packets (Acks), which are small
 * in comparison with the data traffic. A value of 0 means "no current CCMPS".
 */
static u32 dccp_determine_ccmps(const struct dccp_sock *dp)
{
	const struct ccid *tx_ccid = dp->dccps_hc_tx_ccid;

	if (tx_ccid == NULL || tx_ccid->ccid_ops == NULL)
		return 0;
	return tx_ccid->ccid_ops->ccid_ccmps;
}
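
/*
 * Cache the Maximum Packet Size: start from the path MTU, cap it by the
 * CCID-specific CCMPS where one exists, and subtract network/transport
 * header lengths plus headroom for options that may accompany Data packets.
 */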
unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	u32 ccmps = dccp_determine_ccmps(dp);
	u32 cur_mps = ccmps ? min(pmtu, ccmps) : pmtu;

	/* Account for header lengths and IPv4/v6 option overhead */
	cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +
		    sizeof(struct dccp_hdr) + sizeof(struct dccp_hdr_ext));

	/*
	 * Leave enough headroom for common DCCP header options.
	 * This only considers options which may appear on DCCP-Data packets, as
	 * per table 3 in RFC 4340, 5.8. When running out of space for other
	 * options (eg. Ack Vector which can take up to 255 bytes), it is better
	 * to schedule a separate Ack. Thus we leave headroom for the following:
	 *  - 1 byte for Slow Receiver (11.6)
	 *  - 6 bytes for Timestamp (13.1)
	 *  - 10 bytes for Timestamp Echo (13.3)
	 *  - 8 bytes for NDP count (7.7, when activated)
	 *  - 6 bytes for Data Checksum (9.3)
	 *  - %DCCPAV_MIN_OPTLEN bytes for Ack Vector size (11.4, when enabled)
	 */
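	/* For example, with NDP counts and Ack Vectors disabled this reserves
	 * roundup(1 + 6 + 10 + 6, 4) = 24 bytes of option headroom.
	 */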
	cur_mps -= roundup(1 + 6 + 10 + dp->dccps_send_ndp_count * 8 + 6 +
			   (dp->dccps_hc_rx_ackvec ? DCCPAV_MIN_OPTLEN : 0), 4);

	/* And store cached results */
	icsk->icsk_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = cur_mps;

	return cur_mps;
}

EXPORT_SYMBOL_GPL(dccp_sync_mss);

void dccp_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible(&wq->wait);
	/* Should agree with poll, otherwise some programs break */
	if (sock_writeable(sk))
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);

	rcu_read_unlock();
}

/**
 * dccp_wait_for_ccid  -  Await CCID send permission
 * @sk:    socket to wait for
 * @delay: timeout in jiffies
 * This is used by CCIDs which need to delay the send time in process context.
 */
static int dccp_wait_for_ccid(struct sock *sk, unsigned long delay)
{
	DEFINE_WAIT(wait);
	long remaining;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	sk->sk_write_pending++;
	release_sock(sk);
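
	/* Sleep with the socket lock released, so that incoming packets
	 * (and the CCID feedback they trigger) can be processed meanwhile.
	 */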
	remaining = schedule_timeout(delay);

	lock_sock(sk);
	sk->sk_write_pending--;
	finish_wait(sk_sleep(sk), &wait);

	if (signal_pending(current) || sk->sk_err)
		return -1;
	return remaining;
}

/**
 * dccp_xmit_packet  -  Send data packet under control of CCID
 * @sk: socket to send data packet on
 * Transmits next-queued payload and informs CCID to account for the packet.
 */
static void dccp_xmit_packet(struct sock *sk)
{
	int err, len;
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb = dccp_qpolicy_pop(sk);

	if (unlikely(skb == NULL))
		return;
	len = skb->len;

	if (sk->sk_state == DCCP_PARTOPEN) {
		const u32 cur_mps = dp->dccps_mss_cache - DCCP_FEATNEG_OVERHEAD;
		/*
		 * See 8.1.5 - Handshake Completion.
		 *
		 * For robustness we resend Confirm options until the client has
		 * entered OPEN. During the initial feature negotiation, the MPS
		 * is smaller than usual, reduced by the Change/Confirm options.
		 */
		if (!list_empty(&dp->dccps_featneg) && len > cur_mps) {
			DCCP_WARN("Payload too large (%d) for featneg.\n", len);
			dccp_send_ack(sk);
			dccp_feat_list_purge(&dp->dccps_featneg);
		}

		inet_csk_schedule_ack(sk);
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
					      inet_csk(sk)->icsk_rto,
					      DCCP_RTO_MAX);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else if (dccp_ack_pending(sk)) {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATAACK;
	} else {
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_DATA;
	}

	err = dccp_transmit_skb(sk, skb);
	if (err)
		dccp_pr_debug("transmit_skb() returned err=%d\n", err);
	/*
	 * Register this one as sent even if an error occurred. To the remote
	 * end a local packet drop is indistinguishable from network loss, i.e.
	 * any local drop will eventually be reported via receiver feedback.
	 */
	ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);

	/*
	 * If the CCID needs to transfer additional header options out-of-band
	 * (e.g. Ack Vectors or feature-negotiation options), it activates this
	 * flag to schedule a Sync. The Sync will automatically incorporate all
	 * currently pending header options, thus clearing the backlog.
	 */
	if (dp->dccps_sync_scheduled)
		dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
}

/**
 * dccp_flush_write_queue  -  Drain queue at end of connection
 * @sk: socket to be drained
 * @time_budget: time allowed to drain the queue, in jiffies
 * Since dccp_sendmsg queues packets without waiting for them to be sent, it may
 * happen that the TX queue is not empty at the end of a connection. We give the
 * HC-sender CCID a grace period of up to @time_budget jiffies. If this function
 * returns with a non-empty write queue, it will be purged later.
 */
void dccp_flush_write_queue(struct sock *sk, long *time_budget)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	long delay, rc;

	while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) {
		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			/*
			 * If the CCID determines when to send, the next sending
			 * time is unknown or the CCID may not even send again
			 * (e.g. remote host crashes or lost Ack packets).
			 */
			DCCP_WARN("CCID did not manage to send all packets\n");
			return;
		case CCID_PACKET_DELAY:
			delay = msecs_to_jiffies(rc);
			if (delay > *time_budget)
				return;
			rc = dccp_wait_for_ccid(sk, delay);
			if (rc < 0)
				return;
			*time_budget -= (delay - rc);
			/* check again if we can send now */
			break;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			skb_dequeue(&sk->sk_write_queue);
			kfree_skb(skb);
			dccp_pr_debug("packet discarded due to err=%ld\n", rc);
		}
	}
}
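
/*
 * Transmit as much of the qpolicy queue as the TX CCID currently permits.
 * When the CCID asks for a delay (CCID_PACKET_DELAY), dccps_xmit_timer is
 * armed so that transmission resumes from timer context.
 */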
void dccp_write_xmit(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;

	while ((skb = dccp_qpolicy_top(sk))) {
		int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);

		switch (ccid_packet_dequeue_eval(rc)) {
		case CCID_PACKET_WILL_DEQUEUE_LATER:
			return;
		case CCID_PACKET_DELAY:
			sk_reset_timer(sk, &dp->dccps_xmit_timer,
				       jiffies + msecs_to_jiffies(rc));
			return;
		case CCID_PACKET_SEND_AT_ONCE:
			dccp_xmit_packet(sk);
			break;
		case CCID_PACKET_ERR:
			dccp_qpolicy_drop(sk, skb);
			dccp_pr_debug("packet discarded due to err=%d\n", rc);
		}
	}
}

/**
 * dccp_retransmit_skb  -  Retransmit Request, Close, or CloseReq packets
 * @sk: socket whose sk_send_head holds the original skb
 * There are only four retransmittable packet types in DCCP:
 * - Request  in client-REQUEST  state (sec. 8.1.1),
 * - CloseReq in server-CLOSEREQ state (sec. 8.3),
 * - Close    in   node-CLOSING  state (sec. 8.3),
 * - Acks in client-PARTOPEN state (sec. 8.1.5, handled by dccp_delack_timer()).
 * This function expects sk->sk_send_head to contain the original skb.
 */
int dccp_retransmit_skb(struct sock *sk)
{
	WARN_ON(sk->sk_send_head == NULL);

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar. */

	/* this count is used to distinguish original and retransmitted skb */
	inet_csk(sk)->icsk_retransmits++;

	return dccp_transmit_skb(sk, skb_clone(sk->sk_send_head, GFP_ATOMIC));
}
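
/*
 * Build a Response packet answering a client Request. This runs on behalf
 * of the listening socket, so header options are taken from the
 * request_sock rather than from a full DCCP socket.
 */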
struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	struct dccp_request_sock *dreq;
	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	skb_dst_set(skb, dst_clone(dst));

	dreq = dccp_rsk(req);
	if (inet_rsk(req)->acked)	/* increase ISS upon retransmission */
		dccp_inc_seqno(&dreq->dreq_iss);
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;

	/* Resolve feature dependencies resulting from choice of CCID */
	if (dccp_feat_server_ccid_dependencies(dreq))
		goto response_failed;

	if (dccp_insert_options_rsk(dreq, skb))
		goto response_failed;

	/* Build and checksum header */
	dh = dccp_zeroed_hdr(skb, dccp_header_size);

	dh->dccph_sport	= inet_rsk(req)->loc_port;
	dh->dccph_dport	= inet_rsk(req)->rmt_port;
	dh->dccph_doff	= (dccp_header_size +
			   DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type	= DCCP_PKT_RESPONSE;
	dh->dccph_x	= 1;
	dccp_hdr_set_seq(dh, dreq->dreq_iss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;

	dccp_csum_outgoing(skb);

	/* We use `acked' to remember that a Response was already sent. */
	inet_rsk(req)->acked = 1;
	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
response_failed:
	kfree_skb(skb);
	return NULL;
}

EXPORT_SYMBOL_GPL(dccp_make_response);

/* answer offending packet in @rcv_skb with Reset from control socket @sk */
struct sk_buff *dccp_ctl_make_reset(struct sock *sk, struct sk_buff *rcv_skb)
{
	struct dccp_hdr *rxdh = dccp_hdr(rcv_skb), *dh;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(rcv_skb);
	const u32 dccp_hdr_reset_len = sizeof(struct dccp_hdr) +
				       sizeof(struct dccp_hdr_ext) +
				       sizeof(struct dccp_hdr_reset);
	struct dccp_hdr_reset *dhr;
	struct sk_buff *skb;

	skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, sk->sk_prot->max_header);

	/* Swap the send and the receive. */
	dh = dccp_zeroed_hdr(skb, dccp_hdr_reset_len);
	dh->dccph_type	= DCCP_PKT_RESET;
	dh->dccph_sport	= rxdh->dccph_dport;
	dh->dccph_dport	= rxdh->dccph_sport;
	dh->dccph_doff	= dccp_hdr_reset_len / 4;
	dh->dccph_x	= 1;

	dhr = dccp_hdr_reset(skb);
	dhr->dccph_reset_code = dcb->dccpd_reset_code;
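
	/* Data 1 .. Data 3 of the Reset carry details of the offending
	 * packet or option (RFC 4340, 5.6).
	 */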
	switch (dcb->dccpd_reset_code) {
	case DCCP_RESET_CODE_PACKET_ERROR:
		dhr->dccph_reset_data[0] = rxdh->dccph_type;
		break;
	case DCCP_RESET_CODE_OPTION_ERROR:	/* fall through */
	case DCCP_RESET_CODE_MANDATORY_ERROR:
		memcpy(dhr->dccph_reset_data, dcb->dccpd_reset_data, 3);
		break;
	}
	/*
	 * From RFC 4340, 8.3.1:
	 *   If P.ackno exists, set R.seqno := P.ackno + 1.
	 *   Else set R.seqno := 0.
	 */
	if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
		dccp_hdr_set_seq(dh, ADD48(dcb->dccpd_ack_seq, 1));
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dcb->dccpd_seq);

	dccp_csum_outgoing(skb);
	return skb;
}

EXPORT_SYMBOL_GPL(dccp_ctl_make_reset);

/* send Reset on established socket, to close or abort the connection */
int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
{
	struct sk_buff *skb;
	/*
	 * FIXME: what if rebuild_header fails?
	 * Should we be doing a rebuild_header here?
	 */
	int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);

	if (err != 0)
		return err;

	skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOBUFS;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type	   = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_reset_code = code;

	return dccp_transmit_skb(sk, skb);
}

/*
 * Do all connect socket setups that can be done AF-independently.
 */
int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct dccp_sock *dp = dccp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	/* do not connect if feature negotiation setup fails */
	if (dccp_feat_finalise_settings(dccp_sk(sk)))
		return -EPROTO;

	/* Initialise GAR as per 8.5; AWL/AWH are set in dccp_transmit_skb() */
	dp->dccps_gar = dp->dccps_iss;

	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, sk->sk_prot->max_header);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;

	dccp_skb_entail(sk, skb);
	dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer arrives. */
	icsk->icsk_retransmits = 0;
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

EXPORT_SYMBOL_GPL(dccp_connect);

void dccp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
						GFP_ATOMIC);

		if (skb == NULL) {
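			/* Allocation failed: fall back on the delayed-Ack
			 * timer, which will retry sending the Ack.
			 */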
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, sk->sk_prot->max_header);
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

#if 0
/* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */
void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/*
	 * FIXME: tune this timer. elapsed time fixes the skew, so no problem
	 * with using 2s, and active senders also piggyback the ACK into a
	 * DATAACK packet, so this is really for quiescent senders.
	 */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use the new timeout only if there wasn't an older one earlier. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 *
		 * FIXME: check the "about to expire" part
		 */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
#endif
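
/*
 * Send a Sync or SyncAck acknowledging @ackno. Besides resynchronising
 * sequence numbers, this is used to flush out-of-band header options such
 * as long Ack Vectors (see dccps_sync_scheduled).
 */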
void dccp_send_sync(struct sock *sk, const u64 ackno,
		    const enum dccp_pkt_type pkt_type)
{
	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);

	if (skb == NULL) {
		/* FIXME: how to make sure the sync is sent? */
		DCCP_CRIT("could not send %s", dccp_packet_name(pkt_type));
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
	DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;

	/*
	 * Clear the flag in case the Sync was scheduled for out-of-band data,
	 * such as carrying a long Ack Vector.
	 */
	dccp_sk(sk)->dccps_sync_scheduled = 0;

	dccp_transmit_skb(sk, skb);
}

EXPORT_SYMBOL_GPL(dccp_send_sync);

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const gfp_t prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	if (dp->dccps_role == DCCP_ROLE_SERVER && !dp->dccps_server_timewait)
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSEREQ;
	else
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_CLOSE;

	if (active) {
		dccp_skb_entail(sk, skb);
		dccp_transmit_skb(sk, skb_clone(skb, prio));
		/*
		 * Retransmission timer for active-close: RFC 4340, 8.3 requires
		 * to retransmit the Close/CloseReq until the CLOSING/CLOSEREQ
		 * state can be left. The initial timeout is 2 RTTs.
		 * Since RTT measurement is done by the CCIDs, there is no easy
		 * way to get an RTT sample. The fallback RTT from RFC 4340, 3.4
		 * is too low (200ms); we use a high value to avoid unnecessary
		 * retransmissions when the link RTT is > 0.2 seconds.
		 * FIXME: Let main module sample RTTs and use that instead.
		 */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  DCCP_TIMEOUT_INIT, DCCP_RTO_MAX);
	} else
		dccp_transmit_skb(sk, skb);
}