1 /* ip_nat_helper.c - generic support functions for NAT helpers
2  *
3  * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
4  * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/gfp.h>
12 #include <linux/kmod.h>
13 #include <linux/types.h>
14 #include <linux/timer.h>
15 #include <linux/skbuff.h>
16 #include <linux/tcp.h>
17 #include <linux/udp.h>
18 #include <net/checksum.h>
19 #include <net/tcp.h>
20 #include <net/route.h>
21 
22 #include <linux/netfilter_ipv4.h>
23 #include <net/netfilter/nf_conntrack.h>
24 #include <net/netfilter/nf_conntrack_helper.h>
25 #include <net/netfilter/nf_conntrack_ecache.h>
26 #include <net/netfilter/nf_conntrack_expect.h>
27 #include <net/netfilter/nf_nat.h>
28 #include <net/netfilter/nf_nat_protocol.h>
29 #include <net/netfilter/nf_nat_core.h>
30 #include <net/netfilter/nf_nat_helper.h>
31 
/* Dump one direction's TCP sequence-correction record for debugging.
 * Wrapped in do { } while (0) so the macro expands as exactly one
 * statement (safe in unbraced if/else), and written without a trailing
 * semicolon so call sites supply their own; the argument is
 * parenthesized against surprising expansions.
 */
#define DUMP_OFFSET(x)						\
	do {							\
		pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
			 (x)->offset_before, (x)->offset_after,	\
			 (x)->correction_pos);			\
	} while (0)
35 
36 static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
37 
38 /* Setup TCP sequence correction given this change at this sequence */
39 static inline void
adjust_tcp_sequence(u32 seq,int sizediff,struct nf_conn * ct,enum ip_conntrack_info ctinfo)40 adjust_tcp_sequence(u32 seq,
41 		    int sizediff,
42 		    struct nf_conn *ct,
43 		    enum ip_conntrack_info ctinfo)
44 {
45 	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
46 	struct nf_conn_nat *nat = nfct_nat(ct);
47 	struct nf_nat_seq *this_way = &nat->seq[dir];
48 
49 	pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
50 		 seq, sizediff);
51 
52 	pr_debug("adjust_tcp_sequence: Seq_offset before: ");
53 	DUMP_OFFSET(this_way);
54 
55 	spin_lock_bh(&nf_nat_seqofs_lock);
56 
57 	/* SYN adjust. If it's uninitialized, or this is after last
58 	 * correction, record it: we don't handle more than one
59 	 * adjustment in the window, but do deal with common case of a
60 	 * retransmit */
61 	if (this_way->offset_before == this_way->offset_after ||
62 	    before(this_way->correction_pos, seq)) {
63 		this_way->correction_pos = seq;
64 		this_way->offset_before = this_way->offset_after;
65 		this_way->offset_after += sizediff;
66 	}
67 	spin_unlock_bh(&nf_nat_seqofs_lock);
68 
69 	pr_debug("adjust_tcp_sequence: Seq_offset after: ");
70 	DUMP_OFFSET(this_way);
71 }
72 
73 /* Get the offset value, for conntrack */
nf_nat_get_offset(const struct nf_conn * ct,enum ip_conntrack_dir dir,u32 seq)74 s16 nf_nat_get_offset(const struct nf_conn *ct,
75 		      enum ip_conntrack_dir dir,
76 		      u32 seq)
77 {
78 	struct nf_conn_nat *nat = nfct_nat(ct);
79 	struct nf_nat_seq *this_way;
80 	s16 offset;
81 
82 	if (!nat)
83 		return 0;
84 
85 	this_way = &nat->seq[dir];
86 	spin_lock_bh(&nf_nat_seqofs_lock);
87 	offset = after(seq, this_way->correction_pos)
88 		 ? this_way->offset_after : this_way->offset_before;
89 	spin_unlock_bh(&nf_nat_seqofs_lock);
90 
91 	return offset;
92 }
93 EXPORT_SYMBOL_GPL(nf_nat_get_offset);
94 
/* Frobs data inside this packet, which is linear.
 *
 * Replaces the @match_len bytes at @match_offset (relative to the
 * transport payload starting @dataoff bytes past the network header)
 * with the @rep_len bytes at @rep_buffer, shifting trailing data as
 * needed, then fixes up skb length and the IPv4 total-length field
 * and header checksum.
 *
 * Callers must have made the skb writable and, when growing, ensured
 * enough tailroom beforehand (the BUG_ON only catches nonlinearity).
 */
static void mangle_contents(struct sk_buff *skb,
			    unsigned int dataoff,
			    unsigned int match_offset,
			    unsigned int match_len,
			    const char *rep_buffer,
			    unsigned int rep_len)
{
	unsigned char *data;

	BUG_ON(skb_is_nonlinear(skb));
	data = skb_network_header(skb) + dataoff;

	/* move post-replacement */
	/* Shift everything after the matched region so the replacement
	 * fits exactly; regions may overlap, hence memmove. */
	memmove(data + match_offset + rep_len,
		data + match_offset + match_len,
		skb->tail - (skb->network_header + dataoff +
			     match_offset + match_len));

	/* insert data from buffer */
	memcpy(data + match_offset, rep_buffer, rep_len);

	/* update skb info */
	if (rep_len > match_len) {
		pr_debug("nf_nat_mangle_packet: Extending packet by "
			 "%u from %u bytes\n", rep_len - match_len, skb->len);
		skb_put(skb, rep_len - match_len);
	} else {
		pr_debug("nf_nat_mangle_packet: Shrinking packet from "
			 "%u from %u bytes\n", match_len - rep_len, skb->len);
		__skb_trim(skb, skb->len + rep_len - match_len);
	}

	/* fix IP hdr checksum information */
	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));
}
132 
133 /* Unusual, but possible case. */
enlarge_skb(struct sk_buff * skb,unsigned int extra)134 static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
135 {
136 	if (skb->len + extra > 65535)
137 		return 0;
138 
139 	if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
140 		return 0;
141 
142 	return 1;
143 }
144 
nf_nat_set_seq_adjust(struct nf_conn * ct,enum ip_conntrack_info ctinfo,__be32 seq,s16 off)145 void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
146 			   __be32 seq, s16 off)
147 {
148 	if (!off)
149 		return;
150 	set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
151 	adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo);
152 	nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
153 }
154 EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
155 
nf_nat_csum(struct sk_buff * skb,const struct iphdr * iph,void * data,int datalen,__sum16 * check,int oldlen)156 static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data,
157 			int datalen, __sum16 *check, int oldlen)
158 {
159 	struct rtable *rt = skb_rtable(skb);
160 
161 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
162 		if (!(rt->rt_flags & RTCF_LOCAL) &&
163 		    (!skb->dev || skb->dev->features & NETIF_F_V4_CSUM)) {
164 			skb->ip_summed = CHECKSUM_PARTIAL;
165 			skb->csum_start = skb_headroom(skb) +
166 					  skb_network_offset(skb) +
167 					  iph->ihl * 4;
168 			skb->csum_offset = (void *)check - data;
169 			*check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
170 						    datalen, iph->protocol, 0);
171 		} else {
172 			*check = 0;
173 			*check = csum_tcpudp_magic(iph->saddr, iph->daddr,
174 						   datalen, iph->protocol,
175 						   csum_partial(data, datalen,
176 								0));
177 			if (iph->protocol == IPPROTO_UDP && !*check)
178 				*check = CSUM_MANGLED_0;
179 		}
180 	} else
181 		inet_proto_csum_replace2(check, skb,
182 					 htons(oldlen), htons(datalen), 1);
183 }
184 
/* Generic function for mangling variable-length address changes inside
 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
 * command in FTP).
 *
 * Takes care about all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 *
 * Returns 1 on success, 0 when the skb could not be made writable or
 * enlarged.  When @adjust is true and the payload length changed, a
 * sequence-number correction is recorded via nf_nat_set_seq_adjust().
 */
int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
			       struct nf_conn *ct,
			       enum ip_conntrack_info ctinfo,
			       unsigned int match_offset,
			       unsigned int match_len,
			       const char *rep_buffer,
			       unsigned int rep_len, bool adjust)
{
	struct iphdr *iph;
	struct tcphdr *tcph;
	int oldlen, datalen;

	/* Linearize and unshare the whole packet before editing it. */
	if (!skb_make_writable(skb, skb->len))
		return 0;

	/* Growing?  Make sure mangle_contents() will have tailroom. */
	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	SKB_LINEAR_ASSERT(skb);

	iph = ip_hdr(skb);
	tcph = (void *)iph + iph->ihl*4;

	/* TCP segment length (header + payload) before the edit,
	 * needed for the incremental checksum fixup. */
	oldlen = skb->len - iph->ihl*4;
	/* match_offset is relative to the TCP payload, hence the
	 * iph->ihl*4 + tcph->doff*4 base. */
	mangle_contents(skb, iph->ihl*4 + tcph->doff*4,
			match_offset, match_len, rep_buffer, rep_len);

	datalen = skb->len - iph->ihl*4;
	nf_nat_csum(skb, iph, tcph, datalen, &tcph->check, oldlen);

	if (adjust && rep_len != match_len)
		nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
				      (int)rep_len - (int)match_len);

	return 1;
}
EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet);
232 
/* Generic function for mangling variable-length address changes inside
 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
 * command in the Amanda protocol)
 *
 * Takes care about all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 *
 * Returns 1 on success, 0 when the skb could not be made writable or
 * enlarged.  Unlike TCP, no sequence-number bookkeeping is needed.
 *
 * XXX - This function could be merged with nf_nat_mangle_tcp_packet which
 *       should be fairly easy to do.
 */
int
nf_nat_mangle_udp_packet(struct sk_buff *skb,
			 struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned int match_offset,
			 unsigned int match_len,
			 const char *rep_buffer,
			 unsigned int rep_len)
{
	struct iphdr *iph;
	struct udphdr *udph;
	int datalen, oldlen;

	/* Linearize and unshare the whole packet before editing it. */
	if (!skb_make_writable(skb, skb->len))
		return 0;

	/* Growing?  Make sure mangle_contents() will have tailroom. */
	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	iph = ip_hdr(skb);
	udph = (void *)iph + iph->ihl*4;

	/* UDP datagram length before the edit, for the checksum fixup. */
	oldlen = skb->len - iph->ihl*4;
	mangle_contents(skb, iph->ihl*4 + sizeof(*udph),
			match_offset, match_len, rep_buffer, rep_len);

	/* update the length of the UDP packet */
	datalen = skb->len - iph->ihl*4;
	udph->len = htons(datalen);

	/* fix udp checksum if udp checksum was previously calculated */
	/* A zero UDP checksum means "not computed" on the wire, so
	 * only recompute when one was present or offload is pending. */
	if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
		return 1;

	nf_nat_csum(skb, iph, udph, datalen, &udph->check, oldlen);

	return 1;
}
EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
284 
285 /* Adjust one found SACK option including checksum correction */
286 static void
sack_adjust(struct sk_buff * skb,struct tcphdr * tcph,unsigned int sackoff,unsigned int sackend,struct nf_nat_seq * natseq)287 sack_adjust(struct sk_buff *skb,
288 	    struct tcphdr *tcph,
289 	    unsigned int sackoff,
290 	    unsigned int sackend,
291 	    struct nf_nat_seq *natseq)
292 {
293 	while (sackoff < sackend) {
294 		struct tcp_sack_block_wire *sack;
295 		__be32 new_start_seq, new_end_seq;
296 
297 		sack = (void *)skb->data + sackoff;
298 		if (after(ntohl(sack->start_seq) - natseq->offset_before,
299 			  natseq->correction_pos))
300 			new_start_seq = htonl(ntohl(sack->start_seq)
301 					- natseq->offset_after);
302 		else
303 			new_start_seq = htonl(ntohl(sack->start_seq)
304 					- natseq->offset_before);
305 
306 		if (after(ntohl(sack->end_seq) - natseq->offset_before,
307 			  natseq->correction_pos))
308 			new_end_seq = htonl(ntohl(sack->end_seq)
309 				      - natseq->offset_after);
310 		else
311 			new_end_seq = htonl(ntohl(sack->end_seq)
312 				      - natseq->offset_before);
313 
314 		pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
315 			 ntohl(sack->start_seq), new_start_seq,
316 			 ntohl(sack->end_seq), new_end_seq);
317 
318 		inet_proto_csum_replace4(&tcph->check, skb,
319 					 sack->start_seq, new_start_seq, 0);
320 		inet_proto_csum_replace4(&tcph->check, skb,
321 					 sack->end_seq, new_end_seq, 0);
322 		sack->start_seq = new_start_seq;
323 		sack->end_seq = new_end_seq;
324 		sackoff += sizeof(*sack);
325 	}
326 }
327 
/* TCP SACK sequence number adjustment.
 *
 * Walks the TCP options looking for SACK blocks and rewrites them with
 * the corrections recorded for the *opposite* direction (!dir), since
 * SACK blocks acknowledge the peer's sequence space.
 *
 * Returns 1 on success or when no SACK option is present; 0 when the
 * header could not be made writable or an option is malformed.
 */
static inline unsigned int
nf_nat_sack_adjust(struct sk_buff *skb,
		   struct tcphdr *tcph,
		   struct nf_conn *ct,
		   enum ip_conntrack_info ctinfo)
{
	unsigned int dir, optoff, optend;
	struct nf_conn_nat *nat = nfct_nat(ct);

	/* Option area: from the end of the fixed TCP header to doff*4. */
	optoff = ip_hdrlen(skb) + sizeof(struct tcphdr);
	optend = ip_hdrlen(skb) + tcph->doff * 4;

	if (!skb_make_writable(skb, optend))
		return 0;

	dir = CTINFO2DIR(ctinfo);

	while (optoff < optend) {
		/* Usually: option, length. */
		unsigned char *op = skb->data + optoff;

		switch (op[0]) {
		case TCPOPT_EOL:
			return 1;
		case TCPOPT_NOP:
			optoff++;
			continue;
		default:
			/* no partial options */
			/* Reject a truncated length byte, an option that
			 * runs past the option area, or a bogus length
			 * < 2 (which would also loop forever). */
			if (optoff + 1 == optend ||
			    optoff + op[1] > optend ||
			    op[1] < 2)
				return 0;
			/* SACK payload must be a whole number of
			 * start/end sequence pairs. */
			if (op[0] == TCPOPT_SACK &&
			    op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
			    ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
				sack_adjust(skb, tcph, optoff+2,
					    optoff+op[1], &nat->seq[!dir]);
			optoff += op[1];
		}
	}
	return 1;
}
372 
/* TCP sequence number adjustment.  Returns 1 on success, 0 on failure
 *
 * Applies the recorded size corrections to this packet: the sequence
 * number is shifted by this direction's offset, the acknowledgment
 * number by the opposite direction's (it acknowledges the peer's
 * data), the checksum is fixed incrementally, and any SACK blocks are
 * rewritten to match.
 */
int
nf_nat_seq_adjust(struct sk_buff *skb,
		  struct nf_conn *ct,
		  enum ip_conntrack_info ctinfo)
{
	struct tcphdr *tcph;
	int dir;
	__be32 newseq, newack;
	s16 seqoff, ackoff;
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way, *other_way;

	dir = CTINFO2DIR(ctinfo);

	this_way = &nat->seq[dir];
	other_way = &nat->seq[!dir];

	/* Only the IP + fixed TCP header must be writable here; the
	 * option area is handled by nf_nat_sack_adjust(). */
	if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(*tcph)))
		return 0;

	tcph = (void *)skb->data + ip_hdrlen(skb);
	if (after(ntohl(tcph->seq), this_way->correction_pos))
		seqoff = this_way->offset_after;
	else
		seqoff = this_way->offset_before;

	/* The ack field carries an already-adjusted peer sequence, so
	 * compare its un-adjusted value against the correction point. */
	if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
		  other_way->correction_pos))
		ackoff = other_way->offset_after;
	else
		ackoff = other_way->offset_before;

	newseq = htonl(ntohl(tcph->seq) + seqoff);
	newack = htonl(ntohl(tcph->ack_seq) - ackoff);

	inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
	inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);

	pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
		 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
		 ntohl(newack));

	tcph->seq = newseq;
	tcph->ack_seq = newack;

	return nf_nat_sack_adjust(skb, tcph, ct, ctinfo);
}
421 
422 /* Setup NAT on this expected conntrack so it follows master. */
423 /* If we fail to get a free NAT slot, we'll get dropped on confirm */
nf_nat_follow_master(struct nf_conn * ct,struct nf_conntrack_expect * exp)424 void nf_nat_follow_master(struct nf_conn *ct,
425 			  struct nf_conntrack_expect *exp)
426 {
427 	struct nf_nat_ipv4_range range;
428 
429 	/* This must be a fresh one. */
430 	BUG_ON(ct->status & IPS_NAT_DONE_MASK);
431 
432 	/* Change src to where master sends to */
433 	range.flags = NF_NAT_RANGE_MAP_IPS;
434 	range.min_ip = range.max_ip
435 		= ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
436 	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
437 
438 	/* For DST manip, map port here to where it's expected. */
439 	range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
440 	range.min = range.max = exp->saved_proto;
441 	range.min_ip = range.max_ip
442 		= ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
443 	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
444 }
445 EXPORT_SYMBOL(nf_nat_follow_master);
446