/* ip_nat_mangle.c - generic support functions for NAT helpers
 *
 * (C) 2000-2002 by Harald Welte <laforge@gnumonks.org>
 *
 * distributed under the terms of GNU GPL
 *
 * 14 Jan 2002 Harald Welte <laforge@gnumonks.org>:
 *	- add support for SACK adjustment
 * 14 Mar 2002 Harald Welte <laforge@gnumonks.org>:
 *	- merge SACK support into newnat API
 * 16 Aug 2002 Brian J. Murrell <netfilter@interlinx.bc.ca>:
 *	- make ip_nat_resize_packet more generic (TCP and UDP)
 *	- add ip_nat_mangle_udp_packet
 */
#include <linux/version.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/netfilter_ipv4.h>
#include <linux/brlock.h>
#include <net/checksum.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>

#define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_nat_lock)
#define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_nat_lock)

#include <linux/netfilter_ipv4/ip_conntrack.h>
#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
#include <linux/netfilter_ipv4/ip_nat.h>
#include <linux/netfilter_ipv4/ip_nat_protocol.h>
#include <linux/netfilter_ipv4/ip_nat_core.h>
#include <linux/netfilter_ipv4/ip_nat_helper.h>
#include <linux/netfilter_ipv4/listhelp.h>

#if 0
#define DEBUGP printk
#define DUMP_OFFSET(x)	printk("offset_before=%d, offset_after=%d, correction_pos=%u\n", x->offset_before, x->offset_after, x->correction_pos);
#else
#define DEBUGP(format, args...)
#define DUMP_OFFSET(x)
#endif

DECLARE_LOCK(ip_nat_seqofs_lock);

static inline int
ip_nat_resize_packet(struct sk_buff **skb,
		     struct ip_conntrack *ct,
		     enum ip_conntrack_info ctinfo,
		     int new_size)
{
	struct iphdr *iph;
	int dir;
	struct ip_nat_seq *this_way, *other_way;

	DEBUGP("ip_nat_resize_packet: old_size = %u, new_size = %u\n",
	       (*skb)->len, new_size);

	dir = CTINFO2DIR(ctinfo);

	this_way = &ct->nat.info.seq[dir];
	other_way = &ct->nat.info.seq[!dir];

	if (new_size > (*skb)->len + skb_tailroom(*skb)) {
		struct sk_buff *newskb;
		newskb = skb_copy_expand(*skb, skb_headroom(*skb),
					 new_size - (*skb)->len,
					 GFP_ATOMIC);

		if (!newskb) {
			printk("ip_nat_resize_packet: oom\n");
			return 0;
		} else {
			kfree_skb(*skb);
			*skb = newskb;
		}
	}

	iph = (*skb)->nh.iph;
	if (iph->protocol == IPPROTO_TCP) {
		struct tcphdr *tcph = (void *)iph + iph->ihl*4;

		DEBUGP("ip_nat_resize_packet: Seq_offset before: ");
		DUMP_OFFSET(this_way);

		LOCK_BH(&ip_nat_seqofs_lock);

		/* SYN adjust.  If it's uninitialized, or this is after the
		 * last correction, record it: we don't handle more than one
		 * adjustment in the window, but do deal with the common
		 * case of a retransmit */
		if (this_way->offset_before == this_way->offset_after
		    || before(this_way->correction_pos, ntohl(tcph->seq))) {
			this_way->correction_pos = ntohl(tcph->seq);
			this_way->offset_before = this_way->offset_after;
			this_way->offset_after = (int32_t)
				this_way->offset_before + new_size -
				(*skb)->len;
		}

		UNLOCK_BH(&ip_nat_seqofs_lock);

		DEBUGP("ip_nat_resize_packet: Seq_offset after: ");
		DUMP_OFFSET(this_way);
	}

	return 1;
}
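
/*
 * Worked example (illustrative numbers, not taken from real traffic):
 * suppose a helper grows a packet whose TCP sequence number is 1000 by
 * 4 bytes on a connection with no earlier adjustment.  The block above
 * records correction_pos = 1000, offset_before = 0 and offset_after = 4.
 * ip_nat_seq_adjust() below then adds offset_before (0) to sequence
 * numbers up to and including 1000, and offset_after (4) to anything
 * later in the stream.
 */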


/* Generic function for mangling variable-length address changes inside
 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
 * command in FTP).
 *
 * Takes care of all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 */
int
ip_nat_mangle_tcp_packet(struct sk_buff **skb,
			 struct ip_conntrack *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned int match_offset,
			 unsigned int match_len,
			 char *rep_buffer,
			 unsigned int rep_len)
{
	struct iphdr *iph = (*skb)->nh.iph;
	struct tcphdr *tcph;
	unsigned char *data;
	u_int32_t tcplen, newlen, newtcplen;

	tcplen = (*skb)->len - iph->ihl*4;
	newtcplen = tcplen - match_len + rep_len;
	newlen = iph->ihl*4 + newtcplen;

	if (newlen > 65535) {
		if (net_ratelimit())
			printk("ip_nat_mangle_tcp_packet: nat'ed packet "
			       "exceeds maximum packet size\n");
		return 0;
	}

	if ((*skb)->len != newlen) {
		if (!ip_nat_resize_packet(skb, ct, ctinfo, newlen)) {
			printk("resize_packet failed!!\n");
			return 0;
		}
	}

	/* Alexey says: if a hook changes _data_ ... it can break
	   original packet sitting in tcp queue and this is fatal */
	if (skb_cloned(*skb)) {
		struct sk_buff *nskb = skb_copy(*skb, GFP_ATOMIC);
		if (!nskb) {
			if (net_ratelimit())
				printk("Out of memory cloning TCP packet\n");
			return 0;
		}
		/* Rest of kernel will get very unhappy if we pass it
		   a suddenly-orphaned skbuff */
		if ((*skb)->sk)
			skb_set_owner_w(nskb, (*skb)->sk);
		kfree_skb(*skb);
		*skb = nskb;
	}

	/* skb may be copied !! */
	iph = (*skb)->nh.iph;
	tcph = (void *)iph + iph->ihl*4;
	data = (void *)tcph + tcph->doff*4;

	if (rep_len != match_len)
		/* move post-replacement */
		memmove(data + match_offset + rep_len,
			data + match_offset + match_len,
			(*skb)->tail - (data + match_offset + match_len));

	/* insert data from buffer */
	memcpy(data + match_offset, rep_buffer, rep_len);

	/* update skb info */
	if (newlen > (*skb)->len) {
		DEBUGP("ip_nat_mangle_tcp_packet: Extending packet by "
		       "%u to %u bytes\n", newlen - (*skb)->len, newlen);
		skb_put(*skb, newlen - (*skb)->len);
	} else {
		DEBUGP("ip_nat_mangle_tcp_packet: Shrinking packet from "
		       "%u to %u bytes\n", (*skb)->len, newlen);
		skb_trim(*skb, newlen);
	}

	iph->tot_len = htons(newlen);
	/* fix checksum information */
	tcph->check = 0;
	tcph->check = tcp_v4_check(tcph, newtcplen, iph->saddr, iph->daddr,
				   csum_partial((char *)tcph, newtcplen, 0));
	ip_send_check(iph);

	return 1;
}
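
/*
 * Usage sketch (hypothetical caller, not part of this file): an FTP-style
 * NAT helper that has located the "a,b,c,d,e,f" address string at offset
 * matchoff (length matchlen) into the TCP payload could replace it with:
 *
 *	char buf[sizeof("255,255,255,255,255,255")];
 *	unsigned int len;
 *
 *	len = sprintf(buf, "%u,%u,%u,%u,%u,%u",
 *		      newip >> 24, (newip >> 16) & 0xFF,
 *		      (newip >> 8) & 0xFF, newip & 0xFF,
 *		      ntohs(newport) >> 8, ntohs(newport) & 0xFF);
 *	if (!ip_nat_mangle_tcp_packet(skb, ct, ctinfo,
 *				      matchoff, matchlen, buf, len))
 *		return 0;
 *
 * newip, newport, matchoff and matchlen are the caller's own variables;
 * offsets are relative to the start of the TCP payload.
 */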

/* Generic function for mangling variable-length address changes inside
 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
 * command in the Amanda protocol)
 *
 * Takes care of all the nasty checksum fixups, skb enlargement, ...
 *
 * XXX - This function could be merged with ip_nat_mangle_tcp_packet, which
 * should be fairly easy to do.
 */
int
ip_nat_mangle_udp_packet(struct sk_buff **skb,
			 struct ip_conntrack *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned int match_offset,
			 unsigned int match_len,
			 char *rep_buffer,
			 unsigned int rep_len)
{
	struct iphdr *iph = (*skb)->nh.iph;
	struct udphdr *udph = (void *)iph + iph->ihl * 4;
	unsigned char *data;
	u_int32_t udplen, newlen, newudplen;

	udplen = (*skb)->len - iph->ihl*4;
	newudplen = udplen - match_len + rep_len;
	newlen = iph->ihl*4 + newudplen;

	/* UDP helpers might accidentally mangle the wrong packet */
	if (udplen < sizeof(*udph) + match_offset + match_len) {
		if (net_ratelimit())
			printk("ip_nat_mangle_udp_packet: undersized packet\n");
		return 0;
	}

	if (newlen > 65535) {
		if (net_ratelimit())
			printk("ip_nat_mangle_udp_packet: nat'ed packet "
			       "exceeds maximum packet size\n");
		return 0;
	}

	if ((*skb)->len != newlen) {
		if (!ip_nat_resize_packet(skb, ct, ctinfo, newlen)) {
			printk("resize_packet failed!!\n");
			return 0;
		}
	}

	/* Alexey says: if a hook changes _data_ ... it can break
	   original packet sitting in a socket queue and this is fatal */
	if (skb_cloned(*skb)) {
		struct sk_buff *nskb = skb_copy(*skb, GFP_ATOMIC);
		if (!nskb) {
			if (net_ratelimit())
				printk("Out of memory cloning UDP packet\n");
			return 0;
		}
		/* Rest of kernel will get very unhappy if we pass it
		   a suddenly-orphaned skbuff */
		if ((*skb)->sk)
			skb_set_owner_w(nskb, (*skb)->sk);
		kfree_skb(*skb);
		*skb = nskb;
	}

	/* skb may be copied !! */
	iph = (*skb)->nh.iph;
	udph = (void *)iph + iph->ihl*4;
	data = (void *)udph + sizeof(struct udphdr);

	if (rep_len != match_len)
		/* move post-replacement */
		memmove(data + match_offset + rep_len,
			data + match_offset + match_len,
			(*skb)->tail - (data + match_offset + match_len));

	/* insert data from buffer */
	memcpy(data + match_offset, rep_buffer, rep_len);

	/* update skb info */
	if (newlen > (*skb)->len) {
		DEBUGP("ip_nat_mangle_udp_packet: Extending packet by "
		       "%u to %u bytes\n", newlen - (*skb)->len, newlen);
		skb_put(*skb, newlen - (*skb)->len);
	} else {
		DEBUGP("ip_nat_mangle_udp_packet: Shrinking packet from "
		       "%u to %u bytes\n", (*skb)->len, newlen);
		skb_trim(*skb, newlen);
	}

	/* update the length of the UDP and IP packets to the new values */
	udph->len = htons((*skb)->len - iph->ihl*4);
	iph->tot_len = htons(newlen);

	/* fix udp checksum if udp checksum was previously calculated */
	if (udph->check != 0) {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						newudplen, IPPROTO_UDP,
						csum_partial((char *)udph,
							     newudplen, 0));
	}

	ip_send_check(iph);

	return 1;
}
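
/*
 * Usage sketch (hypothetical caller, not part of this file): an
 * Amanda-style helper would format the translated port as ASCII and
 * substitute it for the original digits in the UDP payload, e.g.:
 *
 *	char buf[sizeof("65535")];
 *	unsigned int len = sprintf(buf, "%u", ntohs(newport));
 *
 *	if (!ip_nat_mangle_udp_packet(skb, ct, ctinfo,
 *				      matchoff, matchlen, buf, len))
 *		return 0;
 *
 * As with the TCP variant, matchoff/matchlen are relative to the start
 * of the payload (here: the first byte after the UDP header).
 */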

/* Adjust one found SACK option including checksum correction */
static void
sack_adjust(struct tcphdr *tcph,
	    unsigned char *ptr,
	    struct ip_nat_seq *natseq)
{
	struct tcp_sack_block *sp = (struct tcp_sack_block *)(ptr+2);
	int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
	int i;

	for (i = 0; i < num_sacks; i++, sp++) {
		u_int32_t new_start_seq, new_end_seq;

		if (after(ntohl(sp->start_seq) - natseq->offset_before,
			  natseq->correction_pos))
			new_start_seq = ntohl(sp->start_seq)
					- natseq->offset_after;
		else
			new_start_seq = ntohl(sp->start_seq)
					- natseq->offset_before;
		new_start_seq = htonl(new_start_seq);

		if (after(ntohl(sp->end_seq) - natseq->offset_before,
			  natseq->correction_pos))
			new_end_seq = ntohl(sp->end_seq)
				      - natseq->offset_after;
		else
			new_end_seq = ntohl(sp->end_seq)
				      - natseq->offset_before;
		new_end_seq = htonl(new_end_seq);

		DEBUGP("sack_adjust: start_seq: %u->%u, end_seq: %u->%u\n",
		       ntohl(sp->start_seq), ntohl(new_start_seq),
		       ntohl(sp->end_seq), ntohl(new_end_seq));

		tcph->check =
			ip_nat_cheat_check(~sp->start_seq, new_start_seq,
					   ip_nat_cheat_check(~sp->end_seq,
							      new_end_seq,
							      tcph->check));

		sp->start_seq = new_start_seq;
		sp->end_seq = new_end_seq;
	}
}


/* TCP SACK sequence number adjustment. */
static inline void
ip_nat_sack_adjust(struct sk_buff *skb,
		   struct ip_conntrack *ct,
		   enum ip_conntrack_info ctinfo)
{
	struct tcphdr *tcph;
	unsigned char *ptr, *optend;
	unsigned int dir;

	tcph = (void *)skb->nh.iph + skb->nh.iph->ihl*4;
	optend = (unsigned char *)tcph + tcph->doff*4;
	ptr = (unsigned char *)(tcph+1);

	dir = CTINFO2DIR(ctinfo);

	while (ptr < optend) {
		int opcode = ptr[0];
		int opsize;

		switch (opcode) {
		case TCPOPT_EOL:
			return;
		case TCPOPT_NOP:
			ptr++;
			continue;
		default:
			opsize = ptr[1];
			/* no partial opts */
			if (ptr + opsize > optend || opsize < 2)
				return;
			if (opcode == TCPOPT_SACK) {
				/* found SACK; its blocks acknowledge data
				 * flowing in the opposite direction, hence
				 * the other way's offsets are used */
				if ((opsize >= (TCPOLEN_SACK_BASE
						+TCPOLEN_SACK_PERBLOCK)) &&
				    !((opsize - TCPOLEN_SACK_BASE)
				      % TCPOLEN_SACK_PERBLOCK))
					sack_adjust(tcph, ptr,
						    &ct->nat.info.seq[!dir]);
			}
			ptr += opsize;
		}
	}
}

/* TCP sequence number adjustment */
int
ip_nat_seq_adjust(struct sk_buff *skb,
		  struct ip_conntrack *ct,
		  enum ip_conntrack_info ctinfo)
{
	struct iphdr *iph;
	struct tcphdr *tcph;
	int dir, newseq, newack;
	struct ip_nat_seq *this_way, *other_way;

	iph = skb->nh.iph;
	tcph = (void *)iph + iph->ihl*4;

	dir = CTINFO2DIR(ctinfo);

	this_way = &ct->nat.info.seq[dir];
	other_way = &ct->nat.info.seq[!dir];

	if (after(ntohl(tcph->seq), this_way->correction_pos))
		newseq = ntohl(tcph->seq) + this_way->offset_after;
	else
		newseq = ntohl(tcph->seq) + this_way->offset_before;
	newseq = htonl(newseq);

	if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
		  other_way->correction_pos))
		newack = ntohl(tcph->ack_seq) - other_way->offset_after;
	else
		newack = ntohl(tcph->ack_seq) - other_way->offset_before;
	newack = htonl(newack);

	tcph->check = ip_nat_cheat_check(~tcph->seq, newseq,
					 ip_nat_cheat_check(~tcph->ack_seq,
							    newack,
							    tcph->check));

	DEBUGP("Adjusting sequence number from %u->%u, ack from %u->%u\n",
	       ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
	       ntohl(newack));

	tcph->seq = newseq;
	tcph->ack_seq = newack;

	ip_nat_sack_adjust(skb, ct, ctinfo);

	return 0;
}

static inline int
helper_cmp(const struct ip_nat_helper *helper,
	   const struct ip_conntrack_tuple *tuple)
{
	return ip_ct_tuple_mask_cmp(tuple, &helper->tuple, &helper->mask);
}

#define MODULE_MAX_NAMELEN 32

int ip_nat_helper_register(struct ip_nat_helper *me)
{
	int ret = 0;

	if (me->me && !(me->flags & IP_NAT_HELPER_F_STANDALONE)) {
		struct ip_conntrack_helper *ct_helper;

		if ((ct_helper = ip_ct_find_helper(&me->tuple))
		    && ct_helper->me) {
			__MOD_INC_USE_COUNT(ct_helper->me);
		} else {

			/* We are a NAT helper for protocol X.  If we need
			 * the respective conntrack helper for protocol X,
			 * compute the conntrack helper name and try to load
			 * that module */
			char name[MODULE_MAX_NAMELEN];
			const char *tmp = me->me->name;

			if (strlen(tmp) + 6 >= MODULE_MAX_NAMELEN) {
				printk("%s: unable to "
				       "compute conntrack helper name "
				       "from %s\n", __FUNCTION__, tmp);
				return -EBUSY;
			}
			/* skip the "ip_nat" prefix of our module name */
			tmp += 6;
			sprintf(name, "ip_conntrack%s", tmp);
#ifdef CONFIG_KMOD
			if (!request_module(name)
			    && (ct_helper = ip_ct_find_helper(&me->tuple))
			    && ct_helper->me) {
				__MOD_INC_USE_COUNT(ct_helper->me);
			} else {
				printk("unable to load module %s\n", name);
				return -EBUSY;
			}
#else
			printk("unable to load module %s automatically "
			       "because kernel was compiled without kernel "
			       "module loader support\n", name);
			return -EBUSY;
#endif
		}
	}
	WRITE_LOCK(&ip_nat_lock);
	if (LIST_FIND(&helpers, helper_cmp, struct ip_nat_helper *, &me->tuple))
		ret = -EBUSY;
	else {
		list_prepend(&helpers, me);
		MOD_INC_USE_COUNT;
	}
	WRITE_UNLOCK(&ip_nat_lock);

	return ret;
}
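
/*
 * Registration sketch (hypothetical helper module; field names assumed
 * from the 2.4 ip_nat_helper API, values illustrative):
 *
 *	static struct ip_nat_helper hlpr;
 *
 *	static int __init init(void)
 *	{
 *		memset(&hlpr, 0, sizeof(hlpr));
 *		hlpr.me = THIS_MODULE;
 *		hlpr.name = "ftp";
 *		hlpr.tuple.dst.protonum = IPPROTO_TCP;
 *		hlpr.tuple.src.u.tcp.port = htons(21);
 *		hlpr.mask.dst.protonum = 0xFFFF;
 *		hlpr.mask.src.u.tcp.port = 0xFFFF;
 *		hlpr.help = help;	(mangling callback, see ip_nat_helper.h)
 *		return ip_nat_helper_register(&hlpr);
 *	}
 *
 *	static void __exit fini(void)
 *	{
 *		ip_nat_helper_unregister(&hlpr);
 *	}
 *
 * Note that a non-standalone helper's module name must start with
 * "ip_nat", since ip_nat_helper_register() derives the conntrack helper
 * module name by replacing that prefix with "ip_conntrack".
 */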

static int
kill_helper(struct ip_conntrack *i, void *helper)
{
	return (i->nat.info.helper == helper);
}

void ip_nat_helper_unregister(struct ip_nat_helper *me)
{
	int found = 0;

	WRITE_LOCK(&ip_nat_lock);
	/* Autoloading conntrack helper might have failed */
	if (LIST_FIND(&helpers, helper_cmp, struct ip_nat_helper *, &me->tuple)) {
		LIST_DELETE(&helpers, me);
		found = 1;
	}
	WRITE_UNLOCK(&ip_nat_lock);
	/* Someone could still be looking at the helper in a bh. */
	br_write_lock_bh(BR_NETPROTO_LOCK);
	br_write_unlock_bh(BR_NETPROTO_LOCK);

	/* Find anything using it, and umm, kill them.  We can't turn
	   them into normal connections: if we've adjusted SYNs, then
	   they'll ackstorm.  So we just drop it.  We used to just
	   bump the module count when a connection existed, but that
	   forces admins to gen fake RSTs or bounce box, either of
	   which is just a long-winded way of making things
	   worse. --RR */
	ip_ct_iterate_cleanup(kill_helper, me);

	if (found)
		MOD_DEC_USE_COUNT;

	/* If we are not a standalone NAT helper, we need to decrement the
	 * usage count of our conntrack helper */
	if (me->me && !(me->flags & IP_NAT_HELPER_F_STANDALONE)) {
		struct ip_conntrack_helper *ct_helper;

		if ((ct_helper = ip_ct_find_helper(&me->tuple))
		    && ct_helper->me) {
			__MOD_DEC_USE_COUNT(ct_helper->me);
		} else
			printk("%s: unable to decrement usage count"
			       " of conntrack helper %s\n",
			       __FUNCTION__, me->me->name);
	}
}
