/* NAT for netfilter; shared with compatibility layer. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/checksum.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/tcp.h>  /* For tcp_prot in getorigdst */
#include <linux/icmp.h>
#include <linux/udp.h>
#include <linux/jhash.h>

#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_zones.h>

static DEFINE_SPINLOCK(nf_nat_lock);

static struct nf_conntrack_l3proto *l3proto __read_mostly;

#define MAX_IP_NAT_PROTO 256
static const struct nf_nat_protocol __rcu *nf_nat_protos[MAX_IP_NAT_PROTO]
						__read_mostly;

static inline const struct nf_nat_protocol *
__nf_nat_proto_find(u_int8_t protonum)
{
	return rcu_dereference(nf_nat_protos[protonum]);
}

/* We keep an extra hash for each conntrack, for fast searching. */
static inline unsigned int
hash_by_src(const struct net *net, u16 zone,
	    const struct nf_conntrack_tuple *tuple)
{
	unsigned int hash;

	/* Original src, to ensure we map it consistently if poss. */
	hash = jhash_3words((__force u32)tuple->src.u3.ip,
			    (__force u32)tuple->src.u.all ^ zone,
			    tuple->dst.protonum, 0);
	return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
}
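
/*
 * Note on the reduction above (illustrative): instead of "hash % size",
 * the 32-bit hash is scaled onto [0, size) with one 64-bit multiply and
 * shift, avoiding a division.  For example, with size = 4096:
 *
 *	((u64)0x80000000 * 4096) >> 32 == 2048
 *
 * a hash half-way through the 32-bit space lands half-way through the
 * table.
 */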

/* Is this tuple already taken? (not by us) */
int
nf_nat_used_tuple(const struct nf_conntrack_tuple *tuple,
		  const struct nf_conn *ignored_conntrack)
{
	/* Conntrack itself doesn't keep track of outgoing tuples, only
	   incoming ones.  With NAT there is no fixed mapping, so we
	   invert the tuple and look for the incoming reply.

	   We could keep a separate hash if this proves too slow. */
	struct nf_conntrack_tuple reply;

	nf_ct_invert_tuplepr(&reply, tuple);
	return nf_conntrack_tuple_taken(&reply, ignored_conntrack);
}
EXPORT_SYMBOL(nf_nat_used_tuple);
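
/*
 * Illustrative example (not part of the module): for an outgoing UDP
 * tuple
 *
 *	src = 10.0.0.1:1024, dst = 192.0.2.53:53
 *
 * nf_ct_invert_tuplepr() yields the would-be reply
 *
 *	src = 192.0.2.53:53, dst = 10.0.0.1:1024
 *
 * and nf_conntrack_tuple_taken() reports whether any conntrack other
 * than ignored_conntrack already expects that reply.
 */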

/* If we source-map this tuple so the reply looks like reply_tuple, will
 * it meet the constraints of range? */
static int
in_range(const struct nf_conntrack_tuple *tuple,
	 const struct nf_nat_range *range)
{
	const struct nf_nat_protocol *proto;
	int ret = 0;

	/* If we are supposed to map IPs, then we must be in the
	   range specified; otherwise let this drag us onto a new src IP. */
	if (range->flags & IP_NAT_RANGE_MAP_IPS) {
		if (ntohl(tuple->src.u3.ip) < ntohl(range->min_ip) ||
		    ntohl(tuple->src.u3.ip) > ntohl(range->max_ip))
			return 0;
	}

	rcu_read_lock();
	proto = __nf_nat_proto_find(tuple->dst.protonum);
	if (!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
	    proto->in_range(tuple, IP_NAT_MANIP_SRC,
			    &range->min, &range->max))
		ret = 1;
	rcu_read_unlock();

	return ret;
}

static inline int
same_src(const struct nf_conn *ct,
	 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_tuple *t;

	t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	return (t->dst.protonum == tuple->dst.protonum &&
		t->src.u3.ip == tuple->src.u3.ip &&
		t->src.u.all == tuple->src.u.all);
}

/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net, u16 zone,
		     const struct nf_conntrack_tuple *tuple,
		     struct nf_conntrack_tuple *result,
		     const struct nf_nat_range *range)
{
	unsigned int h = hash_by_src(net, zone, tuple);
	const struct nf_conn_nat *nat;
	const struct nf_conn *ct;
	const struct hlist_node *n;

	rcu_read_lock();
	hlist_for_each_entry_rcu(nat, n, &net->ipv4.nat_bysource[h], bysource) {
		ct = nat->ct;
		if (same_src(ct, tuple) && nf_ct_zone(ct) == zone) {
			/* Copy source part from reply tuple. */
			nf_ct_invert_tuplepr(result,
				       &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
			result->dst = tuple->dst;

			if (in_range(result, range)) {
				rcu_read_unlock();
				return 1;
			}
		}
	}
	rcu_read_unlock();
	return 0;
}

/* For [FUTURE] fragmentation handling, we want the least-used
   src-ip/dst-ip/proto triple.  Fairness doesn't come into it.  Thus
   if the range specifies 1.2.3.4 ports 10000-10005 and 1.2.3.5 ports
   1-65535, we don't do pro-rata allocation based on ports; we choose
   the ip with the lowest src-ip/dst-ip/proto usage.
*/
static void
find_best_ips_proto(u16 zone, struct nf_conntrack_tuple *tuple,
		    const struct nf_nat_range *range,
		    const struct nf_conn *ct,
		    enum nf_nat_manip_type maniptype)
{
	__be32 *var_ipp;
	/* Host order */
	u_int32_t minip, maxip, j;

	/* No IP mapping? Do nothing. */
	if (!(range->flags & IP_NAT_RANGE_MAP_IPS))
		return;

	if (maniptype == IP_NAT_MANIP_SRC)
		var_ipp = &tuple->src.u3.ip;
	else
		var_ipp = &tuple->dst.u3.ip;

	/* Fast path: only one choice. */
	if (range->min_ip == range->max_ip) {
		*var_ipp = range->min_ip;
		return;
	}

	/* Hashing source and destination IPs gives a fairly even
	 * spread in practice (if there are a small number of IPs
	 * involved, there usually aren't that many connections
	 * anyway).  The consistency means that servers see the same
	 * client coming from the same IP (some Internet Banking sites
	 * like this), even across reboots. */
	minip = ntohl(range->min_ip);
	maxip = ntohl(range->max_ip);
	j = jhash_2words((__force u32)tuple->src.u3.ip,
			 range->flags & IP_NAT_RANGE_PERSISTENT ?
				0 : (__force u32)tuple->dst.u3.ip ^ zone, 0);
	j = ((u64)j * (maxip - minip + 1)) >> 32;
	*var_ipp = htonl(minip + j);
}
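
/*
 * Worked example (illustrative): for a range of 10.0.0.1 - 10.0.0.4,
 * maxip - minip + 1 == 4, so the final scaling step
 *
 *	j = ((u64)j * 4) >> 32;
 *
 * leaves j in [0, 3] and *var_ipp becomes one of the four addresses.
 * With IP_NAT_RANGE_PERSISTENT set, the destination is excluded from
 * the hash, so a client keeps the same mapped address for every server.
 */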

/* Manipulate the tuple into the range given.  For NF_INET_POST_ROUTING,
 * we change the source to map into the range.  For NF_INET_PRE_ROUTING
 * and NF_INET_LOCAL_OUT, we change the destination to map into the
 * range.  It might not be possible to get a unique tuple, but we try.
 * At worst (or if we race), we will end up with a final duplicate in
 * __ip_conntrack_confirm and drop the packet. */
static void
get_unique_tuple(struct nf_conntrack_tuple *tuple,
		 const struct nf_conntrack_tuple *orig_tuple,
		 const struct nf_nat_range *range,
		 struct nf_conn *ct,
		 enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	const struct nf_nat_protocol *proto;
	u16 zone = nf_ct_zone(ct);

	/* 1) If this srcip/proto/src-proto-part is currently mapped,
	   and that same mapping gives a unique tuple within the given
	   range, use that.

	   This is only required for source (ie. NAT/masq) mappings.
	   So far, we don't do local source mappings, so multiple
	   manips are not an issue. */
	if (maniptype == IP_NAT_MANIP_SRC &&
	    !(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
		/* try the original tuple first */
		if (in_range(orig_tuple, range)) {
			if (!nf_nat_used_tuple(orig_tuple, ct)) {
				*tuple = *orig_tuple;
				return;
			}
		} else if (find_appropriate_src(net, zone, orig_tuple, tuple,
						range)) {
			pr_debug("get_unique_tuple: Found current src map\n");
			if (!nf_nat_used_tuple(tuple, ct))
				return;
		}
	}

	/* 2) Select the least-used IP/proto combination in the given
	   range. */
	*tuple = *orig_tuple;
	find_best_ips_proto(zone, tuple, range, ct, maniptype);

	/* 3) The per-protocol part of the manip is made to map into
	   the range to make a unique tuple. */

	rcu_read_lock();
	proto = __nf_nat_proto_find(orig_tuple->dst.protonum);

	/* Only bother mapping if it's not already in range and unique */
	if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM)) {
		if (range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) {
			if (proto->in_range(tuple, maniptype, &range->min,
					    &range->max) &&
			    (range->min.all == range->max.all ||
			     !nf_nat_used_tuple(tuple, ct)))
				goto out;
		} else if (!nf_nat_used_tuple(tuple, ct)) {
			goto out;
		}
	}

	/* Last chance: get the protocol to try to obtain a unique tuple. */
	proto->unique_tuple(tuple, range, maniptype, ct);
out:
	rcu_read_unlock();
}

unsigned int
nf_nat_setup_info(struct nf_conn *ct,
		  const struct nf_nat_range *range,
		  enum nf_nat_manip_type maniptype)
{
	struct net *net = nf_ct_net(ct);
	struct nf_conntrack_tuple curr_tuple, new_tuple;
	struct nf_conn_nat *nat;

	/* A NAT helper or ctnetlink may also set up the binding. */
	nat = nfct_nat(ct);
	if (!nat) {
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
		if (nat == NULL) {
			pr_debug("failed to add NAT extension\n");
			return NF_ACCEPT;
		}
	}

	NF_CT_ASSERT(maniptype == IP_NAT_MANIP_SRC ||
		     maniptype == IP_NAT_MANIP_DST);
	BUG_ON(nf_nat_initialized(ct, maniptype));

	/* What we've got will look like the inverse of the reply.
	   Normally this is what is in the conntrack, except for prior
	   manipulations (future optimization: if num_manips == 0,
	   orig_tp =
	   conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple) */
	nf_ct_invert_tuplepr(&curr_tuple,
			     &ct->tuplehash[IP_CT_DIR_REPLY].tuple);

	get_unique_tuple(&new_tuple, &curr_tuple, range, ct, maniptype);

	if (!nf_ct_tuple_equal(&new_tuple, &curr_tuple)) {
		struct nf_conntrack_tuple reply;

		/* Alter conntrack table so it will recognize replies. */
		nf_ct_invert_tuplepr(&reply, &new_tuple);
		nf_conntrack_alter_reply(ct, &reply);

		/* Non-atomic: we own this at the moment. */
		if (maniptype == IP_NAT_MANIP_SRC)
			ct->status |= IPS_SRC_NAT;
		else
			ct->status |= IPS_DST_NAT;
	}

	if (maniptype == IP_NAT_MANIP_SRC) {
		unsigned int srchash;

		srchash = hash_by_src(net, nf_ct_zone(ct),
				      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
		spin_lock_bh(&nf_nat_lock);
		/* nf_conntrack_alter_reply might re-allocate the extension area */
		nat = nfct_nat(ct);
		nat->ct = ct;
		hlist_add_head_rcu(&nat->bysource,
				   &net->ipv4.nat_bysource[srchash]);
		spin_unlock_bh(&nf_nat_lock);
	}

	/* It's done. */
	if (maniptype == IP_NAT_MANIP_DST)
		ct->status |= IPS_DST_NAT_DONE;
	else
		ct->status |= IPS_SRC_NAT_DONE;

	return NF_ACCEPT;
}
EXPORT_SYMBOL(nf_nat_setup_info);
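
/*
 * Usage sketch (illustrative, not part of this file): an SNAT-style
 * target running at NF_INET_POST_ROUTING would call nf_nat_setup_info()
 * roughly like this, where "newsrc" (the chosen source address) is
 * assumed to come from the target's own configuration:
 *
 *	struct nf_nat_range range = {
 *		.flags	= IP_NAT_RANGE_MAP_IPS,
 *		.min_ip	= newsrc,
 *		.max_ip	= newsrc,
 *	};
 *	return nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
 */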

/* Returns true if succeeded. */
static bool
manip_pkt(u_int16_t proto,
	  struct sk_buff *skb,
	  unsigned int iphdroff,
	  const struct nf_conntrack_tuple *target,
	  enum nf_nat_manip_type maniptype)
{
	struct iphdr *iph;
	const struct nf_nat_protocol *p;

	if (!skb_make_writable(skb, iphdroff + sizeof(*iph)))
		return false;

	iph = (void *)skb->data + iphdroff;

	/* Manipulate the protocol part. */

	/* rcu_read_lock()ed by nf_hook_slow */
	p = __nf_nat_proto_find(proto);
	if (!p->manip_pkt(skb, iphdroff, target, maniptype))
		return false;

	iph = (void *)skb->data + iphdroff;

	if (maniptype == IP_NAT_MANIP_SRC) {
		csum_replace4(&iph->check, iph->saddr, target->src.u3.ip);
		iph->saddr = target->src.u3.ip;
	} else {
		csum_replace4(&iph->check, iph->daddr, target->dst.u3.ip);
		iph->daddr = target->dst.u3.ip;
	}
	return true;
}
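
/*
 * Note on csum_replace4() above (illustrative): the IP checksum is
 * updated incrementally per RFC 1624 instead of being recomputed:
 *
 *	HC' = ~(~HC + ~m + m')
 *
 * where HC is the old checksum, m the old address word and m' the new
 * one; only the changed 32-bit word is folded into the sum.
 */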

/* Do packet manipulations according to nf_nat_setup_info. */
unsigned int nf_nat_packet(struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo,
			   unsigned int hooknum,
			   struct sk_buff *skb)
{
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type mtype = HOOK2MANIP(hooknum);

	if (mtype == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	/* Non-atomic: these bits don't change. */
	if (ct->status & statusbit) {
		struct nf_conntrack_tuple target;

		/* We are aiming to look like the inverse of the other direction. */
		nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);

		if (!manip_pkt(target.dst.protonum, skb, 0, &target, mtype))
			return NF_DROP;
	}
	return NF_ACCEPT;
}
EXPORT_SYMBOL_GPL(nf_nat_packet);
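
/*
 * Decision table for nf_nat_packet() above (illustrative summary,
 * derived from the statusbit logic):
 *
 *	manip	direction	bit that enables mangling
 *	SRC	ORIGINAL	IPS_SRC_NAT
 *	SRC	REPLY		IPS_DST_NAT
 *	DST	ORIGINAL	IPS_DST_NAT
 *	DST	REPLY		IPS_SRC_NAT
 *
 * XOR with IPS_NAT_MASK (== IPS_SRC_NAT | IPS_DST_NAT) swaps the two
 * bits, which is exactly the "invert for reply direction" step.
 */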

/* Dir is the direction the ICMP is coming from (opposite to the packet it contains). */
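/*
 * Packet layout assumed here (illustrative): an ICMP error quotes at
 * least the IP header and first 64 bits of the packet that triggered it:
 *
 *	[outer IP hdr][icmphdr][inner IP hdr][inner l4 hdr ...]
 *
 * The outer header is translated with the normal manip for this hook;
 * the quoted inner packet gets the opposite manip, since it is a copy
 * of a packet travelling in the other direction.
 */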
int nf_nat_icmp_reply_translation(struct nf_conn *ct,
				  enum ip_conntrack_info ctinfo,
				  unsigned int hooknum,
				  struct sk_buff *skb)
{
	struct {
		struct icmphdr icmp;
		struct iphdr ip;
	} *inside;
	const struct nf_conntrack_l4proto *l4proto;
	struct nf_conntrack_tuple inner, target;
	int hdrlen = ip_hdrlen(skb);
	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
	unsigned long statusbit;
	enum nf_nat_manip_type manip = HOOK2MANIP(hooknum);

	if (!skb_make_writable(skb, hdrlen + sizeof(*inside)))
		return 0;

	inside = (void *)skb->data + hdrlen;

	/* We're actually going to mangle it beyond trivial checksum
	   adjustment, so make sure the current checksum is correct. */
	if (nf_ip_checksum(skb, hooknum, hdrlen, 0))
		return 0;

	/* Must be RELATED */
	NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED ||
		     skb->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY);

	/* Redirects on non-null nats must be dropped, else they'll
	   start talking to each other without our translation, and be
	   confused... --RR */
	if (inside->icmp.type == ICMP_REDIRECT) {
		/* If NAT isn't finished yet, assume it will happen and drop. */
		if ((ct->status & IPS_NAT_DONE_MASK) != IPS_NAT_DONE_MASK)
			return 0;

		if (ct->status & IPS_NAT_MASK)
			return 0;
	}

	if (manip == IP_NAT_MANIP_SRC)
		statusbit = IPS_SRC_NAT;
	else
		statusbit = IPS_DST_NAT;

	/* Invert if this is reply dir. */
	if (dir == IP_CT_DIR_REPLY)
		statusbit ^= IPS_NAT_MASK;

	if (!(ct->status & statusbit))
		return 1;

	pr_debug("icmp_reply_translation: translating error %p manip %u "
		 "dir %s\n", skb, manip,
		 dir == IP_CT_DIR_ORIGINAL ? "ORIG" : "REPLY");

	/* rcu_read_lock()ed by nf_hook_slow */
	l4proto = __nf_ct_l4proto_find(PF_INET, inside->ip.protocol);

	if (!nf_ct_get_tuple(skb, hdrlen + sizeof(struct icmphdr),
			     (hdrlen +
			      sizeof(struct icmphdr) + inside->ip.ihl * 4),
			     (u_int16_t)AF_INET, inside->ip.protocol,
			     &inner, l3proto, l4proto))
		return 0;

	/* Change the inner packet back to look like the incoming packet.
	   We do the opposite manip on this hook to normal, because it
	   might not pass all hooks (locally-generated ICMP).  Consider an
	   incoming packet: PREROUTING (DST manip), routing produces ICMP,
	   goes through POSTROUTING (which must correct the DST manip). */
	if (!manip_pkt(inside->ip.protocol, skb, hdrlen + sizeof(inside->icmp),
		       &ct->tuplehash[!dir].tuple, !manip))
		return 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Reload "inside": manip_pkt may have reallocated skb data
		   while mangling the inner packet. */
		inside = (void *)skb->data + hdrlen;
		inside->icmp.checksum = 0;
		inside->icmp.checksum =
			csum_fold(skb_checksum(skb, hdrlen,
					       skb->len - hdrlen, 0));
	}

	/* Change the outer packet to look like the reply to an incoming
	 * packet (proto 0 means don't invert the per-proto part). */
	nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple);
	if (!manip_pkt(0, skb, 0, &target, manip))
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(nf_nat_icmp_reply_translation);

/* Protocol registration. */
int nf_nat_protocol_register(const struct nf_nat_protocol *proto)
{
	int ret = 0;

	spin_lock_bh(&nf_nat_lock);
	if (rcu_dereference_protected(
			nf_nat_protos[proto->protonum],
			lockdep_is_held(&nf_nat_lock)
			) != &nf_nat_unknown_protocol) {
		ret = -EBUSY;
		goto out;
	}
	rcu_assign_pointer(nf_nat_protos[proto->protonum], proto);
out:
	spin_unlock_bh(&nf_nat_lock);
	return ret;
}
EXPORT_SYMBOL(nf_nat_protocol_register);

/* No one stores the protocol anywhere; simply delete it. */
void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto)
{
	spin_lock_bh(&nf_nat_lock);
	rcu_assign_pointer(nf_nat_protos[proto->protonum],
			   &nf_nat_unknown_protocol);
	spin_unlock_bh(&nf_nat_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(nf_nat_protocol_unregister);
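
/*
 * Registration sketch (illustrative, not part of this file): a NAT
 * protocol module supplies its callbacks in a struct nf_nat_protocol
 * and registers it at init time.  All "foo" names below are
 * placeholders:
 *
 *	static const struct nf_nat_protocol nf_nat_protocol_foo = {
 *		.protonum	= IPPROTO_FOO,		(hypothetical)
 *		.me		= THIS_MODULE,
 *		.manip_pkt	= foo_manip_pkt,
 *		.in_range	= foo_in_range,
 *		.unique_tuple	= foo_unique_tuple,
 *	};
 *
 *	err = nf_nat_protocol_register(&nf_nat_protocol_foo);
 *
 * Unregistering swings the slot back to nf_nat_unknown_protocol, and
 * synchronize_rcu() makes sure no reader still holds the old pointer.
 */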

/* No one is using the conntrack by the time this is called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
	struct nf_conn_nat *nat = nf_ct_ext_find(ct, NF_CT_EXT_NAT);

	if (nat == NULL || nat->ct == NULL)
		return;

	NF_CT_ASSERT(nat->ct->status & IPS_SRC_NAT_DONE);

	spin_lock_bh(&nf_nat_lock);
	hlist_del_rcu(&nat->bysource);
	spin_unlock_bh(&nf_nat_lock);
}

static void nf_nat_move_storage(void *new, void *old)
{
	struct nf_conn_nat *new_nat = new;
	struct nf_conn_nat *old_nat = old;
	struct nf_conn *ct = old_nat->ct;

	if (!ct || !(ct->status & IPS_SRC_NAT_DONE))
		return;

	spin_lock_bh(&nf_nat_lock);
	hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource);
	spin_unlock_bh(&nf_nat_lock);
}

static struct nf_ct_ext_type nat_extend __read_mostly = {
	.len		= sizeof(struct nf_conn_nat),
	.align		= __alignof__(struct nf_conn_nat),
	.destroy	= nf_nat_cleanup_conntrack,
	.move		= nf_nat_move_storage,
	.id		= NF_CT_EXT_NAT,
	.flags		= NF_CT_EXT_F_PREALLOC,
};

#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static const struct nf_nat_protocol *
nf_nat_proto_find_get(u_int8_t protonum)
{
	const struct nf_nat_protocol *p;

	rcu_read_lock();
	p = __nf_nat_proto_find(protonum);
	if (!try_module_get(p->me))
		p = &nf_nat_unknown_protocol;
	rcu_read_unlock();

	return p;
}

static void
nf_nat_proto_put(const struct nf_nat_protocol *p)
{
	module_put(p->me);
}

static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
};

static int nfnetlink_parse_nat_proto(struct nlattr *attr,
				     const struct nf_conn *ct,
				     struct nf_nat_range *range)
{
	struct nlattr *tb[CTA_PROTONAT_MAX+1];
	const struct nf_nat_protocol *npt;
	int err;

	err = nla_parse_nested(tb, CTA_PROTONAT_MAX, attr, protonat_nla_policy);
	if (err < 0)
		return err;

	npt = nf_nat_proto_find_get(nf_ct_protonum(ct));
	if (npt->nlattr_to_range)
		err = npt->nlattr_to_range(tb, range);
	nf_nat_proto_put(npt);
	return err;
}

static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
	[CTA_NAT_MINIP]		= { .type = NLA_U32 },
	[CTA_NAT_MAXIP]		= { .type = NLA_U32 },
};

static int
nfnetlink_parse_nat(const struct nlattr *nat,
		    const struct nf_conn *ct, struct nf_nat_range *range)
{
	struct nlattr *tb[CTA_NAT_MAX+1];
	int err;

	memset(range, 0, sizeof(*range));

	err = nla_parse_nested(tb, CTA_NAT_MAX, nat, nat_nla_policy);
	if (err < 0)
		return err;

	if (tb[CTA_NAT_MINIP])
		range->min_ip = nla_get_be32(tb[CTA_NAT_MINIP]);

	if (!tb[CTA_NAT_MAXIP])
		range->max_ip = range->min_ip;
	else
		range->max_ip = nla_get_be32(tb[CTA_NAT_MAXIP]);

	if (range->min_ip)
		range->flags |= IP_NAT_RANGE_MAP_IPS;

	if (!tb[CTA_NAT_PROTO])
		return 0;

	err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
	if (err < 0)
		return err;

	return 0;
}
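
/*
 * Attribute layout parsed above (illustrative): the CTA_NAT nest as
 * sent by ctnetlink looks like
 *
 *	CTA_NAT_MINIP  (NLA_U32, big-endian IPv4 address)
 *	CTA_NAT_MAXIP  (NLA_U32, optional; defaults to MINIP)
 *	CTA_NAT_PROTO  (nested)
 *	    CTA_PROTONAT_PORT_MIN  (NLA_U16)
 *	    CTA_PROTONAT_PORT_MAX  (NLA_U16)
 */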

static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	struct nf_nat_range range;

	if (nfnetlink_parse_nat(attr, ct, &range) < 0)
		return -EINVAL;
	if (nf_nat_initialized(ct, manip))
		return -EEXIST;

	return nf_nat_setup_info(ct, &range, manip);
}
#else
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  const struct nlattr *attr)
{
	return -EOPNOTSUPP;
}
#endif

static int __net_init nf_nat_net_init(struct net *net)
{
	/* Leave them the same for the moment. */
	net->ipv4.nat_htable_size = net->ct.htable_size;
	net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size, 0);
	if (!net->ipv4.nat_bysource)
		return -ENOMEM;
	return 0;
}

/* Clear the NAT section of all conntracks, in case we're loaded again. */
static int clean_nat(struct nf_conn *i, void *data)
{
	struct nf_conn_nat *nat = nfct_nat(i);

	if (!nat)
		return 0;
	memset(nat, 0, sizeof(*nat));
	i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
	return 0;
}

static void __net_exit nf_nat_net_exit(struct net *net)
{
	nf_ct_iterate_cleanup(net, &clean_nat, NULL);
	synchronize_rcu();
	nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_htable_size);
}

static struct pernet_operations nf_nat_net_ops = {
	.init = nf_nat_net_init,
	.exit = nf_nat_net_exit,
};

static int __init nf_nat_init(void)
{
	size_t i;
	int ret;

	need_ipv4_conntrack();

	ret = nf_ct_extend_register(&nat_extend);
	if (ret < 0) {
		printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
		return ret;
	}

	ret = register_pernet_subsys(&nf_nat_net_ops);
	if (ret < 0)
		goto cleanup_extend;

	/* Sew in builtin protocols. */
	spin_lock_bh(&nf_nat_lock);
	for (i = 0; i < MAX_IP_NAT_PROTO; i++)
		rcu_assign_pointer(nf_nat_protos[i], &nf_nat_unknown_protocol);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_TCP], &nf_nat_protocol_tcp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_UDP], &nf_nat_protocol_udp);
	rcu_assign_pointer(nf_nat_protos[IPPROTO_ICMP], &nf_nat_protocol_icmp);
	spin_unlock_bh(&nf_nat_lock);

	/* Initialize the fake conntrack so that NAT will skip it. */
	nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);

	l3proto = nf_ct_l3proto_find_get((u_int16_t)AF_INET);

	BUG_ON(nf_nat_seq_adjust_hook != NULL);
	rcu_assign_pointer(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
	BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
	rcu_assign_pointer(nfnetlink_parse_nat_setup_hook,
			   nfnetlink_parse_nat_setup);
	BUG_ON(nf_ct_nat_offset != NULL);
	rcu_assign_pointer(nf_ct_nat_offset, nf_nat_get_offset);
	return 0;

cleanup_extend:
	nf_ct_extend_unregister(&nat_extend);
	return ret;
}

static void __exit nf_nat_cleanup(void)
{
	unregister_pernet_subsys(&nf_nat_net_ops);
	nf_ct_l3proto_put(l3proto);
	nf_ct_extend_unregister(&nat_extend);
	rcu_assign_pointer(nf_nat_seq_adjust_hook, NULL);
	rcu_assign_pointer(nfnetlink_parse_nat_setup_hook, NULL);
	rcu_assign_pointer(nf_ct_nat_offset, NULL);
	synchronize_net();
}

MODULE_LICENSE("GPL");
MODULE_ALIAS("nf-nat-ipv4");

module_init(nf_nat_init);
module_exit(nf_nat_cleanup);