/*
 * ip_vs_proto_udp.c:	UDP load balancing support for IPVS
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *              Julian Anastasov <ja@ssi.bg>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Changes:     Hans Schillstrom <hans.schillstrom@ericsson.com>
 *              Network name space (netns) aware.
 *
 */

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/udp.h>

#include <net/ip_vs.h>
#include <net/ip.h>
#include <net/ip6_checksum.h>

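/*
 * conn_schedule callback: called for a new UDP packet that has no
 * existing IPVS connection.  Returns 1 to let the caller continue
 * (NF_ACCEPT), or 0 with *verdict set when IPVS has taken a decision
 * itself (packet dropped or handled by ip_vs_leave).
 */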
static int
udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
		  int *verdict, struct ip_vs_conn **cpp)
{
	struct net *net;
	struct ip_vs_service *svc;
	struct udphdr _udph, *uh;
	struct ip_vs_iphdr iph;

	ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);

	uh = skb_header_pointer(skb, iph.len, sizeof(_udph), &_udph);
	if (uh == NULL) {
		*verdict = NF_DROP;
		return 0;
	}
	net = skb_net(skb);
	svc = ip_vs_service_get(net, af, skb->mark, iph.protocol,
				&iph.daddr, uh->dest);
	if (svc) {
		int ignored;

		if (ip_vs_todrop(net_ipvs(net))) {
			/*
			 * It seems that we are very loaded.
			 * We have to drop this packet :(
			 */
			ip_vs_service_put(svc);
			*verdict = NF_DROP;
			return 0;
		}

		/*
		 * Let the virtual server select a real server for the
		 * incoming connection, and create a connection entry.
		 */
		*cpp = ip_vs_schedule(svc, skb, pd, &ignored);
		if (!*cpp && ignored <= 0) {
			if (!ignored)
				*verdict = ip_vs_leave(svc, skb, pd);
			else {
				ip_vs_service_put(svc);
				*verdict = NF_DROP;
			}
			return 0;
		}
		ip_vs_service_put(svc);
	}
	/* NF_ACCEPT */
	return 1;
}


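/*
 * Incremental (RFC 1624 style) update of the UDP checksum when only an
 * address and port change: fold the difference between old and new
 * values into the existing checksum instead of recomputing it over the
 * whole payload.  A result of 0 is remapped to CSUM_MANGLED_0 (0xffff),
 * because 0 in the UDP header means "no checksum".
 */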
static inline void
udp_fast_csum_update(int af, struct udphdr *uhdr,
		     const union nf_inet_addr *oldip,
		     const union nf_inet_addr *newip,
		     __be16 oldport, __be16 newport)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		uhdr->check =
			csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
					 ip_vs_check_diff2(oldport, newport,
						~csum_unfold(uhdr->check))));
	else
#endif
		uhdr->check =
			csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
					 ip_vs_check_diff2(oldport, newport,
						~csum_unfold(uhdr->check))));
	if (!uhdr->check)
		uhdr->check = CSUM_MANGLED_0;
}

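/*
 * For CHECKSUM_PARTIAL skbs the check field holds the pseudo-header
 * checksum seed that the NIC will complete on transmit, so only the
 * address and length deltas are folded in here; no CSUM_MANGLED_0
 * remapping is needed because the final fold is done by the hardware.
 */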
static inline void
udp_partial_csum_update(int af, struct udphdr *uhdr,
		     const union nf_inet_addr *oldip,
		     const union nf_inet_addr *newip,
		     __be16 oldlen, __be16 newlen)
{
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		uhdr->check =
			~csum_fold(ip_vs_check_diff16(oldip->ip6, newip->ip6,
					 ip_vs_check_diff2(oldlen, newlen,
						csum_unfold(uhdr->check))));
	else
#endif
	uhdr->check =
		~csum_fold(ip_vs_check_diff4(oldip->ip, newip->ip,
				ip_vs_check_diff2(oldlen, newlen,
						csum_unfold(uhdr->check))));
}


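/*
 * SNAT handler: mangle a reply travelling real server -> client so that
 * it appears to come from the virtual service (source port becomes
 * cp->vport).  The checksum is then fixed up in one of three ways:
 * CHECKSUM_PARTIAL skbs get the pseudo-header seed adjusted, unmodified
 * payloads with a non-zero checksum get a fast incremental update, and
 * everything else (e.g. payload mangled by an application helper) gets
 * a full recomputation.
 */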
static int
udp_snat_handler(struct sk_buff *skb,
		 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
	struct udphdr *udph;
	unsigned int udphoff;
	int oldlen;
	int payload_csum = 0;

#ifdef CONFIG_IP_VS_IPV6
	if (cp->af == AF_INET6)
		udphoff = sizeof(struct ipv6hdr);
	else
#endif
		udphoff = ip_hdrlen(skb);
	oldlen = skb->len - udphoff;

	/* csum_check requires unshared skb */
	if (!skb_make_writable(skb, udphoff + sizeof(*udph)))
		return 0;

	if (unlikely(cp->app != NULL)) {
		int ret;

		/* Some checks before mangling */
		if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
			return 0;

		/*
		 *	Call application helper if needed
		 */
		if (!(ret = ip_vs_app_pkt_out(cp, skb)))
			return 0;
		/* ret=2: csum update is needed after payload mangling */
		if (ret == 1)
			oldlen = skb->len - udphoff;
		else
			payload_csum = 1;
	}

	udph = (void *)skb_network_header(skb) + udphoff;
	udph->source = cp->vport;

	/*
	 *	Adjust UDP checksums
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		udp_partial_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
					htons(oldlen),
					htons(skb->len - udphoff));
	} else if (!payload_csum && (udph->check != 0)) {
		/* Only port and addr are changed, do fast csum update */
		udp_fast_csum_update(cp->af, udph, &cp->daddr, &cp->vaddr,
				     cp->dport, cp->vport);
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = (cp->app && pp->csum_check) ?
					 CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
	} else {
		/* full checksum calculation */
		udph->check = 0;
		skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			udph->check = csum_ipv6_magic(&cp->vaddr.in6,
						      &cp->caddr.in6,
						      skb->len - udphoff,
						      cp->protocol, skb->csum);
		else
#endif
			udph->check = csum_tcpudp_magic(cp->vaddr.ip,
							cp->caddr.ip,
							skb->len - udphoff,
							cp->protocol,
							skb->csum);
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
			  pp->name, udph->check,
			  (char *)&(udph->check) - (char *)udph);
	}
	return 1;
}


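/*
 * DNAT handler: mangle a packet travelling client -> real server so
 * that it is addressed to the chosen real server (destination port
 * becomes cp->dport).  Checksum handling mirrors udp_snat_handler()
 * above.
 */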
static int
udp_dnat_handler(struct sk_buff *skb,
		 struct ip_vs_protocol *pp, struct ip_vs_conn *cp)
{
	struct udphdr *udph;
	unsigned int udphoff;
	int oldlen;
	int payload_csum = 0;

#ifdef CONFIG_IP_VS_IPV6
	if (cp->af == AF_INET6)
		udphoff = sizeof(struct ipv6hdr);
	else
#endif
		udphoff = ip_hdrlen(skb);
	oldlen = skb->len - udphoff;

	/* csum_check requires unshared skb */
	if (!skb_make_writable(skb, udphoff + sizeof(*udph)))
		return 0;

	if (unlikely(cp->app != NULL)) {
		int ret;

		/* Some checks before mangling */
		if (pp->csum_check && !pp->csum_check(cp->af, skb, pp))
			return 0;

		/*
		 *	Attempt ip_vs_app call.
		 *	It will fix ip_vs_conn
		 */
		if (!(ret = ip_vs_app_pkt_in(cp, skb)))
			return 0;
		/* ret=2: csum update is needed after payload mangling */
		if (ret == 1)
			oldlen = skb->len - udphoff;
		else
			payload_csum = 1;
	}

	udph = (void *)skb_network_header(skb) + udphoff;
	udph->dest = cp->dport;

	/*
	 *	Adjust UDP checksums
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		udp_partial_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
					htons(oldlen),
					htons(skb->len - udphoff));
	} else if (!payload_csum && (udph->check != 0)) {
		/* Only port and addr are changed, do fast csum update */
		udp_fast_csum_update(cp->af, udph, &cp->vaddr, &cp->daddr,
				     cp->vport, cp->dport);
		if (skb->ip_summed == CHECKSUM_COMPLETE)
			skb->ip_summed = (cp->app && pp->csum_check) ?
					 CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
	} else {
		/* full checksum calculation */
		udph->check = 0;
		skb->csum = skb_checksum(skb, udphoff, skb->len - udphoff, 0);
#ifdef CONFIG_IP_VS_IPV6
		if (cp->af == AF_INET6)
			udph->check = csum_ipv6_magic(&cp->caddr.in6,
						      &cp->daddr.in6,
						      skb->len - udphoff,
						      cp->protocol, skb->csum);
		else
#endif
			udph->check = csum_tcpudp_magic(cp->caddr.ip,
							cp->daddr.ip,
							skb->len - udphoff,
							cp->protocol,
							skb->csum);
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	return 1;
}


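/*
 * Verify the UDP checksum of an incoming packet.  A zero checksum is
 * accepted as-is, since UDP over IPv4 allows a sender to omit the
 * checksum entirely.  Returns 1 if the packet looks valid, 0 otherwise.
 */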
static int
udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
{
	struct udphdr _udph, *uh;
	unsigned int udphoff;

#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		udphoff = sizeof(struct ipv6hdr);
	else
#endif
		udphoff = ip_hdrlen(skb);

	uh = skb_header_pointer(skb, udphoff, sizeof(_udph), &_udph);
	if (uh == NULL)
		return 0;

	if (uh->check != 0) {
		switch (skb->ip_summed) {
		case CHECKSUM_NONE:
			skb->csum = skb_checksum(skb, udphoff,
						 skb->len - udphoff, 0);
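			/* fall through: verify the sum just computed */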
		case CHECKSUM_COMPLETE:
#ifdef CONFIG_IP_VS_IPV6
			if (af == AF_INET6) {
				if (csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						    &ipv6_hdr(skb)->daddr,
						    skb->len - udphoff,
						    ipv6_hdr(skb)->nexthdr,
						    skb->csum)) {
					IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
							 "Failed checksum for");
					return 0;
				}
			} else
#endif
				if (csum_tcpudp_magic(ip_hdr(skb)->saddr,
						      ip_hdr(skb)->daddr,
						      skb->len - udphoff,
						      ip_hdr(skb)->protocol,
						      skb->csum)) {
					IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
							 "Failed checksum for");
					return 0;
				}
			break;
		default:
			/* No need to checksum. */
			break;
		}
	}
	return 1;
}

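/*
 * Hash a service port into the per-netns UDP application helper table:
 * fold the upper bits of the 16-bit port onto the lower bits with an
 * XOR and mask the result to UDP_APP_TAB_BITS bits.
 */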
static inline __u16 udp_app_hashkey(__be16 port)
{
	return (((__force u16)port >> UDP_APP_TAB_BITS) ^ (__force u16)port)
		& UDP_APP_TAB_MASK;
}


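/*
 * Register/unregister an application helper incarnation for a UDP
 * virtual service port.  The per-netns helper table is protected by
 * ipvs->udp_app_lock, and pd->appcnt tracks how many helpers are
 * active for this protocol.
 */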
static int udp_register_app(struct net *net, struct ip_vs_app *inc)
{
	struct ip_vs_app *i;
	__u16 hash;
	__be16 port = inc->port;
	int ret = 0;
	struct netns_ipvs *ipvs = net_ipvs(net);
	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);

	hash = udp_app_hashkey(port);

	spin_lock_bh(&ipvs->udp_app_lock);
	list_for_each_entry(i, &ipvs->udp_apps[hash], p_list) {
		if (i->port == port) {
			ret = -EEXIST;
			goto out;
		}
	}
	list_add(&inc->p_list, &ipvs->udp_apps[hash]);
	atomic_inc(&pd->appcnt);

  out:
	spin_unlock_bh(&ipvs->udp_app_lock);
	return ret;
}


static void
udp_unregister_app(struct net *net, struct ip_vs_app *inc)
{
	struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
	struct netns_ipvs *ipvs = net_ipvs(net);

	spin_lock_bh(&ipvs->udp_app_lock);
	atomic_dec(&pd->appcnt);
	list_del(&inc->p_list);
	spin_unlock_bh(&ipvs->udp_app_lock);
}


static int udp_app_conn_bind(struct ip_vs_conn *cp)
{
	struct netns_ipvs *ipvs = net_ipvs(ip_vs_conn_net(cp));
	int hash;
	struct ip_vs_app *inc;
	int result = 0;

	/* Default binding: bind app only for NAT */
	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
		return 0;

	/* Lookup application incarnations and bind the right one */
	hash = udp_app_hashkey(cp->vport);

	spin_lock(&ipvs->udp_app_lock);
	list_for_each_entry(inc, &ipvs->udp_apps[hash], p_list) {
		if (inc->port == cp->vport) {
			if (unlikely(!ip_vs_app_inc_get(inc)))
				break;
			spin_unlock(&ipvs->udp_app_lock);

			IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
				      "%s:%u to app %s on port %u\n",
				      __func__,
				      IP_VS_DBG_ADDR(cp->af, &cp->caddr),
				      ntohs(cp->cport),
				      IP_VS_DBG_ADDR(cp->af, &cp->vaddr),
				      ntohs(cp->vport),
				      inc->name, ntohs(inc->port));

			cp->app = inc;
			if (inc->init_conn)
				result = inc->init_conn(inc, cp);
			goto out;
		}
	}
	spin_unlock(&ipvs->udp_app_lock);

  out:
	return result;
}


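/*
 * Default connection timeouts (in jiffies).  UDP is connectionless, so
 * there is only one real state: every packet simply refreshes the five
 * minute NORMAL timeout in udp_state_transition() below.
 */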
static const int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
	[IP_VS_UDP_S_NORMAL]		=	5*60*HZ,
	[IP_VS_UDP_S_LAST]		=	2*HZ,
};

static const char *const udp_state_name_table[IP_VS_UDP_S_LAST+1] = {
	[IP_VS_UDP_S_NORMAL]		=	"UDP",
	[IP_VS_UDP_S_LAST]		=	"BUG!",
};

static const char *udp_state_name(int state)
{
	if (state >= IP_VS_UDP_S_LAST)
		return "ERR!";
	return udp_state_name_table[state] ? udp_state_name_table[state] : "?";
}

static void
udp_state_transition(struct ip_vs_conn *cp, int direction,
		     const struct sk_buff *skb,
		     struct ip_vs_proto_data *pd)
{
	if (unlikely(!pd)) {
		pr_err("UDP no ns data\n");
		return;
	}

	cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL];
}

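/*
 * Per-netns init/exit: set up the application helper hash table and its
 * lock, and give each namespace its own writable copy of the default
 * timeout table so timeouts can be tuned independently.
 */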
static int __udp_init(struct net *net, struct ip_vs_proto_data *pd)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	ip_vs_init_hash_table(ipvs->udp_apps, UDP_APP_TAB_SIZE);
	spin_lock_init(&ipvs->udp_app_lock);
	pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
							sizeof(udp_timeouts));
	if (!pd->timeout_table)
		return -ENOMEM;
	return 0;
}

static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd)
{
	kfree(pd->timeout_table);
}


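/*
 * Protocol definition registered with the IPVS core.  Connection lookup
 * uses the generic TCP/UDP helpers; there is no global init/exit since
 * all state is kept per network namespace.
 */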
struct ip_vs_protocol ip_vs_protocol_udp = {
	.name =			"UDP",
	.protocol =		IPPROTO_UDP,
	.num_states =		IP_VS_UDP_S_LAST,
	.dont_defrag =		0,
	.init =			NULL,
	.exit =			NULL,
	.init_netns =		__udp_init,
	.exit_netns =		__udp_exit,
	.conn_schedule =	udp_conn_schedule,
	.conn_in_get =		ip_vs_conn_in_get_proto,
	.conn_out_get =		ip_vs_conn_out_get_proto,
	.snat_handler =		udp_snat_handler,
	.dnat_handler =		udp_dnat_handler,
	.csum_check =		udp_csum_check,
	.state_transition =	udp_state_transition,
	.state_name =		udp_state_name,
	.register_app =		udp_register_app,
	.unregister_app =	udp_unregister_app,
	.app_conn_bind =	udp_app_conn_bind,
	.debug_packet =		ip_vs_tcpudp_debug_packet,
	.timeout_change =	NULL,
};