/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>


/**
 *	inet_twsk_unhash - unhash a timewait socket from established hash
 *	@tw: timewait socket
 *
 *	unhash a timewait socket from established hash, if hashed.
 *	ehash lock must be held by caller.
 *	Returns 1 if caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_unhash(struct inet_timewait_sock *tw)
{
	if (hlist_nulls_unhashed(&tw->tw_node))
		return 0;

	hlist_nulls_del_rcu(&tw->tw_node);
	sk_nulls_node_init(&tw->tw_node);
	/*
	 * We cannot call inet_twsk_put() ourselves under the lock;
	 * the caller must call it for us.
	 */
	return 1;
}

/**
 *	inet_twsk_bind_unhash - unhash a timewait socket from bind hash
 *	@tw: timewait socket
 *	@hashinfo: hashinfo pointer
 *
 *	unhash a timewait socket from bind hash, if hashed.
 *	bind hash lock must be held by caller.
 *	Returns 1 if caller should call inet_twsk_put() after lock release.
 */
int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
			  struct inet_hashinfo *hashinfo)
{
	struct inet_bind_bucket *tb = tw->tw_tb;

	if (!tb)
		return 0;

	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	/*
	 * We cannot call inet_twsk_put() ourselves under the lock;
	 * the caller must call it for us.
	 */
	return 1;
}

/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
			     struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	int refcnt;
	/* Unlink from established hashes. */
	spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

	spin_lock(lock);
	refcnt = inet_twsk_unhash(tw);
	spin_unlock(lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
			hashinfo->bhash_size)];

	spin_lock(&bhead->lock);
	refcnt += inet_twsk_bind_unhash(tw, hashinfo);
	spin_unlock(&bhead->lock);

#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
	}
#endif
	while (refcnt) {
		inet_twsk_put(tw);
		refcnt--;
	}
}
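/*
 * Final free of a timewait socket: runs the protocol destructor, drops
 * the netns reference, returns the object to its slab cache and releases
 * the module reference taken in inet_twsk_alloc().  Reached only via
 * inet_twsk_put() when the last reference goes away.
 */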
static noinline void inet_twsk_free(struct inet_timewait_sock *tw)
{
	struct module *owner = tw->tw_prot->owner;
	twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
	pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
#endif
	release_net(twsk_net(tw));
	kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
	module_put(owner);
}
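/* Drop one reference on @tw; frees it once the refcount reaches zero. */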
void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (atomic_dec_and_test(&tw->tw_refcnt))
		inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);

/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;
	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note that any socket with inet->num != 0 MUST be bound in the
	   binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
			hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	WARN_ON(!icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	spin_lock(lock);

	/*
	 * Step 2: Hash TW into TIMEWAIT chain.
	 * Should be done before removing sk from established chain
	 * because readers are lockless and search established first.
	 */
	inet_twsk_add_node_rcu(tw, &ehead->twchain);

	/* Step 3: Remove SK from established hash. */
	if (__sk_nulls_del_node_init_rcu(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	/*
	 * Notes :
	 * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
	 * - We add one reference for the bhash link
	 * - We add one reference for the ehash link
	 * - We add one reference for the caller, who is expected to
	 *   release it with inet_twsk_put()
	 * - We want this refcnt update done before allowing other
	 *   threads to find this tw in ehash chain.
	 */
	atomic_add(1 + 1 + 1, &tw->tw_refcnt);

	spin_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);

struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 GFP_ATOMIC);
	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		kmemcheck_annotate_bitfield(tw, flags);

		/* Give us an identity. */
		tw->tw_daddr	    = inet->inet_daddr;
		tw->tw_rcv_saddr    = inet->inet_rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_tos	    = inet->tos;
		tw->tw_num	    = inet->inet_num;
		tw->tw_state	    = TCP_TIME_WAIT;
		tw->tw_substate	    = state;
		tw->tw_sport	    = inet->inet_sport;
		tw->tw_dport	    = inet->inet_dport;
		tw->tw_family	    = sk->sk_family;
		tw->tw_reuse	    = sk->sk_reuse;
		tw->tw_hash	    = sk->sk_hash;
		tw->tw_ipv6only	    = 0;
		tw->tw_transparent  = inet->transparent;
		tw->tw_prot	    = sk->sk_prot_creator;
		twsk_net_set(tw, hold_net(sock_net(sk)));
		/*
		 * Because we use RCU lookups, we should not set tw_refcnt
		 * to a non-null value before everything is set up for this
		 * timewait socket.
		 */
		atomic_set(&tw->tw_refcnt, 0);
		inet_twsk_dead_node_init(tw);
		__module_get(tw->tw_prot->owner);
	}

	return tw;
}
EXPORT_SYMBOL_GPL(inet_twsk_alloc);

/* Returns non-zero if quota exceeded.  */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version where the lock
	 * was released after detaching the chain. It was racy,
	 * because tw buckets are scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
#endif
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITED, killed);
#endif
	return ret;
}
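/*
 * Slow timer handler for the death row: reaps the current twkill slot,
 * and if the per-run quota was exceeded, flags the slot and defers the
 * remainder to the twkill_work workqueue.
 */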
void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left?  */
		if (twdr->tw_count)
			need_timer = 1;
		twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	}
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_hangman);
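/*
 * Workqueue handler that drains the slots inet_twdr_hangman() could not
 * finish within its quota, rescheduling voluntarily between batches.
 */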
void inet_twdr_twkill_work(struct work_struct *work)
{
	struct inet_timewait_death_row *twdr =
		container_of(work, struct inet_timewait_death_row, twkill_work);
	int i;

	BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
			(sizeof(twdr->thread_slots) * 8));

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}
EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);

/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}
EXPORT_SYMBOL(inet_twsk_deschedule);

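/*
 * (Re)schedule the death of @tw.  Short timeouts are placed on the
 * fine-grained recycle wheel (twcal_row, driven by twcal_timer); longer
 * ones on the coarse twkill wheel (cells, driven by tw_timer).
 */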
void inet_twsk_schedule(struct inet_timewait_sock *tw,
		       struct inet_timewait_death_row *twdr,
		       const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost (probability of such event
	 * is p^(N+1), where p is the probability to lose a single packet and
	 * time to detect the loss is about RTO*(2^N - 1) with exponential
	 * backoff). Normal timewait length is calculated so that we
	 * wait at least for one retransmitted FIN (maximal RTO is 120sec).
	 * [ BTW Linux, following BSD, violates this requirement by waiting
	 *   only for 60sec; we should wait at least for 240 secs.
	 *   Well, 240 consumes too much of resources 8)
	 * ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if peer understands PAWS, we
	 * kill tw bucket after 3.5*RTO (it is important that this number
	 * is greater than TS tick!) and detect old duplicates with help
	 * of PAWS.
	 */
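	/* Round the timeout up to a whole number of recycle ticks. */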
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;

	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = DIV_ROUND_UP(timeo, twdr->period);
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					      (slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twsk_schedule);
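/*
 * Timer handler for the fine-grained recycle wheel: walks twcal_row from
 * the current hand, kills every socket whose deadline has passed, and
 * re-arms the timer if pending entries remain in a future slot.
 */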
void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
#ifdef CONFIG_NET_NS
				NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
#endif
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
#ifndef CONFIG_NET_NS
	NET_ADD_STATS_BH(&init_net, LINUX_MIB_TIMEWAITKILLED, killed);
#endif
	spin_unlock(&twdr->death_lock);
}
EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);
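/*
 * Kill every timewait socket of @family whose network namespace is dead
 * (netns refcount already zero); typically invoked when a network
 * namespace exits, once no new timewait sockets can be created in it.
 */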
void inet_twsk_purge(struct inet_hashinfo *hashinfo,
		     struct inet_timewait_death_row *twdr, int family)
{
	struct inet_timewait_sock *tw;
	struct sock *sk;
	struct hlist_nulls_node *node;
	unsigned int slot;

	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
restart_rcu:
		rcu_read_lock();
restart:
		sk_nulls_for_each_rcu(sk, node, &head->twchain) {
			tw = inet_twsk(sk);
			if ((tw->tw_family != family) ||
				atomic_read(&twsk_net(tw)->count))
				continue;

			if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
				continue;

			if (unlikely((tw->tw_family != family) ||
				     atomic_read(&twsk_net(tw)->count))) {
				inet_twsk_put(tw);
				goto restart;
			}

			rcu_read_unlock();
			local_bh_disable();
			inet_twsk_deschedule(tw, twdr);
			local_bh_enable();
			inet_twsk_put(tw);
			goto restart_rcu;
		}
		/* If the nulls value we got at the end of this lookup is
		 * not the expected one, we must restart lookup.
		 * We probably met an item that was moved to another chain.
		 */
		if (get_nulls_value(node) != slot)
			goto restart;
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(inet_twsk_purge);
