/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__

#include <linux/rhashtable-types.h>
#include <linux/completion.h>
#include <linux/in6.h>
#include <linux/rbtree_types.h>
#include <linux/refcount.h>
#include <net/dropreason-core.h>

/* Per netns frag queues directory */
struct fqdir {
	/* sysctls */
	long			high_thresh;
	long			low_thresh;
	int			timeout;
	int			max_dist;
	struct inet_frags	*f;
	struct net		*net;
	bool			dead;

	struct rhashtable       rhashtable ____cacheline_aligned_in_smp;

	/* Keep atomic mem on separate cachelines in structs that include it */
	atomic_long_t		mem ____cacheline_aligned_in_smp;
	struct work_struct	destroy_work;
	struct llist_node	free_list;
};

/**
 * enum: fragment queue flags
 *
 * @INET_FRAG_FIRST_IN: first fragment has arrived
 * @INET_FRAG_LAST_IN: final fragment has arrived
 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
 * @INET_FRAG_HASH_DEAD: inet_frag_kill() has not removed fq from rhashtable
 * @INET_FRAG_DROP: if skbs must be dropped (instead of being consumed)
 */
enum {
	INET_FRAG_FIRST_IN	= BIT(0),
	INET_FRAG_LAST_IN	= BIT(1),
	INET_FRAG_COMPLETE	= BIT(2),
	INET_FRAG_HASH_DEAD	= BIT(3),
	INET_FRAG_DROP		= BIT(4),
};
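
/*
 * Illustrative only (not part of this API): a reassembly path typically
 * treats the datagram as complete once both INET_FRAG_FIRST_IN and
 * INET_FRAG_LAST_IN are set and every byte has arrived (q->meat == q->len):
 *
 *	if (q->flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 *	    q->meat == q->len)
 *		reassemble(q);		// "reassemble" is a hypothetical helper
 */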

struct frag_v4_compare_key {
	__be32		saddr;
	__be32		daddr;
	u32		user;
	u32		vif;
	__be16		id;
	u16		protocol;
};

struct frag_v6_compare_key {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	u32		user;
	__be32		id;
	u32		iif;
};

/**
 * struct inet_frag_queue - fragment queue
 *
 * @node: rhash node
 * @key: keys identifying this frag.
 * @timer: queue expiration timer
 * @lock: spinlock protecting this frag
 * @refcnt: reference count of the queue
 * @rb_fragments: received fragments rb-tree root
 * @fragments_tail: received fragments tail
 * @last_run_head: the head of the last "run". see ip_fragment.c
 * @stamp: timestamp of the last received fragment
 * @len: total length of the original datagram
 * @meat: length of received fragments so far
 * @mono_delivery_time: stamp has a mono delivery time (EDT)
 * @flags: fragment queue flags
 * @max_size: maximum received fragment size
 * @fqdir: pointer to struct fqdir
 * @rcu: rcu head for deferred freeing
 */
struct inet_frag_queue {
	struct rhash_head	node;
	union {
		struct frag_v4_compare_key v4;
		struct frag_v6_compare_key v6;
	} key;
	struct timer_list	timer;
	spinlock_t		lock;
	refcount_t		refcnt;
	struct rb_root		rb_fragments;
	struct sk_buff		*fragments_tail;
	struct sk_buff		*last_run_head;
	ktime_t			stamp;
	int			len;
	int			meat;
	u8			mono_delivery_time;
	__u8			flags;
	u16			max_size;
	struct fqdir		*fqdir;
	struct rcu_head		rcu;
};

struct inet_frags {
	unsigned int		qsize;

	void			(*constructor)(struct inet_frag_queue *q,
					       const void *arg);
	void			(*destructor)(struct inet_frag_queue *);
	void			(*frag_expire)(struct timer_list *t);
	struct kmem_cache	*frags_cachep;
	const char		*frags_cache_name;
	struct rhashtable_params rhash_params;
	refcount_t		refcnt;
	struct completion	completion;
};

int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);
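
/*
 * Registration sketch, not taken from any real protocol: a user of this API
 * fills in a struct inet_frags with its callbacks and registers it once at
 * init time.  Every "foo_*" name below is hypothetical; only the inet_frags
 * symbols come from this header.
 *
 *	static struct inet_frags foo_frags;
 *
 *	static int __init foo_frag_module_init(void)
 *	{
 *		foo_frags.constructor	   = foo_frag_constructor;
 *		foo_frags.destructor	   = foo_frag_destructor;
 *		foo_frags.frag_expire	   = foo_frag_expire;
 *		foo_frags.qsize		   = sizeof(struct foo_frag_queue);
 *		foo_frags.frags_cache_name = "foo_frag";
 *		foo_frags.rhash_params	   = foo_rhash_params;
 *		return inet_frags_init(&foo_frags);
 *	}
 */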

int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net);

static inline void fqdir_pre_exit(struct fqdir *fqdir)
{
	/* Prevent creation of new frags.
	 * Pairs with READ_ONCE() in inet_frag_find().
	 */
	WRITE_ONCE(fqdir->high_thresh, 0);

	/* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire()
	 * and ip6frag_expire_frag_queue().
	 */
	WRITE_ONCE(fqdir->dead, true);
}
void fqdir_exit(struct fqdir *fqdir);
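
/*
 * Suggested teardown ordering (the pernet callbacks below are hypothetical):
 * run fqdir_pre_exit() first so that no new frag queues are created and the
 * timer/kill paths observe ->dead, then fqdir_exit() to release what is left.
 *
 *	static void foo_frags_pre_exit_net(struct net *net)
 *	{
 *		fqdir_pre_exit(net->foo.fqdir);		// hypothetical per-netns field
 *	}
 *
 *	static void foo_frags_exit_net(struct net *net)
 *	{
 *		fqdir_exit(net->foo.fqdir);
 *	}
 */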

void inet_frag_kill(struct inet_frag_queue *q);
void inet_frag_destroy(struct inet_frag_queue *q);
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key);

/* Free all skbs in the queue; return the sum of their truesizes. */
unsigned int inet_frag_rbtree_purge(struct rb_root *root,
				    enum skb_drop_reason reason);

static inline void inet_frag_put(struct inet_frag_queue *q)
{
	if (refcount_dec_and_test(&q->refcnt))
		inet_frag_destroy(q);
}
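
/*
 * Minimal lookup/use/release sketch, assuming inet_frag_find() hands back a
 * queue with a reference held for the caller; drop it with inet_frag_put()
 * when done, and use inet_frag_kill() once the queue is complete or expired.
 *
 *	q = inet_frag_find(fqdir, &key);
 *	if (q) {
 *		spin_lock(&q->lock);
 *		// ... queue the fragment, inet_frag_kill(q) on completion ...
 *		spin_unlock(&q->lock);
 *		inet_frag_put(q);
 *	}
 */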

/* Memory Tracking Functions. */

static inline long frag_mem_limit(const struct fqdir *fqdir)
{
	return atomic_long_read(&fqdir->mem);
}

static inline void sub_frag_mem_limit(struct fqdir *fqdir, long val)
{
	atomic_long_sub(val, &fqdir->mem);
}

static inline void add_frag_mem_limit(struct fqdir *fqdir, long val)
{
	atomic_long_add(val, &fqdir->mem);
}
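
/*
 * Accounting sketch, assuming truesize-based charging: memory is charged to
 * fqdir->mem while an skb sits in a queue and released when it leaves, so
 * frag_mem_limit() can be compared against the sysctl thresholds above.
 *
 *	add_frag_mem_limit(q->fqdir, skb->truesize);	// fragment queued
 *	...
 *	sub_frag_mem_limit(q->fqdir, skb->truesize);	// fragment removed
 *	if (frag_mem_limit(q->fqdir) > q->fqdir->high_thresh)
 *		goto over_budget;	// refuse or evict before queueing more
 */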

/* RFC 3168 support:
 * We want to check ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

extern const u8 ip_frag_ecn_table[16];
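
/*
 * Usage sketch, assuming a per-queue accumulator (here "q_ecn") that ORs the
 * IPFRAG_ECN_* value of every fragment; the table maps that OR onto an
 * outgoing ECN codepoint, and an all-ones entry flags combinations that
 * should make reassembly fail.  "ipfrag_ecn_of" is a hypothetical helper.
 *
 *	q_ecn |= ipfrag_ecn_of(skb);
 *	...
 *	ecn = ip_frag_ecn_table[q_ecn];
 *	if (ecn == 0xff)
 *		goto drop;		// invalid ECN combination
 */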

/* Return values of inet_frag_queue_insert() */
#define IPFRAG_OK	0
#define IPFRAG_DUP	1
#define IPFRAG_OVERLAP	2
int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
			   int offset, int end);
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
			      struct sk_buff *parent);
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
			    void *reasm_data, bool try_coalesce);
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
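
/*
 * End-to-end sketch of the insert/reassembly helpers above (hedged; error
 * handling and protocol specifics omitted, "prev_tail" is the queue tail
 * sampled before the insert and "discard"/"fail" are hypothetical labels):
 *
 *	err = inet_frag_queue_insert(q, skb, offset, end);
 *	if (err == IPFRAG_DUP || err == IPFRAG_OVERLAP)
 *		goto discard;
 *
 *	if (q->flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
 *	    q->meat == q->len) {
 *		void *reasm_data = inet_frag_reasm_prepare(q, skb, prev_tail);
 *
 *		if (!reasm_data)
 *			goto fail;
 *		inet_frag_reasm_finish(q, skb, reasm_data, true);
 *		// skb now carries the reassembled datagram
 *	}
 */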

#endif