/* Structure dynamic extension infrastructure
 * Copyright (C) 2004 Rusty Russell IBM Corporation
 * Copyright (C) 2007 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (C) 2007 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack_extend.h>

static struct nf_ct_ext_type __rcu *nf_ct_ext_types[NF_CT_EXT_NUM];
static DEFINE_MUTEX(nf_ct_ext_type_mutex);

void __nf_ct_ext_destroy(struct nf_conn *ct)
{
	unsigned int i;
	struct nf_ct_ext_type *t;
	struct nf_ct_ext *ext = ct->ext;

	for (i = 0; i < NF_CT_EXT_NUM; i++) {
		if (!__nf_ct_ext_exist(ext, i))
			continue;

		rcu_read_lock();
		t = rcu_dereference(nf_ct_ext_types[i]);

		/* The nf_ct_ext_type might already have been unregistered.
		 * That is, each extension type is responsible for cleaning
		 * up its private area in all conntracks when it is
		 * unregistered.
		 */
		if (t && t->destroy)
			t->destroy(ct);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(__nf_ct_ext_destroy);
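
/* Illustrative sketch (not part of this file): an extension type that
 * pins another object would release it from its ->destroy callback,
 * which __nf_ct_ext_destroy() above invokes for every extension still
 * present on the conntrack.  struct my_ext, NF_CT_EXT_MY and
 * my_object_put() are hypothetical names, and nf_ct_ext_find() assumes
 * the usual NF_CT_EXT_MY_TYPE mapping convention from
 * <net/netfilter/nf_conntrack_extend.h>.
 *
 *	struct my_ext {
 *		struct my_object *obj;
 *	};
 *
 *	static void my_ext_destroy(struct nf_conn *ct)
 *	{
 *		struct my_ext *e = nf_ct_ext_find(ct, NF_CT_EXT_MY);
 *
 *		if (e && e->obj)
 *			my_object_put(e->obj);
 *	}
 */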

static void *
nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
{
	unsigned int off, len;
	struct nf_ct_ext_type *t;
	size_t alloc_size;

	rcu_read_lock();
	t = rcu_dereference(nf_ct_ext_types[id]);
	BUG_ON(t == NULL);
	off = ALIGN(sizeof(struct nf_ct_ext), t->align);
	len = off + t->len;
	alloc_size = t->alloc_size;
	rcu_read_unlock();

	*ext = kzalloc(alloc_size, gfp);
	if (!*ext)
		return NULL;

	(*ext)->offset[id] = off;
	(*ext)->len = len;

	return (void *)(*ext) + off;
}
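
/* Worked example of the layout arithmetic above, with illustrative
 * figures only: if sizeof(struct nf_ct_ext) were 24 and the type had
 * .align = 8 and .len = 16, then off = ALIGN(24, 8) = 24 and
 * len = 24 + 16 = 40, i.e. the private area starts 24 bytes into the
 * allocation.  alloc_size may exceed len when other types with
 * NF_CT_EXT_F_PREALLOC set have reserved room as well; see
 * update_alloc_size() below.
 */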

void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
{
	struct nf_ct_ext *old, *new;
	int i, newlen, newoff;
	struct nf_ct_ext_type *t;

	/* Conntrack must not be confirmed to avoid races on reallocation. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	old = ct->ext;
	if (!old)
		return nf_ct_ext_create(&ct->ext, id, gfp);

	if (__nf_ct_ext_exist(old, id))
		return NULL;

	rcu_read_lock();
	t = rcu_dereference(nf_ct_ext_types[id]);
	BUG_ON(t == NULL);

	newoff = ALIGN(old->len, t->align);
	newlen = newoff + t->len;
	rcu_read_unlock();

	new = __krealloc(old, newlen, gfp);
	if (!new)
		return NULL;

	if (new != old) {
		for (i = 0; i < NF_CT_EXT_NUM; i++) {
			if (!__nf_ct_ext_exist(old, i))
				continue;

			rcu_read_lock();
			t = rcu_dereference(nf_ct_ext_types[i]);
			if (t && t->move)
				t->move((void *)new + new->offset[i],
					(void *)old + old->offset[i]);
			rcu_read_unlock();
		}
		kfree_rcu(old, rcu);
		ct->ext = new;
	}

	new->offset[id] = newoff;
	new->len = newlen;
	memset((void *)new + newoff, 0, newlen - newoff);
	return (void *)new + newoff;
}
EXPORT_SYMBOL(__nf_ct_ext_add);
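
/* Typical caller pattern, as a sketch: extensions are attached while the
 * conntrack is still unconfirmed (per the assertion above), usually via
 * the nf_ct_ext_add() wrapper from <net/netfilter/nf_conntrack_extend.h>,
 * which casts the result to the extension's type.  NF_CT_EXT_MY and
 * struct my_ext are hypothetical names.
 *
 *	struct my_ext *e;
 *
 *	e = nf_ct_ext_add(ct, NF_CT_EXT_MY, GFP_ATOMIC);
 *	if (e == NULL)
 *		return;
 *	e->counter = 0;
 *
 * The new area is already zeroed (by kzalloc() or the memset() above),
 * so explicit re-initialization such as the last line is optional.
 */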

static void update_alloc_size(struct nf_ct_ext_type *type)
{
	int i, j;
	struct nf_ct_ext_type *t1, *t2;
	enum nf_ct_ext_id min = 0, max = NF_CT_EXT_NUM - 1;

	/* Unless this type is preallocated, only its own alloc_size
	   needs updating. */
	if ((type->flags & NF_CT_EXT_F_PREALLOC) == 0) {
		min = type->id;
		max = type->id;
	}

	/* This assumes that extended areas in conntrack for the types
	   whose NF_CT_EXT_F_PREALLOC bit is set are allocated in order */
	for (i = min; i <= max; i++) {
		t1 = rcu_dereference_protected(nf_ct_ext_types[i],
				lockdep_is_held(&nf_ct_ext_type_mutex));
		if (!t1)
			continue;

		t1->alloc_size = ALIGN(sizeof(struct nf_ct_ext), t1->align) +
				 t1->len;
		for (j = 0; j < NF_CT_EXT_NUM; j++) {
			t2 = rcu_dereference_protected(nf_ct_ext_types[j],
				lockdep_is_held(&nf_ct_ext_type_mutex));
			if (t2 == NULL || t2 == t1 ||
			    (t2->flags & NF_CT_EXT_F_PREALLOC) == 0)
				continue;

			t1->alloc_size = ALIGN(t1->alloc_size, t2->align)
					 + t2->len;
		}
	}
}
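
/* Worked example of the sizing above, with illustrative figures only:
 * suppose sizeof(struct nf_ct_ext) were 24 and two preallocated types
 * A (.len = 16, .align = 8) and B (.len = 4, .align = 4) were
 * registered.  For A this computes
 *
 *	alloc_size = ALIGN(24, 8) + 16 = 40
 *	alloc_size = ALIGN(40, 4) + 4  = 44
 *
 * so the initial allocation made by nf_ct_ext_create() already has room
 * for B, and __krealloc() in __nf_ct_ext_add() can extend in place
 * rather than move the block.
 */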

/* This MUST be called in process context. */
int nf_ct_extend_register(struct nf_ct_ext_type *type)
{
	int ret = 0;

	mutex_lock(&nf_ct_ext_type_mutex);
	if (nf_ct_ext_types[type->id]) {
		ret = -EBUSY;
		goto out;
	}

	/* This ensures that nf_ct_ext_create() can allocate enough area
	   before updating alloc_size */
	type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
			   + type->len;
	rcu_assign_pointer(nf_ct_ext_types[type->id], type);
	update_alloc_size(type);
out:
	mutex_unlock(&nf_ct_ext_type_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_extend_register);
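
/* Registration sketch, modeled on in-tree users such as
 * nf_conntrack_acct.c; struct my_ext and NF_CT_EXT_MY are hypothetical:
 *
 *	static struct nf_ct_ext_type my_extend __read_mostly = {
 *		.len	= sizeof(struct my_ext),
 *		.align	= __alignof__(struct my_ext),
 *		.id	= NF_CT_EXT_MY,
 *	};
 *
 *	static int __init my_ext_init(void)
 *	{
 *		return nf_ct_extend_register(&my_extend);
 *	}
 *
 * Setting NF_CT_EXT_F_PREALLOC in .flags would additionally reserve room
 * for this type in every conntrack's initial extension allocation.
 */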

/* This MUST be called in process context. */
void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
{
	mutex_lock(&nf_ct_ext_type_mutex);
	RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
	update_alloc_size(type);
	mutex_unlock(&nf_ct_ext_type_mutex);
	rcu_barrier(); /* Wait for completion of call_rcu() callbacks */
}
EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
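
/* Teardown sketch, pairing the registration example above: the module
 * exit path unregisters the type after cleaning up its private area in
 * any remaining conntracks, as noted in __nf_ct_ext_destroy().  The
 * rcu_barrier() above then waits for kfree_rcu() callbacks queued by
 * __nf_ct_ext_add() before module code can be freed.
 *
 *	static void __exit my_ext_fini(void)
 *	{
 *		nf_ct_extend_unregister(&my_extend);
 *	}
 */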