1 #include <linux/kernel.h>
2 #include <linux/init.h>
3 #include <linux/module.h>
4 #include <linux/netfilter.h>
5 #include <linux/rhashtable.h>
6 #include <linux/netdevice.h>
7 #include <linux/tc_act/tc_csum.h>
8 #include <net/flow_offload.h>
9 #include <net/netfilter/nf_flow_table.h>
10 #include <net/netfilter/nf_tables.h>
11 #include <net/netfilter/nf_conntrack.h>
12 #include <net/netfilter/nf_conntrack_acct.h>
13 #include <net/netfilter/nf_conntrack_core.h>
14 #include <net/netfilter/nf_conntrack_tuple.h>
15 
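/*
 * One unbound workqueue per offload command: rule insertion, rule removal
 * and stats refresh. Work items are allocated in nf_flow_offload_work_alloc()
 * and dispatched from flow_offload_queue_work() below.
 */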
16 static struct workqueue_struct *nf_flow_offload_add_wq;
17 static struct workqueue_struct *nf_flow_offload_del_wq;
18 static struct workqueue_struct *nf_flow_offload_stats_wq;
19 
20 struct flow_offload_work {
21 	struct list_head	list;
22 	enum flow_cls_command	cmd;
23 	struct nf_flowtable	*flowtable;
24 	struct flow_offload	*flow;
25 	struct work_struct	work;
26 };
27 
28 #define NF_FLOW_DISSECTOR(__match, __type, __field)	\
29 	(__match)->dissector.offset[__type] =		\
30 		offsetof(struct nf_flow_key, __field)
31 
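/*
 * If the route of the other direction carries transmit tunnel metadata,
 * match on the tunnel key id and the outer addresses. Source and destination
 * are swapped because the rule matches packets received over the tunnel.
 */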
32 static void nf_flow_rule_lwt_match(struct nf_flow_match *match,
33 				   struct ip_tunnel_info *tun_info)
34 {
35 	struct nf_flow_key *mask = &match->mask;
36 	struct nf_flow_key *key = &match->key;
37 	unsigned int enc_keys;
38 
39 	if (!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX))
40 		return;
41 
42 	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_CONTROL, enc_control);
43 	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
44 	key->enc_key_id.keyid = tunnel_id_to_key32(tun_info->key.tun_id);
45 	mask->enc_key_id.keyid = 0xffffffff;
46 	enc_keys = BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
47 		   BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL);
48 
49 	if (ip_tunnel_info_af(tun_info) == AF_INET) {
50 		NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
51 				  enc_ipv4);
52 		key->enc_ipv4.src = tun_info->key.u.ipv4.dst;
53 		key->enc_ipv4.dst = tun_info->key.u.ipv4.src;
54 		if (key->enc_ipv4.src)
55 			mask->enc_ipv4.src = 0xffffffff;
56 		if (key->enc_ipv4.dst)
57 			mask->enc_ipv4.dst = 0xffffffff;
58 		enc_keys |= BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
59 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
60 	} else {
61 		memcpy(&key->enc_ipv6.src, &tun_info->key.u.ipv6.dst,
62 		       sizeof(struct in6_addr));
63 		memcpy(&key->enc_ipv6.dst, &tun_info->key.u.ipv6.src,
64 		       sizeof(struct in6_addr));
65 		if (memcmp(&key->enc_ipv6.src, &in6addr_any,
66 			   sizeof(struct in6_addr)))
67 			memset(&mask->enc_ipv6.src, 0xff,
68 			       sizeof(struct in6_addr));
69 		if (memcmp(&key->enc_ipv6.dst, &in6addr_any,
70 			   sizeof(struct in6_addr)))
71 			memset(&mask->enc_ipv6.dst, 0xff,
72 			       sizeof(struct in6_addr));
73 		enc_keys |= BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS);
74 		key->enc_control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
75 	}
76 
77 	match->dissector.used_keys |= enc_keys;
78 }
79 
80 static void nf_flow_rule_vlan_match(struct flow_dissector_key_vlan *key,
81 				    struct flow_dissector_key_vlan *mask,
82 				    u16 vlan_id, __be16 proto)
83 {
84 	key->vlan_id = vlan_id;
85 	mask->vlan_id = VLAN_VID_MASK;
86 	key->vlan_tpid = proto;
87 	mask->vlan_tpid = 0xffff;
88 }
89 
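/*
 * Translate a flow_offload_tuple into a flow dissector match: metadata
 * (ingress ifindex), basic/control keys, IPv4 or IPv6 addresses, TCP flags
 * and L4 ports, plus any VLAN/QinQ tags not flagged as present on the
 * ingress device and, if available, the tunnel keys from the reverse route.
 */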
90 static int nf_flow_rule_match(struct nf_flow_match *match,
91 			      const struct flow_offload_tuple *tuple,
92 			      struct dst_entry *other_dst)
93 {
94 	struct nf_flow_key *mask = &match->mask;
95 	struct nf_flow_key *key = &match->key;
96 	struct ip_tunnel_info *tun_info;
97 	bool vlan_encap = false;
98 
99 	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_META, meta);
100 	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CONTROL, control);
101 	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_BASIC, basic);
102 	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
103 	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
104 	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_TCP, tcp);
105 	NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_PORTS, tp);
106 
107 	if (other_dst && other_dst->lwtstate) {
108 		tun_info = lwt_tun_info(other_dst->lwtstate);
109 		nf_flow_rule_lwt_match(match, tun_info);
110 	}
111 
112 	if (tuple->xmit_type == FLOW_OFFLOAD_XMIT_TC)
113 		key->meta.ingress_ifindex = tuple->tc.iifidx;
114 	else
115 		key->meta.ingress_ifindex = tuple->iifidx;
116 
117 	mask->meta.ingress_ifindex = 0xffffffff;
118 
119 	if (tuple->encap_num > 0 && !(tuple->in_vlan_ingress & BIT(0)) &&
120 	    tuple->encap[0].proto == htons(ETH_P_8021Q)) {
121 		NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_VLAN, vlan);
122 		nf_flow_rule_vlan_match(&key->vlan, &mask->vlan,
123 					tuple->encap[0].id,
124 					tuple->encap[0].proto);
125 		vlan_encap = true;
126 	}
127 
128 	if (tuple->encap_num > 1 && !(tuple->in_vlan_ingress & BIT(1)) &&
129 	    tuple->encap[1].proto == htons(ETH_P_8021Q)) {
130 		if (vlan_encap) {
131 			NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_CVLAN,
132 					  cvlan);
133 			nf_flow_rule_vlan_match(&key->cvlan, &mask->cvlan,
134 						tuple->encap[1].id,
135 						tuple->encap[1].proto);
136 		} else {
137 			NF_FLOW_DISSECTOR(match, FLOW_DISSECTOR_KEY_VLAN,
138 					  vlan);
139 			nf_flow_rule_vlan_match(&key->vlan, &mask->vlan,
140 						tuple->encap[1].id,
141 						tuple->encap[1].proto);
142 		}
143 	}
144 
145 	switch (tuple->l3proto) {
146 	case AF_INET:
147 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
148 		key->basic.n_proto = htons(ETH_P_IP);
149 		key->ipv4.src = tuple->src_v4.s_addr;
150 		mask->ipv4.src = 0xffffffff;
151 		key->ipv4.dst = tuple->dst_v4.s_addr;
152 		mask->ipv4.dst = 0xffffffff;
153 		break;
154 	case AF_INET6:
155 		key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
156 		key->basic.n_proto = htons(ETH_P_IPV6);
157 		key->ipv6.src = tuple->src_v6;
158 		memset(&mask->ipv6.src, 0xff, sizeof(mask->ipv6.src));
159 		key->ipv6.dst = tuple->dst_v6;
160 		memset(&mask->ipv6.dst, 0xff, sizeof(mask->ipv6.dst));
161 		break;
162 	default:
163 		return -EOPNOTSUPP;
164 	}
165 	mask->control.addr_type = 0xffff;
166 	match->dissector.used_keys |= BIT(key->control.addr_type);
167 	mask->basic.n_proto = 0xffff;
168 
169 	switch (tuple->l4proto) {
170 	case IPPROTO_TCP:
171 		key->tcp.flags = 0;
172 		mask->tcp.flags = cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16);
173 		match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
174 		break;
175 	case IPPROTO_UDP:
176 	case IPPROTO_GRE:
177 		break;
178 	default:
179 		return -EOPNOTSUPP;
180 	}
181 
182 	key->basic.ip_proto = tuple->l4proto;
183 	mask->basic.ip_proto = 0xff;
184 
185 	match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_META) |
186 				      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
187 				      BIT(FLOW_DISSECTOR_KEY_BASIC);
188 
189 	switch (tuple->l4proto) {
190 	case IPPROTO_TCP:
191 	case IPPROTO_UDP:
192 		key->tp.src = tuple->src_port;
193 		mask->tp.src = 0xffff;
194 		key->tp.dst = tuple->dst_port;
195 		mask->tp.dst = 0xffff;
196 
197 		match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);
198 		break;
199 	}
200 
201 	return 0;
202 }
203 
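/*
 * Mangle actions rewrite one 32-bit word at @offset within the header
 * selected by @htype: bits cleared in @mask are replaced by the
 * corresponding bits of @value.
 */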
204 static void flow_offload_mangle(struct flow_action_entry *entry,
205 				enum flow_action_mangle_base htype, u32 offset,
206 				const __be32 *value, const __be32 *mask)
207 {
208 	entry->id = FLOW_ACTION_MANGLE;
209 	entry->mangle.htype = htype;
210 	entry->mangle.offset = offset;
211 	memcpy(&entry->mangle.mask, mask, sizeof(u32));
212 	memcpy(&entry->mangle.val, value, sizeof(u32));
213 }
214 
215 static inline struct flow_action_entry *
216 flow_action_entry_next(struct nf_flow_rule *flow_rule)
217 {
218 	int i = flow_rule->rule->action.num_entries++;
219 
220 	return &flow_rule->rule->action.entries[i];
221 }
222 
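/*
 * Rewrite the Ethernet source address. The 6-byte MAC straddles two 32-bit
 * words of the Ethernet header, hence two mangle entries at offsets 4 and 8.
 * For the neigh xmit path the address comes from the device behind the
 * reverse direction's ifindex, for direct xmit from the cached h_source.
 */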
223 static int flow_offload_eth_src(struct net *net,
224 				const struct flow_offload *flow,
225 				enum flow_offload_tuple_dir dir,
226 				struct nf_flow_rule *flow_rule)
227 {
228 	struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
229 	struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
230 	const struct flow_offload_tuple *other_tuple, *this_tuple;
231 	struct net_device *dev = NULL;
232 	const unsigned char *addr;
233 	u32 mask, val;
234 	u16 val16;
235 
236 	this_tuple = &flow->tuplehash[dir].tuple;
237 
238 	switch (this_tuple->xmit_type) {
239 	case FLOW_OFFLOAD_XMIT_DIRECT:
240 		addr = this_tuple->out.h_source;
241 		break;
242 	case FLOW_OFFLOAD_XMIT_NEIGH:
243 		other_tuple = &flow->tuplehash[!dir].tuple;
244 		dev = dev_get_by_index(net, other_tuple->iifidx);
245 		if (!dev)
246 			return -ENOENT;
247 
248 		addr = dev->dev_addr;
249 		break;
250 	default:
251 		return -EOPNOTSUPP;
252 	}
253 
254 	mask = ~0xffff0000;
255 	memcpy(&val16, addr, 2);
256 	val = val16 << 16;
257 	flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
258 			    &val, &mask);
259 
260 	mask = ~0xffffffff;
261 	memcpy(&val, addr + 2, 4);
262 	flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
263 			    &val, &mask);
264 
265 	dev_put(dev);
266 
267 	return 0;
268 }
269 
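/*
 * Rewrite the Ethernet destination address. For the neigh xmit path it is
 * resolved via the neighbour entry of the cached route and the rule is
 * rejected while that entry is not NUD_VALID; for direct xmit the cached
 * h_dest is used.
 */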
270 static int flow_offload_eth_dst(struct net *net,
271 				const struct flow_offload *flow,
272 				enum flow_offload_tuple_dir dir,
273 				struct nf_flow_rule *flow_rule)
274 {
275 	struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
276 	struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
277 	const struct flow_offload_tuple *other_tuple, *this_tuple;
278 	const struct dst_entry *dst_cache;
279 	unsigned char ha[ETH_ALEN];
280 	struct neighbour *n;
281 	const void *daddr;
282 	u32 mask, val;
283 	u8 nud_state;
284 	u16 val16;
285 
286 	this_tuple = &flow->tuplehash[dir].tuple;
287 
288 	switch (this_tuple->xmit_type) {
289 	case FLOW_OFFLOAD_XMIT_DIRECT:
290 		ether_addr_copy(ha, this_tuple->out.h_dest);
291 		break;
292 	case FLOW_OFFLOAD_XMIT_NEIGH:
293 		other_tuple = &flow->tuplehash[!dir].tuple;
294 		daddr = &other_tuple->src_v4;
295 		dst_cache = this_tuple->dst_cache;
296 		n = dst_neigh_lookup(dst_cache, daddr);
297 		if (!n)
298 			return -ENOENT;
299 
300 		read_lock_bh(&n->lock);
301 		nud_state = n->nud_state;
302 		ether_addr_copy(ha, n->ha);
303 		read_unlock_bh(&n->lock);
304 		neigh_release(n);
305 
306 		if (!(nud_state & NUD_VALID))
307 			return -ENOENT;
308 		break;
309 	default:
310 		return -EOPNOTSUPP;
311 	}
312 
313 	mask = ~0xffffffff;
314 	memcpy(&val, ha, 4);
315 	flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 0,
316 			    &val, &mask);
317 
318 	mask = ~0x0000ffff;
319 	memcpy(&val16, ha + 4, 2);
320 	val = val16;
321 	flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
322 			    &val, &mask);
323 
324 	return 0;
325 }
326 
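/*
 * IPv4 NAT mangles: the translated address is taken from the tuple of the
 * opposite direction, rewriting saddr in the original direction and daddr
 * in the reply direction (and the other way around for DNAT below).
 */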
327 static void flow_offload_ipv4_snat(struct net *net,
328 				   const struct flow_offload *flow,
329 				   enum flow_offload_tuple_dir dir,
330 				   struct nf_flow_rule *flow_rule)
331 {
332 	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
333 	u32 mask = ~htonl(0xffffffff);
334 	__be32 addr;
335 	u32 offset;
336 
337 	switch (dir) {
338 	case FLOW_OFFLOAD_DIR_ORIGINAL:
339 		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v4.s_addr;
340 		offset = offsetof(struct iphdr, saddr);
341 		break;
342 	case FLOW_OFFLOAD_DIR_REPLY:
343 		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v4.s_addr;
344 		offset = offsetof(struct iphdr, daddr);
345 		break;
346 	default:
347 		return;
348 	}
349 
350 	flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
351 			    &addr, &mask);
352 }
353 
354 static void flow_offload_ipv4_dnat(struct net *net,
355 				   const struct flow_offload *flow,
356 				   enum flow_offload_tuple_dir dir,
357 				   struct nf_flow_rule *flow_rule)
358 {
359 	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
360 	u32 mask = ~htonl(0xffffffff);
361 	__be32 addr;
362 	u32 offset;
363 
364 	switch (dir) {
365 	case FLOW_OFFLOAD_DIR_ORIGINAL:
366 		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v4.s_addr;
367 		offset = offsetof(struct iphdr, daddr);
368 		break;
369 	case FLOW_OFFLOAD_DIR_REPLY:
370 		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v4.s_addr;
371 		offset = offsetof(struct iphdr, saddr);
372 		break;
373 	default:
374 		return;
375 	}
376 
377 	flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP4, offset,
378 			    &addr, &mask);
379 }
380 
381 static void flow_offload_ipv6_mangle(struct nf_flow_rule *flow_rule,
382 				     unsigned int offset,
383 				     const __be32 *addr, const __be32 *mask)
384 {
385 	struct flow_action_entry *entry;
386 	int i;
387 
388 	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++) {
389 		entry = flow_action_entry_next(flow_rule);
390 		flow_offload_mangle(entry, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
391 				    offset + i * sizeof(u32), &addr[i], mask);
392 	}
393 }
394 
395 static void flow_offload_ipv6_snat(struct net *net,
396 				   const struct flow_offload *flow,
397 				   enum flow_offload_tuple_dir dir,
398 				   struct nf_flow_rule *flow_rule)
399 {
400 	u32 mask = ~htonl(0xffffffff);
401 	const __be32 *addr;
402 	u32 offset;
403 
404 	switch (dir) {
405 	case FLOW_OFFLOAD_DIR_ORIGINAL:
406 		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6.s6_addr32;
407 		offset = offsetof(struct ipv6hdr, saddr);
408 		break;
409 	case FLOW_OFFLOAD_DIR_REPLY:
410 		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6.s6_addr32;
411 		offset = offsetof(struct ipv6hdr, daddr);
412 		break;
413 	default:
414 		return;
415 	}
416 
417 	flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
418 }
419 
420 static void flow_offload_ipv6_dnat(struct net *net,
421 				   const struct flow_offload *flow,
422 				   enum flow_offload_tuple_dir dir,
423 				   struct nf_flow_rule *flow_rule)
424 {
425 	u32 mask = ~htonl(0xffffffff);
426 	const __be32 *addr;
427 	u32 offset;
428 
429 	switch (dir) {
430 	case FLOW_OFFLOAD_DIR_ORIGINAL:
431 		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6.s6_addr32;
432 		offset = offsetof(struct ipv6hdr, daddr);
433 		break;
434 	case FLOW_OFFLOAD_DIR_REPLY:
435 		addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6.s6_addr32;
436 		offset = offsetof(struct ipv6hdr, saddr);
437 		break;
438 	default:
439 		return;
440 	}
441 
442 	flow_offload_ipv6_mangle(flow_rule, offset, addr, &mask);
443 }
444 
445 static int flow_offload_l4proto(const struct flow_offload *flow)
446 {
447 	u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
448 	u8 type = 0;
449 
450 	switch (protonum) {
451 	case IPPROTO_TCP:
452 		type = FLOW_ACT_MANGLE_HDR_TYPE_TCP;
453 		break;
454 	case IPPROTO_UDP:
455 		type = FLOW_ACT_MANGLE_HDR_TYPE_UDP;
456 		break;
457 	default:
458 		break;
459 	}
460 
461 	return type;
462 }
463 
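/*
 * Port NAT mangles: the 16-bit port is packed into the upper or lower half
 * of the first 32-bit word of the TCP/UDP header, depending on whether the
 * source or the destination port is rewritten, so the offset is always 0.
 */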
464 static void flow_offload_port_snat(struct net *net,
465 				   const struct flow_offload *flow,
466 				   enum flow_offload_tuple_dir dir,
467 				   struct nf_flow_rule *flow_rule)
468 {
469 	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
470 	u32 mask, port;
471 	u32 offset;
472 
473 	switch (dir) {
474 	case FLOW_OFFLOAD_DIR_ORIGINAL:
475 		port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_port);
476 		offset = 0; /* offsetof(struct tcphdr, source); */
477 		port = htonl(port << 16);
478 		mask = ~htonl(0xffff0000);
479 		break;
480 	case FLOW_OFFLOAD_DIR_REPLY:
481 		port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_port);
482 		offset = 0; /* offsetof(struct tcphdr, dest); */
483 		port = htonl(port);
484 		mask = ~htonl(0xffff);
485 		break;
486 	default:
487 		return;
488 	}
489 
490 	flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
491 			    &port, &mask);
492 }
493 
494 static void flow_offload_port_dnat(struct net *net,
495 				   const struct flow_offload *flow,
496 				   enum flow_offload_tuple_dir dir,
497 				   struct nf_flow_rule *flow_rule)
498 {
499 	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
500 	u32 mask, port;
501 	u32 offset;
502 
503 	switch (dir) {
504 	case FLOW_OFFLOAD_DIR_ORIGINAL:
505 		port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_port);
506 		offset = 0; /* offsetof(struct tcphdr, dest); */
507 		port = htonl(port);
508 		mask = ~htonl(0xffff);
509 		break;
510 	case FLOW_OFFLOAD_DIR_REPLY:
511 		port = ntohs(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_port);
512 		offset = 0; /* offsetof(struct tcphdr, source); */
513 		port = htonl(port << 16);
514 		mask = ~htonl(0xffff0000);
515 		break;
516 	default:
517 		return;
518 	}
519 
520 	flow_offload_mangle(entry, flow_offload_l4proto(flow), offset,
521 			    &port, &mask);
522 }
523 
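/*
 * After IPv4 NAT mangles, request recalculation of the IP header checksum
 * and, for TCP/UDP flows, of the transport checksum.
 */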
524 static void flow_offload_ipv4_checksum(struct net *net,
525 				       const struct flow_offload *flow,
526 				       struct nf_flow_rule *flow_rule)
527 {
528 	u8 protonum = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.l4proto;
529 	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
530 
531 	entry->id = FLOW_ACTION_CSUM;
532 	entry->csum_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR;
533 
534 	switch (protonum) {
535 	case IPPROTO_TCP:
536 		entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_TCP;
537 		break;
538 	case IPPROTO_UDP:
539 		entry->csum_flags |= TCA_CSUM_UPDATE_FLAG_UDP;
540 		break;
541 	}
542 }
543 
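/*
 * Final action: redirect to the egress device, either the hardware ifindex
 * recorded for direct xmit or the reverse direction's ingress ifindex for
 * the neigh path. The device reference taken here is released in
 * __nf_flow_offload_destroy().
 */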
544 static void flow_offload_redirect(struct net *net,
545 				  const struct flow_offload *flow,
546 				  enum flow_offload_tuple_dir dir,
547 				  struct nf_flow_rule *flow_rule)
548 {
549 	const struct flow_offload_tuple *this_tuple, *other_tuple;
550 	struct flow_action_entry *entry;
551 	struct net_device *dev;
552 	int ifindex;
553 
554 	this_tuple = &flow->tuplehash[dir].tuple;
555 	switch (this_tuple->xmit_type) {
556 	case FLOW_OFFLOAD_XMIT_DIRECT:
557 		this_tuple = &flow->tuplehash[dir].tuple;
558 		ifindex = this_tuple->out.hw_ifidx;
559 		break;
560 	case FLOW_OFFLOAD_XMIT_NEIGH:
561 		other_tuple = &flow->tuplehash[!dir].tuple;
562 		ifindex = other_tuple->iifidx;
563 		break;
564 	default:
565 		return;
566 	}
567 
568 	dev = dev_get_by_index(net, ifindex);
569 	if (!dev)
570 		return;
571 
572 	entry = flow_action_entry_next(flow_rule);
573 	entry->id = FLOW_ACTION_REDIRECT;
574 	entry->dev = dev;
575 }
576 
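/*
 * Add tunnel encap (this direction) and decap (reverse direction, below)
 * actions when the corresponding cached route carries transmit tunnel
 * metadata.
 */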
577 static void flow_offload_encap_tunnel(const struct flow_offload *flow,
578 				      enum flow_offload_tuple_dir dir,
579 				      struct nf_flow_rule *flow_rule)
580 {
581 	const struct flow_offload_tuple *this_tuple;
582 	struct flow_action_entry *entry;
583 	struct dst_entry *dst;
584 
585 	this_tuple = &flow->tuplehash[dir].tuple;
586 	if (this_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
587 		return;
588 
589 	dst = this_tuple->dst_cache;
590 	if (dst && dst->lwtstate) {
591 		struct ip_tunnel_info *tun_info;
592 
593 		tun_info = lwt_tun_info(dst->lwtstate);
594 		if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX)) {
595 			entry = flow_action_entry_next(flow_rule);
596 			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
597 			entry->tunnel = tun_info;
598 		}
599 	}
600 }
601 
602 static void flow_offload_decap_tunnel(const struct flow_offload *flow,
603 				      enum flow_offload_tuple_dir dir,
604 				      struct nf_flow_rule *flow_rule)
605 {
606 	const struct flow_offload_tuple *other_tuple;
607 	struct flow_action_entry *entry;
608 	struct dst_entry *dst;
609 
610 	other_tuple = &flow->tuplehash[!dir].tuple;
611 	if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
612 		return;
613 
614 	dst = other_tuple->dst_cache;
615 	if (dst && dst->lwtstate) {
616 		struct ip_tunnel_info *tun_info;
617 
618 		tun_info = lwt_tun_info(dst->lwtstate);
619 		if (tun_info && (tun_info->mode & IP_TUNNEL_INFO_TX)) {
620 			entry = flow_action_entry_next(flow_rule);
621 			entry->id = FLOW_ACTION_TUNNEL_DECAP;
622 		}
623 	}
624 }
625 
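/*
 * Common action list for a routed flow: tunnel decap/encap, Ethernet source
 * and destination rewrite, VLAN pop for the 802.1Q encapsulations of this
 * direction and VLAN/PPPoE push for those of the reverse direction. Tags
 * flagged as present on the ingress device (in_vlan_ingress) are skipped.
 */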
626 static int
627 nf_flow_rule_route_common(struct net *net, const struct flow_offload *flow,
628 			  enum flow_offload_tuple_dir dir,
629 			  struct nf_flow_rule *flow_rule)
630 {
631 	const struct flow_offload_tuple *other_tuple;
632 	const struct flow_offload_tuple *tuple;
633 	int i;
634 
635 	flow_offload_decap_tunnel(flow, dir, flow_rule);
636 	flow_offload_encap_tunnel(flow, dir, flow_rule);
637 
638 	if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
639 	    flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
640 		return -1;
641 
642 	tuple = &flow->tuplehash[dir].tuple;
643 
644 	for (i = 0; i < tuple->encap_num; i++) {
645 		struct flow_action_entry *entry;
646 
647 		if (tuple->in_vlan_ingress & BIT(i))
648 			continue;
649 
650 		if (tuple->encap[i].proto == htons(ETH_P_8021Q)) {
651 			entry = flow_action_entry_next(flow_rule);
652 			entry->id = FLOW_ACTION_VLAN_POP;
653 		}
654 	}
655 
656 	other_tuple = &flow->tuplehash[!dir].tuple;
657 
658 	for (i = 0; i < other_tuple->encap_num; i++) {
659 		struct flow_action_entry *entry;
660 
661 		if (other_tuple->in_vlan_ingress & BIT(i))
662 			continue;
663 
664 		entry = flow_action_entry_next(flow_rule);
665 
666 		switch (other_tuple->encap[i].proto) {
667 		case htons(ETH_P_PPP_SES):
668 			entry->id = FLOW_ACTION_PPPOE_PUSH;
669 			entry->pppoe.sid = other_tuple->encap[i].id;
670 			break;
671 		case htons(ETH_P_8021Q):
672 			entry->id = FLOW_ACTION_VLAN_PUSH;
673 			entry->vlan.vid = other_tuple->encap[i].id;
674 			entry->vlan.proto = other_tuple->encap[i].proto;
675 			break;
676 		}
677 	}
678 
679 	return 0;
680 }
681 
682 int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
683 			    enum flow_offload_tuple_dir dir,
684 			    struct nf_flow_rule *flow_rule)
685 {
686 	if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
687 		return -1;
688 
689 	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
690 		flow_offload_ipv4_snat(net, flow, dir, flow_rule);
691 		flow_offload_port_snat(net, flow, dir, flow_rule);
692 	}
693 	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
694 		flow_offload_ipv4_dnat(net, flow, dir, flow_rule);
695 		flow_offload_port_dnat(net, flow, dir, flow_rule);
696 	}
697 	if (test_bit(NF_FLOW_SNAT, &flow->flags) ||
698 	    test_bit(NF_FLOW_DNAT, &flow->flags))
699 		flow_offload_ipv4_checksum(net, flow, flow_rule);
700 
701 	flow_offload_redirect(net, flow, dir, flow_rule);
702 
703 	return 0;
704 }
705 EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv4);
706 
707 int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
708 			    enum flow_offload_tuple_dir dir,
709 			    struct nf_flow_rule *flow_rule)
710 {
711 	if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
712 		return -1;
713 
714 	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
715 		flow_offload_ipv6_snat(net, flow, dir, flow_rule);
716 		flow_offload_port_snat(net, flow, dir, flow_rule);
717 	}
718 	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
719 		flow_offload_ipv6_dnat(net, flow, dir, flow_rule);
720 		flow_offload_port_dnat(net, flow, dir, flow_rule);
721 	}
722 
723 	flow_offload_redirect(net, flow, dir, flow_rule);
724 
725 	return 0;
726 }
727 EXPORT_SYMBOL_GPL(nf_flow_rule_route_ipv6);
728 
729 #define NF_FLOW_RULE_ACTION_MAX	16
730 
731 static struct nf_flow_rule *
732 nf_flow_offload_rule_alloc(struct net *net,
733 			   const struct flow_offload_work *offload,
734 			   enum flow_offload_tuple_dir dir)
735 {
736 	const struct nf_flowtable *flowtable = offload->flowtable;
737 	const struct flow_offload_tuple *tuple, *other_tuple;
738 	const struct flow_offload *flow = offload->flow;
739 	struct dst_entry *other_dst = NULL;
740 	struct nf_flow_rule *flow_rule;
741 	int err = -ENOMEM;
742 
743 	flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
744 	if (!flow_rule)
745 		goto err_flow;
746 
747 	flow_rule->rule = flow_rule_alloc(NF_FLOW_RULE_ACTION_MAX);
748 	if (!flow_rule->rule)
749 		goto err_flow_rule;
750 
751 	flow_rule->rule->match.dissector = &flow_rule->match.dissector;
752 	flow_rule->rule->match.mask = &flow_rule->match.mask;
753 	flow_rule->rule->match.key = &flow_rule->match.key;
754 
755 	tuple = &flow->tuplehash[dir].tuple;
756 	other_tuple = &flow->tuplehash[!dir].tuple;
757 	if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH)
758 		other_dst = other_tuple->dst_cache;
759 
760 	err = nf_flow_rule_match(&flow_rule->match, tuple, other_dst);
761 	if (err < 0)
762 		goto err_flow_match;
763 
764 	flow_rule->rule->action.num_entries = 0;
765 	if (flowtable->type->action(net, flow, dir, flow_rule) < 0)
766 		goto err_flow_match;
767 
768 	return flow_rule;
769 
770 err_flow_match:
771 	kfree(flow_rule->rule);
772 err_flow_rule:
773 	kfree(flow_rule);
774 err_flow:
775 	return NULL;
776 }
777 
778 static void __nf_flow_offload_destroy(struct nf_flow_rule *flow_rule)
779 {
780 	struct flow_action_entry *entry;
781 	int i;
782 
783 	for (i = 0; i < flow_rule->rule->action.num_entries; i++) {
784 		entry = &flow_rule->rule->action.entries[i];
785 		if (entry->id != FLOW_ACTION_REDIRECT)
786 			continue;
787 
788 		dev_put(entry->dev);
789 	}
790 	kfree(flow_rule->rule);
791 	kfree(flow_rule);
792 }
793 
794 static void nf_flow_offload_destroy(struct nf_flow_rule *flow_rule[])
795 {
796 	int i;
797 
798 	for (i = 0; i < FLOW_OFFLOAD_DIR_MAX; i++)
799 		__nf_flow_offload_destroy(flow_rule[i]);
800 }
801 
802 static int nf_flow_offload_alloc(const struct flow_offload_work *offload,
803 				 struct nf_flow_rule *flow_rule[])
804 {
805 	struct net *net = read_pnet(&offload->flowtable->net);
806 
807 	flow_rule[0] = nf_flow_offload_rule_alloc(net, offload,
808 						  FLOW_OFFLOAD_DIR_ORIGINAL);
809 	if (!flow_rule[0])
810 		return -ENOMEM;
811 
812 	flow_rule[1] = nf_flow_offload_rule_alloc(net, offload,
813 						  FLOW_OFFLOAD_DIR_REPLY);
814 	if (!flow_rule[1]) {
815 		__nf_flow_offload_destroy(flow_rule[0]);
816 		return -ENOMEM;
817 	}
818 
819 	return 0;
820 }
821 
822 static void nf_flow_offload_init(struct flow_cls_offload *cls_flow,
823 				 __be16 proto, int priority,
824 				 enum flow_cls_command cmd,
825 				 const struct flow_offload_tuple *tuple,
826 				 struct netlink_ext_ack *extack)
827 {
828 	cls_flow->common.protocol = proto;
829 	cls_flow->common.prio = priority;
830 	cls_flow->common.extack = extack;
831 	cls_flow->command = cmd;
832 	cls_flow->cookie = (unsigned long)tuple;
833 }
834 
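/*
 * Run one flow_cls command (replace, destroy or stats) through every
 * callback registered on the flowtable's flow block. The tuple pointer is
 * used as the rule cookie. Returns the number of callbacks that accepted
 * the command.
 */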
835 static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
836 				 struct flow_offload *flow,
837 				 struct nf_flow_rule *flow_rule,
838 				 enum flow_offload_tuple_dir dir,
839 				 int priority, int cmd,
840 				 struct flow_stats *stats,
841 				 struct list_head *block_cb_list)
842 {
843 	struct flow_cls_offload cls_flow = {};
844 	struct flow_block_cb *block_cb;
845 	struct netlink_ext_ack extack;
846 	__be16 proto = ETH_P_ALL;
847 	int err, i = 0;
848 
849 	nf_flow_offload_init(&cls_flow, proto, priority, cmd,
850 			     &flow->tuplehash[dir].tuple, &extack);
851 	if (cmd == FLOW_CLS_REPLACE)
852 		cls_flow.rule = flow_rule->rule;
853 
854 	down_read(&flowtable->flow_block_lock);
855 	list_for_each_entry(block_cb, block_cb_list, list) {
856 		err = block_cb->cb(TC_SETUP_CLSFLOWER, &cls_flow,
857 				   block_cb->cb_priv);
858 		if (err < 0)
859 			continue;
860 
861 		i++;
862 	}
863 	up_read(&flowtable->flow_block_lock);
864 
865 	if (cmd == FLOW_CLS_STATS)
866 		memcpy(stats, &cls_flow.stats, sizeof(*stats));
867 
868 	return i;
869 }
870 
871 static int flow_offload_tuple_add(struct flow_offload_work *offload,
872 				  struct nf_flow_rule *flow_rule,
873 				  enum flow_offload_tuple_dir dir)
874 {
875 	return nf_flow_offload_tuple(offload->flowtable, offload->flow,
876 				     flow_rule, dir,
877 				     offload->flowtable->priority,
878 				     FLOW_CLS_REPLACE, NULL,
879 				     &offload->flowtable->flow_block.cb_list);
880 }
881 
882 static void flow_offload_tuple_del(struct flow_offload_work *offload,
883 				   enum flow_offload_tuple_dir dir)
884 {
885 	nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
886 			      offload->flowtable->priority,
887 			      FLOW_CLS_DESTROY, NULL,
888 			      &offload->flowtable->flow_block.cb_list);
889 }
890 
891 static int flow_offload_rule_add(struct flow_offload_work *offload,
892 				 struct nf_flow_rule *flow_rule[])
893 {
894 	int ok_count = 0;
895 
896 	ok_count += flow_offload_tuple_add(offload, flow_rule[0],
897 					   FLOW_OFFLOAD_DIR_ORIGINAL);
898 	ok_count += flow_offload_tuple_add(offload, flow_rule[1],
899 					   FLOW_OFFLOAD_DIR_REPLY);
900 	if (ok_count == 0)
901 		return -ENOENT;
902 
903 	return 0;
904 }
905 
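/*
 * FLOW_CLS_REPLACE work: build the two per-direction rules, offer them to
 * the block callbacks and mark the conntrack entry as hardware offloaded if
 * at least one callback accepted them. The intermediate rule representation
 * is freed in either case.
 */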
906 static void flow_offload_work_add(struct flow_offload_work *offload)
907 {
908 	struct nf_flow_rule *flow_rule[FLOW_OFFLOAD_DIR_MAX];
909 	int err;
910 
911 	err = nf_flow_offload_alloc(offload, flow_rule);
912 	if (err < 0)
913 		return;
914 
915 	err = flow_offload_rule_add(offload, flow_rule);
916 	if (err < 0)
917 		goto out;
918 
919 	set_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
920 
921 out:
922 	nf_flow_offload_destroy(flow_rule);
923 }
924 
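/*
 * FLOW_CLS_DESTROY work: remove both directions from the hardware and flag
 * the flow as dead so the software path can release it.
 */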
925 static void flow_offload_work_del(struct flow_offload_work *offload)
926 {
927 	clear_bit(IPS_HW_OFFLOAD_BIT, &offload->flow->ct->status);
928 	flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_ORIGINAL);
929 	flow_offload_tuple_del(offload, FLOW_OFFLOAD_DIR_REPLY);
930 	set_bit(NF_FLOW_HW_DEAD, &offload->flow->flags);
931 }
932 
933 static void flow_offload_tuple_stats(struct flow_offload_work *offload,
934 				     enum flow_offload_tuple_dir dir,
935 				     struct flow_stats *stats)
936 {
937 	nf_flow_offload_tuple(offload->flowtable, offload->flow, NULL, dir,
938 			      offload->flowtable->priority,
939 			      FLOW_CLS_STATS, stats,
940 			      &offload->flowtable->flow_block.cb_list);
941 }
942 
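/*
 * FLOW_CLS_STATS work: fetch hardware counters for both directions, extend
 * the flow timeout based on the most recent hardware activity and, if the
 * flowtable has counters enabled, add packets/bytes to conntrack accounting.
 */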
943 static void flow_offload_work_stats(struct flow_offload_work *offload)
944 {
945 	struct flow_stats stats[FLOW_OFFLOAD_DIR_MAX] = {};
946 	u64 lastused;
947 
948 	flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_ORIGINAL, &stats[0]);
949 	flow_offload_tuple_stats(offload, FLOW_OFFLOAD_DIR_REPLY, &stats[1]);
950 
951 	lastused = max_t(u64, stats[0].lastused, stats[1].lastused);
952 	offload->flow->timeout = max_t(u64, offload->flow->timeout,
953 				       lastused + flow_offload_get_timeout(offload->flow));
954 
955 	if (offload->flowtable->flags & NF_FLOWTABLE_COUNTER) {
956 		if (stats[0].pkts)
957 			nf_ct_acct_add(offload->flow->ct,
958 				       FLOW_OFFLOAD_DIR_ORIGINAL,
959 				       stats[0].pkts, stats[0].bytes);
960 		if (stats[1].pkts)
961 			nf_ct_acct_add(offload->flow->ct,
962 				       FLOW_OFFLOAD_DIR_REPLY,
963 				       stats[1].pkts, stats[1].bytes);
964 	}
965 }
966 
967 static void flow_offload_work_handler(struct work_struct *work)
968 {
969 	struct flow_offload_work *offload;
970 	struct net *net;
971 
972 	offload = container_of(work, struct flow_offload_work, work);
973 	net = read_pnet(&offload->flowtable->net);
974 	switch (offload->cmd) {
975 	case FLOW_CLS_REPLACE:
976 		flow_offload_work_add(offload);
977 		NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_add);
978 		break;
979 	case FLOW_CLS_DESTROY:
980 		flow_offload_work_del(offload);
981 		NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_del);
982 		break;
983 	case FLOW_CLS_STATS:
984 		flow_offload_work_stats(offload);
985 		NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count_wq_stats);
986 		break;
987 	default:
988 		WARN_ON_ONCE(1);
989 	}
990 
991 	clear_bit(NF_FLOW_HW_PENDING, &offload->flow->flags);
992 	kfree(offload);
993 }
994 
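/*
 * Dispatch a work item to the workqueue matching its command and bump the
 * corresponding pending-work counter; the counter is decremented again in
 * flow_offload_work_handler().
 */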
995 static void flow_offload_queue_work(struct flow_offload_work *offload)
996 {
997 	struct net *net = read_pnet(&offload->flowtable->net);
998 
999 	if (offload->cmd == FLOW_CLS_REPLACE) {
1000 		NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_add);
1001 		queue_work(nf_flow_offload_add_wq, &offload->work);
1002 	} else if (offload->cmd == FLOW_CLS_DESTROY) {
1003 		NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_del);
1004 		queue_work(nf_flow_offload_del_wq, &offload->work);
1005 	} else {
1006 		NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_stats);
1007 		queue_work(nf_flow_offload_stats_wq, &offload->work);
1008 	}
1009 }
1010 
1011 static struct flow_offload_work *
1012 nf_flow_offload_work_alloc(struct nf_flowtable *flowtable,
1013 			   struct flow_offload *flow, unsigned int cmd)
1014 {
1015 	struct flow_offload_work *offload;
1016 
1017 	if (test_and_set_bit(NF_FLOW_HW_PENDING, &flow->flags))
1018 		return NULL;
1019 
1020 	offload = kmalloc(sizeof(struct flow_offload_work), GFP_ATOMIC);
1021 	if (!offload) {
1022 		clear_bit(NF_FLOW_HW_PENDING, &flow->flags);
1023 		return NULL;
1024 	}
1025 
1026 	offload->cmd = cmd;
1027 	offload->flow = flow;
1028 	offload->flowtable = flowtable;
1029 	INIT_WORK(&offload->work, flow_offload_work_handler);
1030 
1031 	return offload;
1032 }
1033 
1034 
1035 void nf_flow_offload_add(struct nf_flowtable *flowtable,
1036 			 struct flow_offload *flow)
1037 {
1038 	struct flow_offload_work *offload;
1039 
1040 	offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_REPLACE);
1041 	if (!offload)
1042 		return;
1043 
1044 	flow_offload_queue_work(offload);
1045 }
1046 
1047 void nf_flow_offload_del(struct nf_flowtable *flowtable,
1048 			 struct flow_offload *flow)
1049 {
1050 	struct flow_offload_work *offload;
1051 
1052 	offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_DESTROY);
1053 	if (!offload)
1054 		return;
1055 
1056 	set_bit(NF_FLOW_HW_DYING, &flow->flags);
1057 	flow_offload_queue_work(offload);
1058 }
1059 
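/*
 * Schedule a stats refresh only once the flow has consumed at least 10% of
 * its timeout; otherwise the entry was refreshed recently enough.
 */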
1060 void nf_flow_offload_stats(struct nf_flowtable *flowtable,
1061 			   struct flow_offload *flow)
1062 {
1063 	struct flow_offload_work *offload;
1064 	__s32 delta;
1065 
1066 	delta = nf_flow_timeout_delta(flow->timeout);
1067 	if (delta >= (9 * flow_offload_get_timeout(flow)) / 10)
1068 		return;
1069 
1070 	offload = nf_flow_offload_work_alloc(flowtable, flow, FLOW_CLS_STATS);
1071 	if (!offload)
1072 		return;
1073 
1074 	flow_offload_queue_work(offload);
1075 }
1076 
1077 void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable)
1078 {
1079 	if (nf_flowtable_hw_offload(flowtable)) {
1080 		flush_workqueue(nf_flow_offload_del_wq);
1081 		nf_flow_table_gc_run(flowtable);
1082 	}
1083 }
1084 
1085 void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
1086 {
1087 	if (nf_flowtable_hw_offload(flowtable)) {
1088 		flush_workqueue(nf_flow_offload_add_wq);
1089 		flush_workqueue(nf_flow_offload_del_wq);
1090 		flush_workqueue(nf_flow_offload_stats_wq);
1091 	}
1092 }
1093 
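/*
 * Attach the block callbacks collected during BIND to the flowtable, or
 * release them on UNBIND.
 */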
1094 static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
1095 				     struct flow_block_offload *bo,
1096 				     enum flow_block_command cmd)
1097 {
1098 	struct flow_block_cb *block_cb, *next;
1099 	int err = 0;
1100 
1101 	down_write(&flowtable->flow_block_lock);
1102 	switch (cmd) {
1103 	case FLOW_BLOCK_BIND:
1104 		list_splice(&bo->cb_list, &flowtable->flow_block.cb_list);
1105 		break;
1106 	case FLOW_BLOCK_UNBIND:
1107 		list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
1108 			list_del(&block_cb->list);
1109 			flow_block_cb_free(block_cb);
1110 		}
1111 		break;
1112 	default:
1113 		WARN_ON_ONCE(1);
1114 		err = -EOPNOTSUPP;
1115 	}
1116 	up_write(&flowtable->flow_block_lock);
1117 
1118 	return err;
1119 }
1120 
1121 static void nf_flow_table_block_offload_init(struct flow_block_offload *bo,
1122 					     struct net *net,
1123 					     enum flow_block_command cmd,
1124 					     struct nf_flowtable *flowtable,
1125 					     struct netlink_ext_ack *extack)
1126 {
1127 	memset(bo, 0, sizeof(*bo));
1128 	bo->net		= net;
1129 	bo->block	= &flowtable->flow_block;
1130 	bo->command	= cmd;
1131 	bo->binder_type	= FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS;
1132 	bo->extack	= extack;
1133 	bo->cb_list_head = &flowtable->flow_block.cb_list;
1134 	INIT_LIST_HEAD(&bo->cb_list);
1135 }
1136 
1137 static void nf_flow_table_indr_cleanup(struct flow_block_cb *block_cb)
1138 {
1139 	struct nf_flowtable *flowtable = block_cb->indr.data;
1140 	struct net_device *dev = block_cb->indr.dev;
1141 
1142 	nf_flow_table_gc_cleanup(flowtable, dev);
1143 	down_write(&flowtable->flow_block_lock);
1144 	list_del(&block_cb->list);
1145 	list_del(&block_cb->driver_list);
1146 	flow_block_cb_free(block_cb);
1147 	up_write(&flowtable->flow_block_lock);
1148 }
1149 
1150 static int nf_flow_table_indr_offload_cmd(struct flow_block_offload *bo,
1151 					  struct nf_flowtable *flowtable,
1152 					  struct net_device *dev,
1153 					  enum flow_block_command cmd,
1154 					  struct netlink_ext_ack *extack)
1155 {
1156 	nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
1157 					 extack);
1158 
1159 	return flow_indr_dev_setup_offload(dev, NULL, TC_SETUP_FT, flowtable, bo,
1160 					   nf_flow_table_indr_cleanup);
1161 }
1162 
1163 static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
1164 				     struct nf_flowtable *flowtable,
1165 				     struct net_device *dev,
1166 				     enum flow_block_command cmd,
1167 				     struct netlink_ext_ack *extack)
1168 {
1169 	int err;
1170 
1171 	nf_flow_table_block_offload_init(bo, dev_net(dev), cmd, flowtable,
1172 					 extack);
1173 	down_write(&flowtable->flow_block_lock);
1174 	err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_FT, bo);
1175 	up_write(&flowtable->flow_block_lock);
1176 	if (err < 0)
1177 		return err;
1178 
1179 	return 0;
1180 }
1181 
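/*
 * Bind or unbind hardware offload for a flowtable on a given device: use
 * ndo_setup_tc() when the driver implements it, otherwise go through the
 * indirect flow block infrastructure (used by drivers offloading traffic of
 * devices they do not own, e.g. tunnel netdevices).
 */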
1182 int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
1183 				struct net_device *dev,
1184 				enum flow_block_command cmd)
1185 {
1186 	struct netlink_ext_ack extack = {};
1187 	struct flow_block_offload bo;
1188 	int err;
1189 
1190 	if (!nf_flowtable_hw_offload(flowtable))
1191 		return 0;
1192 
1193 	if (dev->netdev_ops->ndo_setup_tc)
1194 		err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd,
1195 						&extack);
1196 	else
1197 		err = nf_flow_table_indr_offload_cmd(&bo, flowtable, dev, cmd,
1198 						     &extack);
1199 	if (err < 0)
1200 		return err;
1201 
1202 	return nf_flow_table_block_setup(flowtable, &bo, cmd);
1203 }
1204 EXPORT_SYMBOL_GPL(nf_flow_table_offload_setup);
1205 
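/*
 * Module init/exit: create and destroy the three offload workqueues.
 * WQ_SYSFS exposes them under /sys/devices/virtual/workqueue/ so their
 * attributes can be tuned from userspace.
 */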
1206 int nf_flow_table_offload_init(void)
1207 {
1208 	nf_flow_offload_add_wq  = alloc_workqueue("nf_ft_offload_add",
1209 						  WQ_UNBOUND | WQ_SYSFS, 0);
1210 	if (!nf_flow_offload_add_wq)
1211 		return -ENOMEM;
1212 
1213 	nf_flow_offload_del_wq  = alloc_workqueue("nf_ft_offload_del",
1214 						  WQ_UNBOUND | WQ_SYSFS, 0);
1215 	if (!nf_flow_offload_del_wq)
1216 		goto err_del_wq;
1217 
1218 	nf_flow_offload_stats_wq  = alloc_workqueue("nf_ft_offload_stats",
1219 						    WQ_UNBOUND | WQ_SYSFS, 0);
1220 	if (!nf_flow_offload_stats_wq)
1221 		goto err_stats_wq;
1222 
1223 	return 0;
1224 
1225 err_stats_wq:
1226 	destroy_workqueue(nf_flow_offload_del_wq);
1227 err_del_wq:
1228 	destroy_workqueue(nf_flow_offload_add_wq);
1229 	return -ENOMEM;
1230 }
1231 
1232 void nf_flow_table_offload_exit(void)
1233 {
1234 	destroy_workqueue(nf_flow_offload_add_wq);
1235 	destroy_workqueue(nf_flow_offload_del_wq);
1236 	destroy_workqueue(nf_flow_offload_stats_wq);
1237 }
1238