/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Author:
 *	Jay Schulist <jschlst@samba.org>
 *
 * Based on the design of:
 *	- The Berkeley Packet Filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in sk_chk_filter()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/gfp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/reciprocal_div.h>
#include <linux/ratelimit.h>

/* No hurry in this branch
 *
 * Exported for the bpf jit load helper.
 */
void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
{
	u8 *ptr = NULL;

	if (k >= SKF_NET_OFF)
		ptr = skb_network_header(skb) + k - SKF_NET_OFF;
	else if (k >= SKF_LL_OFF)
		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;

	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
		return ptr;
	return NULL;
}
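
/*
 * Illustrative sketch (assumed example, not part of this file): classic
 * BPF can address the link-layer header through the special negative
 * offsets resolved above. The two-instruction program below touches the
 * first byte of the MAC header and then accepts the whole packet; if the
 * header cannot be reached, sk_run_filter() returns 0 and the packet is
 * dropped.
 *
 *	struct sock_filter mac_probe[] = {
 *		{ BPF_LD  | BPF_B | BPF_ABS, 0, 0, SKF_LL_OFF },
 *		{ BPF_RET | BPF_K,           0, 0, 0xffffffff },
 *	};
 */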

static inline void *load_pointer(const struct sk_buff *skb, int k,
				 unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);
	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}

/**
 *	sk_filter - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *
 * Run the filter code and then trim skb->data to the size returned by
 * sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to sk_run_filter. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	int err;
	struct sk_filter *filter;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		unsigned int pkt_len = SK_RUN_FILTER(filter, skb);

		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter);
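
/*
 * Illustrative sketch of a typical call site (assumed, not taken from
 * this file): a protocol receive path runs the attached filter before
 * queueing a packet and frees the skb when sk_filter() reports an error
 * (-EPERM from the filter itself, or an LSM refusal).
 *
 *	err = sk_filter(sk, skb);
 *	if (err) {
 *		kfree_skb(skb);
 *		return err;
 *	}
 *	... queue skb to the socket ...
 */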

/**
 *	sk_run_filter - run a filter on a socket
 *	@skb: buffer to run the filter on
 *	@fentry: filter to apply
 *
 * Decode and apply filter instructions to the skb->data.
 * Return length to keep, 0 for none. @skb is the data we are
 * filtering, @fentry is the array of filter instructions.
 * Because all jumps are guaranteed to be before the last instruction,
 * and the last instruction is guaranteed to be a RET, we don't need to
 * check flen. (We used to pass the length of the filter to this function.)
 */
unsigned int sk_run_filter(const struct sk_buff *skb,
			   const struct sock_filter *fentry)
{
	void *ptr;
	u32 A = 0;			/* Accumulator */
	u32 X = 0;			/* Index Register */
	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
	u32 tmp;
	int k;

	/*
	 * Process array of filter instructions.
	 */
	for (;; fentry++) {
#if defined(CONFIG_X86_32)
#define	K (fentry->k)
#else
		const u32 K = fentry->k;
#endif

		switch (fentry->code) {
		case BPF_S_ALU_ADD_X:
			A += X;
			continue;
		case BPF_S_ALU_ADD_K:
			A += K;
			continue;
		case BPF_S_ALU_SUB_X:
			A -= X;
			continue;
		case BPF_S_ALU_SUB_K:
			A -= K;
			continue;
		case BPF_S_ALU_MUL_X:
			A *= X;
			continue;
		case BPF_S_ALU_MUL_K:
			A *= K;
			continue;
		case BPF_S_ALU_DIV_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;
		case BPF_S_ALU_DIV_K:
			/* K was converted to reciprocal_value(k) by sk_chk_filter() */
			A = reciprocal_divide(A, K);
			continue;
		case BPF_S_ALU_AND_X:
			A &= X;
			continue;
		case BPF_S_ALU_AND_K:
			A &= K;
			continue;
		case BPF_S_ALU_OR_X:
			A |= X;
			continue;
		case BPF_S_ALU_OR_K:
			A |= K;
			continue;
		case BPF_S_ALU_LSH_X:
			A <<= X;
			continue;
		case BPF_S_ALU_LSH_K:
			A <<= K;
			continue;
		case BPF_S_ALU_RSH_X:
			A >>= X;
			continue;
		case BPF_S_ALU_RSH_K:
			A >>= K;
			continue;
		case BPF_S_ALU_NEG:
			A = -A;
			continue;
		case BPF_S_JMP_JA:
			fentry += K;
			continue;
		case BPF_S_JMP_JGT_K:
			fentry += (A > K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_K:
			fentry += (A >= K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_K:
			fentry += (A == K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_K:
			fentry += (A & K) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGT_X:
			fentry += (A > X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JGE_X:
			fentry += (A >= X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JEQ_X:
			fentry += (A == X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_JMP_JSET_X:
			fentry += (A & X) ? fentry->jt : fentry->jf;
			continue;
		case BPF_S_LD_W_ABS:
			k = K;
load_w:
			ptr = load_pointer(skb, k, 4, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be32(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_H_ABS:
			k = K;
load_h:
			ptr = load_pointer(skb, k, 2, &tmp);
			if (ptr != NULL) {
				A = get_unaligned_be16(ptr);
				continue;
			}
			return 0;
		case BPF_S_LD_B_ABS:
			k = K;
load_b:
			ptr = load_pointer(skb, k, 1, &tmp);
			if (ptr != NULL) {
				A = *(u8 *)ptr;
				continue;
			}
			return 0;
		case BPF_S_LD_W_LEN:
			A = skb->len;
			continue;
		case BPF_S_LDX_W_LEN:
			X = skb->len;
			continue;
		case BPF_S_LD_W_IND:
			k = X + K;
			goto load_w;
		case BPF_S_LD_H_IND:
			k = X + K;
			goto load_h;
		case BPF_S_LD_B_IND:
			k = X + K;
			goto load_b;
		case BPF_S_LDX_B_MSH:
			ptr = load_pointer(skb, K, 1, &tmp);
			if (ptr != NULL) {
				X = (*(u8 *)ptr & 0xf) << 2;
				continue;
			}
			return 0;
		case BPF_S_LD_IMM:
			A = K;
			continue;
		case BPF_S_LDX_IMM:
			X = K;
			continue;
		case BPF_S_LD_MEM:
			A = mem[K];
			continue;
		case BPF_S_LDX_MEM:
			X = mem[K];
			continue;
		case BPF_S_MISC_TAX:
			X = A;
			continue;
		case BPF_S_MISC_TXA:
			A = X;
			continue;
		case BPF_S_RET_K:
			return K;
		case BPF_S_RET_A:
			return A;
		case BPF_S_ST:
			mem[K] = A;
			continue;
		case BPF_S_STX:
			mem[K] = X;
			continue;
		case BPF_S_ANC_PROTOCOL:
			A = ntohs(skb->protocol);
			continue;
		case BPF_S_ANC_PKTTYPE:
			A = skb->pkt_type;
			continue;
		case BPF_S_ANC_IFINDEX:
			if (!skb->dev)
				return 0;
			A = skb->dev->ifindex;
			continue;
		case BPF_S_ANC_MARK:
			A = skb->mark;
			continue;
		case BPF_S_ANC_QUEUE:
			A = skb->queue_mapping;
			continue;
		case BPF_S_ANC_HATYPE:
			if (!skb->dev)
				return 0;
			A = skb->dev->type;
			continue;
		case BPF_S_ANC_RXHASH:
			A = skb->rxhash;
			continue;
		case BPF_S_ANC_CPU:
			A = raw_smp_processor_id();
			continue;
		case BPF_S_ANC_NLATTR: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = nla_find((struct nlattr *)&skb->data[A],
				       skb->len - A, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		case BPF_S_ANC_NLATTR_NEST: {
			struct nlattr *nla;

			if (skb_is_nonlinear(skb))
				return 0;
			if (skb->len < sizeof(struct nlattr))
				return 0;
			if (A > skb->len - sizeof(struct nlattr))
				return 0;

			nla = (struct nlattr *)&skb->data[A];
			if (nla->nla_len > skb->len - A)
				return 0;

			nla = nla_find_nested(nla, X);
			if (nla)
				A = (void *)nla - (void *)skb->data;
			else
				A = 0;
			continue;
		}
		default:
			WARN_RATELIMIT(1, "Unknown code:%u jt:%u jf:%u k:%u\n",
				       fentry->code, fentry->jt,
				       fentry->jf, fentry->k);
			return 0;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sk_run_filter);
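
/*
 * Illustrative sketch of a program sk_run_filter() can execute (assumed
 * example, not part of this file). It assumes skb->data starts at the
 * Ethernet header, as seen by a packet socket: load the ethertype,
 * accept the whole packet if it is ETH_P_IP (0x0800), otherwise drop.
 *
 *	struct sock_filter ip_only[] = {
 *		{ BPF_LD  | BPF_H | BPF_ABS, 0, 0, 12         },
 *		{ BPF_JMP | BPF_JEQ | BPF_K, 0, 1, 0x0800     },
 *		{ BPF_RET | BPF_K,           0, 0, 0xffffffff },
 *		{ BPF_RET | BPF_K,           0, 0, 0          },
 *	};
 */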

/*
 * Security:
 * A BPF program is able to use 16 cells of memory to store intermediate
 * values (check u32 mem[BPF_MEMWORDS] in sk_run_filter()).
 * As we don't want to clear the mem[] array for each packet going through
 * sk_run_filter(), we check that a filter loaded by the user never tries
 * to read a cell it has not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);
	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;
	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_S_ST:
		case BPF_S_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_S_JMP_JA:
			/* a jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* a jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}
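
/*
 * Illustrative sketch (assumed example, not part of this file): the
 * program below passes the opcode and range checks in sk_chk_filter()
 * but is rejected by check_load_and_stores() with -EINVAL, because it
 * reads scratch cell 0 before any BPF_ST/BPF_STX has written it.
 *
 *	struct sock_filter bad[] = {
 *		{ BPF_LD  | BPF_MEM, 0, 0, 0          },
 *		{ BPF_RET | BPF_K,   0, 0, 0xffffffff },
 *	};
 */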

/**
 *	sk_chk_filter - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
{
	/*
	 * Valid instructions are initialized to non-0.
	 * Invalid instructions are initialized to 0.
	 */
	static const u8 codes[] = {
		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
		[BPF_RET|BPF_K]          = BPF_S_RET_K,
		[BPF_RET|BPF_A]          = BPF_S_RET_A,
		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
		[BPF_ST]                 = BPF_S_ST,
		[BPF_STX]                = BPF_S_STX,
		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
	};
	int pc;

	if (flen == 0 || flen > BPF_MAXINSNS)
		return -EINVAL;

	/* check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		struct sock_filter *ftest = &filter[pc];
		u16 code = ftest->code;

		if (code >= ARRAY_SIZE(codes))
			return -EINVAL;
		code = codes[code];
		if (!code)
			return -EINVAL;
		/* Some instructions need special checks */
		switch (code) {
		case BPF_S_ALU_DIV_K:
			/* check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			ftest->k = reciprocal_value(ftest->k);
			break;
		case BPF_S_LD_MEM:
		case BPF_S_LDX_MEM:
		case BPF_S_ST:
		case BPF_S_STX:
			/* check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_S_JMP_JA:
			/*
			 * Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_S_JMP_JEQ_K:
		case BPF_S_JMP_JEQ_X:
		case BPF_S_JMP_JGE_K:
		case BPF_S_JMP_JGE_X:
		case BPF_S_JMP_JGT_K:
		case BPF_S_JMP_JGT_X:
		case BPF_S_JMP_JSET_X:
		case BPF_S_JMP_JSET_K:
			/* for conditionals both must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE:	\
				code = BPF_S_ANC_##CODE;	\
				break
			switch (ftest->k) {
			ANCILLARY(PROTOCOL);
			ANCILLARY(PKTTYPE);
			ANCILLARY(IFINDEX);
			ANCILLARY(NLATTR);
			ANCILLARY(NLATTR_NEST);
			ANCILLARY(MARK);
			ANCILLARY(QUEUE);
			ANCILLARY(HATYPE);
			ANCILLARY(RXHASH);
			ANCILLARY(CPU);
			}
		}
		ftest->code = code;
	}

	/* last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_S_RET_K:
	case BPF_S_RET_A:
		return check_load_and_stores(filter, flen);
	}
	return -EINVAL;
}
EXPORT_SYMBOL(sk_chk_filter);
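
/*
 * Illustrative sketch (assumed example, not part of this file): the
 * ANCILLARY() rewrite above is what turns a user-visible "absolute load
 * from a magic offset" into an ancillary instruction. A program would
 * submit, for instance:
 *
 *	struct sock_filter ld_proto =
 *		{ BPF_LD | BPF_H | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_PROTOCOL };
 *
 * and sk_chk_filter() rewrites its code to BPF_S_ANC_PROTOCOL, so that
 * sk_run_filter() loads ntohs(skb->protocol) into A instead of reading
 * packet bytes at that (negative) offset.
 */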

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	bpf_jit_free(fp);
	kfree(fp);
}
EXPORT_SYMBOL(sk_filter_release_rcu);

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter, a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize + sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize + sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;
	fp->bpf_func = sk_run_filter;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	bpf_jit_compile(fp);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);
	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);
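
/*
 * Illustrative sketch of how userspace reaches sk_attach_filter()
 * (assumed example, not part of this file): a program builds a
 * struct sock_fprog and attaches it with SO_ATTACH_FILTER on an open
 * socket fd. ip_only[] stands for any valid classic BPF array, such as
 * the one sketched after sk_run_filter() above.
 *
 *	#include <sys/socket.h>
 *	#include <linux/filter.h>
 *
 *	struct sock_fprog prog = {
 *		.len    = sizeof(ip_only) / sizeof(ip_only[0]),
 *		.filter = ip_only,
 *	};
 *	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *		       &prog, sizeof(prog)) < 0)
 *		perror("SO_ATTACH_FILTER");
 */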

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);
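
/*
 * Illustrative sketch (assumed example, not part of this file): userspace
 * reaches sk_detach_filter() through the SO_DETACH_FILTER socket option;
 * the integer option value itself is not used.
 *
 *	int dummy = 0;
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER,
 *		       &dummy, sizeof(dummy)) < 0)
 *		perror("SO_DETACH_FILTER");
 */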