/*
 * NETLINK      Kernel-user communication protocol.
 *
 *              Authors:  Alan Cox <alan@redhat.com>
 *                        Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 *
 */

#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <net/sock.h>
#include <net/scm.h>

#define Nprintk(a...)

#if defined(CONFIG_NETLINK_DEV) || defined(CONFIG_NETLINK_DEV_MODULE)
#define NL_EMULATE_DEV
#endif

struct netlink_opt
{
        u32 pid;
        unsigned int groups;
        u32 dst_pid;
        unsigned int dst_groups;
        unsigned long state;
        int (*handler)(int unit, struct sk_buff *skb);
        wait_queue_head_t wait;
        struct netlink_callback *cb;
        spinlock_t cb_lock;
        void (*data_ready)(struct sock *sk, int bytes);
};

struct nl_pid_hash {
        struct sock **table;
        unsigned long rehash_time;

        unsigned int mask;
        unsigned int shift;

        unsigned int entries;
        unsigned int max_shift;

        u32 rnd;
};

struct netlink_table {
        struct nl_pid_hash hash;
        struct sock *mc_list;
};

#define nlk_sk(__sk) ((__sk)->protinfo.af_netlink)

static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
static unsigned int nl_nonroot[MAX_LINKS];

#ifdef NL_EMULATE_DEV
static struct socket *netlink_kernel[MAX_LINKS];
#endif

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

atomic_t netlink_sock_nr;

static rwlock_t nl_table_lock = RW_LOCK_UNLOCKED;
static atomic_t nl_table_users = ATOMIC_INIT(0);

static struct notifier_block *netlink_chain;

static struct sock **nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
        return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}

static void netlink_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->receive_queue);

        if (!sk->dead) {
                printk("Freeing alive netlink socket %p\n", sk);
                return;
        }
        BUG_TRAP(atomic_read(&sk->rmem_alloc) == 0);
        BUG_TRAP(atomic_read(&sk->wmem_alloc) == 0);
        BUG_TRAP(sk->protinfo.af_netlink->cb == NULL);

        kfree(sk->protinfo.af_netlink);

        atomic_dec(&netlink_sock_nr);
#ifdef NETLINK_REFCNT_DEBUG
        printk(KERN_DEBUG "NETLINK %p released, %d are still alive\n",
               sk, atomic_read(&netlink_sock_nr));
#endif
}

/* Without WQ_FLAG_EXCLUSIVE this lock is fine on UP but _very_ bad on SMP:
 * when several writers sleep and a reader wakes them up, all but one
 * immediately hit the write lock and hog all the CPUs. An exclusive sleep
 * solves this, _but_ remember that it adds useless work on UP machines.
 */

static void netlink_table_grab(void)
{
        write_lock_bh(&nl_table_lock);

        if (atomic_read(&nl_table_users)) {
                DECLARE_WAITQUEUE(wait, current);

                add_wait_queue_exclusive(&nl_table_wait, &wait);
                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&nl_table_users) == 0)
                                break;
                        write_unlock_bh(&nl_table_lock);
                        schedule();
                        write_lock_bh(&nl_table_lock);
                }

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nl_table_wait, &wait);
        }
}

static __inline__ void netlink_table_ungrab(void)
{
        write_unlock_bh(&nl_table_lock);
        wake_up(&nl_table_wait);
}

static __inline__ void
netlink_lock_table(void)
{
        /* read_lock() synchronizes us with netlink_table_grab() */

        read_lock(&nl_table_lock);
        atomic_inc(&nl_table_users);
        read_unlock(&nl_table_lock);
}

static __inline__ void
netlink_unlock_table(void)
{
        if (atomic_dec_and_test(&nl_table_users))
                wake_up(&nl_table_wait);
}
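
/*
 * Illustrative sketch (added; not part of the original source): readers
 * pair netlink_lock_table()/netlink_unlock_table() around table walks,
 * while writers use netlink_table_grab()/netlink_table_ungrab(), which
 * drains all such readers first.  A hypothetical walker:
 */
#if 0
static struct sock *example_first_mc_member(int protocol)
{
        struct sock *sk;

        netlink_lock_table();           /* excludes writers; may then sleep */
        sk = nl_table[protocol].mc_list;
        if (sk)
                sock_hold(sk);          /* keep sk valid after we unlock */
        netlink_unlock_table();         /* last reader out wakes any writer */
        return sk;
}
#endif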

static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
        struct nl_pid_hash *hash = &nl_table[protocol].hash;
        struct sock *sk;

        read_lock(&nl_table_lock);
        for (sk = *nl_pid_hashfn(hash, pid); sk; sk = sk->next) {
                if (sk->protinfo.af_netlink->pid == pid) {
                        sock_hold(sk);
                        read_unlock(&nl_table_lock);
                        return sk;
                }
        }

        read_unlock(&nl_table_lock);
        return NULL;
}

static inline struct sock **nl_pid_hash_alloc(size_t size)
{
        if (size <= PAGE_SIZE)
                return kmalloc(size, GFP_ATOMIC);
        else
                return (struct sock **)
                        __get_free_pages(GFP_ATOMIC, get_order(size));
}

static inline void nl_pid_hash_free(struct sock **table, size_t size)
{
        if (size <= PAGE_SIZE)
                kfree(table);
        else
                free_pages((unsigned long)table, get_order(size));
}

static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
        unsigned int omask, mask, shift;
        size_t osize, size;
        struct sock **otable, **table;
        int i;

        omask = mask = hash->mask;
        osize = size = (mask + 1) * sizeof(*table);
        shift = hash->shift;

        if (grow) {
                if (++shift > hash->max_shift)
                        return 0;
                mask = mask * 2 + 1;
                size *= 2;
        }

        table = nl_pid_hash_alloc(size);
        if (!table)
                return 0;

        memset(table, 0, size);
        otable = hash->table;
        hash->table = table;
        hash->mask = mask;
        hash->shift = shift;
        get_random_bytes(&hash->rnd, sizeof(hash->rnd));

        for (i = 0; i <= omask; i++) {
                struct sock *sk;
                struct sock *tmp, **head;

                for (sk = otable[i]; sk; sk = tmp) {
                        tmp = sk->next;
                        head = nl_pid_hashfn(hash, nlk_sk(sk)->pid);
                        sk->next = *head;
                        *head = sk;
                }
        }

        nl_pid_hash_free(otable, osize);
        hash->rehash_time = jiffies + 10 * 60 * HZ;
        return 1;
}

static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
        int avg = hash->entries >> hash->shift;

        if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
                return 1;

        if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
                nl_pid_hash_rehash(hash, 0);
                return 1;
        }

        return 0;
}
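
/*
 * Worked example (added note, not in the original): with hash->shift == s
 * the table has 2^s buckets, so "entries >> shift" is the average chain
 * length.  Sixteen buckets (shift == 4) holding 40 sockets give avg == 2,
 * so dilution grows the table; a single chain longer than avg only forces
 * a same-size, re-randomized rehash, and at most once per ten minutes
 * (rehash_time is pushed 10 * 60 * HZ jiffies into the future).
 */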

extern struct proto_ops netlink_ops;

static int netlink_insert(struct sock *sk, u32 pid)
{
        struct nl_pid_hash *hash = &nl_table[sk->protocol].hash;
        struct sock **head;
        int err = -EADDRINUSE;
        struct sock *osk;
        int len;

        netlink_table_grab();
        head = nl_pid_hashfn(hash, pid);
        len = 0;
        for (osk = *head; osk; osk = osk->next) {
                if (osk->protinfo.af_netlink->pid == pid)
                        break;
                len++;
        }
        if (osk)
                goto err;

        err = -EBUSY;
        if (nlk_sk(sk)->pid)
                goto err;

        err = -ENOMEM;
        if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
                goto err;

        if (len && nl_pid_hash_dilute(hash, len))
                head = nl_pid_hashfn(hash, pid);
        hash->entries++;
        nlk_sk(sk)->pid = pid;
        sk->next = *head;
        *head = sk;
        sock_hold(sk);
        err = 0;

err:
        netlink_table_ungrab();
        return err;
}

static void netlink_remove(struct sock *sk)
{
        struct sock **skp;
        struct netlink_table *table = &nl_table[sk->protocol];
        struct nl_pid_hash *hash = &table->hash;
        u32 pid = nlk_sk(sk)->pid;

        netlink_table_grab();
        for (skp = nl_pid_hashfn(hash, pid); *skp; skp = &((*skp)->next)) {
                if (*skp == sk) {
                        hash->entries--;
                        *skp = sk->next;
                        __sock_put(sk);
                        break;
                }
        }
        if (!nlk_sk(sk)->groups)
                goto out;
        for (skp = &table->mc_list; *skp; skp = &((*skp)->bind_next)) {
                if (*skp == sk) {
                        *skp = sk->bind_next;
                        break;
                }
        }
out:
        netlink_table_ungrab();
}

static int netlink_create(struct socket *sock, int protocol)
{
        struct sock *sk;

        sock->state = SS_UNCONNECTED;

        if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
                return -ESOCKTNOSUPPORT;

        if (protocol < 0 || protocol >= MAX_LINKS)
                return -EPROTONOSUPPORT;

        sock->ops = &netlink_ops;

        sk = sk_alloc(PF_NETLINK, GFP_KERNEL, 1);
        if (!sk)
                return -ENOMEM;

        sock_init_data(sock, sk);

        sk->protinfo.af_netlink = kmalloc(sizeof(struct netlink_opt), GFP_KERNEL);
        if (sk->protinfo.af_netlink == NULL) {
                sk_free(sk);
                return -ENOMEM;
        }
        memset(sk->protinfo.af_netlink, 0, sizeof(struct netlink_opt));

        spin_lock_init(&sk->protinfo.af_netlink->cb_lock);
        init_waitqueue_head(&sk->protinfo.af_netlink->wait);
        sk->destruct = netlink_sock_destruct;
        atomic_inc(&netlink_sock_nr);

        sk->protocol = protocol;
        return 0;
}

static int netlink_release(struct socket *sock)
{
        struct sock *sk = sock->sk;

        if (!sk)
                return 0;

        netlink_remove(sk);

        spin_lock(&sk->protinfo.af_netlink->cb_lock);
        if (sk->protinfo.af_netlink->cb) {
                sk->protinfo.af_netlink->cb->done(sk->protinfo.af_netlink->cb);
                netlink_destroy_callback(sk->protinfo.af_netlink->cb);
                sk->protinfo.af_netlink->cb = NULL;
                __sock_put(sk);
        }
        spin_unlock(&sk->protinfo.af_netlink->cb_lock);

        /* OK. Socket is unlinked, and therefore no new packets will arrive */

        sock_orphan(sk);
        sock->sk = NULL;
        wake_up_interruptible_all(&sk->protinfo.af_netlink->wait);

        skb_queue_purge(&sk->write_queue);

        if (sk->protinfo.af_netlink->pid && !sk->protinfo.af_netlink->groups) {
                struct netlink_notify n = { protocol: sk->protocol,
                                            pid: sk->protinfo.af_netlink->pid };
                notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
        }

        sock_put(sk);
        return 0;
}

static int netlink_autobind(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct nl_pid_hash *hash = &nl_table[sk->protocol].hash;
        struct sock *osk;
        s32 pid = current->pid;
        int err;
        static s32 rover = -4097;

retry:
        cond_resched();
        netlink_table_grab();
        for (osk = *nl_pid_hashfn(hash, pid); osk; osk = osk->next) {
                if (osk->protinfo.af_netlink->pid == pid) {
                        /* Bind collision, search negative pid values. */
                        pid = rover--;
                        if (rover > -4097)
                                rover = -4097;
                        netlink_table_ungrab();
                        goto retry;
                }
        }
        netlink_table_ungrab();

        err = netlink_insert(sk, pid);
        if (err == -EADDRINUSE)
                goto retry;

        /* If 2 threads race to autobind, that is fine. */
        if (err == -EBUSY)
                err = 0;

        return err;
}

static inline int netlink_capable(struct socket *sock, unsigned int flag)
{
        return (nl_nonroot[sock->sk->protocol] & flag) || capable(CAP_NET_ADMIN);
}

static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
        struct sock *sk = sock->sk;
        struct sock **skp;
        int err;
        struct netlink_opt *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

        if (nladdr->nl_family != AF_NETLINK)
                return -EINVAL;

        /* Only the superuser is allowed to listen to multicasts */
        if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))
                return -EPERM;

        if (sk->protinfo.af_netlink->pid) {
                if (nladdr->nl_pid != sk->protinfo.af_netlink->pid)
                        return -EINVAL;
        } else {
                err = nladdr->nl_pid ?
                        netlink_insert(sk, nladdr->nl_pid) :
                        netlink_autobind(sock);
                if (err)
                        return err;
        }

        if (!nladdr->nl_groups && !nlk->groups)
                return 0;

        netlink_table_grab();
        skp = &nl_table[sk->protocol].mc_list;
        if (nlk->groups && !nladdr->nl_groups) {
                for (; *skp; skp = &((*skp)->bind_next)) {
                        if (*skp == sk) {
                                *skp = sk->bind_next;
                                break;
                        }
                }
        } else if (!nlk->groups && nladdr->nl_groups) {
                sk->bind_next = *skp;
                *skp = sk;
        }
        nlk->groups = nladdr->nl_groups;
        netlink_table_ungrab();

        return 0;
}

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
                           int alen, int flags)
{
        int err = 0;
        struct sock *sk = sock->sk;
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

        if (addr->sa_family == AF_UNSPEC) {
                sk->protinfo.af_netlink->dst_pid = 0;
                sk->protinfo.af_netlink->dst_groups = 0;
                return 0;
        }
        if (addr->sa_family != AF_NETLINK)
                return -EINVAL;

        /* Only the superuser is allowed to send multicasts */
        if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
                return -EPERM;

        if (!sk->protinfo.af_netlink->pid)
                err = netlink_autobind(sock);

        if (err == 0) {
                sk->protinfo.af_netlink->dst_pid = nladdr->nl_pid;
                sk->protinfo.af_netlink->dst_groups = nladdr->nl_groups;
        }

        return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr,
                           int *addr_len, int peer)
{
        struct sock *sk = sock->sk;
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;

        nladdr->nl_family = AF_NETLINK;
        nladdr->nl_pad = 0;
        *addr_len = sizeof(*nladdr);

        if (peer) {
                nladdr->nl_pid = sk->protinfo.af_netlink->dst_pid;
                nladdr->nl_groups = sk->protinfo.af_netlink->dst_groups;
        } else {
                nladdr->nl_pid = sk->protinfo.af_netlink->pid;
                nladdr->nl_groups = sk->protinfo.af_netlink->groups;
        }
        return 0;
}

static void netlink_overrun(struct sock *sk)
{
        if (!test_and_set_bit(0, &sk->protinfo.af_netlink->state)) {
                sk->err = ENOBUFS;
                sk->error_report(sk);
        }
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
        struct sock *sk;
        int len = skb->len;
        int protocol = ssk->protocol;
        long timeo;
        DECLARE_WAITQUEUE(wait, current);

        timeo = sock_sndtimeo(ssk, nonblock);

retry:
        sk = netlink_lookup(protocol, pid);
        if (sk == NULL)
                goto no_dst;

        /* Don't bother queuing skb if kernel socket has no input function */
        if (sk->protinfo.af_netlink->pid == 0 &&
            !sk->protinfo.af_netlink->data_ready)
                goto no_dst;

#ifdef NL_EMULATE_DEV
        if (sk->protinfo.af_netlink->handler) {
                skb_orphan(skb);
                len = sk->protinfo.af_netlink->handler(protocol, skb);
                sock_put(sk);
                return len;
        }
#endif

        if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
            test_bit(0, &sk->protinfo.af_netlink->state)) {
                if (!timeo) {
                        if (ssk->protinfo.af_netlink->pid == 0)
                                netlink_overrun(sk);
                        sock_put(sk);
                        kfree_skb(skb);
                        return -EAGAIN;
                }

                __set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&sk->protinfo.af_netlink->wait, &wait);

                if ((atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
                     test_bit(0, &sk->protinfo.af_netlink->state)) &&
                    !sk->dead)
                        timeo = schedule_timeout(timeo);

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&sk->protinfo.af_netlink->wait, &wait);
                sock_put(sk);

                if (signal_pending(current)) {
                        kfree_skb(skb);
                        return sock_intr_errno(timeo);
                }
                goto retry;
        }

        skb_orphan(skb);
        skb_set_owner_r(skb, sk);
        skb_queue_tail(&sk->receive_queue, skb);
        sk->data_ready(sk, len);
        sock_put(sk);
        return len;

no_dst:
        kfree_skb(skb);
        return -ECONNREFUSED;
}
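
#if 0
/*
 * Illustrative sketch (added; hypothetical names): a kernel-side caller
 * of netlink_unicast() builds an skb and addresses it by the receiver's
 * pid.  netlink_unicast() consumes the skb and returns the queued length,
 * or a negative errno such as -ECONNREFUSED or -EAGAIN.
 */
static int example_reply(struct sock *nl_sk, u32 pid, const void *data, int len)
{
        struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

        if (skb == NULL)
                return -ENOBUFS;
        memcpy(skb_put(skb, len), data, len);
        return netlink_unicast(nl_sk, skb, pid, MSG_DONTWAIT);
}
#endif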

static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
#ifdef NL_EMULATE_DEV
        if (sk->protinfo.af_netlink->handler) {
                skb_orphan(skb);
                sk->protinfo.af_netlink->handler(sk->protocol, skb);
                return 0;
        } else
#endif
        if (atomic_read(&sk->rmem_alloc) <= sk->rcvbuf &&
            !test_bit(0, &sk->protinfo.af_netlink->state)) {
                skb_orphan(skb);
                skb_set_owner_r(skb, sk);
                skb_queue_tail(&sk->receive_queue, skb);
                sk->data_ready(sk, skb->len);
                return 0;
        }
        return -1;
}

struct netlink_broadcast_data {
        struct sock *exclude_sk;
        u32 pid;
        u32 group;
        int failure;
        int allocation;
        struct sk_buff *skb, *skb2;
};

static inline int do_one_broadcast(struct sock *sk,
                                   struct netlink_broadcast_data *p)
{
        struct netlink_opt *nlk = nlk_sk(sk);
        int val;

        if (p->exclude_sk == sk)
                goto out;

        if (nlk->pid == p->pid || !(nlk->groups & p->group))
                goto out;

        if (p->failure) {
                netlink_overrun(sk);
                goto out;
        }

        sock_hold(sk);
        if (p->skb2 == NULL) {
                if (atomic_read(&p->skb->users) != 1) {
                        p->skb2 = skb_clone(p->skb, p->allocation);
                } else {
                        p->skb2 = p->skb;
                        atomic_inc(&p->skb->users);
                }
        }
        if (p->skb2 == NULL) {
                netlink_overrun(sk);
                /* Clone failed. Notify ALL listeners. */
                p->failure = 1;
        } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
                netlink_overrun(sk);
        } else
                p->skb2 = NULL;
        sock_put(sk);

out:
        return 0;
}

void netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
                       u32 group, int allocation)
{
        struct netlink_broadcast_data info;
        struct sock *sk;

        info.exclude_sk = ssk;
        info.pid = pid;
        info.group = group;
        info.failure = 0;
        info.allocation = allocation;
        info.skb = skb;
        info.skb2 = NULL;

        /* While we sleep in clone, do not allow the socket list to change */

        netlink_lock_table();

        for (sk = nl_table[ssk->protocol].mc_list; sk; sk = sk->bind_next)
                do_one_broadcast(sk, &info);

        netlink_unlock_table();

        if (info.skb2)
                kfree_skb(info.skb2);
        kfree_skb(skb);
}
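
#if 0
/*
 * Illustrative sketch (added; hypothetical names): multicasting an event
 * skb to listeners of group 1.  netlink_broadcast() consumes the skb,
 * cloning it as needed per receiver; pid 0 means no particular user
 * socket is excluded beyond the sending socket itself.
 */
static void example_notify(struct sock *nl_sk, struct sk_buff *skb)
{
        netlink_broadcast(nl_sk, skb, 0, 1, GFP_KERNEL);
}
#endif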

struct netlink_set_err_data {
        struct sock *exclude_sk;
        u32 pid;
        u32 group;
        int code;
};

static inline int do_one_set_err(struct sock *sk,
                                 struct netlink_set_err_data *p)
{
        struct netlink_opt *nlk = nlk_sk(sk);

        if (sk == p->exclude_sk)
                goto out;

        if (nlk->pid == p->pid || !(nlk->groups & p->group))
                goto out;

        sk->err = p->code;
        sk->error_report(sk);
out:
        return 0;
}

void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
        struct netlink_set_err_data info;
        struct sock *sk;

        info.exclude_sk = ssk;
        info.pid = pid;
        info.group = group;
        info.code = code;

        read_lock(&nl_table_lock);
        for (sk = nl_table[ssk->protocol].mc_list; sk; sk = sk->bind_next)
                do_one_set_err(sk, &info);
        read_unlock(&nl_table_lock);
}

static inline void netlink_rcv_wake(struct sock *sk)
{
        if (skb_queue_len(&sk->receive_queue) == 0)
                clear_bit(0, &sk->protinfo.af_netlink->state);
        if (!test_bit(0, &sk->protinfo.af_netlink->state))
                wake_up_interruptible(&sk->protinfo.af_netlink->wait);
}

static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, int len,
                           struct scm_cookie *scm)
{
        struct sock *sk = sock->sk;
        struct sockaddr_nl *addr = msg->msg_name;
        u32 dst_pid;
        u32 dst_groups;
        struct sk_buff *skb;
        int err;

        if (msg->msg_flags & MSG_OOB)
                return -EOPNOTSUPP;

        if (msg->msg_namelen) {
                if (addr->nl_family != AF_NETLINK)
                        return -EINVAL;
                dst_pid = addr->nl_pid;
                dst_groups = addr->nl_groups;
                if (dst_groups && !netlink_capable(sock, NL_NONROOT_SEND))
                        return -EPERM;
        } else {
                dst_pid = sk->protinfo.af_netlink->dst_pid;
                dst_groups = sk->protinfo.af_netlink->dst_groups;
        }

        if (!sk->protinfo.af_netlink->pid) {
                err = netlink_autobind(sock);
                if (err)
                        goto out;
        }

        err = -EMSGSIZE;
        if ((unsigned)len > sk->sndbuf - 32)
                goto out;
        err = -ENOBUFS;
        skb = alloc_skb(len, GFP_KERNEL);
        if (skb == NULL)
                goto out;

        NETLINK_CB(skb).pid = sk->protinfo.af_netlink->pid;
        NETLINK_CB(skb).groups = sk->protinfo.af_netlink->groups;
        NETLINK_CB(skb).dst_pid = dst_pid;
        NETLINK_CB(skb).dst_groups = dst_groups;
        memcpy(NETLINK_CREDS(skb), &scm->creds, sizeof(struct ucred));

        /* Netlink is asynchronous, so we have to save the sender's current
         * capabilities here and check them when the message is delivered
         * to the corresponding kernel module. --ANK (980802)
         */
        NETLINK_CB(skb).eff_cap = current->cap_effective;

        err = -EFAULT;
        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
                kfree_skb(skb);
                goto out;
        }

        if (dst_groups) {
                atomic_inc(&skb->users);
                netlink_broadcast(sk, skb, dst_pid, dst_groups, GFP_KERNEL);
        }
        err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags & MSG_DONTWAIT);

out:
        return err;
}

static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, int len,
                           int flags, struct scm_cookie *scm)
{
        struct sock *sk = sock->sk;
        int noblock = flags & MSG_DONTWAIT;
        int copied;
        struct sk_buff *skb;
        int err;

        if (flags & MSG_OOB)
                return -EOPNOTSUPP;

        copied = 0;

        skb = skb_recv_datagram(sk, flags, noblock, &err);
        if (skb == NULL)
                goto out;

        msg->msg_namelen = 0;

        copied = skb->len;
        if (len < copied) {
                msg->msg_flags |= MSG_TRUNC;
                copied = len;
        }

        skb->h.raw = skb->data;
        err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);

        if (msg->msg_name) {
                struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
                addr->nl_family = AF_NETLINK;
                addr->nl_pad = 0;
                addr->nl_pid = NETLINK_CB(skb).pid;
                addr->nl_groups = NETLINK_CB(skb).dst_groups;
                msg->msg_namelen = sizeof(*addr);
        }

        scm->creds = *NETLINK_CREDS(skb);
        skb_free_datagram(sk, skb);

        if (sk->protinfo.af_netlink->cb
            && atomic_read(&sk->rmem_alloc) <= sk->rcvbuf / 2)
                netlink_dump(sk);

out:
        netlink_rcv_wake(sk);
        return err ? : copied;
}

void netlink_data_ready(struct sock *sk, int len)
{
        if (sk->protinfo.af_netlink->data_ready)
                sk->protinfo.af_netlink->data_ready(sk, len);
        netlink_rcv_wake(sk);
}

/*
 * We export these functions to other modules. They provide a
 * complete set of kernel non-blocking support for message
 * queueing.
 */

struct sock *
netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len))
{
        struct socket *sock;
        struct sock *sk;

        if (!nl_table)
                return NULL;

        if (unit < 0 || unit >= MAX_LINKS)
                return NULL;

        if (!(sock = sock_alloc()))
                return NULL;

        sock->type = SOCK_RAW;

        if (netlink_create(sock, unit) < 0) {
                sock_release(sock);
                return NULL;
        }
        sk = sock->sk;
        sk->data_ready = netlink_data_ready;
        if (input)
                sk->protinfo.af_netlink->data_ready = input;

        netlink_insert(sk, 0);
        return sk;
}
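
#if 0
/*
 * Illustrative sketch (added; hypothetical names, and NETLINK_USERSOCK
 * chosen only for illustration): how a kernel subsystem typically sets
 * up its netlink socket with an input callback that drains the receive
 * queue.
 */
static struct sock *example_nl_sk;

static void example_input(struct sock *sk, int len)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) {
                /* parse the nlmsghdr(s) in skb->data, maybe netlink_ack() */
                kfree_skb(skb);
        }
}

static int __init example_init(void)
{
        example_nl_sk = netlink_kernel_create(NETLINK_USERSOCK, example_input);
        return example_nl_sk ? 0 : -ENOMEM;
}
#endif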

void netlink_set_nonroot(int protocol, unsigned int flags)
{
        if ((unsigned int)protocol < MAX_LINKS)
                nl_nonroot[protocol] = flags;
}

static void netlink_destroy_callback(struct netlink_callback *cb)
{
        if (cb->skb)
                kfree_skb(cb->skb);
        kfree(cb);
}

/*
 * This looks a bit ugly; it would be better to create a kernel thread.
 */

static int netlink_dump(struct sock *sk)
{
        struct netlink_callback *cb;
        struct sk_buff *skb;
        struct nlmsghdr *nlh;
        int len;

        skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL);
        if (!skb)
                return -ENOBUFS;

        spin_lock(&sk->protinfo.af_netlink->cb_lock);

        cb = sk->protinfo.af_netlink->cb;
        if (cb == NULL) {
                spin_unlock(&sk->protinfo.af_netlink->cb_lock);
                kfree_skb(skb);
                return -EINVAL;
        }

        len = cb->dump(skb, cb);

        if (len > 0) {
                sock_hold(sk);
                spin_unlock(&sk->protinfo.af_netlink->cb_lock);
                skb_queue_tail(&sk->receive_queue, skb);
                sk->data_ready(sk, len);
                sock_put(sk);
                return 0;
        }

        nlh = __nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
                          NLMSG_DONE, sizeof(int));
        nlh->nlmsg_flags |= NLM_F_MULTI;
        memcpy(NLMSG_DATA(nlh), &len, sizeof(len));
        skb_queue_tail(&sk->receive_queue, skb);
        sk->data_ready(sk, skb->len);

        cb->done(cb);
        sk->protinfo.af_netlink->cb = NULL;
        spin_unlock(&sk->protinfo.af_netlink->cb_lock);

        netlink_destroy_callback(cb);
        sock_put(sk);
        return 0;
}

int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
                       struct nlmsghdr *nlh,
                       int (*dump)(struct sk_buff *skb, struct netlink_callback *),
                       int (*done)(struct netlink_callback *))
{
        struct netlink_callback *cb;
        struct sock *sk;

        cb = kmalloc(sizeof(*cb), GFP_KERNEL);
        if (cb == NULL)
                return -ENOBUFS;

        memset(cb, 0, sizeof(*cb));
        cb->dump = dump;
        cb->done = done;
        cb->nlh = nlh;
        atomic_inc(&skb->users);
        cb->skb = skb;

        sk = netlink_lookup(ssk->protocol, NETLINK_CB(skb).pid);
        if (sk == NULL) {
                netlink_destroy_callback(cb);
                return -ECONNREFUSED;
        }
        /* A dump is in progress... */
        spin_lock(&sk->protinfo.af_netlink->cb_lock);
        if (sk->protinfo.af_netlink->cb) {
                spin_unlock(&sk->protinfo.af_netlink->cb_lock);
                netlink_destroy_callback(cb);
                sock_put(sk);
                return -EBUSY;
        }
        sk->protinfo.af_netlink->cb = cb;
        spin_unlock(&sk->protinfo.af_netlink->cb_lock);

        netlink_dump(sk);
        return 0;
}
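
#if 0
/*
 * Illustrative sketch (added; hypothetical names): the dump()/done()
 * contract used e.g. by rtnetlink.  dump() appends messages to the skb
 * and returns a positive length while more data is pending, 0 when the
 * dump is complete; cb->args[] carries the cursor between calls.
 */
static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        long idx = cb->args[0];

        /* ... append entries from position idx, update cb->args[0] ... */
        return 0;               /* nothing more to dump */
}

static int example_done(struct netlink_callback *cb)
{
        return 0;               /* release per-dump state, if any */
}

/* From a request handler:
 *      return netlink_dump_start(nl_sk, skb, nlh, example_dump, example_done);
 */
#endif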

void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
        struct sk_buff *skb;
        struct nlmsghdr *rep;
        struct nlmsgerr *errmsg;
        int size;

        if (err == 0)
                size = NLMSG_SPACE(sizeof(struct nlmsgerr));
        else
                size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len));

        skb = alloc_skb(size, GFP_KERNEL);
        if (!skb) {
                struct sock *sk;

                sk = netlink_lookup(in_skb->sk->protocol,
                                    NETLINK_CB(in_skb).pid);
                if (sk) {
                        sk->err = ENOBUFS;
                        sk->error_report(sk);
                        sock_put(sk);
                }
                return;
        }

        rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
                          NLMSG_ERROR, sizeof(struct nlmsgerr));
        errmsg = NLMSG_DATA(rep);
        errmsg->error = err;
        memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr));
        netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
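
#if 0
/*
 * Illustrative sketch (added; hypothetical names): a receive handler
 * acknowledging each request, the way rtnetlink handles NLM_F_ACK.
 */
static void example_rcv_one(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        int err = 0;    /* result of actually processing nlh */

        if (err || (nlh->nlmsg_flags & NLM_F_ACK))
                netlink_ack(skb, nlh, err);
}
#endif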

#ifdef NL_EMULATE_DEV

static rwlock_t nl_emu_lock = RW_LOCK_UNLOCKED;

/*
 * Backward compatibility.
 */

int netlink_attach(int unit, int (*function)(int, struct sk_buff *skb))
{
        struct sock *sk = netlink_kernel_create(unit, NULL);
        if (sk == NULL)
                return -ENOBUFS;
        sk->protinfo.af_netlink->handler = function;
        write_lock_bh(&nl_emu_lock);
        netlink_kernel[unit] = sk->socket;
        write_unlock_bh(&nl_emu_lock);
        return 0;
}

void netlink_detach(int unit)
{
        struct socket *sock;

        write_lock_bh(&nl_emu_lock);
        sock = netlink_kernel[unit];
        netlink_kernel[unit] = NULL;
        write_unlock_bh(&nl_emu_lock);

        sock_release(sock);
}

int netlink_post(int unit, struct sk_buff *skb)
{
        struct socket *sock;

        read_lock(&nl_emu_lock);
        sock = netlink_kernel[unit];
        if (sock) {
                struct sock *sk = sock->sk;
                memset(skb->cb, 0, sizeof(skb->cb));
                sock_hold(sk);
                read_unlock(&nl_emu_lock);

                netlink_broadcast(sk, skb, 0, ~0, GFP_ATOMIC);

                sock_put(sk);
                return 0;
        }
        read_unlock(&nl_emu_lock);
        return -EUNATCH;
}

#endif

#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
        int link;
        int hash_idx;
};

static int netlink_read_proc(char *buffer, char **start, off_t offset,
                             int length, int *eof, void *data)
{
        off_t pos = 0;
        off_t begin = 0;
        int len = 0;
        int i, j;
        struct sock *s;

        len += sprintf(buffer, "sk       Eth Pid    Groups "
                               "Rmem     Wmem     Dump     Locks\n");

        for (i = 0; i < MAX_LINKS; i++) {
                struct nl_pid_hash *hash = &nl_table[i].hash;

                read_lock(&nl_table_lock);
                for (j = 0; j <= hash->mask; j++) {
                        for (s = hash->table[j]; s; s = s->next) {
                                len += sprintf(buffer + len,
                                               "%p %-3d %-6d %08x %-8d %-8d %p %d",
                                               s,
                                               s->protocol,
                                               s->protinfo.af_netlink->pid,
                                               s->protinfo.af_netlink->groups,
                                               atomic_read(&s->rmem_alloc),
                                               atomic_read(&s->wmem_alloc),
                                               s->protinfo.af_netlink->cb,
                                               atomic_read(&s->refcnt));

                                buffer[len++] = '\n';

                                pos = begin + len;
                                if (pos < offset) {
                                        len = 0;
                                        begin = pos;
                                }
                                if (pos > offset + length) {
                                        read_unlock(&nl_table_lock);
                                        goto done;
                                }
                        }
                }
                read_unlock(&nl_table_lock);
        }
        *eof = 1;

done:
        *start = buffer + (offset - begin);
        len -= (offset - begin);
        if (len > length)
                len = length;
        if (len < 0)
                len = 0;
        return len;
}
#endif

int netlink_register_notifier(struct notifier_block *nb)
{
        return notifier_chain_register(&netlink_chain, nb);
}

int netlink_unregister_notifier(struct notifier_block *nb)
{
        return notifier_chain_unregister(&netlink_chain, nb);
}

struct proto_ops netlink_ops = {
        family:         PF_NETLINK,

        release:        netlink_release,
        bind:           netlink_bind,
        connect:        netlink_connect,
        socketpair:     sock_no_socketpair,
        accept:         sock_no_accept,
        getname:        netlink_getname,
        poll:           datagram_poll,
        ioctl:          sock_no_ioctl,
        listen:         sock_no_listen,
        shutdown:       sock_no_shutdown,
        setsockopt:     sock_no_setsockopt,
        getsockopt:     sock_no_getsockopt,
        sendmsg:        netlink_sendmsg,
        recvmsg:        netlink_recvmsg,
        mmap:           sock_no_mmap,
        sendpage:       sock_no_sendpage,
};

struct net_proto_family netlink_family_ops = {
        PF_NETLINK,
        netlink_create
};

extern void netlink_skb_parms_too_large(void);

int __init netlink_proto_init(void)
{
        struct sk_buff *dummy_skb;
        int i;
        unsigned long max;
        unsigned int order;

        if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb))
                netlink_skb_parms_too_large();

        nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL);
        if (!nl_table) {
enomem:
                printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n");
                return -ENOMEM;
        }

        memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS);

        if (num_physpages >= (128 * 1024))
                max = num_physpages >> (21 - PAGE_SHIFT);
        else
                max = num_physpages >> (23 - PAGE_SHIFT);

        for (order = 0; (1UL << order) < max + 1; order++)
                ;
        order += PAGE_SHIFT - 1;
        max = (1UL << order) / sizeof(struct sock *);
        if (max > UINT_MAX)
                max = UINT_MAX;
        for (order = 0; (1UL << order) < max + 1; order++)
                ;
        order--;
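
        /*
         * Worked example (added note): on a 32-bit box with 256 MB of RAM
         * (num_physpages == 65536, PAGE_SHIFT == 12), max starts as
         * 65536 >> 11 == 32; the first loop finds order 6, which becomes
         * 6 + 11 == 17, so max == (1UL << 17) / 4 == 32768 pointers; the
         * second loop then yields order 15, i.e. each hash table may grow
         * to at most 2^15 buckets (max_shift below).
         */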

        for (i = 0; i < MAX_LINKS; i++) {
                struct nl_pid_hash *hash = &nl_table[i].hash;

                hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table));
                if (!hash->table) {
                        while (i-- > 0)
                                nl_pid_hash_free(nl_table[i].hash.table,
                                                 1 * sizeof(*hash->table));
                        kfree(nl_table);
                        goto enomem;
                }
                memset(hash->table, 0, 1 * sizeof(*hash->table));
                hash->max_shift = order;
                hash->shift = 0;
                hash->mask = 0;
                hash->rehash_time = jiffies;
        }

        sock_register(&netlink_family_ops);
#ifdef CONFIG_PROC_FS
        create_proc_read_entry("net/netlink", 0, 0, netlink_read_proc, NULL);
#endif
        return 0;
}

static void __exit netlink_proto_exit(void)
{
        sock_unregister(PF_NETLINK);
        remove_proc_entry("net/netlink", NULL);
        kfree(nl_table);
        nl_table = NULL;
}

#ifdef MODULE
module_init(netlink_proto_init);
#endif
module_exit(netlink_proto_exit);