/*
 *  linux/fs/block_dev.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
6 
7 #include <linux/config.h>
8 #include <linux/init.h>
9 #include <linux/slab.h>
10 
11 #define HASH_BITS	6
12 #define HASH_SIZE	(1UL << HASH_BITS)
13 #define HASH_MASK	(HASH_SIZE-1)
14 static struct list_head cdev_hashtable[HASH_SIZE];
15 static spinlock_t cdev_lock = SPIN_LOCK_UNLOCKED;
16 static kmem_cache_t * cdev_cachep;
17 
18 #define alloc_cdev() \
19 	 ((struct char_device *) kmem_cache_alloc(cdev_cachep, SLAB_KERNEL))
20 #define destroy_cdev(cdev) kmem_cache_free(cdev_cachep, (cdev))
21 
init_once(void * foo,kmem_cache_t * cachep,unsigned long flags)22 static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
23 {
24 	struct char_device * cdev = (struct char_device *) foo;
25 
26 	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
27 	    SLAB_CTOR_CONSTRUCTOR)
28 	{
29 		memset(cdev, 0, sizeof(*cdev));
30 		sema_init(&cdev->sem, 1);
31 	}
32 }
33 
cdev_cache_init(void)34 void __init cdev_cache_init(void)
35 {
36 	int i;
37 	struct list_head *head = cdev_hashtable;
38 
39 	i = HASH_SIZE;
40 	do {
41 		INIT_LIST_HEAD(head);
42 		head++;
43 		i--;
44 	} while (i);
45 
46 	cdev_cachep = kmem_cache_create("cdev_cache",
47 					 sizeof(struct char_device),
48 					 0, SLAB_HWCACHE_ALIGN, init_once,
49 					 NULL);
50 	if (!cdev_cachep)
51 		panic("Cannot create cdev_cache SLAB cache");
52 }
53 
54 /*
55  * Most likely _very_ bad one - but then it's hardly critical for small
56  * /dev and can be fixed when somebody will need really large one.
57  */
hash(dev_t dev)58 static inline unsigned long hash(dev_t dev)
59 {
60 	unsigned long tmp = dev;
61 	tmp = tmp + (tmp >> HASH_BITS) + (tmp >> HASH_BITS*2);
62 	return tmp & HASH_MASK;
63 }
64 
cdfind(dev_t dev,struct list_head * head)65 static struct char_device *cdfind(dev_t dev, struct list_head *head)
66 {
67 	struct list_head *p;
68 	struct char_device *cdev;
69 	for (p=head->next; p!=head; p=p->next) {
70 		cdev = list_entry(p, struct char_device, hash);
71 		if (cdev->dev != dev)
72 			continue;
73 		atomic_inc(&cdev->count);
74 		return cdev;
75 	}
76 	return NULL;
77 }
78 
cdget(dev_t dev)79 struct char_device *cdget(dev_t dev)
80 {
81 	struct list_head * head = cdev_hashtable + hash(dev);
82 	struct char_device *cdev, *new_cdev;
83 	spin_lock(&cdev_lock);
84 	cdev = cdfind(dev, head);
85 	spin_unlock(&cdev_lock);
86 	if (cdev)
87 		return cdev;
88 	new_cdev = alloc_cdev();
89 	if (!new_cdev)
90 		return NULL;
91 	atomic_set(&new_cdev->count,1);
92 	new_cdev->dev = dev;
93 	spin_lock(&cdev_lock);
94 	cdev = cdfind(dev, head);
95 	if (!cdev) {
96 		list_add(&new_cdev->hash, head);
97 		spin_unlock(&cdev_lock);
98 		return new_cdev;
99 	}
100 	spin_unlock(&cdev_lock);
101 	destroy_cdev(new_cdev);
102 	return cdev;
103 }
104 
cdput(struct char_device * cdev)105 void cdput(struct char_device *cdev)
106 {
107 	if (atomic_dec_and_lock(&cdev->count, &cdev_lock)) {
108 		list_del(&cdev->hash);
109 		spin_unlock(&cdev_lock);
110 		destroy_cdev(cdev);
111 	}
112 }
113 
114