/*
 * iobuf.c
 *
 * Management of the general-purpose kiobuf structures used to
 * describe abstract kernel-space I/O buffers.
 *
 */

#include <linux/iobuf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>


static kmem_cache_t *kiobuf_cachep;

void end_kio_request(struct kiobuf *kiobuf, int uptodate)
{
	if ((!uptodate) && !kiobuf->errno)
		kiobuf->errno = -EIO;

	if (atomic_dec_and_test(&kiobuf->io_count)) {
		if (kiobuf->end_io)
			kiobuf->end_io(kiobuf);
		wake_up(&kiobuf->wait_queue);
	}
}
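
/*
 * Completion sketch: end_kio_request() is normally driven from a
 * per-buffer_head I/O completion callback, which records the bh's
 * status and then drops the kiobuf's io_count.  Roughly (modelled on
 * end_buffer_io_kiobuf() in fs/buffer.c; details vary by 2.4
 * version):
 *
 *	static void end_buffer_io_kiobuf(struct buffer_head *bh,
 *					 int uptodate)
 *	{
 *		struct kiobuf *kiobuf = bh->b_private;
 *
 *		mark_buffer_uptodate(bh, uptodate);
 *		unlock_buffer(bh);
 *		end_kio_request(kiobuf, uptodate);
 *	}
 */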

static int kiobuf_init(struct kiobuf *iobuf)
{
	init_waitqueue_head(&iobuf->wait_queue);
	iobuf->array_len = 0;
	iobuf->nr_pages = 0;
	iobuf->locked = 0;
	iobuf->bh = NULL;
	iobuf->blocks = NULL;
	/* Start with no page array so error paths can kfree() it safely. */
	iobuf->maplist = NULL;
	atomic_set(&iobuf->io_count, 0);
	iobuf->end_io = NULL;
	return expand_kiobuf(iobuf, KIO_STATIC_PAGES);
}

int alloc_kiobuf_bhs(struct kiobuf *kiobuf)
{
	int i;

	kiobuf->blocks =
		kmalloc(sizeof(*kiobuf->blocks) * KIO_MAX_SECTORS, GFP_KERNEL);
	if (unlikely(!kiobuf->blocks))
		goto nomem;
	kiobuf->bh =
		kmalloc(sizeof(*kiobuf->bh) * KIO_MAX_SECTORS, GFP_KERNEL);
	if (unlikely(!kiobuf->bh))
		goto nomem;

	for (i = 0; i < KIO_MAX_SECTORS; i++) {
		kiobuf->bh[i] = kmem_cache_alloc(bh_cachep, GFP_KERNEL);
		if (unlikely(!kiobuf->bh[i]))
			goto nomem2;
	}

	return 0;

nomem2:
	while (i--) {
		kmem_cache_free(bh_cachep, kiobuf->bh[i]);
		kiobuf->bh[i] = NULL;
	}
	/*
	 * Zero the whole array: slots past the failure point may still
	 * hold uninitialised kmalloc memory, and free_kiobuf_bhs()
	 * below walks all KIO_MAX_SECTORS slots.
	 */
	memset(kiobuf->bh, 0, sizeof(*kiobuf->bh) * KIO_MAX_SECTORS);

nomem:
	free_kiobuf_bhs(kiobuf);
	return -ENOMEM;
}
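
/*
 * Note: each kiobuf carries KIO_MAX_SECTORS preallocated buffer_heads
 * and block numbers so that the actual I/O submission path does not
 * have to allocate memory.  Callers typically obtain them indirectly
 * through alloc_kiovec() below rather than calling alloc_kiobuf_bhs()
 * directly.
 */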

void free_kiobuf_bhs(struct kiobuf *kiobuf)
{
	int i;

	if (kiobuf->bh) {
		for (i = 0; i < KIO_MAX_SECTORS; i++)
			if (kiobuf->bh[i])
				kmem_cache_free(bh_cachep, kiobuf->bh[i]);
		kfree(kiobuf->bh);
		kiobuf->bh = NULL;
	}

	if (kiobuf->blocks) {
		kfree(kiobuf->blocks);
		kiobuf->blocks = NULL;
	}
}

int alloc_kiovec(int nr, struct kiobuf **bufp)
{
	int i;
	struct kiobuf *iobuf;

	for (i = 0; i < nr; i++) {
		iobuf = kmem_cache_alloc(kiobuf_cachep, GFP_KERNEL);
		if (unlikely(!iobuf))
			goto nomem;
		if (unlikely(kiobuf_init(iobuf)))
			goto nomem2;
		if (unlikely(alloc_kiobuf_bhs(iobuf)))
			goto nomem2;
		bufp[i] = iobuf;
	}

	return 0;

nomem2:
	/* kiobuf_init() may already have allocated the page array. */
	kfree(iobuf->maplist);
	kmem_cache_free(kiobuf_cachep, iobuf);
nomem:
	free_kiovec(i, bufp);
	return -ENOMEM;
}
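
/*
 * Lifecycle sketch (hedged): this mirrors how the 2.4 raw-I/O code in
 * drivers/char/raw.c drives a kiovec; exact signatures and the block
 * list setup vary by kernel version, and rw, user_addr, len, dev,
 * blocks and blocksize stand in for the caller's values:
 *
 *	struct kiobuf *iobuf;
 *	int err;
 *
 *	err = alloc_kiovec(1, &iobuf);
 *	if (err)
 *		return err;
 *	err = map_user_kiobuf(rw, iobuf, user_addr, len);
 *	if (!err) {
 *		err = brw_kiovec(rw, 1, &iobuf, dev, blocks, blocksize);
 *		unmap_kiobuf(iobuf);
 *	}
 *	free_kiovec(1, &iobuf);
 */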

void free_kiovec(int nr, struct kiobuf **bufp)
{
	int i;
	struct kiobuf *iobuf;

	for (i = 0; i < nr; i++) {
		iobuf = bufp[i];
		if (iobuf->locked)
			unlock_kiovec(1, &iobuf);
		kfree(iobuf->maplist);
		free_kiobuf_bhs(iobuf);
		kmem_cache_free(kiobuf_cachep, bufp[i]);
	}
}

int expand_kiobuf(struct kiobuf *iobuf, int wanted)
{
	struct page **maplist;

	if (iobuf->array_len >= wanted)
		return 0;

	maplist = kmalloc(wanted * sizeof(struct page *), GFP_KERNEL);
	if (unlikely(!maplist))
		return -ENOMEM;

	/* Did it grow while we waited? */
	if (unlikely(iobuf->array_len >= wanted)) {
		kfree(maplist);
		return 0;
	}

	if (iobuf->array_len) {
		memcpy(maplist, iobuf->maplist,
		       iobuf->array_len * sizeof(*maplist));
		kfree(iobuf->maplist);
	}

	iobuf->maplist = maplist;
	iobuf->array_len = wanted;
	return 0;
}
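
/*
 * Usage sketch: mapping code grows the page array up front when a
 * request covers more pages than array_len currently holds, e.g.:
 *
 *	err = expand_kiobuf(iobuf, nr_pages);
 *	if (err)
 *		return err;	returns -ENOMEM on failure
 */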

void kiobuf_wait_for_io(struct kiobuf *kiobuf)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	if (atomic_read(&kiobuf->io_count) == 0)
		return;

	add_wait_queue(&kiobuf->wait_queue, &wait);
repeat:
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	if (atomic_read(&kiobuf->io_count) != 0) {
		run_task_queue(&tq_disk);
		schedule();
		if (atomic_read(&kiobuf->io_count) != 0)
			goto repeat;
	}
	tsk->state = TASK_RUNNING;
	remove_wait_queue(&kiobuf->wait_queue, &wait);
}
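
/*
 * Submission/wait pattern (sketch, modelled on brw_kiovec()): the
 * submitter takes one io_count reference per in-flight buffer_head
 * before submit_bh(), then blocks here until end_kio_request() has
 * brought the count back to zero:
 *
 *	atomic_inc(&iobuf->io_count);
 *	submit_bh(rw, bh);
 *	...
 *	kiobuf_wait_for_io(iobuf);
 *	if (iobuf->errno)
 *		err = iobuf->errno;
 */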

void __init iobuf_cache_init(void)
{
	kiobuf_cachep = kmem_cache_create("kiobuf", sizeof(struct kiobuf),
					  0, SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!kiobuf_cachep)
		panic("Cannot create kiobuf SLAB cache");
}