/*
 * linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <asm/io.h>

struct resource ioport_resource = { "PCI IO", 0x0000, IO_SPACE_LIMIT, IORESOURCE_IO };
struct resource iomem_resource = { "PCI mem", 0x00000000, 0xffffffff, IORESOURCE_MEM };

static rwlock_t resource_lock = RW_LOCK_UNLOCKED;

enum { MAX_IORES_LEVEL = 5 };

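/*
 * Depth-first walk over the resource tree: descend into children first,
 * then move on to the next sibling, climbing back up through the parents
 * when a subtree is exhausted.  Used as the seq_file iterator for the
 * /proc output below.
 */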
static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;
	(*pos)++;
	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

static void *r_start(struct seq_file *m, loff_t *pos)
{
	struct resource *p = m->private;
	loff_t l = 0;
	read_lock(&resource_lock);
	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
		;
	return p;
}

static void r_stop(struct seq_file *m, void *v)
{
	read_unlock(&resource_lock);
}

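/*
 * Print one resource as "start-end : name", indented two spaces per
 * nesting level below the root.  Addresses are shown with 4 hex digits
 * when the whole root fits below 0x10000 (I/O ports), 8 otherwise.
 */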
static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r = v, *p;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;
	seq_printf(m, "%*s%0*lx-%0*lx : %s\n",
			depth * 2, "",
			width, r->start,
			width, r->end,
			r->name ? r->name : "<BAD>");
	return 0;
}

static struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &ioport_resource;
	}
	return res;
}

static int iomem_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &iomem_resource;
	}
	return res;
}

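/*
 * File operations backing the /proc/ioports and /proc/iomem entries.
 */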
struct file_operations proc_ioports_operations = {
	.open		= ioports_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

struct file_operations proc_iomem_operations = {
	.open		= iomem_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	unsigned long start = new->start;
	unsigned long end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old)
{
	struct resource *tmp, **p;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			*p = tmp->sibling;
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict ? -EBUSY : 0;
}

int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old);
	write_unlock(&resource_lock);
	return retval;
}
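
/*
 * Typical use (a sketch only, not lifted from an in-tree driver): the
 * caller owns a struct resource, fills in the range, and inserts it
 * under the appropriate root, releasing it again on teardown.  The
 * "foo" name and the addresses below are made up for illustration.
 *
 *	static struct resource foo_res = {
 *		.name	= "foo",
 *		.start	= 0xfebf0000,
 *		.end	= 0xfebf0fff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &foo_res))
 *		return -EBUSY;	/- range overlaps an existing entry -/
 *	...
 *	release_resource(&foo_res);
 */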

int check_resource(struct resource *root, unsigned long start, unsigned long len)
{
	struct resource *conflict, tmp;

	tmp.start = start;
	tmp.end = start + len - 1;
	write_lock(&resource_lock);
	conflict = __request_resource(root, &tmp);
	if (!conflict)
		__release_resource(&tmp);
	write_unlock(&resource_lock);
	return conflict ? -EBUSY : 0;
}

/*
 * Find empty slot in the resource tree given range and alignment.
 */
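/*
 * The children of @root are kept sorted by address (__request_resource
 * inserts in order), so a single pass over the gaps between successive
 * children is enough to find a free slot.
 */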
static int find_resource(struct resource *root, struct resource *new,
			 unsigned long size,
			 unsigned long min, unsigned long max,
			 unsigned long align,
			 void (*alignf)(void *, struct resource *,
					unsigned long, unsigned long),
			 void *alignf_data)
{
	struct resource *this = root->child;

	new->start = root->start;
	for (;;) {
		if (this)
			new->end = this->start;
		else
			new->end = root->end;
		if (new->start < min)
			new->start = min;
		if (new->end > max)
			new->end = max;
		new->start = (new->start + align - 1) & ~(align - 1);
		if (alignf)
			alignf(alignf_data, new, size, align);
		if (new->start < new->end && new->end - new->start + 1 >= size) {
			new->end = new->start + size - 1;
			return 0;
		}
		if (!this)
			break;
		new->start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/*
 * Allocate empty slot in the resource tree given range and alignment.
 */
int allocate_resource(struct resource *root, struct resource *new,
		      unsigned long size,
		      unsigned long min, unsigned long max,
		      unsigned long align,
		      void (*alignf)(void *, struct resource *,
				     unsigned long, unsigned long),
		      void *alignf_data)
{
	int err;

	write_lock(&resource_lock);
	err = find_resource(root, new, size, min, max, align, alignf, alignf_data);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}
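
/*
 * Sketch of an alignf callback (hypothetical; loosely modelled on how
 * bus code nudges candidate ranges).  find_resource() has already
 * applied the power-of-two alignment; the callback may bump res->start
 * further to skip addresses the caller cannot use.  All names, ranges
 * and the address check below are illustrative only.
 *
 *	static void foo_align(void *data, struct resource *res,
 *			      unsigned long size, unsigned long align)
 *	{
 *		if (res->start < 0x4000)
 *			res->start = 0x4000;
 *	}
 *
 *	err = allocate_resource(&ioport_resource, &foo_res, 0x20,
 *				0x1000, 0xffff, 0x20, foo_align, NULL);
 */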

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * Request-region creates a new busy region.
 *
 * Check-region returns non-zero if the area is already busy.
 *
 * Release-region releases a matching busy region.
 */
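/*
 * Driver-side sketch: the request_region()/release_region() helpers in
 * <linux/ioport.h> wrap the __-prefixed functions below with
 * &ioport_resource as the parent.  The port range and name are made up
 * for illustration.
 *
 *	if (!request_region(0x300, 8, "foo"))
 *		return -EBUSY;
 *	...
 *	release_region(0x300, 8);
 */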
struct resource * __request_region(struct resource *parent, unsigned long start, unsigned long n, const char *name)
{
	struct resource *res = kmalloc(sizeof(*res), GFP_KERNEL);

	if (res) {
		memset(res, 0, sizeof(*res));
		res->name = name;
		res->start = start;
		res->end = start + n - 1;
		res->flags = IORESOURCE_BUSY;

		write_lock(&resource_lock);

		for (;;) {
			struct resource *conflict;

			conflict = __request_resource(parent, res);
			if (!conflict)
				break;
			if (conflict != parent) {
				parent = conflict;
				if (!(conflict->flags & IORESOURCE_BUSY))
					continue;
			}

			/* Uhhuh, that didn't work out.. */
			kfree(res);
			res = NULL;
			break;
		}
		write_unlock(&resource_lock);
	}
	return res;
}

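/*
 * Note that a successful check only means the region was free at the
 * time of the call: the lock is dropped before returning, so another
 * caller may claim the range before this caller gets to it.  New code
 * should simply request the region and handle failure.
 */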
int __check_region(struct resource *parent, unsigned long start, unsigned long n)
{
	struct resource *res;

	res = __request_region(parent, start, n, "check-region");
	if (!res)
		return -EBUSY;

	release_resource(res);
	kfree(res);
	return 0;
}

void __release_region(struct resource *parent, unsigned long start, unsigned long n)
{
	struct resource **p;
	unsigned long end;

	p = &parent->child;
	end = start + n - 1;

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			kfree(res);
			return;
		}
		p = &res->sibling;
	}
	printk("Trying to free nonexistent resource <%08lx-%08lx>\n", start, end);
}

/*
 * Called from init/main.c to reserve IO ports.
 */
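/*
 * The argument is a comma-separated list of start,extent pairs: for
 * example, booting with "reserve=0x300,32" marks ports 0x300-0x31f
 * busy so no driver will claim them.  A start value at or above
 * 0x10000 is reserved in the iomem tree instead.
 */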
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved = 0;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		int io_start, io_num;
		int x = reserved;

		if (get_option (&str, &io_start) != 2)
			break;
		if (get_option (&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->child = NULL;
			if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}

__setup("reserve=", reserve_setup);