/*
 *	linux/kernel/resource.c
 *
 * Copyright (C) 1999	Linus Torvalds
 * Copyright (C) 1999	Martin Mares <mj@ucw.cz>
 *
 * Arbitrary resource management.
 */

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/pfn.h>
#include <asm/io.h>


struct resource ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
EXPORT_SYMBOL(ioport_resource);

struct resource iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
EXPORT_SYMBOL(iomem_resource);

/* constraints to be met while allocating resources */
struct resource_constraint {
	resource_size_t min, max, align;
	resource_size_t (*alignf)(void *, const struct resource *,
			resource_size_t, resource_size_t);
	void *alignf_data;
};

static DEFINE_RWLOCK(resource_lock);

static void *r_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct resource *p = v;
	(*pos)++;
	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

#ifdef CONFIG_PROC_FS

enum { MAX_IORES_LEVEL = 5 };

static void *r_start(struct seq_file *m, loff_t *pos)
	__acquires(resource_lock)
{
	struct resource *p = m->private;
	loff_t l = 0;
	read_lock(&resource_lock);
	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
		;
	return p;
}

static void r_stop(struct seq_file *m, void *v)
	__releases(resource_lock)
{
	read_unlock(&resource_lock);
}

static int r_show(struct seq_file *m, void *v)
{
	struct resource *root = m->private;
	struct resource *r = v, *p;
	int width = root->end < 0x10000 ? 4 : 8;
	int depth;

	for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
		if (p->parent == root)
			break;
	seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
			depth * 2, "",
			width, (unsigned long long) r->start,
			width, (unsigned long long) r->end,
			r->name ? r->name : "<BAD>");
	return 0;
}

static const struct seq_operations resource_op = {
	.start	= r_start,
	.next	= r_next,
	.stop	= r_stop,
	.show	= r_show,
};

static int ioports_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &ioport_resource;
	}
	return res;
}

static int iomem_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &resource_op);
	if (!res) {
		struct seq_file *m = file->private_data;
		m->private = &iomem_resource;
	}
	return res;
}

static const struct file_operations proc_ioports_operations = {
	.open		= ioports_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations proc_iomem_operations = {
	.open		= iomem_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init ioresources_init(void)
{
	proc_create("ioports", 0, NULL, &proc_ioports_operations);
	proc_create("iomem", 0, NULL, &proc_iomem_operations);
	return 0;
}
__initcall(ioresources_init);

#endif /* CONFIG_PROC_FS */
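
/*
 * The two proc files created above render the resource trees one line per
 * node, indented two spaces per level of nesting (see r_show()).  A
 * fragment of /proc/iomem might look like this (addresses are
 * machine-specific and purely illustrative):
 *
 *	00010000-0009fbff : System RAM
 *	000a0000-000bffff : PCI Bus 0000:00
 *	  000a0000-000bffff : Video RAM area
 */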

/* Return the conflict entry if you can't request it */
static struct resource * __request_resource(struct resource *root, struct resource *new)
{
	resource_size_t start = new->start;
	resource_size_t end = new->end;
	struct resource *tmp, **p;

	if (end < start)
		return root;
	if (start < root->start)
		return root;
	if (end > root->end)
		return root;
	p = &root->child;
	for (;;) {
		tmp = *p;
		if (!tmp || tmp->start > end) {
			new->sibling = tmp;
			*p = new;
			new->parent = root;
			return NULL;
		}
		p = &tmp->sibling;
		if (tmp->end < start)
			continue;
		return tmp;
	}
}

static int __release_resource(struct resource *old)
{
	struct resource *tmp, **p;

	p = &old->parent->child;
	for (;;) {
		tmp = *p;
		if (!tmp)
			break;
		if (tmp == old) {
			*p = tmp->sibling;
			old->parent = NULL;
			return 0;
		}
		p = &tmp->sibling;
	}
	return -EINVAL;
}

static void __release_child_resources(struct resource *r)
{
	struct resource *tmp, *p;
	resource_size_t size;

	p = r->child;
	r->child = NULL;
	while (p) {
		tmp = p;
		p = p->sibling;

		tmp->parent = NULL;
		tmp->sibling = NULL;
		__release_child_resources(tmp);

		printk(KERN_DEBUG "release child resource %pR\n", tmp);
		/* need to restore size, and keep flags */
		size = resource_size(tmp);
		tmp->start = 0;
		tmp->end = size - 1;
	}
}

void release_child_resources(struct resource *r)
{
	write_lock(&resource_lock);
	__release_child_resources(r);
	write_unlock(&resource_lock);
}

/**
 * request_resource_conflict - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns NULL for success, the conflicting resource on error.
 */
struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __request_resource(root, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * request_resource - request and reserve an I/O or memory resource
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 *
 * Returns 0 for success, negative error code on error.
 */
int request_resource(struct resource *root, struct resource *new)
{
	struct resource *conflict;

	conflict = request_resource_conflict(root, new);
	return conflict ? -EBUSY : 0;
}

EXPORT_SYMBOL(request_resource);
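
/*
 * Minimal usage sketch (illustrative only; the device name and addresses
 * are made up): a platform driver claiming a fixed MMIO window directly
 * under iomem_resource might do
 *
 *	static struct resource mydev_res = {
 *		.name	= "mydev",
 *		.start	= 0xfed40000,
 *		.end	= 0xfed40fff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (request_resource(&iomem_resource, &mydev_res))
 *		return -EBUSY;
 *
 * and later undo the reservation with release_resource(&mydev_res).
 */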

/**
 * release_resource - release a previously reserved resource
 * @old: resource pointer
 */
int release_resource(struct resource *old)
{
	int retval;

	write_lock(&resource_lock);
	retval = __release_resource(old);
	write_unlock(&resource_lock);
	return retval;
}

EXPORT_SYMBOL(release_resource);

#if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
/*
 * Finds the lowest memory resource that exists within [res->start, res->end).
 * The caller must specify res->start, res->end, res->flags and "name".
 * If found, returns 0 and res is overwritten; if not found, returns -1.
 */
static int find_next_system_ram(struct resource *res, char *name)
{
	resource_size_t start, end;
	struct resource *p;

	BUG_ON(!res);

	start = res->start;
	end = res->end;
	BUG_ON(start >= end);

	read_lock(&resource_lock);
	for (p = iomem_resource.child; p ; p = p->sibling) {
		/* system ram is just marked as IORESOURCE_MEM */
		if (p->flags != res->flags)
			continue;
		if (name && strcmp(p->name, name))
			continue;
		if (p->start > end) {
			p = NULL;
			break;
		}
		if ((p->end >= start) && (p->start < end))
			break;
	}
	read_unlock(&resource_lock);
	if (!p)
		return -1;
	/* copy data */
	if (res->start < p->start)
		res->start = p->start;
	if (res->end > p->end)
		res->end = p->end;
	return 0;
}

/*
 * This function calls the @func callback against all memory ranges of
 * "System RAM" which are marked as IORESOURCE_MEM and IORESOURCE_BUSY.
 * Now, this function is only for "System RAM".
 */
int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct resource res;
	unsigned long pfn, end_pfn;
	u64 orig_end;
	int ret = -1;

	res.start = (u64) start_pfn << PAGE_SHIFT;
	res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
	res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	orig_end = res.end;
	while ((res.start < res.end) &&
		(find_next_system_ram(&res, "System RAM") >= 0)) {
		pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
		end_pfn = (res.end + 1) >> PAGE_SHIFT;
		if (end_pfn > pfn)
			ret = (*func)(pfn, end_pfn - pfn, arg);
		if (ret)
			break;
		res.start = res.end + 1;
		res.end = orig_end;
	}
	return ret;
}

#endif
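
/*
 * Illustrative sketch (the callback and counter are hypothetical): a
 * caller counting how many pages of "System RAM" fall inside a pfn range
 * could pass a callback like this to walk_system_ram_range():
 *
 *	static int count_ram_pages(unsigned long pfn, unsigned long nr_pages,
 *				   void *arg)
 *	{
 *		unsigned long *count = arg;
 *
 *		*count += nr_pages;
 *		return 0;	// keep walking; a non-zero return stops the walk
 *	}
 *
 *	unsigned long count = 0;
 *	walk_system_ram_range(start_pfn, nr_pages, &count, count_ram_pages);
 */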

static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	return 1;
}
/*
 * This generic page_is_ram() returns true if specified address is
 * registered as "System RAM" in iomem_resource list.
 */
int __weak page_is_ram(unsigned long pfn)
{
	return walk_system_ram_range(pfn, 1, NULL, __is_ram) == 1;
}

void __weak arch_remove_reservations(struct resource *avail)
{
}

static resource_size_t simple_align_resource(void *data,
					     const struct resource *avail,
					     resource_size_t size,
					     resource_size_t align)
{
	return avail->start;
}

static void resource_clip(struct resource *res, resource_size_t min,
			  resource_size_t max)
{
	if (res->start < min)
		res->start = min;
	if (res->end > max)
		res->end = max;
}

static bool resource_contains(struct resource *res1, struct resource *res2)
{
	return res1->start <= res2->start && res1->end >= res2->end;
}

/*
 * Find empty slot in the resource tree with the given range and
 * alignment constraints
 */
static int __find_resource(struct resource *root, struct resource *old,
			 struct resource *new,
			 resource_size_t size,
			 struct resource_constraint *constraint)
{
	struct resource *this = root->child;
	struct resource tmp = *new, avail, alloc;

	tmp.flags = new->flags;
	tmp.start = root->start;
	/*
	 * Skip past an allocated resource that starts at 0, since the assignment
	 * of this->start - 1 to tmp->end below would cause an underflow.
	 */
	if (this && this->start == root->start) {
		tmp.start = (this == old) ? old->start : this->end + 1;
		this = this->sibling;
	}
	for(;;) {
		if (this)
			tmp.end = (this == old) ? this->end : this->start - 1;
		else
			tmp.end = root->end;

		if (tmp.end < tmp.start)
			goto next;

		resource_clip(&tmp, constraint->min, constraint->max);
		arch_remove_reservations(&tmp);

		/* Check for overflow after ALIGN() */
		avail = *new;
		avail.start = ALIGN(tmp.start, constraint->align);
		avail.end = tmp.end;
		if (avail.start >= tmp.start) {
			alloc.start = constraint->alignf(constraint->alignf_data, &avail,
					size, constraint->align);
			alloc.end = alloc.start + size - 1;
			if (resource_contains(&avail, &alloc)) {
				new->start = alloc.start;
				new->end = alloc.end;
				return 0;
			}
		}

next:		if (!this || this->end == root->end)
			break;

		if (this != old)
			tmp.start = this->end + 1;
		this = this->sibling;
	}
	return -EBUSY;
}

/*
 * Find empty slot in the resource tree given range and alignment.
 */
static int find_resource(struct resource *root, struct resource *new,
			resource_size_t size,
			struct resource_constraint *constraint)
{
	return __find_resource(root, NULL, new, size, constraint);
}

/**
 * reallocate_resource - allocate a slot in the resource tree given range & alignment.
 *	The resource will be relocated if the new size cannot be
 *	accommodated at the current location.
 *
 * @root: root resource descriptor
 * @old: resource descriptor desired by caller
 * @newsize: new size of the resource descriptor
 * @constraint: the size and alignment constraints to be met.
 */
int reallocate_resource(struct resource *root, struct resource *old,
			resource_size_t newsize,
			struct resource_constraint *constraint)
{
	int err = 0;
	struct resource new = *old;
	struct resource *conflict;

	write_lock(&resource_lock);

	if ((err = __find_resource(root, old, &new, newsize, constraint)))
		goto out;

	if (resource_contains(&new, old)) {
		old->start = new.start;
		old->end = new.end;
		goto out;
	}

	if (old->child) {
		err = -EBUSY;
		goto out;
	}

	if (resource_contains(old, &new)) {
		old->start = new.start;
		old->end = new.end;
	} else {
		__release_resource(old);
		*old = new;
		conflict = __request_resource(root, old);
		BUG_ON(conflict);
	}
out:
	write_unlock(&resource_lock);
	return err;
}


/**
 * allocate_resource - allocate empty slot in the resource tree given range & alignment.
 *	The resource will be reallocated with a new size if it was already
 *	allocated.
 * @root: root resource descriptor
 * @new: resource descriptor desired by caller
 * @size: requested resource region size
 * @min: minimum boundary to allocate
 * @max: maximum boundary to allocate
 * @align: alignment requested, in bytes
 * @alignf: alignment function, optional, called if not NULL
 * @alignf_data: arbitrary data to pass to the @alignf function
 */
int allocate_resource(struct resource *root, struct resource *new,
		      resource_size_t size, resource_size_t min,
		      resource_size_t max, resource_size_t align,
		      resource_size_t (*alignf)(void *,
						const struct resource *,
						resource_size_t,
						resource_size_t),
		      void *alignf_data)
{
	int err;
	struct resource_constraint constraint;

	if (!alignf)
		alignf = simple_align_resource;

	constraint.min = min;
	constraint.max = max;
	constraint.align = align;
	constraint.alignf = alignf;
	constraint.alignf_data = alignf_data;

	if (new->parent) {
		/* resource is already allocated, try reallocating with
		   the new constraints */
		return reallocate_resource(root, new, size, &constraint);
	}

	write_lock(&resource_lock);
	err = find_resource(root, new, size, &constraint);
	if (err >= 0 && __request_resource(root, new))
		err = -EBUSY;
	write_unlock(&resource_lock);
	return err;
}

EXPORT_SYMBOL(allocate_resource);
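
/*
 * Example (illustrative; the root, size and limits are made up): carve a
 * 4 KiB, page-aligned region out of iomem_resource anywhere below 4 GiB.
 * On success @new describes the chosen range and is already inserted in
 * the tree:
 *
 *	static struct resource my_window = {
 *		.name	= "my-window",
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (allocate_resource(&iomem_resource, &my_window, 0x1000,
 *			      0, 0xffffffff, 0x1000, NULL, NULL))
 *		return -EBUSY;
 */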

/**
 * lookup_resource - find an existing resource by a resource start address
 * @root: root resource descriptor
 * @start: resource start address
 *
 * Returns a pointer to the resource if found, NULL otherwise
 */
struct resource *lookup_resource(struct resource *root, resource_size_t start)
{
	struct resource *res;

	read_lock(&resource_lock);
	for (res = root->child; res; res = res->sibling) {
		if (res->start == start)
			break;
	}
	read_unlock(&resource_lock);

	return res;
}
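
/*
 * For instance (hypothetical address), a caller that knows where a region
 * starts can retrieve its descriptor with:
 *
 *	struct resource *res = lookup_resource(&ioport_resource, 0x60);
 *
 * Note that only direct children of @root are examined, not the whole
 * subtree.
 */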

/*
 * Insert a resource into the resource tree. If successful, return NULL,
 * otherwise return the conflicting resource (compare to __request_resource())
 */
static struct resource * __insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *first, *next;

	for (;; parent = first) {
		first = __request_resource(parent, new);
		if (!first)
			return first;

		if (first == parent)
			return first;
		if (WARN_ON(first == new))	/* duplicated insertion */
			return first;

		if ((first->start > new->start) || (first->end < new->end))
			break;
		if ((first->start == new->start) && (first->end == new->end))
			break;
	}

	for (next = first; ; next = next->sibling) {
		/* Partial overlap? Bad, and unfixable */
		if (next->start < new->start || next->end > new->end)
			return next;
		if (!next->sibling)
			break;
		if (next->sibling->start > new->end)
			break;
	}

	new->parent = parent;
	new->sibling = next->sibling;
	new->child = first;

	next->sibling = NULL;
	for (next = first; next; next = next->sibling)
		next->parent = new;

	if (parent->child == first) {
		parent->child = new;
	} else {
		next = parent->child;
		while (next->sibling != first)
			next = next->sibling;
		next->sibling = new;
	}
	return NULL;
}

/**
 * insert_resource_conflict - Inserts resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns NULL on success, the conflicting resource if the resource
 * can't be inserted.
 *
 * This function is equivalent to request_resource_conflict when no conflict
 * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 */
struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	write_lock(&resource_lock);
	conflict = __insert_resource(parent, new);
	write_unlock(&resource_lock);
	return conflict;
}

/**
 * insert_resource - Inserts a resource in the resource tree
 * @parent: parent of the new resource
 * @new: new resource to insert
 *
 * Returns 0 on success, -EBUSY if the resource can't be inserted.
 */
int insert_resource(struct resource *parent, struct resource *new)
{
	struct resource *conflict;

	conflict = insert_resource_conflict(parent, new);
	return conflict ? -EBUSY : 0;
}
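
/*
 * Illustrative use (names and addresses are made up): code that learns
 * about a host-bridge window only after some resources inside it are
 * already in the tree can use insert_resource().  Existing resources that
 * fit entirely inside the window are re-parented under it rather than
 * treated as conflicts:
 *
 *	static struct resource bridge_window = {
 *		.name	= "PCI Bus 0000:00",
 *		.start	= 0xc0000000,
 *		.end	= 0xdfffffff,
 *		.flags	= IORESOURCE_MEM,
 *	};
 *
 *	if (insert_resource(&iomem_resource, &bridge_window))
 *		return -EBUSY;	// a partial overlap is unfixable
 */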

/**
 * insert_resource_expand_to_fit - Insert a resource into the resource tree
 * @root: root resource descriptor
 * @new: new resource to insert
 *
 * Insert a resource into the resource tree, possibly expanding it in order
 * to make it encompass any conflicting resources.
 */
void insert_resource_expand_to_fit(struct resource *root, struct resource *new)
{
	if (new->parent)
		return;

	write_lock(&resource_lock);
	for (;;) {
		struct resource *conflict;

		conflict = __insert_resource(root, new);
		if (!conflict)
			break;
		if (conflict == root)
			break;

		/* Ok, expand resource to cover the conflict, then try again .. */
		if (conflict->start < new->start)
			new->start = conflict->start;
		if (conflict->end > new->end)
			new->end = conflict->end;

		printk("Expanded resource %s due to conflict with %s\n", new->name, conflict->name);
	}
	write_unlock(&resource_lock);
}

/**
 * adjust_resource - modify a resource's start and size
 * @res: resource to modify
 * @start: new start value
 * @size: new size
 *
 * Given an existing resource, change its start and size to match the
 * arguments.  Returns 0 on success, -EBUSY if it can't fit.
 * Existing children of the resource are assumed to be immutable.
 */
int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size)
{
	struct resource *tmp, *parent = res->parent;
	resource_size_t end = start + size - 1;
	int result = -EBUSY;

	write_lock(&resource_lock);

	if ((start < parent->start) || (end > parent->end))
		goto out;

	for (tmp = res->child; tmp; tmp = tmp->sibling) {
		if ((tmp->start < start) || (tmp->end > end))
			goto out;
	}

	if (res->sibling && (res->sibling->start <= end))
		goto out;

	tmp = parent->child;
	if (tmp != res) {
		while (tmp->sibling != res)
			tmp = tmp->sibling;
		if (start <= tmp->end)
			goto out;
	}

	res->start = start;
	res->end = end;
	result = 0;

 out:
	write_unlock(&resource_lock);
	return result;
}
EXPORT_SYMBOL(adjust_resource);
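
/*
 * Example (hypothetical resource): grow an already-requested 4 KiB region
 * to 8 KiB without moving it.  The call fails with -EBUSY if the new
 * range would leave the parent, collide with a sibling, or no longer
 * cover an existing child:
 *
 *	err = adjust_resource(res, res->start, 0x2000);
 */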

static void __init __reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	struct resource *parent = root;
	struct resource *conflict;
	struct resource *res = kzalloc(sizeof(*res), GFP_ATOMIC);
	struct resource *next_res = NULL;

	if (!res)
		return;

	res->name = name;
	res->start = start;
	res->end = end;
	res->flags = IORESOURCE_BUSY;

	while (1) {

		conflict = __request_resource(parent, res);
		if (!conflict) {
			if (!next_res)
				break;
			res = next_res;
			next_res = NULL;
			continue;
		}

		/* conflict covered whole area */
		if (conflict->start <= res->start &&
				conflict->end >= res->end) {
			kfree(res);
			WARN_ON(next_res);
			break;
		}

		/* failed, split and try again */
		if (conflict->start > res->start) {
			end = res->end;
			res->end = conflict->start - 1;
			if (conflict->end < end) {
				next_res = kzalloc(sizeof(*next_res),
						GFP_ATOMIC);
				if (!next_res) {
					kfree(res);
					break;
				}
				next_res->name = name;
				next_res->start = conflict->end + 1;
				next_res->end = end;
				next_res->flags = IORESOURCE_BUSY;
			}
		} else {
			res->start = conflict->end + 1;
		}
	}

}

void __init reserve_region_with_split(struct resource *root,
		resource_size_t start, resource_size_t end,
		const char *name)
{
	write_lock(&resource_lock);
	__reserve_region_with_split(root, start, end, name);
	write_unlock(&resource_lock);
}

/**
 * resource_alignment - calculate resource's alignment
 * @res: resource pointer
 *
 * Returns alignment on success, 0 (invalid alignment) on failure.
 */
resource_size_t resource_alignment(struct resource *res)
{
	switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) {
	case IORESOURCE_SIZEALIGN:
		return resource_size(res);
	case IORESOURCE_STARTALIGN:
		return res->start;
	default:
		return 0;
	}
}

/*
 * This is compatibility stuff for IO resources.
 *
 * Note how this, unlike the above, knows about
 * the IO flag meanings (busy etc).
 *
 * request_region creates a new busy region.
 *
 * check_region returns non-zero if the area is already busy.
 *
 * release_region releases a matching busy region.
 */

static DECLARE_WAIT_QUEUE_HEAD(muxed_resource_wait);

/**
 * __request_region - create a new busy resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 * @name: reserving caller's ID string
 * @flags: IO resource flags
 */
struct resource * __request_region(struct resource *parent,
				   resource_size_t start, resource_size_t n,
				   const char *name, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);

	if (!res)
		return NULL;

	res->name = name;
	res->start = start;
	res->end = start + n - 1;
	res->flags = IORESOURCE_BUSY;
	res->flags |= flags;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *conflict;

		conflict = __request_resource(parent, res);
		if (!conflict)
			break;
		if (conflict != parent) {
			parent = conflict;
			if (!(conflict->flags & IORESOURCE_BUSY))
				continue;
		}
		if (conflict->flags & flags & IORESOURCE_MUXED) {
			add_wait_queue(&muxed_resource_wait, &wait);
			write_unlock(&resource_lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule();
			remove_wait_queue(&muxed_resource_wait, &wait);
			write_lock(&resource_lock);
			continue;
		}
		/* Uhhuh, that didn't work out.. */
		kfree(res);
		res = NULL;
		break;
	}
	write_unlock(&resource_lock);
	return res;
}
EXPORT_SYMBOL(__request_region);
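
/*
 * Drivers normally reach this through the request_region() and
 * request_mem_region() macros from <linux/ioport.h>.  A typical
 * (illustrative) port-I/O claim and release:
 *
 *	if (!request_region(0x3f8, 8, "serial"))
 *		return -EBUSY;
 *	...
 *	release_region(0x3f8, 8);
 */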

/**
 * __check_region - check if a resource region is busy or free
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * Returns 0 if the region is free at the moment it is checked,
 * returns %-EBUSY if the region is busy.
 *
 * NOTE:
 * This function is deprecated because its use is racy.
 * Even if it returns 0, a subsequent call to request_region()
 * may fail because another driver etc. just allocated the region.
 * Do NOT use it.  It will be removed from the kernel.
 */
int __check_region(struct resource *parent, resource_size_t start,
			resource_size_t n)
{
	struct resource * res;

	res = __request_region(parent, start, n, "check-region", 0);
	if (!res)
		return -EBUSY;

	release_resource(res);
	kfree(res);
	return 0;
}
EXPORT_SYMBOL(__check_region);

/**
 * __release_region - release a previously reserved resource region
 * @parent: parent resource descriptor
 * @start: resource start address
 * @n: resource region size
 *
 * The described resource region must match a currently busy region.
 */
void __release_region(struct resource *parent, resource_size_t start,
			resource_size_t n)
{
	struct resource **p;
	resource_size_t end;

	p = &parent->child;
	end = start + n - 1;

	write_lock(&resource_lock);

	for (;;) {
		struct resource *res = *p;

		if (!res)
			break;
		if (res->start <= start && res->end >= end) {
			if (!(res->flags & IORESOURCE_BUSY)) {
				p = &res->child;
				continue;
			}
			if (res->start != start || res->end != end)
				break;
			*p = res->sibling;
			write_unlock(&resource_lock);
			if (res->flags & IORESOURCE_MUXED)
				wake_up(&muxed_resource_wait);
			kfree(res);
			return;
		}
		p = &res->sibling;
	}

	write_unlock(&resource_lock);

	printk(KERN_WARNING "Trying to free nonexistent resource "
		"<%016llx-%016llx>\n", (unsigned long long)start,
		(unsigned long long)end);
}
EXPORT_SYMBOL(__release_region);

/*
 * Managed region resource
 */
struct region_devres {
	struct resource *parent;
	resource_size_t start;
	resource_size_t n;
};

static void devm_region_release(struct device *dev, void *res)
{
	struct region_devres *this = res;

	__release_region(this->parent, this->start, this->n);
}

static int devm_region_match(struct device *dev, void *res, void *match_data)
{
	struct region_devres *this = res, *match = match_data;

	return this->parent == match->parent &&
		this->start == match->start && this->n == match->n;
}

struct resource * __devm_request_region(struct device *dev,
				struct resource *parent, resource_size_t start,
				resource_size_t n, const char *name)
{
	struct region_devres *dr = NULL;
	struct resource *res;

	dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
			  GFP_KERNEL);
	if (!dr)
		return NULL;

	dr->parent = parent;
	dr->start = start;
	dr->n = n;

	res = __request_region(parent, start, n, name, 0);
	if (res)
		devres_add(dev, dr);
	else
		devres_free(dr);

	return res;
}
EXPORT_SYMBOL(__devm_request_region);
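
/*
 * Through the devm_request_region()/devm_request_mem_region() wrappers in
 * <linux/ioport.h>, the region above is released automatically when the
 * owning device is unbound.  Illustrative probe-time use (pdev and res
 * are hypothetical):
 *
 *	if (!devm_request_mem_region(&pdev->dev, res->start,
 *				     resource_size(res), "mydev"))
 *		return -EBUSY;
 *	// no explicit release needed on the error or remove paths
 */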

void __devm_release_region(struct device *dev, struct resource *parent,
			   resource_size_t start, resource_size_t n)
{
	struct region_devres match_data = { parent, start, n };

	__release_region(parent, start, n);
	WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
			       &match_data));
}
EXPORT_SYMBOL(__devm_release_region);

/*
 * Called from init/main.c to reserve IO ports.
 */
#define MAXRESERVE 4
static int __init reserve_setup(char *str)
{
	static int reserved;
	static struct resource reserve[MAXRESERVE];

	for (;;) {
		unsigned int io_start, io_num;
		int x = reserved;

		if (get_option (&str, &io_start) != 2)
			break;
		if (get_option (&str, &io_num) == 0)
			break;
		if (x < MAXRESERVE) {
			struct resource *res = reserve + x;
			res->name = "reserved";
			res->start = io_start;
			res->end = io_start + io_num - 1;
			res->flags = IORESOURCE_BUSY;
			res->child = NULL;
			if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
				reserved = x+1;
		}
	}
	return 1;
}

__setup("reserve=", reserve_setup);

/*
 * Check if the requested addr and size spans more than any slot in the
 * iomem resource tree.
 */
int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			continue;
		if (p->end < addr)
			continue;
		if (PFN_DOWN(p->start) <= PFN_DOWN(addr) &&
		    PFN_DOWN(p->end) >= PFN_DOWN(addr + size - 1))
			continue;
		/*
		 * if a resource is "BUSY", it's not a hardware resource
		 * but a driver mapping of such a resource; we don't want
		 * to warn for those; some drivers legitimately map only
		 * partial hardware resources. (example: vesafb)
		 */
		if (p->flags & IORESOURCE_BUSY)
			continue;

		printk(KERN_WARNING "resource map sanity check conflict: "
		       "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
		       (unsigned long long)addr,
		       (unsigned long long)(addr + size - 1),
		       (unsigned long long)p->start,
		       (unsigned long long)p->end,
		       p->name);
		err = -1;
		break;
	}
	read_unlock(&resource_lock);

	return err;
}

#ifdef CONFIG_STRICT_DEVMEM
static int strict_iomem_checks = 1;
#else
static int strict_iomem_checks;
#endif

/*
 * check if an address is reserved in the iomem resource tree
 * returns 1 if reserved, 0 if not reserved.
 */
int iomem_is_exclusive(u64 addr)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	loff_t l;
	int size = PAGE_SIZE;

	if (!strict_iomem_checks)
		return 0;

	addr = addr & PAGE_MASK;

	read_lock(&resource_lock);
	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
		/*
		 * We can probably skip the resources without
		 * IORESOURCE_IO attribute?
		 */
		if (p->start >= addr + size)
			break;
		if (p->end < addr)
			continue;
		if (p->flags & IORESOURCE_BUSY &&
		    p->flags & IORESOURCE_EXCLUSIVE) {
			err = 1;
			break;
		}
	}
	read_unlock(&resource_lock);

	return err;
}

static int __init strict_iomem(char *str)
{
	if (strstr(str, "relaxed"))
		strict_iomem_checks = 0;
	if (strstr(str, "strict"))
		strict_iomem_checks = 1;
	return 1;
}

__setup("iomem=", strict_iomem);