/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_mm.h"

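/* Unlink region @a from both the mm-wide node list and the free list and
 * free it.
 */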
static inline void
region_put(struct nouveau_mm *mm, struct nouveau_mm_node *a)
{
	list_del(&a->nl_entry);
	list_del(&a->fl_entry);
	kfree(a);
}

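/* Split the first @size units off region @a.  If @a is already exactly
 * @size long it is returned as-is; otherwise a new node covering the head
 * of @a is linked in before it (and onto the free list if @a is free) and
 * returned.  Returns NULL if the new node cannot be allocated.
 */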
static struct nouveau_mm_node *
region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
{
	struct nouveau_mm_node *b;

	if (a->length == size)
		return a;

	b = kmalloc(sizeof(*b), GFP_KERNEL);
	if (unlikely(b == NULL))
		return NULL;

	b->offset = a->offset;
	b->length = size;
	b->type   = a->type;
	a->offset += size;
	a->length -= size;
	list_add_tail(&b->nl_entry, &a->nl_entry);
	if (b->type == 0)
		list_add_tail(&b->fl_entry, &a->fl_entry);
	return b;
}

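/* Neighbour of @root on the mm-wide node list in direction @dir (prev/next),
 * or NULL at the list boundary.  Relies on a local 'mm' variable being in
 * scope at the point of use.
 */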
#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)

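/* Return region @this to the free list (marking it type 0) and merge it
 * with any free neighbours so the heap does not stay fragmented.
 */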
void
nouveau_mm_put(struct nouveau_mm *mm, struct nouveau_mm_node *this)
{
	struct nouveau_mm_node *prev = node(this, prev);
	struct nouveau_mm_node *next = node(this, next);

	list_add(&this->fl_entry, &mm->free);
	this->type = 0;

	if (prev && prev->type == 0) {
		prev->length += this->length;
		region_put(mm, this);
		this = prev;
	}

	if (next && next->type == 0) {
		next->offset  = this->offset;
		next->length += this->length;
		region_put(mm, this);
	}
}

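/* Allocate a region of @size units with alignment @align from the free list.
 * If @size_nc is non-zero the caller accepts a smaller piece (at least
 * @size_nc units), e.g. when assembling a scattered allocation.  Where a
 * free region borders nodes of a different @type, its ends are rounded in
 * to mm->block_size.  On success the node is marked with @type, removed
 * from the free list and returned via @pnode; returns -ENOMEM if a split
 * fails and -ENOSPC if no suitable free region exists.
 *
 * Rough usage sketch (hypothetical type/size values; callers are expected
 * to serialise on mm->mutex):
 *
 *	struct nouveau_mm_node *r = NULL;
 *	int ret;
 *
 *	mutex_lock(&mm->mutex);
 *	ret = nouveau_mm_get(mm, 1, size, 0, align, &r);
 *	mutex_unlock(&mm->mutex);
 *	if (ret == 0) {
 *		... use r->offset / r->length ...
 *		mutex_lock(&mm->mutex);
 *		nouveau_mm_put(mm, r);
 *		mutex_unlock(&mm->mutex);
 *	}
 */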
int
nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
	       u32 align, struct nouveau_mm_node **pnode)
{
	struct nouveau_mm_node *prev, *this, *next;
	u32 min = size_nc ? size_nc : size;
	u32 align_mask = align - 1;
	u32 splitoff;
	u32 s, e;

	list_for_each_entry(this, &mm->free, fl_entry) {
		e = this->offset + this->length;
		s = this->offset;

		prev = node(this, prev);
		if (prev && prev->type != type)
			s = roundup(s, mm->block_size);

		next = node(this, next);
		if (next && next->type != type)
			e = rounddown(e, mm->block_size);

		s  = (s + align_mask) & ~align_mask;
		e &= ~align_mask;
		if (s > e || e - s < min)
			continue;

		splitoff = s - this->offset;
		if (splitoff && !region_split(mm, this, splitoff))
			return -ENOMEM;

		this = region_split(mm, this, min(size, e - s));
		if (!this)
			return -ENOMEM;

		this->type = type;
		list_del(&this->fl_entry);
		*pnode = this;
		return 0;
	}

	return -ENOSPC;
}

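/* Set up the allocator (when @block, the minimum block size, is non-zero)
 * and add a free heap node covering [@offset, @offset + @length), rounded
 * inwards to mm->block_size.  Offsets and lengths are in whatever units the
 * caller chooses; the allocator itself does not assume bytes.
 */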
int
nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
{
	struct nouveau_mm_node *node;

	if (block) {
		mutex_init(&mm->mutex);
		INIT_LIST_HEAD(&mm->nodes);
		INIT_LIST_HEAD(&mm->free);
		mm->block_size = block;
		mm->heap_nodes = 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->offset = roundup(offset, mm->block_size);
	node->length = rounddown(offset + length, mm->block_size) - node->offset;

	list_add_tail(&node->nl_entry, &mm->nodes);
	list_add_tail(&node->fl_entry, &mm->free);
	mm->heap_nodes++;
	return 0;
}

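/* Tear the allocator down.  If any nodes beyond the initial heap node(s)
 * remain, the heap is still in use: dump them, warn and return -EBUSY.
 * Otherwise free the initial heap node.
 */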
int
nouveau_mm_fini(struct nouveau_mm *mm)
{
	struct nouveau_mm_node *node, *heap =
		list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
	int nodes = 0;

	list_for_each_entry(node, &mm->nodes, nl_entry) {
		if (nodes++ == mm->heap_nodes) {
			printk(KERN_ERR "nouveau_mm in use at destroy time!\n");
			list_for_each_entry(node, &mm->nodes, nl_entry) {
				printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
				       node->type, node->offset, node->length);
			}
			WARN_ON(1);
			return -EBUSY;
		}
	}

	kfree(heap);
	return 0;
}