1 /* radeon_mem.c -- Simple agp/fb memory manager for radeon -*- linux-c -*-
2 *
3 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32 #include "radeon.h"
33 #include "drmP.h"
34 #include "drm.h"
35 #include "radeon_drm.h"
36 #include "radeon_drv.h"
37 #include "drm_os_linux.h"
38
39 /* Very simple allocator for agp memory, working on a static range
40 * already mapped into each client's address space.
41 */
42
/* Carve the range [start, start+size) out of free block 'p' and mark it as
 * owned by 'pid'.  The caller (alloc_block) guarantees that 'p' is free
 * (pid == 0) and fully contains the requested range.
 *
 * NOTE(review): if either kmalloc below fails we jump to 'out' and claim
 * whatever portion of 'p' has been split off so far -- the caller may then
 * receive a block larger than requested (and, if the first kmalloc failed,
 * one starting before the requested aligned offset).  Confirm callers
 * tolerate this best-effort behaviour.
 */
static struct mem_block *split_block(struct mem_block *p, int start, int size,
				     int pid )
{
	/* Maybe cut off the start of an existing block */
	if (start > p->start) {
		struct mem_block *newblock = kmalloc(sizeof(*newblock), GFP_KERNEL);
		if (!newblock)
			goto out;
		newblock->start = start;
		newblock->size = p->size - (start - p->start);
		newblock->pid = 0;	/* tail remains free until claimed below */
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size -= newblock->size;
		p = newblock;		/* continue splitting the new tail */
	}

	/* Maybe cut off the end of an existing block */
	if (size < p->size) {
		struct mem_block *newblock = kmalloc(sizeof(*newblock), GFP_KERNEL);
		if (!newblock)
			goto out;
		newblock->start = start + size;
		newblock->size = p->size - size;
		newblock->pid = 0;	/* the leftover stays free */
		newblock->next = p->next;
		newblock->prev = p;
		p->next->prev = newblock;
		p->next = newblock;
		p->size = size;
	}

 out:
	/* Our block is in the middle */
	p->pid = pid;
	return p;
}
82
alloc_block(struct mem_block * heap,int size,int align2,int pid)83 static struct mem_block *alloc_block( struct mem_block *heap, int size,
84 int align2, int pid )
85 {
86 struct mem_block *p;
87 int mask = (1 << align2)-1;
88
89 for (p = heap->next ; p != heap ; p = p->next) {
90 int start = (p->start + mask) & ~mask;
91 if (p->pid == 0 && start + size <= p->start + p->size)
92 return split_block( p, start, size, pid );
93 }
94
95 return NULL;
96 }
97
find_block(struct mem_block * heap,int start)98 static struct mem_block *find_block( struct mem_block *heap, int start )
99 {
100 struct mem_block *p;
101
102 for (p = heap->next ; p != heap ; p = p->next)
103 if (p->start == start)
104 return p;
105
106 return NULL;
107 }
108
109
free_block(struct mem_block * p)110 static void free_block( struct mem_block *p )
111 {
112 p->pid = 0;
113
114 /* Assumes a single contiguous range. Needs a special pid in
115 * 'heap' to stop it being subsumed.
116 */
117 if (p->next->pid == 0) {
118 struct mem_block *q = p->next;
119 p->size += q->size;
120 p->next = q->next;
121 p->next->prev = p;
122 kfree(q);
123 }
124
125 if (p->prev->pid == 0) {
126 struct mem_block *q = p->prev;
127 q->size += p->size;
128 q->next = p->next;
129 q->next->prev = q;
130 kfree(p);
131 }
132 }
133
#if 0
/* Debug helper: dump every block in 'heap' (offset range, size, owning
 * pid).  Compiled out; flip the #if to re-enable during bring-up.
 */
static void print_heap( struct mem_block *heap )
{
	struct mem_block *p;

	for (p = heap->next ; p != heap ; p = p->next)
		DRM_DEBUG("0x%x..0x%x (0x%x) -- owner %d\n",
			  p->start, p->start + p->size,
			  p->size, p->pid);
}
#endif
145
146 /* Initialize. How to check for an uninitialized heap?
147 */
init_heap(struct mem_block ** heap,int start,int size)148 static int init_heap(struct mem_block **heap, int start, int size)
149 {
150 struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
151
152 if (!blocks)
153 return -ENOMEM;
154
155 *heap = kmalloc(sizeof(**heap), GFP_KERNEL);
156 if (!*heap) {
157 kfree( blocks );
158 return -ENOMEM;
159 }
160
161 blocks->start = start;
162 blocks->size = size;
163 blocks->pid = 0;
164 blocks->next = blocks->prev = *heap;
165
166 memset( *heap, 0, sizeof(**heap) );
167 (*heap)->pid = -1;
168 (*heap)->next = (*heap)->prev = blocks;
169 return 0;
170 }
171
172
173 /* Free all blocks associated with the releasing pid.
174 */
radeon_mem_release(struct mem_block * heap)175 void radeon_mem_release( struct mem_block *heap )
176 {
177 int pid = current->pid;
178 struct mem_block *p;
179
180 if (!heap || !heap->next)
181 return;
182
183 for (p = heap->next ; p != heap ; p = p->next) {
184 if (p->pid == pid)
185 p->pid = 0;
186 }
187
188 /* Assumes a single contiguous range. Needs a special pid in
189 * 'heap' to stop it being subsumed.
190 */
191 for (p = heap->next ; p != heap ; p = p->next) {
192 while (p->pid == 0 && p->next->pid == 0) {
193 struct mem_block *q = p->next;
194 p->size += q->size;
195 p->next = q->next;
196 p->next->prev = p;
197 kfree(q);
198 }
199 }
200 }
201
202 /* Shutdown.
203 */
radeon_mem_takedown(struct mem_block ** heap)204 void radeon_mem_takedown( struct mem_block **heap )
205 {
206 struct mem_block *p;
207
208 if (!*heap)
209 return;
210
211 for (p = (*heap)->next ; p != *heap ; ) {
212 struct mem_block *q = p;
213 p = p->next;
214 kfree(q);
215 }
216
217 kfree( *heap );
218 *heap = 0;
219 }
220
221
222
223 /* IOCTL HANDLERS */
224
/* Map a userspace region id onto the matching heap pointer inside
 * dev_priv.  Returns NULL for an unknown region.
 */
static struct mem_block **get_heap( drm_radeon_private_t *dev_priv,
				    int region )
{
	switch( region ) {
	case RADEON_MEM_REGION_AGP:
		return &dev_priv->agp_heap;
	case RADEON_MEM_REGION_FB:
		return &dev_priv->fb_heap;
	default:
		return NULL;	/* NULL is the idiomatic null pointer, not 0 */
	}
}
237
radeon_mem_alloc(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long data)238 int radeon_mem_alloc(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data )
239 {
240 drm_file_t *priv = filp->private_data;
241 drm_device_t *dev = priv->dev;
242 drm_radeon_private_t *dev_priv = dev->dev_private;
243 drm_radeon_mem_alloc_t alloc;
244 struct mem_block *block, **heap;
245
246 if ( !dev_priv ) {
247 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
248 return -EINVAL;
249 }
250
251 DRM_COPY_FROM_USER_IOCTL( alloc, (drm_radeon_mem_alloc_t *)data,
252 sizeof(alloc) );
253
254 heap = get_heap( dev_priv, alloc.region );
255 if (!heap || !*heap)
256 return -EFAULT;
257
258 /* Make things easier on ourselves: all allocations at least
259 * 4k aligned.
260 */
261 if (alloc.alignment < 12)
262 alloc.alignment = 12;
263
264 block = alloc_block( *heap, alloc.size, alloc.alignment,
265 current->pid );
266
267 if (!block)
268 return -ENOMEM;
269
270 if ( copy_to_user( alloc.region_offset, &block->start,
271 sizeof(int) ) ) {
272 DRM_ERROR( "copy_to_user\n" );
273 return -EFAULT;
274 }
275
276 return 0;
277 }
278
279
280
radeon_mem_free(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long data)281 int radeon_mem_free(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data)
282 {
283 drm_file_t *priv = filp->private_data;
284 drm_device_t *dev = priv->dev;
285 drm_radeon_private_t *dev_priv = dev->dev_private;
286 drm_radeon_mem_free_t memfree;
287 struct mem_block *block, **heap;
288
289 if ( !dev_priv ) {
290 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
291 return -EINVAL;
292 }
293
294 DRM_COPY_FROM_USER_IOCTL( memfree, (drm_radeon_mem_free_t *)data,
295 sizeof(memfree) );
296
297 heap = get_heap( dev_priv, memfree.region );
298 if (!heap || !*heap)
299 return -EFAULT;
300
301 block = find_block( *heap, memfree.region_offset );
302 if (!block)
303 return -EFAULT;
304
305 if (block->pid != current->pid)
306 return -EPERM;
307
308 free_block( block );
309 return 0;
310 }
311
radeon_mem_init_heap(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long data)312 int radeon_mem_init_heap(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data)
313 {
314 drm_file_t *priv = filp->private_data;
315 drm_device_t *dev = priv->dev;
316 drm_radeon_private_t *dev_priv = dev->dev_private;
317 drm_radeon_mem_init_heap_t initheap;
318 struct mem_block **heap;
319
320 if ( !dev_priv ) {
321 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
322 return -EINVAL;
323 }
324
325 DRM_COPY_FROM_USER_IOCTL( initheap, (drm_radeon_mem_init_heap_t *)data,
326 sizeof(initheap) );
327
328 heap = get_heap( dev_priv, initheap.region );
329 if (!heap)
330 return -EFAULT;
331
332 if (*heap) {
333 DRM_ERROR("heap already initialized?");
334 return -EFAULT;
335 }
336
337 return init_heap( heap, initheap.start, initheap.size );
338 }
339
340
341