/* drm_memory.h -- Memory management wrappers for DRM -*- linux-c -*-
 * Created: Thu Feb  4 14:00:34 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */

#include <linux/config.h>
#include "drmP.h"
#include <linux/wrapper.h>

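/*
 * Every allocation path in this file (kmalloc, page, ioremap and AGP) is
 * wrapped so that per-area statistics -- success, free and failure counts
 * plus byte totals -- are kept in DRM(mem_stats) under DRM(mem_lock) and
 * reported by DRM(mem_info).
 */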
typedef struct drm_mem_stats {
        const char        *name;
        int               succeed_count;
        int               free_count;
        int               fail_count;
        unsigned long     bytes_allocated;
        unsigned long     bytes_freed;
} drm_mem_stats_t;

static spinlock_t         DRM(mem_lock)      = SPIN_LOCK_UNLOCKED;
static unsigned long      DRM(ram_available) = 0; /* In pages */
static unsigned long      DRM(ram_used)      = 0;
static drm_mem_stats_t    DRM(mem_stats)[]   = {
        [DRM_MEM_DMA]       = { "dmabufs"   },
        [DRM_MEM_SAREA]     = { "sareas"    },
        [DRM_MEM_DRIVER]    = { "driver"    },
        [DRM_MEM_MAGIC]     = { "magic"     },
        [DRM_MEM_IOCTLS]    = { "ioctltab"  },
        [DRM_MEM_MAPS]      = { "maplist"   },
        [DRM_MEM_VMAS]      = { "vmalist"   },
        [DRM_MEM_BUFS]      = { "buflist"   },
        [DRM_MEM_SEGS]      = { "seglist"   },
        [DRM_MEM_PAGES]     = { "pagelist"  },
        [DRM_MEM_FILES]     = { "files"     },
        [DRM_MEM_QUEUES]    = { "queues"    },
        [DRM_MEM_CMDS]      = { "commands"  },
        [DRM_MEM_MAPPINGS]  = { "mappings"  },
        [DRM_MEM_BUFLISTS]  = { "buflists"  },
        [DRM_MEM_AGPLISTS]  = { "agplist"   },
        [DRM_MEM_SGLISTS]   = { "sglist"    },
        [DRM_MEM_TOTALAGP]  = { "totalagp"  },
        [DRM_MEM_BOUNDAGP]  = { "boundagp"  },
        [DRM_MEM_CTXBITMAP] = { "ctxbitmap" },
        [DRM_MEM_STUB]      = { "stub"      },
        { NULL, 0, }        /* Last entry must be null */
};

void DRM(mem_init)(void)
{
        drm_mem_stats_t *mem;
        struct sysinfo   si;

        for (mem = DRM(mem_stats); mem->name; ++mem) {
                mem->succeed_count   = 0;
                mem->free_count      = 0;
                mem->fail_count      = 0;
                mem->bytes_allocated = 0;
                mem->bytes_freed     = 0;
        }

        si_meminfo(&si);
        DRM(ram_available) = si.totalram;
        DRM(ram_used)      = 0;
}

/* drm_mem_info is called whenever a process reads /dev/drm/mem. */

static int DRM(_mem_info)(char *buf, char **start, off_t offset,
                          int request, int *eof, void *data)
{
        drm_mem_stats_t *pt;
        int              len = 0;

        if (offset > DRM_PROC_LIMIT) {
                *eof = 1;
                return 0;
        }

        *eof   = 0;
        *start = &buf[offset];

        DRM_PROC_PRINT("                  total counts                      "
                       " |    outstanding  \n");
        DRM_PROC_PRINT("type       alloc freed fail     bytes      freed"
                       " | allocs      bytes\n\n");
        DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
                       "system", 0, 0, 0,
                       DRM(ram_available) << (PAGE_SHIFT - 10));
        DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
                       "locked", 0, 0, 0, DRM(ram_used) >> 10);
        DRM_PROC_PRINT("\n");
        for (pt = DRM(mem_stats); pt->name; pt++) {
                DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
                               pt->name,
                               pt->succeed_count,
                               pt->free_count,
                               pt->fail_count,
                               pt->bytes_allocated,
                               pt->bytes_freed,
                               pt->succeed_count - pt->free_count,
                               (long)pt->bytes_allocated
                               - (long)pt->bytes_freed);
        }

        if (len > request + offset) return request;
        *eof = 1;
        return len - offset;
}

int DRM(mem_info)(char *buf, char **start, off_t offset,
                  int len, int *eof, void *data)
{
        int ret;

        spin_lock(&DRM(mem_lock));
        ret = DRM(_mem_info)(buf, start, offset, len, eof, data);
        spin_unlock(&DRM(mem_lock));
        return ret;
}

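/* kmalloc/kfree wrappers: DRM(alloc), DRM(realloc), DRM(strdup) and DRM(free)
 * charge every allocation to the caller's area, so leaks show up as a
 * non-zero outstanding count in the statistics above.
 */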
void *DRM(alloc)(size_t size, int area)
{
        void *pt;

        if (!size) {
                DRM_MEM_ERROR(area, "Allocating 0 bytes\n");
                return NULL;
        }

        if (!(pt = kmalloc(size, GFP_KERNEL))) {
                spin_lock(&DRM(mem_lock));
                ++DRM(mem_stats)[area].fail_count;
                spin_unlock(&DRM(mem_lock));
                return NULL;
        }
        spin_lock(&DRM(mem_lock));
        ++DRM(mem_stats)[area].succeed_count;
        DRM(mem_stats)[area].bytes_allocated += size;
        spin_unlock(&DRM(mem_lock));
        return pt;
}

void *DRM(realloc)(void *oldpt, size_t oldsize, size_t size, int area)
{
        void *pt;

        if (!(pt = DRM(alloc)(size, area))) return NULL;
        if (oldpt && oldsize) {
                memcpy(pt, oldpt, oldsize);
                DRM(free)(oldpt, oldsize, area);
        }
        return pt;
}

char *DRM(strdup)(const char *s, int area)
{
        char *pt;
        int   length = s ? strlen(s) : 0;

        if (!(pt = DRM(alloc)(length+1, area))) return NULL;
        if (s) strcpy(pt, s);   /* guard against a NULL source string */
        else   pt[0] = '\0';
        return pt;
}

void DRM(strfree)(const char *s, int area)
{
        unsigned int size;

        if (!s) return;

        size = strlen(s) + 1;
        DRM(free)((void *)s, size, area);
}

void DRM(free)(void *pt, size_t size, int area)
{
        int alloc_count;
        int free_count;

        if (!pt) DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
        else     kfree(pt);
        spin_lock(&DRM(mem_lock));
        DRM(mem_stats)[area].bytes_freed += size;
        free_count  = ++DRM(mem_stats)[area].free_count;
        alloc_count =   DRM(mem_stats)[area].succeed_count;
        spin_unlock(&DRM(mem_lock));
        if (free_count > alloc_count) {
                DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
                              free_count, alloc_count);
        }
}

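/* Page-order allocations: DRM(alloc_pages) refuses to push DRM(ram_used)
 * past DRM_RAM_PERCENT of available RAM, zeroes the pages, and marks each
 * one reserved with mem_map_reserve; DRM(free_pages) undoes the reservation
 * before returning the pages to the kernel.
 */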
unsigned long DRM(alloc_pages)(int order, int area)
{
        unsigned long address;
        unsigned long bytes   = PAGE_SIZE << order;
        unsigned long addr;
        unsigned int  sz;

        spin_lock(&DRM(mem_lock));
        if ((DRM(ram_used) >> PAGE_SHIFT)
            > (DRM_RAM_PERCENT * DRM(ram_available)) / 100) {
                spin_unlock(&DRM(mem_lock));
                return 0;
        }
        spin_unlock(&DRM(mem_lock));

        address = __get_free_pages(GFP_KERNEL, order);
        if (!address) {
                spin_lock(&DRM(mem_lock));
                ++DRM(mem_stats)[area].fail_count;
                spin_unlock(&DRM(mem_lock));
                return 0;
        }
        spin_lock(&DRM(mem_lock));
        ++DRM(mem_stats)[area].succeed_count;
        DRM(mem_stats)[area].bytes_allocated += bytes;
        DRM(ram_used)                        += bytes;
        spin_unlock(&DRM(mem_lock));

        /* Zero outside the lock */
        memset((void *)address, 0, bytes);

        /* Reserve */
        for (addr = address, sz = bytes;
             sz > 0;
             addr += PAGE_SIZE, sz -= PAGE_SIZE) {
                mem_map_reserve(virt_to_page(addr));
        }

        return address;
}

void DRM(free_pages)(unsigned long address, int order, int area)
{
        unsigned long bytes = PAGE_SIZE << order;
        int           alloc_count;
        int           free_count;
        unsigned long addr;
        unsigned int  sz;

        if (!address) {
                DRM_MEM_ERROR(area, "Attempt to free address 0\n");
        } else {
                /* Unreserve */
                for (addr = address, sz = bytes;
                     sz > 0;
                     addr += PAGE_SIZE, sz -= PAGE_SIZE) {
                        mem_map_unreserve(virt_to_page(addr));
                }
                free_pages(address, order);
        }

        spin_lock(&DRM(mem_lock));
        free_count  = ++DRM(mem_stats)[area].free_count;
        alloc_count =   DRM(mem_stats)[area].succeed_count;
        DRM(mem_stats)[area].bytes_freed += bytes;
        DRM(ram_used)                    -= bytes;
        spin_unlock(&DRM(mem_lock));
        if (free_count > alloc_count) {
                DRM_MEM_ERROR(area,
                              "Excess frees: %d frees, %d allocs\n",
                              free_count, alloc_count);
        }
}

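/* I/O mapping wrappers: ioremap, ioremap_nocache and iounmap results are
 * charged to the DRM_MEM_MAPPINGS area.
 */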
void *DRM(ioremap)(unsigned long offset, unsigned long size, drm_device_t *dev)
{
        void *pt;

        if (!size) {
                DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
                              "Mapping 0 bytes at 0x%08lx\n", offset);
                return NULL;
        }

        if (!(pt = ioremap(offset, size))) {
                spin_lock(&DRM(mem_lock));
                ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
                spin_unlock(&DRM(mem_lock));
                return NULL;
        }
        spin_lock(&DRM(mem_lock));
        ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
        DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
        spin_unlock(&DRM(mem_lock));
        return pt;
}

void *DRM(ioremap_nocache)(unsigned long offset, unsigned long size,
                           drm_device_t *dev)
{
        void *pt;

        if (!size) {
                DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
                              "Mapping 0 bytes at 0x%08lx\n", offset);
                return NULL;
        }

        if (!(pt = ioremap_nocache(offset, size))) {
                spin_lock(&DRM(mem_lock));
                ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
                spin_unlock(&DRM(mem_lock));
                return NULL;
        }
        spin_lock(&DRM(mem_lock));
        ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
        DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
        spin_unlock(&DRM(mem_lock));
        return pt;
}

void DRM(ioremapfree)(void *pt, unsigned long size, drm_device_t *dev)
{
        int alloc_count;
        int free_count;

        if (!pt)
                DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
                              "Attempt to free NULL pointer\n");
        else
                iounmap(pt);

        spin_lock(&DRM(mem_lock));
        DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
        free_count  = ++DRM(mem_stats)[DRM_MEM_MAPPINGS].free_count;
        alloc_count =   DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
        spin_unlock(&DRM(mem_lock));
        if (free_count > alloc_count) {
                DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
                              "Excess frees: %d frees, %d allocs\n",
                              free_count, alloc_count);
        }
}

#if __REALLY_HAVE_AGP

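/* AGP wrappers: total AGP memory allocated is tracked in DRM_MEM_TOTALAGP,
 * and the subset currently bound into the aperture in DRM_MEM_BOUNDAGP.
 */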
agp_memory *DRM(alloc_agp)(int pages, u32 type)
{
        agp_memory *handle;

        if (!pages) {
                DRM_MEM_ERROR(DRM_MEM_TOTALAGP, "Allocating 0 pages\n");
                return NULL;
        }

        if ((handle = DRM(agp_allocate_memory)(pages, type))) {
                spin_lock(&DRM(mem_lock));
                ++DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
                DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_allocated
                        += pages << PAGE_SHIFT;
                spin_unlock(&DRM(mem_lock));
                return handle;
        }
        spin_lock(&DRM(mem_lock));
        ++DRM(mem_stats)[DRM_MEM_TOTALAGP].fail_count;
        spin_unlock(&DRM(mem_lock));
        return NULL;
}

int DRM(free_agp)(agp_memory *handle, int pages)
{
        int alloc_count;
        int free_count;
        int retval = -EINVAL;

        if (!handle) {
                DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
                              "Attempt to free NULL AGP handle\n");
                return retval;
        }

        if (DRM(agp_free_memory)(handle)) {
                spin_lock(&DRM(mem_lock));
                free_count  = ++DRM(mem_stats)[DRM_MEM_TOTALAGP].free_count;
                alloc_count =   DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
                DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_freed
                        += pages << PAGE_SHIFT;
                spin_unlock(&DRM(mem_lock));
                if (free_count > alloc_count) {
                        DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
                                      "Excess frees: %d frees, %d allocs\n",
                                      free_count, alloc_count);
                }
                return 0;
        }
        return retval;
}

int DRM(bind_agp)(agp_memory *handle, unsigned int start)
{
        int retcode = -EINVAL;

        if (!handle) {
                DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
                              "Attempt to bind NULL AGP handle\n");
                return retcode;
        }

        if (!(retcode = DRM(agp_bind_memory)(handle, start))) {
                spin_lock(&DRM(mem_lock));
                ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
                DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_allocated
                        += handle->page_count << PAGE_SHIFT;
                spin_unlock(&DRM(mem_lock));
                return retcode;
        }
        spin_lock(&DRM(mem_lock));
        ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].fail_count;
        spin_unlock(&DRM(mem_lock));
        return retcode;
}

int DRM(unbind_agp)(agp_memory *handle)
{
        int alloc_count;
        int free_count;
        int retcode = -EINVAL;

        if (!handle) {
                DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
                              "Attempt to unbind NULL AGP handle\n");
                return retcode;
        }

        if ((retcode = DRM(agp_unbind_memory)(handle))) return retcode;
        spin_lock(&DRM(mem_lock));
        free_count  = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count;
        alloc_count =   DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
        DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_freed
                += handle->page_count << PAGE_SHIFT;
        spin_unlock(&DRM(mem_lock));
        if (free_count > alloc_count) {
                DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
                              "Excess frees: %d frees, %d allocs\n",
                              free_count, alloc_count);
        }
        return retcode;
}
#endif