/*
 * Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/slab.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/via_drm.h>

#include "via_drv.h"

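/* Both memory managers hand out space in units of 16 bytes. */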
#define VIA_MM_ALIGN_SHIFT 4
#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)

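/*
 * Book-keeping for one allocation: its node in the VRAM or AGP drm_mm
 * allocator plus a link on the owning file's obj_list so it can be
 * reclaimed when that file is closed.
 */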
struct via_memblock {
	struct drm_mm_node mm_node;
	struct list_head owner_list;
};

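/*
 * Set up the AGP memory manager: initialize a drm_mm allocator spanning
 * agp->size bytes (managed in 16-byte units) and record the AGP offset.
 */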
int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_agp_t *agp = data;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);

	dev_priv->agp_initialized = 1;
	dev_priv->agp_offset = agp->offset;
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
	return 0;
}

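/*
 * Set up the video RAM memory manager: initialize a drm_mm allocator spanning
 * fb->size bytes (managed in 16-byte units) and record the VRAM offset.
 */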
int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_fb_t *fb = data;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);

	dev_priv->vram_initialized = 1;
	dev_priv->vram_offset = fb->offset;

	mutex_unlock(&dev->struct_mutex);
	DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);

	return 0;
}

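/*
 * Per-context teardown: release the context's futexes and, if this was the
 * last context, uninstall the IRQ handler and tear down the driver's mappings.
 */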
int via_final_context(struct drm_device *dev, int context)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;

	via_release_futex(dev_priv, context);

	/* Linux specific until context tracking code gets ported to BSD */
	/* Last context, perform cleanup */
	if (list_is_singular(&dev->ctxlist)) {
		DRM_DEBUG("Last Context\n");
		drm_legacy_irq_uninstall(dev);
		via_cleanup_futex(dev_priv);
		via_do_cleanup_map(dev);
	}
	return 1;
}

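/*
 * Last-close hook: tear down the VRAM and AGP memory managers once the last
 * file handle on the device has been closed.
 */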
void via_lastclose(struct drm_device *dev)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	mutex_lock(&dev->struct_mutex);
	if (dev_priv->vram_initialized) {
		drm_mm_takedown(&dev_priv->vram_mm);
		dev_priv->vram_initialized = 0;
	}
	if (dev_priv->agp_initialized) {
		drm_mm_takedown(&dev_priv->agp_mm);
		dev_priv->agp_initialized = 0;
	}
	mutex_unlock(&dev->struct_mutex);
}

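/*
 * Allocate a block of video or AGP memory. The rounded-up size is carved out
 * of the matching drm_mm, the block is tracked in the device's object_idr
 * (handle returned in mem->index) and on the owning file's obj_list, and
 * mem->offset receives the resulting byte offset.
 */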
int via_mem_alloc(struct drm_device *dev, void *data,
		  struct drm_file *file)
{
	drm_via_mem_t *mem = data;
	int retval = 0, user_key;
	struct via_memblock *item;
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	struct via_file_private *file_priv = file->driver_priv;
	unsigned long tmpSize;

	if (mem->type > VIA_MEM_AGP) {
		DRM_ERROR("Unknown memory type allocation\n");
		return -EINVAL;
	}
	mutex_lock(&dev->struct_mutex);
	if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
		      dev_priv->agp_initialized)) {
		mutex_unlock(&dev->struct_mutex);
		DRM_ERROR
		    ("Attempt to allocate from uninitialized memory manager.\n");
		return -EINVAL;
	}

	item = kzalloc(sizeof(*item), GFP_KERNEL);
	if (!item) {
		retval = -ENOMEM;
		goto fail_alloc;
	}

	tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
	if (mem->type == VIA_MEM_AGP)
		retval = drm_mm_insert_node(&dev_priv->agp_mm,
					    &item->mm_node,
					    tmpSize);
	else
		retval = drm_mm_insert_node(&dev_priv->vram_mm,
					    &item->mm_node,
					    tmpSize);
	if (retval)
		goto fail_alloc;

	retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
	if (retval < 0)
		goto fail_idr;
	user_key = retval;

	list_add(&item->owner_list, &file_priv->obj_list);
	mutex_unlock(&dev->struct_mutex);

	mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
		      dev_priv->vram_offset : dev_priv->agp_offset) +
	    ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
	mem->index = user_key;

	return 0;

fail_idr:
	drm_mm_remove_node(&item->mm_node);
fail_alloc:
	kfree(item);
	mutex_unlock(&dev->struct_mutex);

	mem->offset = 0;
	mem->size = 0;
	mem->index = 0;
	DRM_DEBUG("Video memory allocation failed\n");

	return retval;
}

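/*
 * Free a block previously handed out by via_mem_alloc, looked up via the
 * mem->index handle.
 */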
int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_via_private_t *dev_priv = dev->dev_private;
	drm_via_mem_t *mem = data;
	struct via_memblock *obj;

	mutex_lock(&dev->struct_mutex);
	obj = idr_find(&dev_priv->object_idr, mem->index);
	if (obj == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	idr_remove(&dev_priv->object_idr, mem->index);
	list_del(&obj->owner_list);
	drm_mm_remove_node(&obj->mm_node);
	kfree(obj);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG("free = 0x%lx\n", mem->index);

	return 0;
}

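/*
 * Release every block still owned by @file: take the idle lock, wait for the
 * engine to go quiescent, then unhook and free each outstanding allocation.
 */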
void via_reclaim_buffers_locked(struct drm_device *dev,
				struct drm_file *file)
{
	struct via_file_private *file_priv = file->driver_priv;
	struct via_memblock *entry, *next;

	if (!(dev->master && file->master->lock.hw_lock))
		return;

	drm_legacy_idlelock_take(&file->master->lock);

	mutex_lock(&dev->struct_mutex);
	if (list_empty(&file_priv->obj_list)) {
		mutex_unlock(&dev->struct_mutex);
		drm_legacy_idlelock_release(&file->master->lock);

		return;
	}

	via_driver_dma_quiescent(dev);

	list_for_each_entry_safe(entry, next, &file_priv->obj_list,
				 owner_list) {
		list_del(&entry->owner_list);
		drm_mm_remove_node(&entry->mm_node);
		kfree(entry);
	}
	mutex_unlock(&dev->struct_mutex);

	drm_legacy_idlelock_release(&file->master->lock);
}