// SPDX-License-Identifier: GPL-2.0-only
/*
 * psb GEM interface
 *
 * Copyright (c) 2011, Intel Corporation.
 *
 * Authors: Alan Cox
 *
 * TODO:
 *	- we need to work out if the MMU is relevant (eg for
 *	  accelerated operations on a GEM object)
 */

#include <linux/pagemap.h>

#include <asm/set_memory.h>

#include <drm/drm.h>
#include <drm/drm_vma_manager.h>

#include "gem.h"
#include "psb_drv.h"

/*
 * PSB GEM object
 */

int psb_gem_pin(struct psb_gem_object *pobj)
{
	struct drm_gem_object *obj = &pobj->base;
	struct drm_device *dev = obj->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 gpu_base = dev_priv->gtt.gatt_start;
	struct page **pages;
	unsigned int npages;
	int ret;

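	/* The pin count and the page list are protected by the object's dma_resv lock. */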
	ret = dma_resv_lock(obj->resv, NULL);
	if (drm_WARN_ONCE(dev, ret, "dma_resv_lock() failed, ret=%d\n", ret))
		return ret;

	if (pobj->in_gart || pobj->stolen)
		goto out; /* already mapped */

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto err_dma_resv_unlock;
	}

	npages = obj->size / PAGE_SIZE;

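	/* Make the pages write-combined on the CPU side while they are GPU-mapped. */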
	set_pages_array_wc(pages, npages);

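	/* Map the pages into the GTT and mirror the mapping in the GPU's MMU. */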
	psb_gtt_insert_pages(dev_priv, &pobj->resource, pages);
	psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu), pages,
			     (gpu_base + pobj->offset), npages, 0, 0,
			     PSB_MMU_CACHED_MEMORY);

	pobj->pages = pages;

out:
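	/* Count the pin; stolen objects start out pinned and stay resident. */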
	++pobj->in_gart;
	dma_resv_unlock(obj->resv);

	return 0;

err_dma_resv_unlock:
	dma_resv_unlock(obj->resv);
	return ret;
}

void psb_gem_unpin(struct psb_gem_object *pobj)
{
	struct drm_gem_object *obj = &pobj->base;
	struct drm_device *dev = obj->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 gpu_base = dev_priv->gtt.gatt_start;
	unsigned long npages;
	int ret;

	ret = dma_resv_lock(obj->resv, NULL);
	if (drm_WARN_ONCE(dev, ret, "dma_resv_lock() failed, ret=%d\n", ret))
		return;

	WARN_ON(!pobj->in_gart);

	--pobj->in_gart;

	if (pobj->in_gart || pobj->stolen)
		goto out;

	npages = obj->size / PAGE_SIZE;

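	/* Tear the mappings down in the reverse order of psb_gem_pin(). */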
	psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
			     (gpu_base + pobj->offset), npages, 0, 0);
	psb_gtt_remove_pages(dev_priv, &pobj->resource);

	/* Reset caching flags */
	set_pages_array_wb(pobj->pages, npages);

	drm_gem_put_pages(obj, pobj->pages, true, false);
	pobj->pages = NULL;

out:
	dma_resv_unlock(obj->resv);
}

static vm_fault_t psb_gem_fault(struct vm_fault *vmf);

static void psb_gem_free_object(struct drm_gem_object *obj)
{
	struct psb_gem_object *pobj = to_psb_gem_object(obj);

	/* Undo the mmap pin if we are destroying the object */
	if (pobj->mmapping)
		psb_gem_unpin(pobj);

	drm_gem_object_release(obj);

	WARN_ON(pobj->in_gart && !pobj->stolen);

	release_resource(&pobj->resource);
	kfree(pobj);
}

static const struct vm_operations_struct psb_gem_vm_ops = {
	.fault = psb_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs psb_gem_object_funcs = {
	.free = psb_gem_free_object,
	.vm_ops = &psb_gem_vm_ops,
};

struct psb_gem_object *
psb_gem_create(struct drm_device *dev, u64 size, const char *name, bool stolen, u32 align)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct psb_gem_object *pobj;
	struct drm_gem_object *obj;
	int ret;

	size = roundup(size, PAGE_SIZE);

	pobj = kzalloc(sizeof(*pobj), GFP_KERNEL);
	if (!pobj)
		return ERR_PTR(-ENOMEM);
	obj = &pobj->base;

	/* GTT resource */

	ret = psb_gtt_allocate_resource(dev_priv, &pobj->resource, name, size, align, stolen,
					&pobj->offset);
	if (ret)
		goto err_kfree;

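	/* Stolen buffers are backed by preallocated stolen memory and remain pinned. */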
	if (stolen) {
		pobj->stolen = true;
		pobj->in_gart = 1;
	}

	/* GEM object */

	obj->funcs = &psb_gem_object_funcs;

	if (stolen) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto err_release_resource;

		/* Limit the object to 32-bit mappings */
		mapping_set_gfp_mask(obj->filp->f_mapping, GFP_KERNEL | __GFP_DMA32);
	}

	return pobj;

err_release_resource:
	release_resource(&pobj->resource);
err_kfree:
	kfree(pobj);
	return ERR_PTR(ret);
}

/**
 * psb_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
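 *
 * For example, a 1024x600, 32-bpp buffer gets a pitch of
 * ALIGN(1024 * 4, 64) = 4096 bytes and a size of 4096 * 600 =
 * 2457600 bytes, which is exactly 600 pages.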
 */
int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
			struct drm_mode_create_dumb *args)
{
	size_t pitch, size;
	struct psb_gem_object *pobj;
	struct drm_gem_object *obj;
	u32 handle;
	int ret;

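	/* Compute the scanline pitch and round it up to a 64-byte boundary. */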
	pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	pitch = ALIGN(pitch, 64);

	size = pitch * args->height;
	size = roundup(size, PAGE_SIZE);
	if (!size)
		return -EINVAL;

	pobj = psb_gem_create(dev, size, "gem", false, PAGE_SIZE);
	if (IS_ERR(pobj))
		return PTR_ERR(pobj);
	obj = &pobj->base;

	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret)
		goto err_drm_gem_object_put;

	drm_gem_object_put(obj);

	args->pitch = pitch;
	args->size = size;
	args->handle = handle;

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * psb_gem_fault - pagefault handler for GEM objects
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * This code eventually needs to handle faulting objects in and out
 * of the GTT and repacking it when we run out of space. We can put
 * that off for now; our simple uses do not require it.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct drm_gem_object *obj;
	struct psb_gem_object *pobj;
	int err;
	vm_fault_t ret;
	unsigned long pfn;
	pgoff_t page_offset;
	struct drm_device *dev;
	struct drm_psb_private *dev_priv;

	obj = vma->vm_private_data;	/* GEM object */
	dev = obj->dev;
	dev_priv = to_drm_psb_private(dev);

	pobj = to_psb_gem_object(obj);

	/*
	 * Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet.
	 */
	mutex_lock(&dev_priv->mmap_mutex);

	/*
	 * For now the mmap pins the object and it stays pinned. As things
	 * stand that will do us no harm.
	 */
	if (pobj->mmapping == 0) {
		err = psb_gem_pin(pobj);
		if (err < 0) {
			dev_err(dev->dev, "gma500: pin failed: %d\n", err);
			ret = vmf_error(err);
			goto fail;
		}
		pobj->mmapping = 1;
	}

	/*
	 * Page relative to the VMA start - we must calculate this ourselves
	 * because vmf->pgoff is the fake GEM offset.
	 */
	page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	/* CPU view of the page, don't go via the GART for CPU writes */
	if (pobj->stolen)
		pfn = (dev_priv->stolen_base + pobj->offset) >> PAGE_SHIFT;
	else
		pfn = page_to_pfn(pobj->pages[page_offset]);
	ret = vmf_insert_pfn(vma, vmf->address, pfn);
fail:
	mutex_unlock(&dev_priv->mmap_mutex);

	return ret;
}

/*
 * Memory management
 */

/* Insert vram stolen pages into the GTT. */
static void psb_gem_mm_populate_stolen(struct drm_psb_private *pdev)
{
	struct drm_device *dev = &pdev->dev;
	unsigned int pfn_base;
	unsigned int i, num_pages;
	uint32_t pte;

	pfn_base = pdev->stolen_base >> PAGE_SHIFT;
	num_pages = pdev->vram_stolen_size >> PAGE_SHIFT;

	drm_dbg(dev, "Set up %u stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base << PAGE_SHIFT, 0);

	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, pdev->gtt_map + i);
	}

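	/* Read back the last PTE to post the writes to the device. */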
	(void)ioread32(pdev->gtt_map + i - 1);
}

int psb_gem_mm_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	unsigned long stolen_size, vram_stolen_size;
	struct psb_gtt *pg;
	int ret;

	mutex_init(&dev_priv->mmap_mutex);

	pg = &dev_priv->gtt;

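	/*
	 * PSB_BSM holds the physical base of the BIOS-reserved (stolen)
	 * memory. The stolen area runs from there up to the start of the
	 * GTT, less one reserved page.
	 */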
	pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE;

	stolen_size = vram_stolen_size;

	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
		dev_priv->stolen_base, vram_stolen_size / 1024);

	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;

	dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
	if (!dev_priv->vram_addr) {
		dev_err(dev->dev, "Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto err_mutex_destroy;
	}

	psb_gem_mm_populate_stolen(dev_priv);

	return 0;

err_mutex_destroy:
	mutex_destroy(&dev_priv->mmap_mutex);
	return ret;
}

void psb_gem_mm_fini(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	iounmap(dev_priv->vram_addr);

	mutex_destroy(&dev_priv->mmap_mutex);
}

/* Re-insert all pinned GEM objects into GTT. */
static void psb_gem_mm_populate_resources(struct drm_psb_private *pdev)
{
	unsigned int restored = 0, total = 0, size = 0;
	struct resource *r = pdev->gtt_mem->child;
	struct drm_device *dev = &pdev->dev;
	struct psb_gem_object *pobj;

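	/* Walk the children of the GTT resource; each child belongs to one GEM object. */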
	while (r) {
		/*
		 * TODO: GTT restoration needs a refactoring, so that we don't have to touch
		 * struct psb_gem_object here. The type represents a GEM object and is
		 * not related to the GTT itself.
		 */
		pobj = container_of(r, struct psb_gem_object, resource);
		if (pobj->pages) {
			psb_gtt_insert_pages(pdev, &pobj->resource, pobj->pages);
			size += resource_size(&pobj->resource);
			++restored;
		}
		r = r->sibling;
		++total;
	}

	drm_dbg(dev, "Restored %u of %u gtt ranges (%u KB)", restored, total, (size / 1024));
}

int psb_gem_mm_resume(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	unsigned long stolen_size, vram_stolen_size;
	struct psb_gtt *pg;

	pg = &dev_priv->gtt;

	pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE;

	stolen_size = vram_stolen_size;

	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n", dev_priv->stolen_base,
		vram_stolen_size / 1024);

	if (stolen_size != pg->stolen_size) {
		dev_err(dev->dev, "GTT resume error.\n");
		return -EINVAL;
	}

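	/*
	 * The GTT contents do not survive suspend; repopulate the
	 * stolen-memory PTEs and re-insert the pinned objects.
	 */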
	psb_gem_mm_populate_stolen(dev_priv);
	psb_gem_mm_populate_resources(dev_priv);

	return 0;
}