// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/shmem_fs.h>

#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>

#include <xen/balloon.h>
#include <xen/xen.h>

#include "xen_drm_front.h"
#include "xen_drm_front_gem.h"

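/*
 * Driver-private GEM object: wraps the base DRM GEM object together with
 * the array of backing pages that are shared with the Xen backend.
 */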
struct xen_gem_object {
	struct drm_gem_object base;

	size_t num_pages;
	struct page **pages;

	/* set for buffers allocated by the backend */
	bool be_alloc;

	/* this is for imported PRIME buffer */
	struct sg_table *sgt_imported;
};

static inline struct xen_gem_object *
to_xen_gem_obj(struct drm_gem_object *gem_obj)
{
	return container_of(gem_obj, struct xen_gem_object, base);
}

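/*
 * Allocate the array of page pointers for a buffer of buf_size bytes;
 * the pages themselves are allocated separately by the caller.
 */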
static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
				 size_t buf_size)
{
	xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
	xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
					sizeof(struct page *), GFP_KERNEL);
	return !xen_obj->pages ? -ENOMEM : 0;
}

static void gem_free_pages_array(struct xen_gem_object *xen_obj)
{
	kvfree(xen_obj->pages);
	xen_obj->pages = NULL;
}

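/*
 * Map the entire buffer into the userspace VMA provided by the DRM mmap
 * path; all backing pages are inserted up front.
 */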
static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
					 struct vm_area_struct *vma)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	int ret;

	vma->vm_ops = gem_obj->funcs->vm_ops;

	/*
	 * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
	 * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
	 * the whole buffer.
	 */
	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
	vma->vm_pgoff = 0;

	/*
	 * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
	 * all memory which is shared with other entities in the system
	 * (including the hypervisor and other guests) must reside in memory
	 * which is mapped as Normal Inner Write-Back Outer Write-Back
	 * Inner-Shareable.
	 */
	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

	/*
	 * The vm_operations_struct.fault handler would normally be called on
	 * CPU access to the VMA. For GPUs this isn't the case, because the
	 * CPU doesn't touch the memory. Insert all pages now, so both CPU
	 * and GPU accesses are satisfied.
	 *
	 * FIXME: as all the pages are inserted now, no .fault handler should
	 * ever be called, so don't provide one.
	 */
	ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
	if (ret < 0)
		DRM_ERROR("Failed to map pages into vma: %d\n", ret);

	return ret;
}

static const struct vm_operations_struct xen_drm_drv_vm_ops = {
	.open           = drm_gem_vm_open,
	.close          = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = {
	.free = xen_drm_front_gem_object_free,
	.get_sg_table = xen_drm_front_gem_get_sg_table,
	.vmap = xen_drm_front_gem_prime_vmap,
	.vunmap = xen_drm_front_gem_prime_vunmap,
	.mmap = xen_drm_front_gem_object_mmap,
	.vm_ops = &xen_drm_drv_vm_ops,
};

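/*
 * Allocate and initialize a driver GEM object of the given size; backing
 * pages are not allocated here.
 */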
static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
					     size_t size)
{
	struct xen_gem_object *xen_obj;
	int ret;

	xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
	if (!xen_obj)
		return ERR_PTR(-ENOMEM);

	xen_obj->base.funcs = &xen_drm_front_gem_object_funcs;

	ret = drm_gem_object_init(dev, &xen_obj->base, size);
	if (ret < 0) {
		kfree(xen_obj);
		return ERR_PTR(ret);
	}

	return xen_obj;
}

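/*
 * Create a GEM object and its backing storage. If the backend allocates
 * the buffer (be_alloc), only ballooned pages are reserved here so the
 * backend's grant references can be mapped into them later; otherwise the
 * backing pages are allocated now so they can be shared with the backend.
 */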
static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return xen_obj;

	if (drm_info->front_info->cfg.be_alloc) {
		/*
		 * backend will allocate space for this buffer, so
		 * only allocate array of pointers to pages
		 */
		ret = gem_alloc_pages_array(xen_obj, size);
		if (ret < 0)
			goto fail;

		/*
		 * allocate ballooned pages which will be used to map
		 * grant references provided by the backend
		 */
		ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
						  xen_obj->pages);
		if (ret < 0) {
			DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
				  xen_obj->num_pages, ret);
			gem_free_pages_array(xen_obj);
			goto fail;
		}

		xen_obj->be_alloc = true;
		return xen_obj;
	}
	/*
	 * need to allocate backing pages now, so we can share those
	 * with the backend
	 */
	xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
	xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
	if (IS_ERR(xen_obj->pages)) {
		ret = PTR_ERR(xen_obj->pages);
		xen_obj->pages = NULL;
		goto fail;
	}

	return xen_obj;

fail:
	DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
	return ERR_PTR(ret);
}

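/* Create a GEM object of the given size and return its base DRM GEM object. */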
struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
						size_t size)
{
	struct xen_gem_object *xen_obj;

	xen_obj = gem_create(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	return &xen_obj->base;
}

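/*
 * Release the GEM object: free the page array of an imported PRIME buffer,
 * return ballooned pages of a backend-allocated buffer, or put the locally
 * allocated backing pages.
 */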
void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (xen_obj->base.import_attach) {
		drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
		gem_free_pages_array(xen_obj);
	} else {
		if (xen_obj->pages) {
			if (xen_obj->be_alloc) {
				xen_free_unpopulated_pages(xen_obj->num_pages,
							   xen_obj->pages);
				gem_free_pages_array(xen_obj);
			} else {
				drm_gem_put_pages(&xen_obj->base,
						  xen_obj->pages, true, false);
			}
		}
	}
	drm_gem_object_release(gem_obj);
	kfree(xen_obj);
}

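/* Return the array of pages backing the GEM object. */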
struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	return xen_obj->pages;
}

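/* Build an sg_table describing the backing pages of the GEM object. */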
struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);

	if (!xen_obj->pages)
		return ERR_PTR(-ENOMEM);

	return drm_prime_pages_to_sg(gem_obj->dev,
				     xen_obj->pages, xen_obj->num_pages);
}

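/*
 * Import a PRIME buffer: create a GEM object for it, translate the sg_table
 * into the page array and tell the backend about the new display buffer.
 */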
struct drm_gem_object *
xen_drm_front_gem_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
	struct xen_gem_object *xen_obj;
	size_t size;
	int ret;

	size = attach->dmabuf->size;
	xen_obj = gem_create_obj(dev, size);
	if (IS_ERR(xen_obj))
		return ERR_CAST(xen_obj);

	ret = gem_alloc_pages_array(xen_obj, size);
	if (ret < 0)
		return ERR_PTR(ret);

	xen_obj->sgt_imported = sgt;

	ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages,
					 xen_obj->num_pages);
	if (ret < 0)
		return ERR_PTR(ret);

	ret = xen_drm_front_dbuf_create(drm_info->front_info,
					xen_drm_front_dbuf_to_cookie(&xen_obj->base),
					0, 0, 0, size, sgt->sgl->offset,
					xen_obj->pages);
	if (ret < 0)
		return ERR_PTR(ret);

	DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
		  size, sgt->orig_nents);

	return &xen_obj->base;
}

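/*
 * Map the backing pages contiguously into the kernel's virtual address
 * space for PRIME vmap.
 */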
int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
				 struct iosys_map *map)
{
	struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
	void *vaddr;

	if (!xen_obj->pages)
		return -ENOMEM;

	/*
	 * Please see the comment in xen_drm_front_gem_object_mmap on
	 * mapping and attributes.
	 */
	vaddr = vmap(xen_obj->pages, xen_obj->num_pages,
		     VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return -ENOMEM;
	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
				    struct iosys_map *map)
{
	vunmap(map->vaddr);
}