// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"

/* Called by DRM core on the last userspace/kernel unreference of the
 * BO.
 */
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_device *pfdev = obj->dev->dev_private;

	/*
	 * Make sure the BO is no longer inserted in the shrinker list before
	 * taking care of the destruction itself. If we don't do that we have a
	 * race condition between this function and what's done in
	 * panfrost_gem_shrinker_scan().
	 */
	mutex_lock(&pfdev->shrinker_lock);
	list_del_init(&bo->base.madv_list);
	mutex_unlock(&pfdev->shrinker_lock);

	/*
	 * If we still have mappings attached to the BO, there's a problem in
	 * our refcounting.
	 */
	WARN_ON_ONCE(!list_empty(&bo->mappings.list));

	if (bo->sgts) {
		int i;
		int n_sgt = bo->base.base.size / SZ_2M;

		for (i = 0; i < n_sgt; i++) {
			if (bo->sgts[i].sgl) {
				dma_unmap_sgtable(pfdev->dev, &bo->sgts[i],
						  DMA_BIDIRECTIONAL, 0);
				sg_free_table(&bo->sgts[i]);
			}
		}
		kvfree(bo->sgts);
	}

	drm_gem_shmem_free(&bo->base);
}

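/*
 * Look up the mapping of @bo in the address space of @priv and take a
 * reference on it, or return NULL if this file has no mapping of the BO.
 * The caller must drop the reference with panfrost_gem_mapping_put().
 */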
struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
			 struct panfrost_file_priv *priv)
{
	struct panfrost_gem_mapping *iter, *mapping = NULL;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			kref_get(&iter->refcount);
			mapping = iter;
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	return mapping;
}

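/*
 * Tear a mapping down: unmap it from the GPU MMU if it is currently
 * mapped, then return its range to the GPU VA allocator.
 */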
static void
panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping)
{
	if (mapping->active)
		panfrost_mmu_unmap(mapping);

	spin_lock(&mapping->mmu->mm_lock);
	if (drm_mm_node_allocated(&mapping->mmnode))
		drm_mm_remove_node(&mapping->mmnode);
	spin_unlock(&mapping->mmu->mm_lock);
}

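/*
 * kref release callback: tears down the GPU-side mapping and drops the
 * references the mapping held on the BO and on the per-file MMU context.
 */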
static void panfrost_gem_mapping_release(struct kref *kref)
{
	struct panfrost_gem_mapping *mapping;

	mapping = container_of(kref, struct panfrost_gem_mapping, refcount);

	panfrost_gem_teardown_mapping(mapping);
	drm_gem_object_put(&mapping->obj->base.base);
	panfrost_mmu_ctx_put(mapping->mmu);
	kfree(mapping);
}

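/* Drop a mapping reference, releasing the mapping when it hits zero. */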
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping)
{
	if (!mapping)
		return;

	kref_put(&mapping->refcount, panfrost_gem_mapping_release);
}

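/*
 * Tear down all GPU-side mappings of @bo. The caller must hold
 * bo->mappings.lock. The mappings themselves stay on the list and are
 * freed by the final panfrost_gem_mapping_put() as usual.
 */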
void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo)
{
	struct panfrost_gem_mapping *mapping;

	list_for_each_entry(mapping, &bo->mappings.list, node)
		panfrost_gem_teardown_mapping(mapping);
}

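/*
 * Called by DRM core whenever a handle to the BO is created for
 * @file_priv: allocate a range in the file's GPU VA space and, unless
 * the BO is a growable heap, map the pages into the GPU MMU right away.
 * Heap BOs are instead mapped on demand by the GPU fault handler.
 */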
int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	int ret;
	size_t size = obj->size;
	u64 align;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping)
		return -ENOMEM;

	INIT_LIST_HEAD(&mapping->node);
	kref_init(&mapping->refcount);
	drm_gem_object_get(obj);
	mapping->obj = bo;

	/*
	 * Executable buffers cannot cross a 16MB boundary as the program
	 * counter is 24-bits. We assume executable buffers will be less than
	 * 16MB and aligning executable buffers to their size will avoid
	 * crossing a 16MB boundary.
	 */
	if (!bo->noexec)
		align = size >> PAGE_SHIFT;
	else
		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;

	mapping->mmu = panfrost_mmu_ctx_get(priv->mmu);
	spin_lock(&mapping->mmu->mm_lock);
	ret = drm_mm_insert_node_generic(&mapping->mmu->mm, &mapping->mmnode,
					 size >> PAGE_SHIFT, align, color, 0);
	spin_unlock(&mapping->mmu->mm_lock);
	if (ret)
		goto err;

	if (!bo->is_heap) {
		ret = panfrost_mmu_map(mapping);
		if (ret)
			goto err;
	}

	mutex_lock(&bo->mappings.lock);
	WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED);
	list_add_tail(&mapping->node, &bo->mappings.list);
	mutex_unlock(&bo->mappings.lock);

err:
	if (ret)
		panfrost_gem_mapping_put(mapping);
	return ret;
}

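/*
 * Called by DRM core when @file_priv drops its last handle to the BO:
 * detach the file's mapping from the BO and drop the list's reference
 * to it.
 */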
void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_gem_mapping *mapping = NULL, *iter;

	mutex_lock(&bo->mappings.lock);
	list_for_each_entry(iter, &bo->mappings.list, node) {
		if (iter->mmu == priv->mmu) {
			mapping = iter;
			list_del(&iter->node);
			break;
		}
	}
	mutex_unlock(&bo->mappings.lock);

	panfrost_gem_mapping_put(mapping);
}

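/*
 * Heap BOs get their backing pages on demand from the GPU fault
 * handler, so they cannot be pinned as a whole; everything else is
 * handled by the shmem helpers.
 */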
static int panfrost_gem_pin(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);

	if (bo->is_heap)
		return -EINVAL;

	return drm_gem_shmem_pin(&bo->base);
}

static const struct drm_gem_object_funcs panfrost_gem_funcs = {
	.free = panfrost_gem_free_object,
	.open = panfrost_gem_open,
	.close = panfrost_gem_close,
	.print_info = drm_gem_shmem_object_print_info,
	.pin = panfrost_gem_pin,
	.unpin = drm_gem_shmem_object_unpin,
	.get_sg_table = drm_gem_shmem_object_get_sg_table,
	.vmap = drm_gem_shmem_object_vmap,
	.vunmap = drm_gem_shmem_object_vunmap,
	.mmap = drm_gem_shmem_object_mmap,
	.vm_ops = &drm_gem_shmem_vm_ops,
};

/**
 * panfrost_gem_create_object - Implementation of driver->gem_create_object.
 * @dev: DRM device
 * @size: Size in bytes of the memory the object will reference
 *
 * This lets the GEM helpers allocate object structs for us, and keep
 * our BO stats correct.
 */
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&obj->mappings.list);
	mutex_init(&obj->mappings.lock);
	obj->base.base.funcs = &panfrost_gem_funcs;
	obj->base.map_wc = !pfdev->coherent;

	return &obj->base.base;
}

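/*
 * Allocate a shmem-backed BO and apply the PANFROST_BO_* creation flags.
 * Returns the new BO or an ERR_PTR() on failure.
 */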
struct panfrost_gem_object *
panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags)
{
	struct drm_gem_shmem_object *shmem;
	struct panfrost_gem_object *bo;

	/* Round up heap allocations to 2MB to keep fault handling simple */
	if (flags & PANFROST_BO_HEAP)
		size = roundup(size, SZ_2M);

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	bo = to_panfrost_bo(&shmem->base);
	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
	bo->is_heap = !!(flags & PANFROST_BO_HEAP);

	return bo;
}

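/*
 * Import a dma-buf as a panfrost BO. Imported buffers are always marked
 * non-executable.
 */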
struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sgt)
{
	struct drm_gem_object *obj;
	struct panfrost_gem_object *bo;

	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	bo = to_panfrost_bo(obj);
	bo->noexec = true;

	return obj;
}