1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2020 Intel Corporation
4  */
5 
6 #include <drm/drm_fourcc.h>
7 
8 #include "display/intel_display.h"
9 #include "gem/i915_gem_ioctls.h"
10 #include "gem/i915_gem_lmem.h"
11 #include "gem/i915_gem_region.h"
12 #include "pxp/intel_pxp.h"
13 
14 #include "i915_drv.h"
15 #include "i915_gem_create.h"
16 #include "i915_trace.h"
17 #include "i915_user_extensions.h"
18 
object_max_page_size(struct intel_memory_region ** placements,unsigned int n_placements)19 static u32 object_max_page_size(struct intel_memory_region **placements,
20 				unsigned int n_placements)
21 {
22 	u32 max_page_size = 0;
23 	int i;
24 
25 	for (i = 0; i < n_placements; i++) {
26 		struct intel_memory_region *mr = placements[i];
27 
28 		GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
29 		max_page_size = max_t(u32, max_page_size, mr->min_page_size);
30 	}
31 
32 	GEM_BUG_ON(!max_page_size);
33 	return max_page_size;
34 }
35 
object_set_placements(struct drm_i915_gem_object * obj,struct intel_memory_region ** placements,unsigned int n_placements)36 static int object_set_placements(struct drm_i915_gem_object *obj,
37 				 struct intel_memory_region **placements,
38 				 unsigned int n_placements)
39 {
40 	struct intel_memory_region **arr;
41 	unsigned int i;
42 
43 	GEM_BUG_ON(!n_placements);
44 
45 	/*
46 	 * For the common case of one memory region, skip storing an
47 	 * allocated array and just point at the region directly.
48 	 */
49 	if (n_placements == 1) {
50 		struct intel_memory_region *mr = placements[0];
51 		struct drm_i915_private *i915 = mr->i915;
52 
53 		obj->mm.placements = &i915->mm.regions[mr->id];
54 		obj->mm.n_placements = 1;
55 	} else {
56 		arr = kmalloc_array(n_placements,
57 				    sizeof(struct intel_memory_region *),
58 				    GFP_KERNEL);
59 		if (!arr)
60 			return -ENOMEM;
61 
62 		for (i = 0; i < n_placements; i++)
63 			arr[i] = placements[i];
64 
65 		obj->mm.placements = arr;
66 		obj->mm.n_placements = n_placements;
67 	}
68 
69 	return 0;
70 }
71 
/*
 * Expose @obj to userspace: create a GEM handle for it and report the
 * final (rounded-up) object size back through @size_p.  The allocation
 * reference is always dropped here; on success the handle keeps the
 * object alive.
 */
static int i915_gem_publish(struct drm_i915_gem_object *obj,
			    struct drm_file *file,
			    u64 *size_p,
			    u32 *handle_p)
{
	u64 final_size = obj->base.size;
	int err = drm_gem_handle_create(file, &obj->base, handle_p);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (err)
		return err;

	*size_p = final_size;
	return 0;
}
89 
/*
 * Common allocation path behind the create ioctls: rounds @size up to the
 * largest min_page_size of @placements, allocates the object backed by the
 * first (highest priority) region, and applies @ext_flags from any create
 * extensions.  Returns the new object or an ERR_PTR() on failure.
 */
static struct drm_i915_gem_object *
__i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size,
				  struct intel_memory_region **placements,
				  unsigned int n_placements,
				  unsigned int ext_flags)
{
	struct intel_memory_region *mr = placements[0];
	struct drm_i915_gem_object *obj;
	unsigned int flags;
	int ret;

	/* Flush pending object frees before allocating anew. */
	i915_gem_flush_free_objects(i915);

	/* A zero result means the caller asked for zero bytes. */
	size = round_up(size, object_max_page_size(placements, n_placements));
	if (size == 0)
		return ERR_PTR(-EINVAL);

	/* For most of the ABI (e.g. mmap) we think in system pages */
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (i915_gem_object_size_2big(size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	ret = object_set_placements(obj, placements, n_placements);
	if (ret)
		goto object_free;

	/*
	 * I915_BO_ALLOC_USER will make sure the object is cleared before
	 * any user access.
	 */
	flags = I915_BO_ALLOC_USER;

	/* Back the object from the first-choice region. */
	ret = mr->ops->init_object(mr, obj, I915_BO_INVALID_OFFSET, size, 0, flags);
	if (ret)
		goto object_free;

	GEM_BUG_ON(size != obj->base.size);

	/* Add any flag set by create_ext options */
	obj->flags |= ext_flags;

	trace_i915_gem_object_create(obj);
	return obj;

object_free:
	/* object_set_placements() only allocates when n_placements > 1. */
	if (obj->mm.n_placements > 1)
		kfree(obj->mm.placements);
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
}
145 
/**
 * __i915_gem_object_create_user - Creates a new object using the same path as
 *                                 DRM_I915_GEM_CREATE_EXT
 * @i915: i915 private
 * @size: size of the buffer, in bytes
 * @placements: possible placement regions, in priority order
 * @n_placements: number of possible placement regions
 *
 * This function is exposed primarily for selftests and does very little
 * error checking.  It is assumed that the set of placement regions has
 * already been verified to be valid.
 *
 * Return: pointer to the new object on success, or an ERR_PTR() on failure.
 */
158 struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private * i915,u64 size,struct intel_memory_region ** placements,unsigned int n_placements)159 __i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
160 			      struct intel_memory_region **placements,
161 			      unsigned int n_placements)
162 {
163 	return __i915_gem_object_create_user_ext(i915, size, placements,
164 						 n_placements, 0);
165 }
166 
167 int
i915_gem_dumb_create(struct drm_file * file,struct drm_device * dev,struct drm_mode_create_dumb * args)168 i915_gem_dumb_create(struct drm_file *file,
169 		     struct drm_device *dev,
170 		     struct drm_mode_create_dumb *args)
171 {
172 	struct drm_i915_gem_object *obj;
173 	struct intel_memory_region *mr;
174 	enum intel_memory_type mem_type;
175 	int cpp = DIV_ROUND_UP(args->bpp, 8);
176 	u32 format;
177 
178 	switch (cpp) {
179 	case 1:
180 		format = DRM_FORMAT_C8;
181 		break;
182 	case 2:
183 		format = DRM_FORMAT_RGB565;
184 		break;
185 	case 4:
186 		format = DRM_FORMAT_XRGB8888;
187 		break;
188 	default:
189 		return -EINVAL;
190 	}
191 
192 	/* have to work out size/pitch and return them */
193 	args->pitch = ALIGN(args->width * cpp, 64);
194 
195 	/* align stride to page size so that we can remap */
196 	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
197 						    DRM_FORMAT_MOD_LINEAR))
198 		args->pitch = ALIGN(args->pitch, 4096);
199 
200 	if (args->pitch < args->width)
201 		return -EINVAL;
202 
203 	args->size = mul_u32_u32(args->pitch, args->height);
204 
205 	mem_type = INTEL_MEMORY_SYSTEM;
206 	if (HAS_LMEM(to_i915(dev)))
207 		mem_type = INTEL_MEMORY_LOCAL;
208 
209 	mr = intel_memory_region_by_type(to_i915(dev), mem_type);
210 
211 	obj = __i915_gem_object_create_user(to_i915(dev), args->size, &mr, 1);
212 	if (IS_ERR(obj))
213 		return PTR_ERR(obj);
214 
215 	return i915_gem_publish(obj, file, &args->size, &args->handle);
216 }
217 
218 /**
219  * i915_gem_create_ioctl - Creates a new mm object and returns a handle to it.
220  * @dev: drm device pointer
221  * @data: ioctl data blob
222  * @file: drm file pointer
223  */
224 int
i915_gem_create_ioctl(struct drm_device * dev,void * data,struct drm_file * file)225 i915_gem_create_ioctl(struct drm_device *dev, void *data,
226 		      struct drm_file *file)
227 {
228 	struct drm_i915_private *i915 = to_i915(dev);
229 	struct drm_i915_gem_create *args = data;
230 	struct drm_i915_gem_object *obj;
231 	struct intel_memory_region *mr;
232 
233 	mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
234 
235 	obj = __i915_gem_object_create_user(i915, args->size, &mr, 1);
236 	if (IS_ERR(obj))
237 		return PTR_ERR(obj);
238 
239 	return i915_gem_publish(obj, file, &args->size, &args->handle);
240 }
241 
/* Accumulated state parsed from the DRM_I915_GEM_CREATE_EXT extension chain. */
struct create_ext {
	struct drm_i915_private *i915;
	/* Requested placements, in priority order. */
	struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
	/* Number of valid entries in @placements (0 = none supplied). */
	unsigned int n_placements;
	/* Bitmask of region ids present in @placements. */
	unsigned int placement_mask;
	/* Extra I915_BO_* flags to apply to the object. */
	unsigned long flags;
	/* PAT index requested via SET_PAT, or PAT_INDEX_NOT_SET. */
	unsigned int pat_index;
};
250 
/*
 * Format a human-readable "name -> { class, inst }" line per placement
 * into @buf for debug output, stopping silently once @size is exhausted.
 */
static void repr_placements(char *buf, size_t size,
			    struct intel_memory_region **placements,
			    int n_placements)
{
	int idx;

	buf[0] = '\0';

	for (idx = 0; idx < n_placements; idx++) {
		struct intel_memory_region *region = placements[idx];
		int written;

		written = snprintf(buf, size, "\n  %s -> { class: %d, inst: %d }",
				   region->name, region->type, region->instance);
		/* snprintf reports the would-be length; bail on truncation. */
		if (written >= size)
			return;

		buf += written;
		size -= written;
	}
}
272 
/*
 * Validate the userspace placement list from the MEMORY_REGIONS create
 * extension and stash it in @ext_data.
 *
 * Returns 0 on success, -EFAULT on a faulting user pointer, or -EINVAL
 * for malformed args, unknown/duplicate regions, or a repeated extension
 * (with a debug dump of the placements seen so far).
 */
static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
			  struct create_ext *ext_data)
{
	struct drm_i915_private *i915 = ext_data->i915;
	struct drm_i915_gem_memory_class_instance __user *uregions =
		u64_to_user_ptr(args->regions);
	struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
	u32 mask;
	int i, ret = 0;

	/* Basic argument sanity; all failures reported before returning. */
	if (args->pad) {
		drm_dbg(&i915->drm, "pad should be zero\n");
		ret = -EINVAL;
	}

	if (!args->num_regions) {
		drm_dbg(&i915->drm, "num_regions is zero\n");
		ret = -EINVAL;
	}

	BUILD_BUG_ON(ARRAY_SIZE(i915->mm.regions) != ARRAY_SIZE(placements));
	BUILD_BUG_ON(ARRAY_SIZE(ext_data->placements) != ARRAY_SIZE(placements));
	if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
		drm_dbg(&i915->drm, "num_regions is too large\n");
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	/* Copy in each region, rejecting unknown and duplicate entries. */
	mask = 0;
	for (i = 0; i < args->num_regions; i++) {
		struct drm_i915_gem_memory_class_instance region;
		struct intel_memory_region *mr;

		if (copy_from_user(&region, uregions, sizeof(region)))
			return -EFAULT;

		mr = intel_memory_region_lookup(i915,
						region.memory_class,
						region.memory_instance);
		if (!mr || mr->private) {
			drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n",
				region.memory_class, region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		if (mask & BIT(mr->id)) {
			drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
				mr->name, region.memory_class,
				region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		placements[i] = mr;
		mask |= BIT(mr->id);

		++uregions;
	}

	/* Only one MEMORY_REGIONS extension may appear in the chain. */
	if (ext_data->n_placements) {
		ret = -EINVAL;
		goto out_dump;
	}

	/* Everything validated; commit to ext_data. */
	ext_data->n_placements = args->num_regions;
	for (i = 0; i < args->num_regions; i++)
		ext_data->placements[i] = placements[i];

	ext_data->placement_mask = mask;
	return 0;

out_dump:
	/* if (1) scopes the large stack buffer to the error path only. */
	if (1) {
		char buf[256];

		if (ext_data->n_placements) {
			repr_placements(buf,
					sizeof(buf),
					ext_data->placements,
					ext_data->n_placements);
			drm_dbg(&i915->drm,
				"Placements were already set in previous EXT. Existing placements: %s\n",
				buf);
		}

		/* i is the count of entries validated before the failure. */
		repr_placements(buf, sizeof(buf), placements, i);
		drm_dbg(&i915->drm, "New placements(so far validated): %s\n", buf);
	}

	return ret;
}
367 
/* MEMORY_REGIONS extension handler: snapshot the args, then validate. */
static int ext_set_placements(struct i915_user_extension __user *base,
			      void *data)
{
	struct drm_i915_gem_create_ext_memory_regions local;

	if (copy_from_user(&local, base, sizeof(local)))
		return -EFAULT;

	return set_placements(&local, data);
}
378 
/* PROTECTED_CONTENT extension handler: request a PXP-protected object. */
static int ext_set_protected(struct i915_user_extension __user *base, void *data)
{
	struct create_ext *ext_data = data;
	struct drm_i915_gem_create_ext_protected_content args;

	if (copy_from_user(&args, base, sizeof(args)))
		return -EFAULT;

	/* No flags are defined for this extension. */
	if (args.flags)
		return -EINVAL;

	/* Protected objects require PXP to be available. */
	if (!intel_pxp_is_enabled(ext_data->i915->pxp))
		return -ENODEV;

	ext_data->flags |= I915_BO_PROTECTED;
	return 0;
}
397 
/* SET_PAT extension handler: record a caller-chosen PAT index. */
static int ext_set_pat(struct i915_user_extension __user *base, void *data)
{
	struct create_ext *ext_data = data;
	struct drm_i915_private *i915 = ext_data->i915;
	struct drm_i915_gem_create_ext_set_pat pat_ext;

	/* uAPI struct must end exactly at its reserved field. */
	BUILD_BUG_ON(sizeof(struct drm_i915_gem_create_ext_set_pat) !=
		     offsetofend(struct drm_i915_gem_create_ext_set_pat, rsvd));

	/* Limiting the extension only to Meteor Lake */
	if (!IS_METEORLAKE(i915))
		return -ENODEV;

	if (copy_from_user(&pat_ext, base, sizeof(pat_ext)))
		return -EFAULT;

	if (pat_ext.pat_index > INTEL_INFO(i915)->max_pat_index) {
		drm_dbg(&i915->drm, "PAT index is invalid: %u\n",
			pat_ext.pat_index);
		return -EINVAL;
	}

	ext_data->pat_index = pat_ext.pat_index;
	return 0;
}
427 
/* Dispatch table for create_ext extensions, indexed by extension id. */
static const i915_user_extension_fn create_extensions[] = {
	[I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
	[I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected,
	[I915_GEM_CREATE_EXT_SET_PAT] = ext_set_pat,
};
433 
/* Sentinel meaning "no SET_PAT extension was supplied". */
#define PAT_INDEX_NOT_SET	0xffff
/**
 * i915_gem_create_ext_ioctl - Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_create_ext *args = data;
	struct create_ext ext_data = { .i915 = i915 };
	struct drm_i915_gem_object *obj;
	int ret;

	/* NEEDS_CPU_ACCESS is the only flag currently accepted. */
	if (args->flags & ~I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS)
		return -EINVAL;

	ext_data.pat_index = PAT_INDEX_NOT_SET;
	/* Walk the user-supplied extension chain, filling in ext_data. */
	ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
				   create_extensions,
				   ARRAY_SIZE(create_extensions),
				   &ext_data);
	if (ret)
		return ret;

	/* No placements given: default to system memory. */
	if (!ext_data.n_placements) {
		ext_data.placements[0] =
			intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
		ext_data.n_placements = 1;
	}

	if (args->flags & I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) {
		/* Single-placement requests may not set this flag. */
		if (ext_data.n_placements == 1)
			return -EINVAL;

		/*
		 * We always need to be able to spill to system memory, if we
		 * can't place in the mappable part of LMEM.
		 */
		if (!(ext_data.placement_mask & BIT(INTEL_REGION_SMEM)))
			return -EINVAL;
	} else {
		/* Without CPU access, non-system placements become GPU only. */
		if (ext_data.n_placements > 1 ||
		    ext_data.placements[0]->type != INTEL_MEMORY_SYSTEM)
			ext_data.flags |= I915_BO_ALLOC_GPU_ONLY;
	}

	obj = __i915_gem_object_create_user_ext(i915, args->size,
						ext_data.placements,
						ext_data.n_placements,
						ext_data.flags);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	if (ext_data.pat_index != PAT_INDEX_NOT_SET) {
		i915_gem_object_set_pat_index(obj, ext_data.pat_index);
		/* Mark pat_index is set by UMD */
		obj->pat_set_by_user = true;
	}

	return i915_gem_publish(obj, file, &args->size, &args->handle);
}
499