/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm_sarea.h"
#include "radeon.h"
#include "radeon_drm.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>

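/**
 * radeon_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * Tears down the modeset and device state set up at load time (skipped
 * if the MMIO mapping was never established) and frees the radeon_device
 * hanging off dev->dev_private. Returns 0.
 */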
int radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev == NULL)
		return 0;
	if (rdev->rmmio == NULL)
		goto done_free;
	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);

done_free:
	kfree(rdev);
	dev->dev_private = NULL;
	return 0;
}

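/**
 * radeon_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * Allocates the radeon_device, works out the bus type (AGP, PCIE or PCI),
 * then initializes the hardware (radeon_device_init) and modesetting
 * (radeon_modeset_init). On any fatal error the whole thing is unwound
 * via radeon_driver_unload_kms(). Returns 0 on success, error code on
 * failure.
 */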
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct radeon_device *rdev;
	int r, acpi_status;

	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
	if (rdev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)rdev;

	pci_set_master(dev->pdev);

	/* update BUS flag */
	if (drm_pci_device_is_agp(dev)) {
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(dev->pdev)) {
		flags |= RADEON_IS_PCIE;
	} else {
		flags |= RADEON_IS_PCI;
	}

	/* radeon_device_init() should report only fatal errors
	 * (memory allocation, iomapping or memory manager
	 * initialization failures). It must properly initialize
	 * the GPU MC controller and permit VRAM allocation.
	 */
	r = radeon_device_init(rdev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods */
	acpi_status = radeon_acpi_init(rdev);
	if (acpi_status)
		dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");

	/* Again, modeset_init should fail only on a fatal error;
	 * otherwise it should provide enough functionality for
	 * shadowfb to run.
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
out:
	if (r)
		radeon_driver_unload_kms(dev);
	return r;
}

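/*
 * radeon_set_filp_rights - grant or revoke exclusive access for one client.
 *
 * Used by the hyper-z and cmask paths of the INFO ioctl: an input value
 * of 1 requests ownership (granted only if nobody owns it yet), a value
 * of 0 releases it (only if @applier is the current owner). On return,
 * *value reports whether @applier is now the owner.
 */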
static void radeon_set_filp_rights(struct drm_device *dev,
				   struct drm_file **owner,
				   struct drm_file *applier,
				   uint32_t *value)
{
	mutex_lock(&dev->struct_mutex);
	if (*value == 1) {
		/* wants rights */
		if (!*owner)
			*owner = applier;
	} else if (*value == 0) {
		/* revokes rights */
		if (*owner == applier)
			*owner = NULL;
	}
	*value = *owner == applier ? 1 : 0;
	mutex_unlock(&dev->struct_mutex);
}

/*
 * Userspace get information ioctl
 */
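/*
 * Illustrative sketch only (not part of this driver): a userspace client
 * would typically reach this ioctl through libdrm, roughly as below. The
 * helper and header names (drmCommandWriteRead, DRM_RADEON_INFO,
 * <xf86drm.h>) follow the usual libdrm conventions and are assumed here
 * rather than defined in this file; fd is an open DRM device node.
 *
 *	struct drm_radeon_info info = {};
 *	uint32_t value = 0;
 *
 *	info.request = RADEON_INFO_NUM_GB_PIPES;
 *	info.value = (uintptr_t)&value;	// user pointer passed as a u64
 *	if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)) == 0)
 *		printf("GB pipes: %u\n", value);	// kernel wrote the answer back
 */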
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info;
	struct radeon_mode_info *minfo = &rdev->mode_info;
	uint32_t *value_ptr;
	uint32_t value;
	struct drm_crtc *crtc;
	int i, found;

	info = data;
	value_ptr = (uint32_t *)((unsigned long)info->value);
	if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value)))
		return -EFAULT;

	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		value = dev->pci_device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		value = rdev->num_gb_pipes;
		break;
	case RADEON_INFO_NUM_Z_PIPES:
		value = rdev->num_z_pipes;
		break;
	case RADEON_INFO_ACCEL_WORKING:
		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
			value = false;
		else
			value = rdev->accel_working;
		break;
	case RADEON_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == value) {
				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
				value = radeon_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", value);
			return -EINVAL;
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
		value = rdev->accel_working;
		break;
	case RADEON_INFO_TILING_CONFIG:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.tile_config;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.tile_config;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.tile_config;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.tile_config;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.tile_config;
		else {
			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_WANT_HYPERZ:
		/* The "value" here is both an input and output parameter.
		 * If the input value is 1, filp requests hyper-z access.
		 * If the input value is 0, filp revokes its hyper-z access.
		 *
		 * When returning, the value is 1 if filp owns hyper-z access,
		 * 0 otherwise. */
		if (value >= 2) {
			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, &value);
		break;
	case RADEON_INFO_WANT_CMASK:
		/* The same logic as Hyper-Z. */
		if (value >= 2) {
			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, &value);
		break;
	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
		/* return clock value in KHz
		 * (the reference clock is stored in units of 10 kHz)
		 */
		value = rdev->clock.spll.reference_freq * 10;
		break;
	case RADEON_INFO_NUM_BACKENDS:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_backends_per_se *
				rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_backends_per_se *
				rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_backends;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_backends;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_backends;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_NUM_TILE_PIPES:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_tile_pipes;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_tile_pipes;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_tile_pipes;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_tile_pipes;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_tile_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_FUSION_GART_WORKING:
		value = 1;
		break;
	case RADEON_INFO_BACKEND_MAP:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.backend_map;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.backend_map;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.backend_map;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.backend_map;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.backend_map;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_VA_START:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		value = RADEON_VA_RESERVED_SIZE;
		break;
	case RADEON_INFO_IB_VM_MAX_SIZE:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		value = RADEON_IB_VM_MAX_SIZE;
		break;
	case RADEON_INFO_MAX_PIPES:
		if (rdev->family >= CHIP_TAHITI)
			value = rdev->config.si.max_pipes_per_simd;
		else if (rdev->family >= CHIP_CAYMAN)
			value = rdev->config.cayman.max_pipes_per_simd;
		else if (rdev->family >= CHIP_CEDAR)
			value = rdev->config.evergreen.max_pipes;
		else if (rdev->family >= CHIP_RV770)
			value = rdev->config.rv770.max_pipes;
		else if (rdev->family >= CHIP_R600)
			value = rdev->config.r600.max_pipes;
		else {
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
		return -EINVAL;
	}
	if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}
	return 0;
}


/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
int radeon_driver_firstopen_kms(struct drm_device *dev)
{
	return 0;
}

void radeon_driver_lastclose_kms(struct drm_device *dev)
{
	vga_switcheroo_process_delayed_switch();
}

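/**
 * radeon_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On chips with virtual address space support (Cayman and newer) this
 * allocates a per-open radeon_fpriv and initializes its VM; older chips
 * keep driver_priv at NULL. Returns 0 on success, -ENOMEM or the
 * radeon_vm_init() error code on failure.
 */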
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	file_priv->driver_priv = NULL;

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {
		struct radeon_fpriv *fpriv;
		int r;

		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
		if (unlikely(!fpriv)) {
			return -ENOMEM;
		}

		r = radeon_vm_init(rdev, &fpriv->vm);
		if (r) {
			radeon_vm_fini(rdev, &fpriv->vm);
			kfree(fpriv);
			return r;
		}

		file_priv->driver_priv = fpriv;
	}
	return 0;
}

void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	/* newer GPUs have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;

		radeon_vm_fini(rdev, &fpriv->vm);
		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
}

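/*
 * radeon_driver_preclose_kms - drop per-file exclusive rights.
 *
 * Clears hyper-z and cmask ownership if the closing file currently
 * holds them.
 */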
void radeon_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
}

/*
 * VBlank related functions.
 */
u32 radeon_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	return radeon_get_vblank_counter(rdev, crtc);
}

int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	rdev->irq.crtc_vblank_int[crtc] = true;

	return radeon_irq_set(rdev);
}

void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return;
	}

	rdev->irq.crtc_vblank_int[crtc] = false;

	radeon_irq_set(rdev);
}

int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags)
{
	struct drm_crtc *drmcrtc;
	struct radeon_device *rdev = dev->dev_private;

	if (crtc < 0 || crtc >= dev->num_crtcs) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	/* Get associated drm_crtc: */
	drmcrtc = &rdev->mode_info.crtcs[crtc]->base;

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags,
						     drmcrtc);
}

/*
 * IOCTL.
 */
int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	/* Not valid in KMS. */
	return -EINVAL;
}

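/*
 * KMS_INVALID_IOCTL - stub generator for the legacy (UMS) ioctls below.
 * Each generated function just logs the call and returns -EINVAL, e.g.
 * KMS_INVALID_IOCTL(radeon_cp_init_kms) expands to:
 *
 *	int radeon_cp_init_kms(struct drm_device *dev, void *data,
 *			       struct drm_file *file_priv)
 *	{
 *		DRM_ERROR("invalid ioctl with kms %s\n", __func__);
 *		return -EINVAL;
 *	}
 */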
#define KMS_INVALID_IOCTL(name)						\
int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
{									\
	DRM_ERROR("invalid ioctl with kms %s\n", __func__);		\
	return -EINVAL;							\
}

/*
 * All these ioctls are invalid in kms world.
 */
KMS_INVALID_IOCTL(radeon_cp_init_kms)
KMS_INVALID_IOCTL(radeon_cp_start_kms)
KMS_INVALID_IOCTL(radeon_cp_stop_kms)
KMS_INVALID_IOCTL(radeon_cp_reset_kms)
KMS_INVALID_IOCTL(radeon_cp_idle_kms)
KMS_INVALID_IOCTL(radeon_cp_resume_kms)
KMS_INVALID_IOCTL(radeon_engine_reset_kms)
KMS_INVALID_IOCTL(radeon_fullscreen_kms)
KMS_INVALID_IOCTL(radeon_cp_swap_kms)
KMS_INVALID_IOCTL(radeon_cp_clear_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex_kms)
KMS_INVALID_IOCTL(radeon_cp_indices_kms)
KMS_INVALID_IOCTL(radeon_cp_texture_kms)
KMS_INVALID_IOCTL(radeon_cp_stipple_kms)
KMS_INVALID_IOCTL(radeon_cp_indirect_kms)
KMS_INVALID_IOCTL(radeon_cp_vertex2_kms)
KMS_INVALID_IOCTL(radeon_cp_cmdbuf_kms)
KMS_INVALID_IOCTL(radeon_cp_getparam_kms)
KMS_INVALID_IOCTL(radeon_cp_flip_kms)
KMS_INVALID_IOCTL(radeon_mem_alloc_kms)
KMS_INVALID_IOCTL(radeon_mem_free_kms)
KMS_INVALID_IOCTL(radeon_mem_init_heap_kms)
KMS_INVALID_IOCTL(radeon_irq_emit_kms)
KMS_INVALID_IOCTL(radeon_irq_wait_kms)
KMS_INVALID_IOCTL(radeon_cp_setparam_kms)
KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
KMS_INVALID_IOCTL(radeon_surface_free_kms)


struct drm_ioctl_desc radeon_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESET, radeon_cp_reset_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_CP_IDLE, radeon_cp_idle_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CP_RESUME, radeon_cp_resume_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_RESET, radeon_engine_reset_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FULLSCREEN, radeon_fullscreen_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SWAP, radeon_cp_swap_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CLEAR, radeon_cp_clear_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX, radeon_cp_vertex_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDICES, radeon_cp_indices_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_TEXTURE, radeon_cp_texture_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_STIPPLE, radeon_cp_stipple_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INDIRECT, radeon_cp_indirect_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_VERTEX2, radeon_cp_vertex2_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_CMDBUF, radeon_cp_cmdbuf_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_GETPARAM, radeon_cp_getparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FLIP, radeon_cp_flip_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_ALLOC, radeon_mem_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_FREE, radeon_mem_free_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_INIT_HEAP, radeon_mem_init_heap_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_EMIT, radeon_irq_emit_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_IRQ_WAIT, radeon_irq_wait_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SETPARAM, radeon_cp_setparam_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
	/* KMS */
	DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);