#ifndef __DRM_DRM_LEGACY_H__
#define __DRM_DRM_LEGACY_H__
/*
 * Legacy driver interfaces for the Direct Rendering Manager
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * All rights reserved.
 * Copyright © 2014 Intel Corporation
 *   Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/agp_backend.h>

#include <drm/drm.h>
#include <drm/drm_auth.h>

struct drm_device;
struct drm_driver;
struct file;
struct pci_driver;

/*
 * Legacy Support for paleontologic DRM drivers
 *
 * If you add a new driver and it uses any of these functions or structures,
 * you're doing it terribly wrong.
 */

/*
 * Hash-table Support
 */

struct drm_hash_item {
        struct hlist_node head;
        unsigned long key;
};

struct drm_open_hash {
        struct hlist_head *table;
        u8 order;
};

/**
 * DMA buffer.
 */
struct drm_buf {
        int idx;                        /**< Index into master buflist */
        int total;                      /**< Buffer size */
        int order;                      /**< log-base-2(total) */
        int used;                       /**< Amount of buffer in use (for DMA) */
        unsigned long offset;           /**< Byte offset (used internally) */
        void *address;                  /**< Address of buffer */
        unsigned long bus_address;      /**< Bus address of buffer */
        struct drm_buf *next;           /**< Kernel-only: used for free list */
        __volatile__ int waiting;       /**< On kernel DMA queue */
        __volatile__ int pending;       /**< On hardware DMA queue */
        struct drm_file *file_priv;     /**< Private of holding file descr */
        int context;                    /**< Kernel queue for this buffer */
        int while_locked;               /**< Dispatch this buffer while locked */
        enum {
                DRM_LIST_NONE = 0,
                DRM_LIST_FREE = 1,
                DRM_LIST_WAIT = 2,
                DRM_LIST_PEND = 3,
                DRM_LIST_PRIO = 4,
                DRM_LIST_RECLAIM = 5
        } list;                         /**< Which list we're on */

        int dev_priv_size;              /**< Size of buffer private storage */
        void *dev_private;              /**< Per-buffer private storage */
};

typedef struct drm_dma_handle {
        dma_addr_t busaddr;
        void *vaddr;
        size_t size;
} drm_dma_handle_t;

/**
 * Buffer entry. There is one of these for each buffer size order.
 */
struct drm_buf_entry {
        int buf_size;                   /**< size */
        int buf_count;                  /**< number of buffers */
        struct drm_buf *buflist;        /**< buffer list */
        int seg_count;
        int page_order;
        struct drm_dma_handle **seglist;

        int low_mark;                   /**< Low water mark */
        int high_mark;                  /**< High water mark */
};

/**
 * DMA data.
 */
struct drm_device_dma {

        struct drm_buf_entry bufs[DRM_MAX_ORDER + 1];   /**< buffers, grouped by their size order */
        int buf_count;                  /**< total number of buffers */
        struct drm_buf **buflist;       /**< Vector of pointers into drm_device_dma::bufs */
        int seg_count;
        int page_count;                 /**< number of pages */
        unsigned long *pagelist;        /**< page list */
        unsigned long byte_count;
        enum {
                _DRM_DMA_USE_AGP = 0x01,
                _DRM_DMA_USE_SG = 0x02,
                _DRM_DMA_USE_FB = 0x04,
                _DRM_DMA_USE_PCI_RO = 0x08
        } flags;

};

/**
 * Scatter-gather memory.
 */
struct drm_sg_mem {
        unsigned long handle;
        void *virtual;
        int pages;
        struct page **pagelist;
        dma_addr_t *busaddr;
};

/**
 * Kernel side of a mapping
 */
struct drm_local_map {
        dma_addr_t offset;              /**< Requested physical address (0 for SAREA)*/
        unsigned long size;             /**< Requested physical size (bytes) */
        enum drm_map_type type;         /**< Type of memory to map */
        enum drm_map_flags flags;       /**< Flags */
        void *handle;                   /**< User-space: "Handle" to pass to mmap() */
                                        /**< Kernel-space: kernel-virtual address */
        int mtrr;                       /**< MTRR slot used */
};

typedef struct drm_local_map drm_local_map_t;

/**
 * Mappings list
 */
struct drm_map_list {
        struct list_head head;          /**< list head */
        struct drm_hash_item hash;
        struct drm_local_map *map;      /**< mapping */
        uint64_t user_token;
        struct drm_master *master;
};

int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
                      unsigned int size, enum drm_map_type type,
                      enum drm_map_flags flags, struct drm_local_map **map_p);
struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token);
void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma);
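
/*
 * Example (sketch, not taken from any particular driver): a legacy driver
 * would typically create a register mapping at load time roughly like this,
 * where "reg_base" and "reg_size" are placeholders for the device's register
 * BAR address and length:
 *
 *	struct drm_local_map *map;
 *	int ret;
 *
 *	ret = drm_legacy_addmap(dev, reg_base, reg_size, _DRM_REGISTERS,
 *				_DRM_READ_ONLY, &map);
 *	if (ret)
 *		return ret;
 *
 * The mapping is torn down again with drm_legacy_rmmap(dev, map).
 */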

int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);

/**
 * Test that the hardware lock is held by the caller, returning -EINVAL
 * otherwise.
 *
 * \param dev DRM device.
 * \param _file_priv DRM file private of the caller.
 */
#define LOCK_TEST_WITH_RETURN( dev, _file_priv )                                \
do {                                                                            \
        if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) ||       \
            _file_priv->master->lock.file_priv != _file_priv) {                 \
                DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
                           __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
                           _file_priv->master->lock.file_priv, _file_priv);     \
                return -EINVAL;                                                 \
        }                                                                       \
} while (0)
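
/*
 * Example (sketch): LOCK_TEST_WITH_RETURN() expands to an early
 * "return -EINVAL", so it may only be used in functions returning int,
 * typically at the top of a legacy ioctl handler.  foo_dma_ioctl() is a
 * made-up name:
 *
 *	static int foo_dma_ioctl(struct drm_device *dev, void *data,
 *				 struct drm_file *file_priv)
 *	{
 *		LOCK_TEST_WITH_RETURN(dev, file_priv);
 *		...
 *		return 0;
 *	}
 */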

void drm_legacy_idlelock_take(struct drm_lock_data *lock);
void drm_legacy_idlelock_release(struct drm_lock_data *lock);

/* drm_irq.c */
int drm_legacy_irq_uninstall(struct drm_device *dev);

/* drm_pci.c */

#ifdef CONFIG_PCI

int drm_legacy_pci_init(const struct drm_driver *driver,
                        struct pci_driver *pdriver);
void drm_legacy_pci_exit(const struct drm_driver *driver,
                         struct pci_driver *pdriver);

#else

static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
                                                   size_t size, size_t align)
{
        return NULL;
}

static inline void drm_pci_free(struct drm_device *dev,
                                struct drm_dma_handle *dmah)
{
}

static inline int drm_legacy_pci_init(const struct drm_driver *driver,
                                      struct pci_driver *pdriver)
{
        return -EINVAL;
}

static inline void drm_legacy_pci_exit(const struct drm_driver *driver,
                                       struct pci_driver *pdriver)
{
}

#endif
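
/*
 * Example (sketch): a legacy PCI driver registers itself from its module
 * init/exit hooks.  foo_driver and foo_pci_driver are made-up names for the
 * driver's struct drm_driver and struct pci_driver instances:
 *
 *	static int __init foo_init(void)
 *	{
 *		return drm_legacy_pci_init(&foo_driver, &foo_pci_driver);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		drm_legacy_pci_exit(&foo_driver, &foo_pci_driver);
 *	}
 *
 *	module_init(foo_init);
 *	module_exit(foo_exit);
 */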

/*
 * AGP Support
 */

struct drm_agp_head {
        struct agp_kern_info agp_info;
        struct list_head memory;
        unsigned long mode;
        struct agp_bridge_data *bridge;
        int enabled;
        int acquired;
        unsigned long base;
        int agp_mtrr;
        int cant_use_aperture;
        unsigned long page_mask;
};

#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP)
struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev);
int drm_legacy_agp_acquire(struct drm_device *dev);
int drm_legacy_agp_release(struct drm_device *dev);
int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info);
int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
#else
static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev)
{
        return NULL;
}

static inline int drm_legacy_agp_acquire(struct drm_device *dev)
{
        return -ENODEV;
}

static inline int drm_legacy_agp_release(struct drm_device *dev)
{
        return -ENODEV;
}

static inline int drm_legacy_agp_enable(struct drm_device *dev,
                                        struct drm_agp_mode mode)
{
        return -ENODEV;
}

static inline int drm_legacy_agp_info(struct drm_device *dev,
                                      struct drm_agp_info *info)
{
        return -ENODEV;
}

static inline int drm_legacy_agp_alloc(struct drm_device *dev,
                                       struct drm_agp_buffer *request)
{
        return -ENODEV;
}

static inline int drm_legacy_agp_free(struct drm_device *dev,
                                      struct drm_agp_buffer *request)
{
        return -ENODEV;
}

static inline int drm_legacy_agp_unbind(struct drm_device *dev,
                                        struct drm_agp_binding *request)
{
        return -ENODEV;
}

static inline int drm_legacy_agp_bind(struct drm_device *dev,
                                      struct drm_agp_binding *request)
{
        return -ENODEV;
}
#endif
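
/*
 * Example (sketch): the kernel-side calling order mirrors the legacy AGP
 * ioctls - the bridge must be acquired before a mode is enabled or memory
 * is bound.  The mode value below is purely illustrative:
 *
 *	struct drm_agp_mode mode = { .mode = 0 };
 *	int ret;
 *
 *	ret = drm_legacy_agp_acquire(dev);
 *	if (ret)
 *		return ret;
 *	ret = drm_legacy_agp_enable(dev, mode);
 *	if (ret)
 *		return ret;
 */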

/* drm_memory.c */
void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);
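
/*
 * Example (sketch): ioremapping a register map previously created with
 * drm_legacy_addmap(); "map" is assumed to be a _DRM_REGISTERS or
 * _DRM_FRAME_BUFFER mapping, and map->handle is NULL if the ioremap failed:
 *
 *	drm_legacy_ioremap(map, dev);
 *	if (!map->handle)
 *		return -ENOMEM;
 *	...
 *	drm_legacy_ioremapfree(map, dev);
 */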

#endif /* __DRM_DRM_LEGACY_H__ */