/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
 */

#ifndef __LINUX_HOST1X_H
#define __LINUX_HOST1X_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/spinlock.h>
#include <linux/types.h>

enum host1x_class {
	HOST1X_CLASS_HOST1X = 0x1,
	HOST1X_CLASS_GR2D = 0x51,
	HOST1X_CLASS_GR2D_SB = 0x52,
	HOST1X_CLASS_VIC = 0x5D,
	HOST1X_CLASS_GR3D = 0x60,
	HOST1X_CLASS_NVDEC = 0xF0,
	HOST1X_CLASS_NVDEC1 = 0xF5,
};

struct host1x;
struct host1x_client;
struct iommu_group;

u64 host1x_get_dma_mask(struct host1x *host1x);

/**
 * struct host1x_bo_cache - host1x buffer object cache
 * @mappings: list of mappings
 * @lock: synchronizes accesses to the list of mappings
 *
 * Note that entries are not periodically evicted from this cache and instead need to be
 * explicitly released. This is used primarily for DRM/KMS where the cache's reference is
 * released when the last reference to a buffer object represented by a mapping in this
 * cache is dropped.
 */
struct host1x_bo_cache {
	struct list_head mappings;
	struct mutex lock;
};

static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
{
	INIT_LIST_HEAD(&cache->mappings);
	mutex_init(&cache->lock);
}

static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
{
	/* XXX warn if not empty? */
	mutex_destroy(&cache->lock);
}
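
/*
 * A minimal usage sketch (hypothetical caller, not part of this API): a
 * client embeds a cache, initializes it before handing it to the mapping
 * helpers further below and tears it down once all mappings have been
 * released. Note that &struct host1x_client already embeds one as @cache.
 *
 *	struct host1x_bo_cache cache;
 *
 *	host1x_bo_cache_init(&cache);
 *	...
 *	host1x_bo_cache_destroy(&cache);
 */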

/**
 * struct host1x_client_ops - host1x client operations
 * @early_init: host1x client early initialization code
 * @init: host1x client initialization code
 * @exit: host1x client tear down code
 * @late_exit: host1x client late tear down code
 * @suspend: host1x client suspend code
 * @resume: host1x client resume code
 */
struct host1x_client_ops {
	int (*early_init)(struct host1x_client *client);
	int (*init)(struct host1x_client *client);
	int (*exit)(struct host1x_client *client);
	int (*late_exit)(struct host1x_client *client);
	int (*suspend)(struct host1x_client *client);
	int (*resume)(struct host1x_client *client);
};
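
/*
 * Ops table sketch (the foo_* callbacks are hypothetical): a client driver
 * typically only fills in the callbacks it needs and leaves the rest NULL:
 *
 *	static const struct host1x_client_ops foo_client_ops = {
 *		.init = foo_client_init,
 *		.exit = foo_client_exit,
 *	};
 */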

/**
 * struct host1x_client - host1x client structure
 * @list: list node for the host1x client
 * @host: pointer to struct device representing the host1x controller
 * @dev: pointer to struct device backing this host1x client
 * @group: IOMMU group that this client is a member of
 * @ops: host1x client operations
 * @class: host1x class represented by this client
 * @channel: host1x channel associated with this client
 * @syncpts: array of syncpoints requested for this client
 * @num_syncpts: number of syncpoints requested for this client
 * @parent: pointer to parent structure
 * @usecount: reference count for this structure
 * @lock: mutex for mutually exclusive concurrency
 * @cache: host1x buffer object cache
 */
struct host1x_client {
	struct list_head list;
	struct device *host;
	struct device *dev;
	struct iommu_group *group;

	const struct host1x_client_ops *ops;

	enum host1x_class class;
	struct host1x_channel *channel;

	struct host1x_syncpt **syncpts;
	unsigned int num_syncpts;

	struct host1x_client *parent;
	unsigned int usecount;
	struct mutex lock;

	struct host1x_bo_cache cache;
};
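
/*
 * Drivers usually embed &struct host1x_client in their own device structure
 * and recover it with container_of() from the client ops. A hedged sketch
 * (the "foo" driver and its fields are illustrative only):
 *
 *	struct foo {
 *		struct host1x_client client;
 *		void __iomem *regs;
 *	};
 *
 *	static inline struct foo *to_foo(struct host1x_client *client)
 *	{
 *		return container_of(client, struct foo, client);
 *	}
 */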

/*
 * host1x buffer objects
 */

struct host1x_bo;
struct sg_table;

struct host1x_bo_mapping {
	struct kref ref;
	struct dma_buf_attachment *attach;
	enum dma_data_direction direction;
	struct list_head list;
	struct host1x_bo *bo;
	struct sg_table *sgt;
	unsigned int chunks;
	struct device *dev;
	dma_addr_t phys;
	size_t size;

	struct host1x_bo_cache *cache;
	struct list_head entry;
};

static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
{
	return container_of(ref, struct host1x_bo_mapping, ref);
}

struct host1x_bo_ops {
	struct host1x_bo *(*get)(struct host1x_bo *bo);
	void (*put)(struct host1x_bo *bo);
	struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
					 enum dma_data_direction dir);
	void (*unpin)(struct host1x_bo_mapping *map);
	void *(*mmap)(struct host1x_bo *bo);
	void (*munmap)(struct host1x_bo *bo, void *addr);
};

struct host1x_bo {
	const struct host1x_bo_ops *ops;
	struct list_head mappings;
	spinlock_t lock;
};

static inline void host1x_bo_init(struct host1x_bo *bo,
				  const struct host1x_bo_ops *ops)
{
	INIT_LIST_HEAD(&bo->mappings);
	spin_lock_init(&bo->lock);
	bo->ops = ops;
}

static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
{
	return bo->ops->get(bo);
}

static inline void host1x_bo_put(struct host1x_bo *bo)
{
	bo->ops->put(bo);
}

struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
					enum dma_data_direction dir,
					struct host1x_bo_cache *cache);
void host1x_bo_unpin(struct host1x_bo_mapping *map);

static inline void *host1x_bo_mmap(struct host1x_bo *bo)
{
	return bo->ops->mmap(bo);
}

static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
{
	bo->ops->munmap(bo, addr);
}
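
/*
 * Typical mapping flow (sketch only, error handling abbreviated; assumes an
 * ERR_PTR-style return from host1x_bo_pin() on failure): pin a buffer object
 * for DMA, program the returned IOVA and release the mapping once the
 * hardware is done with it:
 *
 *	struct host1x_bo_mapping *map;
 *
 *	map = host1x_bo_pin(dev, bo, DMA_TO_DEVICE, &client->cache);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	... program map->phys into the command stream ...
 *
 *	host1x_bo_unpin(map);
 */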

/*
 * host1x syncpoints
 */

#define HOST1X_SYNCPT_CLIENT_MANAGED (1 << 0)
#define HOST1X_SYNCPT_HAS_BASE (1 << 1)

struct host1x_syncpt_base;
struct host1x_syncpt;
struct host1x;

struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
u32 host1x_syncpt_read(struct host1x_syncpt *sp);
int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value);
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
					    unsigned long flags);
void host1x_syncpt_put(struct host1x_syncpt *sp);
struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
					  unsigned long flags,
					  const char *name);

struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);

void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
					      u32 syncpt_id);

struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold);
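
/*
 * A hedged usage sketch (names other than the host1x_syncpt_*() calls are
 * illustrative): request a client-managed syncpoint, reserve one increment
 * and wait for the resulting threshold before dropping the reference; the
 * request is assumed to return NULL when no syncpoint is available:
 *
 *	struct host1x_syncpt *sp;
 *	u32 threshold, value;
 *	int err;
 *
 *	sp = host1x_syncpt_request(client, HOST1X_SYNCPT_CLIENT_MANAGED);
 *	if (!sp)
 *		return -ENOMEM;
 *
 *	threshold = host1x_syncpt_incr_max(sp, 1);
 *	... submit work that increments the syncpoint ...
 *	err = host1x_syncpt_wait(sp, threshold, timeout, &value);
 *
 *	host1x_syncpt_put(sp);
 */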

/*
 * host1x channel
 */

struct host1x_channel;
struct host1x_job;

struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
void host1x_channel_stop(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
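
/*
 * Channel lifetime sketch (hypothetical caller): a client requests a channel
 * once, submits jobs to it and drops its reference with host1x_channel_put()
 * when the channel is no longer needed; the request is assumed to return
 * NULL when no channel is available:
 *
 *	channel = host1x_channel_request(client);
 *	if (!channel)
 *		return -EBUSY;
 *	...
 *	err = host1x_job_submit(job);
 *	...
 *	host1x_channel_put(channel);
 */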

/*
 * host1x job
 */

#define HOST1X_RELOC_READ (1 << 0)
#define HOST1X_RELOC_WRITE (1 << 1)

struct host1x_reloc {
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} cmdbuf;
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} target;
	unsigned long shift;
	unsigned long flags;
};

struct host1x_job {
	/* When refcount goes to zero, job can be freed */
	struct kref ref;

	/* List entry */
	struct list_head list;

	/* Channel where job is submitted to */
	struct host1x_channel *channel;

	/* Client where the job originated */
	struct host1x_client *client;

	/* Gathers and their memory */
	struct host1x_job_cmd *cmds;
	unsigned int num_cmds;

	/* Array of handles to be pinned & unpinned */
	struct host1x_reloc *relocs;
	unsigned int num_relocs;
	struct host1x_job_unpin_data *unpins;
	unsigned int num_unpins;

	dma_addr_t *addr_phys;
	dma_addr_t *gather_addr_phys;
	dma_addr_t *reloc_addr_phys;

	/* Sync point id, number of increments and end related to the submit */
	struct host1x_syncpt *syncpt;
	u32 syncpt_incrs;
	u32 syncpt_end;

	/* Completion waiter ref */
	void *waiter;

	/* Maximum time to wait for this job */
	unsigned int timeout;

	/* Job has timed out and should be released */
	bool cancelled;

	/* Index and number of slots used in the push buffer */
	unsigned int first_get;
	unsigned int num_slots;

	/* Copy of gathers */
	size_t gather_copy_size;
	dma_addr_t gather_copy;
	u8 *gather_copy_mapped;

	/* Check if register is marked as an address reg */
	int (*is_addr_reg)(struct device *dev, u32 class, u32 reg);

	/* Check if class belongs to the unit */
	int (*is_valid_class)(u32 class);

	/* Request a SETCLASS to this class */
	u32 class;

	/* Add a channel wait for previous ops to complete */
	bool serialize;

	/* Fast-forward syncpoint increments on job timeout */
	bool syncpt_recovery;

	/* Callback called when job is freed */
	void (*release)(struct host1x_job *job);
	void *user_data;

	/* Whether the host1x-side firewall should be run for this job or not */
	bool enable_firewall;

	/* Options for configuring engine data stream ID */
	/* Context device to use for job */
	struct host1x_memory_context *memory_context;
	/* Stream ID to use if context isolation is disabled (!memory_context) */
	u32 engine_fallback_streamid;
	/* Engine offset to program stream ID to */
	u32 engine_streamid_offset;
};

struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs,
				    bool skip_firewall);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset);
void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
			 bool relative, u32 next_class);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);
void host1x_job_unpin(struct host1x_job *job);
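
/*
 * End-to-end job sketch (illustrative only; a real driver also sets up
 * relocations, the syncpoint and the class): allocate a job with room for
 * one command buffer, add a gather, pin the buffers, submit and drop the
 * reference:
 *
 *	job = host1x_job_alloc(channel, 1, 0, false);
 *	if (!job)
 *		return -ENOMEM;
 *
 *	host1x_job_add_gather(job, bo, num_words, 0);
 *
 *	err = host1x_job_pin(job, dev);
 *	if (err)
 *		goto put;
 *
 *	err = host1x_job_submit(job);
 *	if (err)
 *		host1x_job_unpin(job);
 * put:
 *	host1x_job_put(job);
 */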

/*
 * subdevice probe infrastructure
 */

struct host1x_device;

/**
 * struct host1x_driver - host1x logical device driver
 * @driver: core driver
 * @subdevs: table of OF device IDs matching subdevices for this driver
 * @list: list node for the driver
 * @probe: called when the host1x logical device is probed
 * @remove: called when the host1x logical device is removed
 * @shutdown: called when the host1x logical device is shut down
 */
struct host1x_driver {
	struct device_driver driver;

	const struct of_device_id *subdevs;
	struct list_head list;

	int (*probe)(struct host1x_device *device);
	int (*remove)(struct host1x_device *device);
	void (*shutdown)(struct host1x_device *device);
};

static inline struct host1x_driver *
to_host1x_driver(struct device_driver *driver)
{
	return container_of(driver, struct host1x_driver, driver);
}

int host1x_driver_register_full(struct host1x_driver *driver,
				struct module *owner);
void host1x_driver_unregister(struct host1x_driver *driver);

#define host1x_driver_register(driver) \
	host1x_driver_register_full(driver, THIS_MODULE)

struct host1x_device {
	struct host1x_driver *driver;
	struct list_head list;
	struct device dev;

	struct mutex subdevs_lock;
	struct list_head subdevs;
	struct list_head active;

	struct mutex clients_lock;
	struct list_head clients;

	bool registered;

	struct device_dma_parameters dma_parms;
};

static inline struct host1x_device *to_host1x_device(struct device *dev)
{
	return container_of(dev, struct host1x_device, dev);
}

int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);

void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
void host1x_client_exit(struct host1x_client *client);

#define host1x_client_init(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
	})

int __host1x_client_register(struct host1x_client *client);

/*
 * Note that this wrapper calls __host1x_client_init() for compatibility
 * with existing callers. Callers that want to separately initialize and
 * register a host1x client must first initialize using either of the
 * __host1x_client_init() or host1x_client_init() functions and then use
 * the low-level __host1x_client_register() function to avoid the client
 * getting reinitialized.
 */
#define host1x_client_register(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
		__host1x_client_register(client);	\
	})
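
/*
 * Registration sketch for a platform driver probe (the "foo" names are
 * hypothetical): fill in the client, register it with the host1x bus and
 * let the bus call back into the ops once all subdevices are available;
 * the matching host1x_client_unregister() belongs in the remove path:
 *
 *	foo->client.dev = &pdev->dev;
 *	foo->client.ops = &foo_client_ops;
 *	foo->client.class = HOST1X_CLASS_GR2D;
 *
 *	err = host1x_client_register(&foo->client);
 *	if (err < 0)
 *		return err;
 */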

int host1x_client_unregister(struct host1x_client *client);

int host1x_client_suspend(struct host1x_client *client);
int host1x_client_resume(struct host1x_client *client);

struct tegra_mipi_device;

struct tegra_mipi_device *tegra_mipi_request(struct device *device,
					     struct device_node *np);
void tegra_mipi_free(struct tegra_mipi_device *device);
int tegra_mipi_enable(struct tegra_mipi_device *device);
int tegra_mipi_disable(struct tegra_mipi_device *device);
int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);
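
/*
 * MIPI pad calibration sketch (ordering only; @mipi is assumed to come from
 * an earlier tegra_mipi_request() call and error handling is abbreviated):
 *
 *	err = tegra_mipi_enable(mipi);
 *	...
 *	err = tegra_mipi_start_calibration(mipi);
 *	... bring up the DSI/CSI link ...
 *	err = tegra_mipi_finish_calibration(mipi);
 *	...
 *	tegra_mipi_disable(mipi);
 *	tegra_mipi_free(mipi);
 */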

/* host1x memory contexts */

struct host1x_memory_context {
	struct host1x *host;

	refcount_t ref;
	struct pid *owner;

	struct device dev;
	u64 dma_mask;
	u32 stream_id;
};

#ifdef CONFIG_IOMMU_API
struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
							   struct pid *pid);
void host1x_memory_context_get(struct host1x_memory_context *cd);
void host1x_memory_context_put(struct host1x_memory_context *cd);
#else
static inline struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
									 struct pid *pid)
{
	return NULL;
}

static inline void host1x_memory_context_get(struct host1x_memory_context *cd)
{
}

static inline void host1x_memory_context_put(struct host1x_memory_context *cd)
{
}
#endif
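
/*
 * Memory context sketch (only meaningful with CONFIG_IOMMU_API; the pid
 * handling shown is an assumption of this example): allocate a context for
 * the calling process, point a job at it and drop the reference once the
 * job has been freed:
 *
 *	ctx = host1x_memory_context_alloc(host1x, task_pid(current));
 *	if (IS_ERR_OR_NULL(ctx))
 *		return ctx ? PTR_ERR(ctx) : -ENOMEM;
 *
 *	job->memory_context = ctx;
 *	...
 *	host1x_memory_context_put(ctx);
 */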

#endif