/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
 */

#ifndef __LINUX_HOST1X_H
#define __LINUX_HOST1X_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-fence.h>
#include <linux/spinlock.h>
#include <linux/types.h>

enum host1x_class {
	HOST1X_CLASS_HOST1X = 0x1,
	HOST1X_CLASS_GR2D = 0x51,
	HOST1X_CLASS_GR2D_SB = 0x52,
	HOST1X_CLASS_VIC = 0x5D,
	HOST1X_CLASS_GR3D = 0x60,
	HOST1X_CLASS_NVDEC = 0xF0,
	HOST1X_CLASS_NVDEC1 = 0xF5,
};

struct host1x;
struct host1x_client;
struct iommu_group;

u64 host1x_get_dma_mask(struct host1x *host1x);

/**
 * struct host1x_bo_cache - host1x buffer object cache
 * @mappings: list of mappings
 * @lock: synchronizes accesses to the list of mappings
 *
 * Note that entries are not periodically evicted from this cache and instead need to be
 * explicitly released. This is used primarily for DRM/KMS where the cache's reference is
 * released when the last reference to a buffer object represented by a mapping in this
 * cache is dropped.
 */
struct host1x_bo_cache {
	struct list_head mappings;
	struct mutex lock;
};

static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
{
	INIT_LIST_HEAD(&cache->mappings);
	mutex_init(&cache->lock);
}

static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
{
	/* XXX warn if not empty? */
	mutex_destroy(&cache->lock);
}
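
/*
 * A minimal usage sketch (the surrounding driver structure is hypothetical;
 * host1x_bo_pin() is declared further below):
 *
 *	struct host1x_bo_cache cache;
 *	struct host1x_bo_mapping *map;
 *
 *	host1x_bo_cache_init(&cache);
 *	map = host1x_bo_pin(dev, bo, DMA_TO_DEVICE, &cache);
 *	...
 *	host1x_bo_unpin(map);
 *	host1x_bo_cache_destroy(&cache);
 */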
57 
/**
 * struct host1x_client_ops - host1x client operations
 * @early_init: host1x client early initialization code
 * @init: host1x client initialization code
 * @exit: host1x client tear down code
 * @late_exit: host1x client late tear down code
 * @suspend: host1x client suspend code
 * @resume: host1x client resume code
 */
struct host1x_client_ops {
	int (*early_init)(struct host1x_client *client);
	int (*init)(struct host1x_client *client);
	int (*exit)(struct host1x_client *client);
	int (*late_exit)(struct host1x_client *client);
	int (*suspend)(struct host1x_client *client);
	int (*resume)(struct host1x_client *client);
};

/**
 * struct host1x_client - host1x client structure
 * @list: list node for the host1x client
 * @host: pointer to struct device representing the host1x controller
 * @dev: pointer to struct device backing this host1x client
 * @group: IOMMU group that this client is a member of
 * @ops: host1x client operations
 * @class: host1x class represented by this client
 * @channel: host1x channel associated with this client
 * @syncpts: array of syncpoints requested for this client
 * @num_syncpts: number of syncpoints requested for this client
 * @parent: pointer to parent structure
 * @usecount: reference count for this structure
 * @lock: mutex for mutually exclusive concurrency
 * @cache: host1x buffer object cache
 */
struct host1x_client {
	struct list_head list;
	struct device *host;
	struct device *dev;
	struct iommu_group *group;

	const struct host1x_client_ops *ops;

	enum host1x_class class;
	struct host1x_channel *channel;

	struct host1x_syncpt **syncpts;
	unsigned int num_syncpts;

	struct host1x_client *parent;
	unsigned int usecount;
	struct mutex lock;

	struct host1x_bo_cache cache;
};
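
/*
 * A hedged sketch of how a driver might set up and register a client
 * (the foo_* names are illustrative, not part of this API):
 *
 *	static const struct host1x_client_ops foo_client_ops = {
 *		.init = foo_client_init,
 *		.exit = foo_client_exit,
 *	};
 *
 *	foo->client.dev = &pdev->dev;
 *	foo->client.ops = &foo_client_ops;
 *	foo->client.class = HOST1X_CLASS_GR2D;
 *
 *	err = host1x_client_register(&foo->client);
 *	if (err < 0)
 *		return err;
 */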
112 
/*
 * host1x buffer objects
 */

struct host1x_bo;
struct sg_table;

struct host1x_bo_mapping {
	struct kref ref;
	struct dma_buf_attachment *attach;
	enum dma_data_direction direction;
	struct list_head list;
	struct host1x_bo *bo;
	struct sg_table *sgt;
	unsigned int chunks;
	struct device *dev;
	dma_addr_t phys;
	size_t size;

	struct host1x_bo_cache *cache;
	struct list_head entry;
};

static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
{
	return container_of(ref, struct host1x_bo_mapping, ref);
}

struct host1x_bo_ops {
	struct host1x_bo *(*get)(struct host1x_bo *bo);
	void (*put)(struct host1x_bo *bo);
	struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
					 enum dma_data_direction dir);
	void (*unpin)(struct host1x_bo_mapping *map);
	void *(*mmap)(struct host1x_bo *bo);
	void (*munmap)(struct host1x_bo *bo, void *addr);
};

struct host1x_bo {
	const struct host1x_bo_ops *ops;
	struct list_head mappings;
	spinlock_t lock;
};

static inline void host1x_bo_init(struct host1x_bo *bo,
				  const struct host1x_bo_ops *ops)
{
	INIT_LIST_HEAD(&bo->mappings);
	spin_lock_init(&bo->lock);
	bo->ops = ops;
}

static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
{
	return bo->ops->get(bo);
}

static inline void host1x_bo_put(struct host1x_bo *bo)
{
	bo->ops->put(bo);
}

struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
					enum dma_data_direction dir,
					struct host1x_bo_cache *cache);
void host1x_bo_unpin(struct host1x_bo_mapping *map);

static inline void *host1x_bo_mmap(struct host1x_bo *bo)
{
	return bo->ops->mmap(bo);
}

static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
{
	bo->ops->munmap(bo, addr);
}
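
/*
 * Typical buffer object flow, sketched under the assumption that a driver
 * embeds struct host1x_bo in its own object and supplies the ops above:
 *
 *	host1x_bo_init(&obj->base, &foo_bo_ops);
 *
 *	map = host1x_bo_pin(dev, &obj->base, DMA_TO_DEVICE, NULL);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *
 *	... program map->phys into the engine ...
 *
 *	host1x_bo_unpin(map);
 */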
189 
/*
 * host1x syncpoints
 */

#define HOST1X_SYNCPT_CLIENT_MANAGED	(1 << 0)
#define HOST1X_SYNCPT_HAS_BASE		(1 << 1)

struct host1x_syncpt_base;
struct host1x_syncpt;
struct host1x;

struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id);
struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
u32 host1x_syncpt_read(struct host1x_syncpt *sp);
int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value);
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
					    unsigned long flags);
void host1x_syncpt_put(struct host1x_syncpt *sp);
struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
					  unsigned long flags,
					  const char *name);

struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);

void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
					      u32 syncpt_id);

struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
				      bool timeout);
void host1x_fence_cancel(struct dma_fence *fence);
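
/*
 * A minimal syncpoint lifecycle, sketched (client is assumed to be a
 * registered host1x client; error handling trimmed):
 *
 *	sp = host1x_syncpt_request(client, HOST1X_SYNCPT_CLIENT_MANAGED);
 *	if (!sp)
 *		return -ENOMEM;
 *
 *	threshold = host1x_syncpt_incr_max(sp, 1);
 *	... submit work that increments the syncpoint once ...
 *	err = host1x_syncpt_wait(sp, threshold, timeout, &value);
 *
 *	host1x_syncpt_put(sp);
 */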
228 
/*
 * host1x channel
 */

struct host1x_channel;
struct host1x_job;

struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
void host1x_channel_stop(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
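
/*
 * Channels are reference counted; a common pattern (sketch only) is to
 * request one when the client initializes and drop it on teardown:
 *
 *	client->channel = host1x_channel_request(client);
 *	if (!client->channel)
 *		return -ENOMEM;
 *	...
 *	host1x_channel_put(client->channel);
 */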
241 
/*
 * host1x job
 */

#define HOST1X_RELOC_READ	(1 << 0)
#define HOST1X_RELOC_WRITE	(1 << 1)

struct host1x_reloc {
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} cmdbuf;
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} target;
	unsigned long shift;
	unsigned long flags;
};

struct host1x_job {
	/* When refcount goes to zero, job can be freed */
	struct kref ref;

	/* List entry */
	struct list_head list;

	/* Channel the job is submitted to */
	struct host1x_channel *channel;

	/* Client where the job originated */
	struct host1x_client *client;

	/* Gathers and their memory */
	struct host1x_job_cmd *cmds;
	unsigned int num_cmds;

	/* Array of handles to be pinned & unpinned */
	struct host1x_reloc *relocs;
	unsigned int num_relocs;
	struct host1x_job_unpin_data *unpins;
	unsigned int num_unpins;

	dma_addr_t *addr_phys;
	dma_addr_t *gather_addr_phys;
	dma_addr_t *reloc_addr_phys;

	/* Sync point ID, number of increments and end related to the submit */
	struct host1x_syncpt *syncpt;
	u32 syncpt_incrs;
	u32 syncpt_end;

	/* Completion fence for job tracking */
	struct dma_fence *fence;
	struct dma_fence_cb fence_cb;

	/* Maximum time to wait for this job */
	unsigned int timeout;

	/* Job has timed out and should be released */
	bool cancelled;

	/* Index and number of slots used in the push buffer */
	unsigned int first_get;
	unsigned int num_slots;

	/* Copy of gathers */
	size_t gather_copy_size;
	dma_addr_t gather_copy;
	u8 *gather_copy_mapped;

	/* Check if register is marked as an address reg */
	int (*is_addr_reg)(struct device *dev, u32 class, u32 reg);

	/* Check if class belongs to the unit */
	int (*is_valid_class)(u32 class);

	/* Request a SETCLASS to this class */
	u32 class;

	/* Add a channel wait for previous ops to complete */
	bool serialize;

	/* Fast-forward syncpoint increments on job timeout */
	bool syncpt_recovery;

	/* Callback called when job is freed */
	void (*release)(struct host1x_job *job);
	void *user_data;

	/* Whether the host1x-side firewall should be run for this job or not */
	bool enable_firewall;

	/* Options for configuring engine data stream ID */
	/* Context device to use for job */
	struct host1x_memory_context *memory_context;
	/* Stream ID to use if context isolation is disabled (!memory_context) */
	u32 engine_fallback_streamid;
	/* Engine offset to program stream ID to */
	u32 engine_streamid_offset;
};
343 
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs,
				    bool skip_firewall);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset);
void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
			 bool relative, u32 next_class);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);
void host1x_job_unpin(struct host1x_job *job);
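
/*
 * A hedged sketch of the job flow (syncpoint setup and error unwinding
 * are abbreviated):
 *
 *	job = host1x_job_alloc(channel, 1, 0, false);
 *	if (!job)
 *		return -ENOMEM;
 *
 *	job->syncpt = host1x_syncpt_get(sp);
 *	job->syncpt_incrs = 1;
 *	host1x_job_add_gather(job, bo, num_words, 0);
 *
 *	err = host1x_job_pin(job, dev);
 *	if (err)
 *		goto put;
 *
 *	err = host1x_job_submit(job);
 *	if (err)
 *		host1x_job_unpin(job);
 * put:
 *	host1x_job_put(job);
 *
 * On success the core keeps its own reference and unpins the job when it
 * completes, so the local reference can be dropped right after submission.
 */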
355 
/*
 * subdevice probe infrastructure
 */

struct host1x_device;

/**
 * struct host1x_driver - host1x logical device driver
 * @driver: core driver
 * @subdevs: table of OF device IDs matching subdevices for this driver
 * @list: list node for the driver
 * @probe: called when the host1x logical device is probed
 * @remove: called when the host1x logical device is removed
 * @shutdown: called when the host1x logical device is shut down
 */
struct host1x_driver {
	struct device_driver driver;

	const struct of_device_id *subdevs;
	struct list_head list;

	int (*probe)(struct host1x_device *device);
	int (*remove)(struct host1x_device *device);
	void (*shutdown)(struct host1x_device *device);
};

static inline struct host1x_driver *
to_host1x_driver(struct device_driver *driver)
{
	return container_of(driver, struct host1x_driver, driver);
}

int host1x_driver_register_full(struct host1x_driver *driver,
				struct module *owner);
void host1x_driver_unregister(struct host1x_driver *driver);

#define host1x_driver_register(driver) \
	host1x_driver_register_full(driver, THIS_MODULE)
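
/*
 * A minimal logical device driver, sketched after the way the Tegra DRM
 * driver uses this infrastructure (names are illustrative):
 *
 *	static const struct of_device_id foo_subdevs[] = {
 *		{ .compatible = "nvidia,tegra20-gr2d" },
 *		{ },
 *	};
 *
 *	static struct host1x_driver foo_driver = {
 *		.driver.name = "foo",
 *		.subdevs = foo_subdevs,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *	};
 *
 *	err = host1x_driver_register(&foo_driver);
 */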
394 
struct host1x_device {
	struct host1x_driver *driver;
	struct list_head list;
	struct device dev;

	struct mutex subdevs_lock;
	struct list_head subdevs;
	struct list_head active;

	struct mutex clients_lock;
	struct list_head clients;

	bool registered;

	struct device_dma_parameters dma_parms;
};

static inline struct host1x_device *to_host1x_device(struct device *dev)
{
	return container_of(dev, struct host1x_device, dev);
}

int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);

void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
void host1x_client_exit(struct host1x_client *client);

#define host1x_client_init(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
	})
428 
int __host1x_client_register(struct host1x_client *client);

/*
 * Note that this wrapper calls __host1x_client_init() for compatibility
 * with existing callers. Callers that want to separately initialize and
 * register a host1x client must first initialize using either
 * __host1x_client_init() or host1x_client_init() and then use the
 * low-level __host1x_client_register() function to avoid the client
 * getting reinitialized.
 */
#define host1x_client_register(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
		__host1x_client_register(client);	\
	})

void host1x_client_unregister(struct host1x_client *client);
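
/*
 * The split flow described above, as a sketch:
 *
 *	host1x_client_init(client);
 *	... set up fields that must follow initialization ...
 *	err = __host1x_client_register(client);
 *	if (err < 0)
 *		host1x_client_exit(client);
 */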
447 
int host1x_client_suspend(struct host1x_client *client);
int host1x_client_resume(struct host1x_client *client);

struct tegra_mipi_device;

struct tegra_mipi_device *tegra_mipi_request(struct device *device,
					     struct device_node *np);
void tegra_mipi_free(struct tegra_mipi_device *device);
int tegra_mipi_enable(struct tegra_mipi_device *device);
int tegra_mipi_disable(struct tegra_mipi_device *device);
int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);
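
/*
 * The expected calibration sequence, sketched (the device and OF node
 * arguments are placeholders):
 *
 *	mipi = tegra_mipi_request(&pdev->dev, pdev->dev.of_node);
 *	if (IS_ERR(mipi))
 *		return PTR_ERR(mipi);
 *
 *	err = tegra_mipi_enable(mipi);
 *	...
 *	err = tegra_mipi_start_calibration(mipi);
 *	... bring up the pads, then ...
 *	err = tegra_mipi_finish_calibration(mipi);
 *
 *	tegra_mipi_disable(mipi);
 *	tegra_mipi_free(mipi);
 */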
460 
/* host1x memory contexts */

struct host1x_memory_context {
	struct host1x *host;

	refcount_t ref;
	struct pid *owner;

	struct device dev;
	u64 dma_mask;
	u32 stream_id;
};

#ifdef CONFIG_IOMMU_API
struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
							  struct device *dev,
							  struct pid *pid);
void host1x_memory_context_get(struct host1x_memory_context *cd);
void host1x_memory_context_put(struct host1x_memory_context *cd);
#else
static inline struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
									struct device *dev,
									struct pid *pid)
{
	return NULL;
}

static inline void host1x_memory_context_get(struct host1x_memory_context *cd)
{
}

static inline void host1x_memory_context_put(struct host1x_memory_context *cd)
{
}
#endif
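
/*
 * A hedged sketch of memory context usage (only meaningful with
 * CONFIG_IOMMU_API enabled; the stub above returns NULL otherwise):
 *
 *	ctx = host1x_memory_context_alloc(host, client->dev,
 *					  get_task_pid(current, PIDTYPE_TGID));
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *
 *	job->memory_context = ctx;
 *	...
 *	host1x_memory_context_put(ctx);
 */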

#endif