1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2013 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #ifndef __MSM_RINGBUFFER_H__
8 #define __MSM_RINGBUFFER_H__
9 
10 #include "drm/gpu_scheduler.h"
11 #include "msm_drv.h"
12 
/*
 * GPU (iova) address of a given member within the ring's shared memptrs
 * block, suitable for emitting into command streams.
 */
#define rbmemptr(ring, member)  \
	((ring)->memptrs_iova + offsetof(struct msm_rbmemptrs, member))
15 
/*
 * GPU (iova) address of a member of the index'th submit-stats slot in
 * the ring's memptrs block (see msm_rbmemptrs::stats).
 */
#define rbmemptr_stats(ring, index, member) \
	(rbmemptr((ring), stats) + \
	 ((index) * sizeof(struct msm_gpu_submit_stats)) + \
	 offsetof(struct msm_gpu_submit_stats, member))
20 
/*
 * Counter samples taken around a submit.  Instances live in the
 * volatile stats[] array of struct msm_rbmemptrs, i.e. they are
 * updated outside normal CPU control flow (presumably by the GPU
 * writing the memptrs block — confirm against the command emission
 * code).
 */
struct msm_gpu_submit_stats {
	u64 cpcycles_start;	/* counter sample at start of submit */
	u64 cpcycles_end;	/* counter sample at end of submit */
	u64 alwayson_start;	/* always-on counter sample at start of submit */
	u64 alwayson_end;	/* always-on counter sample at end of submit */
};
27 
/* Number of submit-stats slots kept in the memptrs block. */
#define MSM_GPU_SUBMIT_STATS_COUNT 64

/*
 * Per-ring shared-memory block, addressed on the GPU side via
 * ring->memptrs_iova (see rbmemptr()).  Fields are volatile because
 * they are updated outside normal CPU control flow; do not reorder or
 * resize members without auditing rbmemptr() users.
 */
struct msm_rbmemptrs {
	volatile uint32_t rptr;		/* ring read pointer */
	volatile uint32_t fence;	/* last completed fence value */

	/* per-submit counter samples, indexed via rbmemptr_stats() */
	volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
	/* NOTE(review): name suggests pagetable base (ARM TTBR0) — confirm */
	volatile u64 ttbr0;
};
37 
/*
 * State for one GPU ringbuffer: the backing GEM buffer and write
 * cursors, the drm_gpu_scheduler instance feeding it, the in-flight
 * submit list, and the shared memptrs block used to read back
 * rptr/fence from the GPU side.
 */
struct msm_ringbuffer {
	struct msm_gpu *gpu;		/* the GPU this ring belongs to */
	int id;				/* ring index */
	struct drm_gem_object *bo;	/* GEM buffer backing the ring */
	/*
	 * CPU pointers into the ring: [start, end) bounds, cur = last
	 * flushed position, next = staging write position (see OUT_RING()).
	 */
	uint32_t *start, *end, *cur, *next;

	/*
	 * The job scheduler for this ring.
	 */
	struct drm_gpu_scheduler sched;

	/*
	 * List of in-flight submits on this ring.  Protected by submit_lock.
	 *
	 * Currently just submits that are already written into the ring, not
	 * submits that are still in drm_gpu_scheduler's queues.  At a later
	 * step we could probably move to letting drm_gpu_scheduler manage
	 * hangcheck detection and keep track of submit jobs that are in-
	 * flight.
	 */
	struct list_head submits;
	spinlock_t submit_lock;

	uint64_t iova;			/* GPU address of the ring buffer */
	uint32_t hangcheck_fence;	/* fence value seen at last hangcheck */
	struct msm_rbmemptrs *memptrs;	/* CPU mapping of the shared block */
	uint64_t memptrs_iova;		/* GPU address of the shared block */
	struct msm_fence_context *fctx;	/* fence context for this ring */

	/*
	 * preempt_lock protects preemption and serializes wptr updates against
	 * preemption.  Can be acquired from irq context.
	 */
	spinlock_t preempt_lock;
};
73 
/*
 * Allocate and initialize ringbuffer @id for @gpu.  @memptrs/@memptrs_iova
 * give the CPU mapping and GPU address of this ring's slice of the shared
 * memptrs block.  Teardown is via msm_ringbuffer_destroy().
 */
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
		void *memptrs, uint64_t memptrs_iova);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
77 
78 /* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
79 
80 static inline void
OUT_RING(struct msm_ringbuffer * ring,uint32_t data)81 OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
82 {
83 	/*
84 	 * ring->next points to the current command being written - it won't be
85 	 * committed as ring->cur until the flush
86 	 */
87 	if (ring->next == ring->end)
88 		ring->next = ring->start;
89 	*(ring->next++) = data;
90 }
91 
92 #endif /* __MSM_RINGBUFFER_H__ */
93