1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */
2 /*
3  * Copyright 2014-2022 Advanced Micro Devices, Inc.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be included in
13  * all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21  * OTHER DEALINGS IN THE SOFTWARE.
22  *
23  */
24 
25 #ifndef KFD_DEVICE_QUEUE_MANAGER_H_
26 #define KFD_DEVICE_QUEUE_MANAGER_H_
27 
28 #include <linux/rwsem.h>
29 #include <linux/list.h>
30 #include <linux/mutex.h>
31 #include <linux/sched/mm.h>
32 #include "kfd_priv.h"
33 #include "kfd_mqd_manager.h"
34 
35 
/* Number of hardware VMIDs; sizes the per-VMID PASID mapping table below */
#define VMID_NUM 16

/*
 * Scheduling quanta handed to the MES firmware scheduler.
 * NOTE(review): units are defined by the MES interface, not visible
 * here — confirm against the MES firmware documentation.
 */
#define KFD_MES_PROCESS_QUANTUM		100000
#define KFD_MES_GANG_QUANTUM		10000
40 
/*
 * List node binding a process's queue-manager state (qcm_process_device)
 * into a device-level list (see device_queue_manager.queues).
 */
struct device_process_node {
	struct qcm_process_device *qpd;
	struct list_head list;
};
45 
/*
 * Bit layout of the hardware SQ_CMD register (wave control commands).
 * The bitfield layout mirrors the register definition exactly — do not
 * reorder or resize fields.  Unnamed fields are reserved bits.
 */
union SQ_CMD_BITS {
	struct {
		uint32_t cmd:3;
		uint32_t:1;		/* reserved */
		uint32_t mode:3;
		uint32_t check_vmid:1;
		uint32_t trap_id:3;
		uint32_t:5;		/* reserved */
		uint32_t wave_id:4;
		uint32_t simd_id:2;
		uint32_t:2;		/* reserved */
		uint32_t queue_id:3;
		uint32_t:1;		/* reserved */
		uint32_t vm_id:4;
	} bitfields, bits;
	uint32_t u32All;	/* whole-register access */
	signed int i32All;
	float f32All;
};
65 
/*
 * Bit layout of the hardware GRBM_GFX_INDEX register, which selects the
 * shader-engine/shader-array/instance targeted by subsequent register
 * accesses (or broadcasts to all of them).  Layout mirrors the register
 * definition exactly — do not reorder or resize fields.
 */
union GRBM_GFX_INDEX_BITS {
	struct {
		uint32_t instance_index:8;
		uint32_t sh_index:8;
		uint32_t se_index:8;
		uint32_t:5;		/* reserved */
		uint32_t sh_broadcast_writes:1;
		uint32_t instance_broadcast_writes:1;
		uint32_t se_broadcast_writes:1;
	} bitfields, bits;
	uint32_t u32All;	/* whole-register access */
	signed int i32All;
	float f32All;
};
80 
81 /**
82  * struct device_queue_manager_ops
83  *
84  * @create_queue: Queue creation routine.
85  *
86  * @destroy_queue: Queue destruction routine.
87  *
88  * @update_queue: Queue update routine.
89  *
 * @execute_queues: Dispatches the queues list to the H/W.
91  *
92  * @register_process: This routine associates a specific process with device.
93  *
 * @unregister_process: Destroys the association between a process and a device.
95  *
96  * @initialize: Initializes the pipelines and memory module for that device.
97  *
98  * @start: Initializes the resources/modules the device needs for queues
99  * execution. This function is called on device initialization and after the
100  * system woke up after suspension.
101  *
 * @stop: Stops execution of all the active queues running on the H/W;
 * called on system suspend.
104  *
105  * @uninitialize: Destroys all the device queue manager resources allocated in
106  * initialize routine.
107  *
108  * @create_kernel_queue: Creates kernel queue. Used for debug queue.
109  *
110  * @destroy_kernel_queue: Destroys kernel queue. Used for debug queue.
111  *
112  * @set_cache_memory_policy: Sets memory policy (cached/ non cached) for the
113  * memory apertures.
114  *
 * @process_termination: Clears all process queues belonging to that device.
116  *
117  * @evict_process_queues: Evict all active queues of a process
118  *
119  * @restore_process_queues: Restore all evicted queues of a process
120  *
121  * @get_wave_state: Retrieves context save state and optionally copies the
122  * control stack, if kept in the MQD, to the given userspace address.
123  *
124  * @reset_queues: reset queues which consume RAS poison
125  * @get_queue_checkpoint_info: Retrieves queue size information for CRIU checkpoint.
126  *
127  * @checkpoint_mqd: checkpoint queue MQD contents for CRIU.
128  */
129 
/*
 * Scheduler-mode-specific operations table; see the kernel-doc above for
 * per-callback descriptions.  Filled in differently for HWS (CP-scheduled)
 * and non-HWS configurations.
 */
struct device_queue_manager_ops {
	int	(*create_queue)(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				const struct kfd_criu_queue_priv_data *qd,
				const void *restore_mqd,
				const void *restore_ctl_stack);

	int	(*destroy_queue)(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q);

	int	(*update_queue)(struct device_queue_manager *dqm,
				struct queue *q, struct mqd_update_info *minfo);

	int	(*register_process)(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);

	int	(*unregister_process)(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);

	int	(*initialize)(struct device_queue_manager *dqm);
	int	(*start)(struct device_queue_manager *dqm);
	int	(*stop)(struct device_queue_manager *dqm);
	/* Not covered by the kernel-doc above: invoked before a device reset */
	void	(*pre_reset)(struct device_queue_manager *dqm);
	void	(*uninitialize)(struct device_queue_manager *dqm);
	int	(*create_kernel_queue)(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd);

	void	(*destroy_kernel_queue)(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd);

	bool	(*set_cache_memory_policy)(struct device_queue_manager *dqm,
					   struct qcm_process_device *qpd,
					   enum cache_policy default_policy,
					   enum cache_policy alternate_policy,
					   void __user *alternate_aperture_base,
					   uint64_t alternate_aperture_size);

	int (*process_termination)(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd);

	int (*evict_process_queues)(struct device_queue_manager *dqm,
				    struct qcm_process_device *qpd);
	int (*restore_process_queues)(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd);

	int	(*get_wave_state)(struct device_queue_manager *dqm,
				  struct queue *q,
				  void __user *ctl_stack,
				  u32 *ctl_stack_used_size,
				  u32 *save_area_used_size);

	int (*reset_queues)(struct device_queue_manager *dqm,
					uint16_t pasid);
	void	(*get_queue_checkpoint_info)(struct device_queue_manager *dqm,
				  const struct queue *q, u32 *mqd_size,
				  u32 *ctl_stack_size);

	int	(*checkpoint_mqd)(struct device_queue_manager *dqm,
				  const struct queue *q,
				  void *mqd,
				  void *ctl_stack);
};
196 
/*
 * ASIC-generation-specific hooks.  Each GPU generation fills this table
 * through its device_queue_manager_init_*() function (declared below).
 */
struct device_queue_manager_asic_ops {
	/* Program per-process queue-manager registers/state for this ASIC */
	int	(*update_qpd)(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);
	/* ASIC-specific memory aperture cache-policy setup */
	bool	(*set_cache_memory_policy)(struct device_queue_manager *dqm,
					   struct qcm_process_device *qpd,
					   enum cache_policy default_policy,
					   enum cache_policy alternate_policy,
					   void __user *alternate_aperture_base,
					   uint64_t alternate_aperture_size);
	/* ASIC-specific SDMA queue VM setup */
	void	(*init_sdma_vm)(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd);
	/* Factory for the ASIC's MQD manager of the given type */
	struct mqd_manager *	(*mqd_manager_init)(enum KFD_MQD_TYPE type,
				 struct kfd_dev *dev);
};
212 
213 /**
214  * struct device_queue_manager
215  *
216  * This struct is a base class for the kfd queues scheduler in the
217  * device level. The device base class should expose the basic operations
218  * for queue creation and queue destruction. This base class hides the
219  * scheduling mode of the driver and the specific implementation of the
220  * concrete device. This class is the only class in the queues scheduler
221  * that configures the H/W.
222  *
223  */
224 
struct device_queue_manager {
	struct device_queue_manager_ops ops;
	struct device_queue_manager_asic_ops asic_ops;

	struct mqd_manager	*mqd_mgrs[KFD_MQD_TYPE_MAX];
	struct packet_manager	packet_mgr;
	struct kfd_dev		*dev;
	struct mutex		lock_hidden; /* use dqm_lock/unlock(dqm) */
	struct list_head	queues;	/* of struct device_process_node */
	/* memalloc flags saved by dqm_lock(), restored by dqm_unlock() */
	unsigned int		saved_flags;
	unsigned int		processes_count;
	unsigned int		active_queue_count;
	unsigned int		active_cp_queue_count;
	unsigned int		gws_queue_count;
	unsigned int		total_queue_count;
	unsigned int		next_pipe_to_allocate;
	unsigned int		*allocated_queues;
	/*
	 * SDMA queue allocation bitmaps; exact bit semantics are defined
	 * in the device queue manager implementation (.c file).
	 */
	uint64_t		sdma_bitmap;
	uint64_t		xgmi_sdma_bitmap;
	/* the pasid mapping for each kfd vmid */
	uint16_t		vmid_pasid[VMID_NUM];
	uint64_t		pipelines_addr;
	uint64_t		fence_gpu_addr;	/* GPU VA of the scheduler fence */
	uint64_t		*fence_addr;	/* CPU mapping of the same fence */
	struct kfd_mem_obj	*fence_mem;
	bool			active_runlist;
	int			sched_policy;

	/* hw exception handling */
	bool			is_hws_hang;
	bool			is_resetting;
	struct work_struct	hw_exception_work;
	struct kfd_mem_obj	hiq_sdma_mqd;
	bool			sched_running;
};
260 
/* Per-ASIC initializers: each fills asic_ops with that generation's hooks */
void device_queue_manager_init_cik(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_cik_hawaii(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_vi(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_vi_tonga(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v9(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v10_navi10(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v11(
		struct device_queue_manager_asic_ops *asic_ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd);
/* Queue/pipe topology accessors */
unsigned int get_cp_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm);
282 
get_sh_mem_bases_32(struct kfd_process_device * pdd)283 static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
284 {
285 	return (pdd->lds_base >> 16) & 0xFF;
286 }
287 
288 static inline unsigned int
get_sh_mem_bases_nybble_64(struct kfd_process_device * pdd)289 get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
290 {
291 	return (pdd->lds_base >> 60) & 0x0E;
292 }
293 
294 /* The DQM lock can be taken in MMU notifiers. Make sure no reclaim-FS
295  * happens while holding this lock anywhere to prevent deadlocks when
296  * an MMU notifier runs in reclaim-FS context.
297  */
/* Take the DQM lock and enter a no-reclaim allocation context (see the
 * comment above).  saved_flags is written only while holding lock_hidden,
 * so the save must happen after mutex_lock().
 */
static inline void dqm_lock(struct device_queue_manager *dqm)
{
	mutex_lock(&dqm->lock_hidden);
	dqm->saved_flags = memalloc_noreclaim_save();
}
/* Leave the no-reclaim context and release the DQM lock.  saved_flags
 * must be read before dropping lock_hidden (mirror of dqm_lock()).
 */
static inline void dqm_unlock(struct device_queue_manager *dqm)
{
	memalloc_noreclaim_restore(dqm->saved_flags);
	mutex_unlock(&dqm->lock_hidden);
}
308 
/* Read the SDMA activity counter for a user-mode queue.
 * Returns 0 on success with *val filled in, or -EFAULT from get_user()
 * if the user address cannot be read.
 */
static inline int read_sdma_queue_counter(uint64_t __user *q_rptr, uint64_t *val)
{
	/* SDMA activity counter is stored at queue's RPTR + 0x8 location. */
	return get_user(*val, q_rptr + 1);
}
314 #endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
315