/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

#include "amdgpu_reset.h"

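/*
 * Mailbox control-byte accessors. Judging from the values written and
 * tested below, bit 0 of a control byte carries MSG_VALID and bit 1
 * carries MSG_ACK, on both the transmit (TRN) and receive (RCV) sides.
 */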
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return
 * the correct value, since RCV_DW0 is only meaningful while RCV_MSG_VALID
 * is set by the host.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
			     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

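/*
 * Consume the pending mailbox message if it matches @event: ack it so the
 * host can post the next one. Returns -ENOENT when a different (or no)
 * message is pending.
 */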
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

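/* TRN_MSG_ACK is set by the host once it has accepted our message. */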
static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

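/*
 * Busy-wait for the host's TRN_MSG_ACK, checking every 5 ms for up to
 * AI_MAILBOX_POLL_ACK_TIMEDOUT ms.
 */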
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

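/*
 * Sleep-wait for an expected message from the host, checking every 10 ms
 * for up to AI_MAILBOX_POLL_MSG_TIMEDOUT ms.
 */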
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_ai_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);

	return -ETIME;
}

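/*
 * Transmit a request to the host: write the request ID into the
 * MSGBUF_DATA field of TRN_DW0 and the payload into TRN_DW1..DW3, raise
 * TRN_MSG_VALID, then wait for the host's ack before dropping VALID again.
 */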
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID so that the host clears its RCV_MSG_ACK; once
	 * the host's RCV_MSG_ACK is cleared, the hardware clears the VF's
	 * TRN_MSG_ACK in turn. Otherwise a stale ack would make the
	 * xgpu_ai_poll_ack() below return immediately.
	 */
	do {
		xgpu_ai_mailbox_set_valid(adev, false);
		trn = xgpu_ai_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not assert! wait again !\n", trn);
			msleep(1);
		}
	} while (trn);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Doesn't get ack from pf, continue\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

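/*
 * Send a request to the host and, for the requests that expect a reply,
 * wait for the corresponding ready message before returning.
 */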
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* wait for the host's reply for the requests that expect one */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	} else if (req == IDH_REQ_GPU_INIT_DATA) {
		/* Dummy REQ_GPU_INIT_DATA handling */
		r = xgpu_ai_poll_msg(adev, IDH_REQ_GPU_INIT_DATA_READY);
		/* version set to 0 since dummy */
		adev->virt.req_init_data_ver = 0;
	}

	return 0;
}

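/* Request a VF reset, retrying up to AI_MAILBOX_POLL_MSG_REP_MAX times. */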
static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	int ret, i = 0;

	while (i < AI_MAILBOX_POLL_MSG_REP_MAX) {
		ret = xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
		if (!ret)
			break;
		i++;
	}

	return ret;
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

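/* Enable/disable the mailbox ack interrupt (ACK_INT_EN in MAILBOX_INT_CNTL). */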
static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

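/*
 * Handle a function-level reset (FLR) notification from the host: take the
 * reset domain lock so that concurrent GPU recovery is blocked, stop the
 * VF/PF data exchange, tell the host we are ready, then wait for the FLR
 * completion message before kicking off recovery if it is needed.
 */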
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;

	/* block amdgpu_gpu_recover until the FLR COMPLETE message is
	 * received, otherwise the mailbox msg will be corrupted/reset by
	 * the VF FLR.
	 */
	if (atomic_cmpxchg(&adev->reset_domain->in_gpu_reset, 0, 1) != 0)
		return;

	down_write(&adev->reset_domain->sem);

	amdgpu_virt_fini_data_exchange(adev);

	xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);

	do {
		if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	atomic_set(&adev->reset_domain->in_gpu_reset, 0);
	up_write(&adev->reset_domain->sem);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
	    && (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)) {
		struct amdgpu_reset_context reset_context;

		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;
		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
	}
}

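/* Enable/disable the mailbox message-valid interrupt (VALID_INT_EN). */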
static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

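/*
 * Dispatch an incoming mailbox message: FLR notifications are deferred to
 * flr_work, QUERY_ALIVE is simply acked, and everything else is left for
 * the polling paths.
 */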
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev) && !amdgpu_in_reset(adev))
			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
								&adev->virt.flr_work),
				  "Failed to queue work! at %s",
				  __func__);
		break;
	case IDH_QUERY_ALIVE:
		xgpu_ai_mailbox_send_ack(adev);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
	 * ignore it since the polling thread will handle it; other messages
	 * like flr complete are not handled here.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

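/*
 * Register the BIF interrupt sources: 135 for mailbox message-valid
 * (rcv_irq) and 138 for mailbox ack (ack_irq).
 */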
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

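/* Enable both mailbox interrupts and set up the FLR work handler. */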
int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu = xgpu_ai_request_full_gpu_access,
	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_ai_mailbox_trans_msg,
	.req_init_data = xgpu_ai_request_init_data,
};