/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

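/*
 * Mailbox handshake between the virtual function (VF) and the physical
 * function (PF, i.e. the GIM host driver): a sender raises *_MSG_VALID
 * after placing a message in the MSGBUF dwords, and the receiver answers
 * with *_MSG_ACK. The helpers below implement the VF side.
 *
 * xgpu_ai_mailbox_send_ack() acks the currently latched message, then
 * waits up to AI_MAILBOX_TIMEDOUT ms for the PF to drop RCV_MSG_VALID.
 */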
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
	u32 reg;
	int timeout = AI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_ACK, 1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
				       mmBIF_BX_PF0_MAILBOX_CONTROL), reg);

	/* Wait for RCV_MSG_VALID to be 0 */
	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	while (reg & mask) {
		if (timeout <= 0) {
			pr_err("RCV_MSG_VALID is not cleared\n");
			break;
		}
		mdelay(1);
		timeout -= 1;

		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
	}
}

static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_CONTROL,
			    TRN_MSG_VALID, val ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_CONTROL),
		      reg);
}

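/*
 * Check whether the message latched in the receive mailbox matches
 * @event and, if so, consume it by sending an ack. Note that for
 * IDH_FLR_NOTIFICATION_CMPL the RCV_MSG_VALID test is skipped and
 * RCV_DW0 is compared directly.
 */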
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, RCV_MSG_VALID);

	if (event != IDH_FLR_NOTIFICATION_CMPL) {
		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
		if (!(reg & mask))
			return -ENOENT;
	}

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
	if (reg != event)
		return -ENOENT;

	xgpu_ai_mailbox_send_ack(adev);

	return 0;
}

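/*
 * Poll TRN_MSG_ACK in 5 ms steps until the PF acknowledges the message
 * we transmitted, or fail with -ETIME after AI_MAILBOX_TIMEDOUT ms.
 */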
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
	u32 mask = REG_FIELD_MASK(BIF_BX_PF0_MAILBOX_CONTROL, TRN_MSG_ACK);
	u32 reg;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_CONTROL));
	while (!(reg & mask)) {
		if (timeout <= 0) {
			pr_err("Didn't get ack from pf\n");
			r = -ETIME;
			break;
		}
		mdelay(5);
		timeout -= 5;

		reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
						     mmBIF_BX_PF0_MAILBOX_CONTROL));
	}

	return r;
}

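/*
 * Poll the receive mailbox for @event in 5 ms steps, returning -ETIME
 * if it has not arrived within AI_MAILBOX_TIMEDOUT ms.
 */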
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r = 0, timeout = AI_MAILBOX_TIMEDOUT;

	r = xgpu_ai_mailbox_rcv_msg(adev, event);
	while (r) {
		if (timeout <= 0) {
			pr_err("Didn't get msg:%d from pf\n", event);
			r = -ETIME;
			break;
		}
		mdelay(5);
		timeout -= 5;

		r = xgpu_ai_mailbox_rcv_msg(adev, event);
	}

	return r;
}

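/*
 * Transmit path: write the request id and up to three payload dwords
 * into the TRN message buffer, raise TRN_MSG_VALID, wait for the PF's
 * ack, and finally drop TRN_MSG_VALID to finish the handshake.
 */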
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req,
				      u32 data1, u32 data2, u32 data3)
{
	u32 reg;
	int r;

	reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					     mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
	reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
			    MSGBUF_DATA, req);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
		      reg);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
		      data1);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
		      data2);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
		      data3);

	xgpu_ai_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_ai_poll_ack(adev);
	if (r)
		pr_err("Didn't get ack from pf, continue\n");

	xgpu_ai_mailbox_set_valid(adev, false);
}

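/*
 * Send an access request to the PF. For the init/fini/reset access
 * requests the PF is expected to answer with IDH_READY_TO_ACCESS_GPU;
 * on a successful init request the firmware-reserve checksum key is
 * additionally returned in RCV_DW2.
 */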
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;

	xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

	/* start to check msg if request is one of the GPU access requests */
	if (req == IDH_REQ_GPU_INIT_ACCESS ||
	    req == IDH_REQ_GPU_FINI_ACCESS ||
	    req == IDH_REQ_GPU_RESET_ACCESS) {
		r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
		if (r) {
			pr_err("Didn't get READY_TO_ACCESS_GPU from pf, give up\n");
			return r;
		}
		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
					mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
		}
	}

	return 0;
}

static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
	return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_ai_send_access_requests(adev, req);

	return r;
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

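/*
 * Deferred work scheduled from the receive interrupt: wait for the PF
 * to signal completion of the function level reset (FLR), then trigger
 * GPU recovery to bring the VF back into a usable state.
 */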
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);

	/* wait until RCV_MSG becomes IDH_FLR_NOTIFICATION_CMPL */
	if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
		pr_err("failed to receive FLR_CMPL\n");
		return;
	}

	/* Trigger recovery due to world switch failure */
	amdgpu_device_gpu_recover(adev, NULL, false);
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

	tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
	WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

	return 0;
}

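/*
 * Receive-mailbox interrupt handler. When the amdgpu_gpu_recovery
 * module parameter is disabled (TDR off), an FLR notification from the
 * PF schedules flr_work; the ack is sent even if the notification
 * itself was missed, so the PF is not left waiting on the handshake.
 */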
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	int r;

	/* trigger gpu-reset by hypervisor only if TDR is disabled */
	if (!amdgpu_gpu_recovery) {
		/* see what event we get */
		r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);

		/* sometimes the interrupt injection into the VM is delayed,
		 * in which case the IDH_FLR_NOTIFICATION is overwritten by
		 * the VF FLR from the GIM side; the receive above can then
		 * fail, so schedule the flr_work anyway
		 */
		if (r) {
			DRM_ERROR("FLR_NOTIFICATION is missed\n");
			xgpu_ai_mailbox_send_ack(adev);
		}

		schedule_work(&adev->virt.flr_work);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
	.set = xgpu_ai_set_mailbox_ack_irq,
	.process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
	.set = xgpu_ai_set_mailbox_rcv_irq,
	.process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

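/*
 * Hook up the BIF mailbox interrupt sources: per the registrations
 * below, source id 135 carries "message received" events (rcv_irq) and
 * source id 138 carries transmit-ack events (ack_irq).
 */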
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

	return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

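/*
 * VF operations exported to the rest of amdgpu via adev->virt.ops.
 * Callers reach them through the amdgpu_virt wrappers, roughly (a
 * sketch, not the exact call sites):
 *
 *	if (amdgpu_sriov_vf(adev))
 *		r = adev->virt.ops->req_full_gpu(adev, true);
 *
 * wait_reset is left NULL on this generation.
 */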
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
	.req_full_gpu = xgpu_ai_request_full_gpu_access,
	.rel_full_gpu = xgpu_ai_release_full_gpu_access,
	.reset_gpu = xgpu_ai_request_reset,
	.wait_reset = NULL,
	.trans_msg = xgpu_ai_mailbox_trans_msg,
};