Zhi Wange4734052016-05-01 07:42:16 -04001/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Zhi Wang <zhi.a.wang@intel.com>
25 *
26 * Contributors:
27 * Ping Gao <ping.a.gao@intel.com>
28 * Tina Zhang <tina.zhang@intel.com>
 29 * Changbin Du <changbin.du@intel.com>
30 * Min He <min.he@intel.com>
31 * Bing Niu <bing.niu@intel.com>
32 * Zhenyu Wang <zhenyuw@linux.intel.com>
33 *
34 */
35
Zhi Wange4734052016-05-01 07:42:16 -040036#include <linux/kthread.h>
37
Zhenyu Wangfeddf6e2016-10-20 17:15:03 +080038#include "i915_drv.h"
39#include "gvt.h"
40
Zhi Wange4734052016-05-01 07:42:16 -040041#define RING_CTX_OFF(x) \
42 offsetof(struct execlist_ring_context, x)
43
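/*
 * Write PPGTT root pointers (PDPs) into an execlist ring context. The
 * PDP MMIO pairs in the ring context run from pdp3_UDW down to
 * pdp0_LDW, while the incoming pdp[] dwords are ordered the other way
 * around, hence the pdp[7 - i] reversal below.
 */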
Du, Changbin999ccb42016-10-20 14:08:47 +080044static void set_context_pdp_root_pointer(
45 struct execlist_ring_context *ring_context,
Zhi Wange4734052016-05-01 07:42:16 -040046 u32 pdp[8])
47{
48 struct execlist_mmio_pair *pdp_pair = &ring_context->pdp3_UDW;
49 int i;
50
51 for (i = 0; i < 8; i++)
52 pdp_pair[i].val = pdp[7 - i];
53}
54
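/*
 * Refresh the PDP root pointers in the shadow ring context from the
 * workload's shadow PPGTT, so the context submitted to hardware walks
 * the shadow page tables rather than the guest ones. The shadow mm
 * must already be created and pinned, which is checked below.
 */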
Zhi Wangb20c0d52018-02-07 18:12:15 +080055static void update_shadow_pdps(struct intel_vgpu_workload *workload)
56{
57 struct intel_vgpu *vgpu = workload->vgpu;
58 int ring_id = workload->ring_id;
59 struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
60 struct drm_i915_gem_object *ctx_obj =
61 shadow_ctx->engine[ring_id].state->obj;
62 struct execlist_ring_context *shadow_ring_context;
63 struct page *page;
64
65 if (WARN_ON(!workload->shadow_mm))
66 return;
67
68 if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
69 return;
70
71 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
72 shadow_ring_context = kmap(page);
73 set_context_pdp_root_pointer(shadow_ring_context,
74 (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
75 kunmap(page);
76}
77
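/*
 * Fill the shadow context from the guest context: pages from index 2
 * onwards are copied wholesale from guest memory, while the ring
 * context state page (LRC_STATE_PN) is filled register by register.
 * COPY_REG_MASKED also sets the upper 16 bits, the write-enable mask
 * of masked registers such as ctx_ctrl, so that the copied lower bits
 * take effect when the hardware loads the context.
 */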
Zhi Wange4734052016-05-01 07:42:16 -040078static int populate_shadow_context(struct intel_vgpu_workload *workload)
79{
80 struct intel_vgpu *vgpu = workload->vgpu;
81 struct intel_gvt *gvt = vgpu->gvt;
82 int ring_id = workload->ring_id;
Zhi Wang1406a142017-09-10 21:15:18 +080083 struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
Zhi Wange4734052016-05-01 07:42:16 -040084 struct drm_i915_gem_object *ctx_obj =
85 shadow_ctx->engine[ring_id].state->obj;
86 struct execlist_ring_context *shadow_ring_context;
87 struct page *page;
88 void *dst;
89 unsigned long context_gpa, context_page_num;
90 int i;
91
92 gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
93 workload->ctx_desc.lrca);
94
Joonas Lahtinen63ffbcd2017-04-28 10:53:36 +030095 context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
Zhi Wange4734052016-05-01 07:42:16 -040096
97 context_page_num = context_page_num >> PAGE_SHIFT;
98
99 if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
100 context_page_num = 19;
101
102 i = 2;
103
104 while (i < context_page_num) {
105 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
106 (u32)((workload->ctx_desc.lrca + i) <<
Zhi Wang9556e112017-10-10 13:51:32 +0800107 I915_GTT_PAGE_SHIFT));
Zhi Wange4734052016-05-01 07:42:16 -0400108 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
Tina Zhang695fbc02017-03-10 04:26:53 -0500109 gvt_vgpu_err("Invalid guest context descriptor\n");
fred gao5c568832017-09-20 05:36:47 +0800110 return -EFAULT;
Zhi Wange4734052016-05-01 07:42:16 -0400111 }
112
Michel Thierry0b29c752017-09-13 09:56:00 +0100113 page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
Xiaoguang Chenc7549362016-11-03 18:38:30 +0800114 dst = kmap(page);
Zhi Wange4734052016-05-01 07:42:16 -0400115 intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
Zhi Wang9556e112017-10-10 13:51:32 +0800116 I915_GTT_PAGE_SIZE);
Xiaoguang Chenc7549362016-11-03 18:38:30 +0800117 kunmap(page);
Zhi Wange4734052016-05-01 07:42:16 -0400118 i++;
119 }
120
121 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
Xiaoguang Chenc7549362016-11-03 18:38:30 +0800122 shadow_ring_context = kmap(page);
Zhi Wange4734052016-05-01 07:42:16 -0400123
124#define COPY_REG(name) \
125 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
126 + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
Zhenyu Wangd8303072018-03-19 17:09:05 +0800127#define COPY_REG_MASKED(name) {\
128 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
129 + RING_CTX_OFF(name.val),\
130 &shadow_ring_context->name.val, 4);\
131 shadow_ring_context->name.val |= 0xffff << 16;\
132 }
Zhi Wange4734052016-05-01 07:42:16 -0400133
Zhenyu Wangd8303072018-03-19 17:09:05 +0800134 COPY_REG_MASKED(ctx_ctrl);
Zhi Wange4734052016-05-01 07:42:16 -0400135 COPY_REG(ctx_timestamp);
136
137 if (ring_id == RCS) {
138 COPY_REG(bb_per_ctx_ptr);
139 COPY_REG(rcs_indirect_ctx);
140 COPY_REG(rcs_indirect_ctx_offset);
141 }
142#undef COPY_REG
Zhenyu Wangd8303072018-03-19 17:09:05 +0800143#undef COPY_REG_MASKED
Zhi Wange4734052016-05-01 07:42:16 -0400144
Zhi Wange4734052016-05-01 07:42:16 -0400145 intel_gvt_hypervisor_read_gpa(vgpu,
146 workload->ring_context_gpa +
147 sizeof(*shadow_ring_context),
148 (void *)shadow_ring_context +
149 sizeof(*shadow_ring_context),
Zhi Wang9556e112017-10-10 13:51:32 +0800150 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
Zhi Wange4734052016-05-01 07:42:16 -0400151
Xiaoguang Chenc7549362016-11-03 18:38:30 +0800152 kunmap(page);
Zhi Wange4734052016-05-01 07:42:16 -0400153 return 0;
154}
155
Chris Wilsone61e0f52018-02-21 09:56:36 +0000156static inline bool is_gvt_request(struct i915_request *req)
Changbin Dubc2d4b62017-03-22 12:35:31 +0800157{
158 return i915_gem_context_force_single_submission(req->ctx);
159}
160
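/*
 * Snapshot the ring's hang-diagnosis registers (INSTDONE, ACTHD,
 * ACTHD_UDW) into the vGPU's virtual MMIO space, so the guest sees
 * up-to-date values after the context is scheduled out or preempted.
 */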
Xiong Zhang295764c2017-11-07 05:23:02 +0800161static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
162{
163 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
164 u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
165 i915_reg_t reg;
166
167 reg = RING_INSTDONE(ring_base);
168 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
169 reg = RING_ACTHD(ring_base);
170 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
171 reg = RING_ACTHD_UDW(ring_base);
172 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
173}
174
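/*
 * Callback hooked into i915's per-engine context status notifier.
 * For non-GVT requests it switches the engine MMIO state back to the
 * host; for GVT requests it switches MMIO to the owning vGPU on
 * schedule-in, saves ring HW state on schedule-out and preemption,
 * and updates shadow_ctx_active, which complete_current_workload()
 * waits on.
 */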
Zhi Wange4734052016-05-01 07:42:16 -0400175static int shadow_context_status_change(struct notifier_block *nb,
176 unsigned long action, void *data)
177{
Chris Wilsone61e0f52018-02-21 09:56:36 +0000178 struct i915_request *req = data;
Changbin Du3fc03062017-03-13 10:47:11 +0800179 struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
180 shadow_ctx_notifier_block[req->engine->id]);
181 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
Changbin Du0e86cc92017-05-04 10:52:38 +0800182 enum intel_engine_id ring_id = req->engine->id;
183 struct intel_vgpu_workload *workload;
Changbin Du679fd3e2017-11-13 14:58:31 +0800184 unsigned long flags;
Zhi Wange4734052016-05-01 07:42:16 -0400185
Changbin Du0e86cc92017-05-04 10:52:38 +0800186 if (!is_gvt_request(req)) {
Changbin Du679fd3e2017-11-13 14:58:31 +0800187 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
Changbin Du0e86cc92017-05-04 10:52:38 +0800188 if (action == INTEL_CONTEXT_SCHEDULE_IN &&
189 scheduler->engine_owner[ring_id]) {
190 /* Switch ring from vGPU to host. */
191 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
192 NULL, ring_id);
193 scheduler->engine_owner[ring_id] = NULL;
194 }
Changbin Du679fd3e2017-11-13 14:58:31 +0800195 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
Changbin Du0e86cc92017-05-04 10:52:38 +0800196
197 return NOTIFY_OK;
198 }
199
200 workload = scheduler->current_workload[ring_id];
201 if (unlikely(!workload))
Chuanxiao Dong9272f732017-02-17 19:29:52 +0800202 return NOTIFY_OK;
203
Zhi Wange4734052016-05-01 07:42:16 -0400204 switch (action) {
205 case INTEL_CONTEXT_SCHEDULE_IN:
Changbin Du679fd3e2017-11-13 14:58:31 +0800206 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
Changbin Du0e86cc92017-05-04 10:52:38 +0800207 if (workload->vgpu != scheduler->engine_owner[ring_id]) {
208 /* Switch ring from host to vGPU or vGPU to vGPU. */
209 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
210 workload->vgpu, ring_id);
211 scheduler->engine_owner[ring_id] = workload->vgpu;
212 } else
213 gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
214 ring_id, workload->vgpu->id);
Changbin Du679fd3e2017-11-13 14:58:31 +0800215 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
Zhi Wange4734052016-05-01 07:42:16 -0400216 atomic_set(&workload->shadow_ctx_active, 1);
217 break;
218 case INTEL_CONTEXT_SCHEDULE_OUT:
Xiong Zhang295764c2017-11-07 05:23:02 +0800219 save_ring_hw_state(workload->vgpu, ring_id);
Zhi Wange4734052016-05-01 07:42:16 -0400220 atomic_set(&workload->shadow_ctx_active, 0);
221 break;
Zhenyu Wangda5f99e2017-12-01 14:59:53 +0800222 case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
223 save_ring_hw_state(workload->vgpu, ring_id);
224 break;
Zhi Wange4734052016-05-01 07:42:16 -0400225 default:
226 WARN_ON(1);
227 return NOTIFY_OK;
228 }
229 wake_up(&workload->shadow_ctx_status_wq);
230 return NOTIFY_OK;
231}
232
Kechen Lu9dfb8e52017-08-10 07:41:36 +0800233static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
234 struct intel_engine_cs *engine)
235{
236 struct intel_context *ce = &ctx->engine[engine->id];
237 u64 desc = 0;
238
239 desc = ce->lrc_desc;
240
 241	/* Update bits 0-11 of the context descriptor, which include flags
242 * like GEN8_CTX_* cached in desc_template
243 */
244 desc &= U64_MAX << 12;
245 desc |= ctx->desc_template & ((1ULL << 12) - 1);
246
247 ce->lrc_desc = desc;
248}
249
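/*
 * Copy the scanned guest ring buffer contents into ring space
 * allocated from the shadow context with intel_ring_begin(). On
 * Kabylake, inhibited contexts get an extra restore emitted first.
 * Afterwards workload->shadow_ring_buffer_va points at the copy
 * inside the shadow ring.
 */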
fred gao0a53bc02017-08-18 15:41:06 +0800250static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
251{
252 struct intel_vgpu *vgpu = workload->vgpu;
253 void *shadow_ring_buffer_va;
254 u32 *cs;
Weinan Licd7e61b2018-02-23 14:46:45 +0800255 struct i915_request *req = workload->req;
256
257 if (IS_KABYLAKE(req->i915) &&
258 is_inhibit_context(req->ctx, req->engine->id))
259 intel_vgpu_restore_inhibit_context(vgpu, req);
fred gao0a53bc02017-08-18 15:41:06 +0800260
261 /* allocate shadow ring buffer */
262 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
263 if (IS_ERR(cs)) {
264 gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
265 workload->rb_len);
266 return PTR_ERR(cs);
267 }
268
269 shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
270
271 /* get shadow ring buffer va */
272 workload->shadow_ring_buffer_va = cs;
273
274 memcpy(cs, shadow_ring_buffer_va,
275 workload->rb_len);
276
277 cs += workload->rb_len / sizeof(u32);
278 intel_ring_advance(workload->req, cs);
279
280 return 0;
281}
282
Chris Wilson7b302552017-11-20 13:29:58 +0000283static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
fred gaoa3cfdca2017-08-18 15:41:07 +0800284{
285 if (!wa_ctx->indirect_ctx.obj)
286 return;
287
288 i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
289 i915_gem_object_put(wa_ctx->indirect_ctx.obj);
290}
291
Ping Gao89ea20b2017-06-29 12:22:42 +0800292/**
 293 * intel_gvt_scan_and_shadow_workload - audit a workload by scanning it
 294 * and shadowing it as well, including the ring buffer, wa_ctx and ctx.
 295 * @workload: an abstract entity for each execlist submission.
 296 *
 297 * This function is called before the workload is submitted to i915, to
 298 * make sure the content of the workload is valid.
299 */
300int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
Zhi Wange4734052016-05-01 07:42:16 -0400301{
Zhi Wang1406a142017-09-10 21:15:18 +0800302 struct intel_vgpu *vgpu = workload->vgpu;
303 struct intel_vgpu_submission *s = &vgpu->submission;
304 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
305 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
Zhi Wange4734052016-05-01 07:42:16 -0400306 int ring_id = workload->ring_id;
fred gao0a53bc02017-08-18 15:41:06 +0800307 struct intel_engine_cs *engine = dev_priv->engine[ring_id];
fred gao0a53bc02017-08-18 15:41:06 +0800308 struct intel_ring *ring;
Zhi Wange4734052016-05-01 07:42:16 -0400309 int ret;
310
Ping Gao87e919d2017-07-04 14:53:03 +0800311 lockdep_assert_held(&dev_priv->drm.struct_mutex);
312
Ping Gaod0302e72017-06-29 12:22:43 +0800313 if (workload->shadowed)
314 return 0;
Zhi Wange4734052016-05-01 07:42:16 -0400315
Zhenyu Wang03806ed2017-02-13 17:07:19 +0800316 shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
317 shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
Zhi Wange4734052016-05-01 07:42:16 -0400318 GEN8_CTX_ADDRESSING_MODE_SHIFT;
319
Zhi Wang1406a142017-09-10 21:15:18 +0800320 if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
Kechen Lu9dfb8e52017-08-10 07:41:36 +0800321 shadow_context_descriptor_update(shadow_ctx,
322 dev_priv->engine[ring_id]);
Chuanxiao Dong3cd23b82017-03-16 09:47:58 +0800323
Ping Gao89ea20b2017-06-29 12:22:42 +0800324 ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
Zhi Wangbe1da702016-05-03 18:26:57 -0400325 if (ret)
fred gaoa3cfdca2017-08-18 15:41:07 +0800326 goto err_scan;
Zhi Wangbe1da702016-05-03 18:26:57 -0400327
Tina Zhang17f1b1a2017-03-15 23:16:01 -0400328 if ((workload->ring_id == RCS) &&
329 (workload->wa_ctx.indirect_ctx.size != 0)) {
330 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
331 if (ret)
fred gaoa3cfdca2017-08-18 15:41:07 +0800332 goto err_scan;
Tina Zhang17f1b1a2017-03-15 23:16:01 -0400333 }
Zhi Wangbe1da702016-05-03 18:26:57 -0400334
Ping Gao89ea20b2017-06-29 12:22:42 +0800335	/* Pin the shadow context by GVT even though it will be pinned
 336	 * again when i915 allocates the request: GVT updates the guest
 337	 * context from the shadow context when the workload completes,
 338	 * and by then i915 may already have unpinned the shadow context,
 339	 * making the shadow_ctx pages invalid. So GVT takes its own pin
 340	 * and unpins the shadow_ctx only after updating the guest context.
 341	 */
342 ring = engine->context_pin(engine, shadow_ctx);
343 if (IS_ERR(ring)) {
344 ret = PTR_ERR(ring);
345 gvt_vgpu_err("fail to pin shadow context\n");
fred gaoa3cfdca2017-08-18 15:41:07 +0800346 goto err_shadow;
Ping Gao89ea20b2017-06-29 12:22:42 +0800347 }
Zhi Wange4734052016-05-01 07:42:16 -0400348
fred gao0a53bc02017-08-18 15:41:06 +0800349 ret = populate_shadow_context(workload);
350 if (ret)
fred gaoa3cfdca2017-08-18 15:41:07 +0800351 goto err_unpin;
fred gaof2880e02017-11-14 17:09:35 +0800352 workload->shadowed = true;
353 return 0;
354
355err_unpin:
356 engine->context_unpin(engine, shadow_ctx);
357err_shadow:
358 release_shadow_wa_ctx(&workload->wa_ctx);
359err_scan:
360 return ret;
361}
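/*
 * Illustrative call pattern (a sketch, not additional functionality):
 * the caller must hold dev_priv->drm.struct_mutex, as asserted above.
 * Within this file the function is used from dispatch_workload() and,
 * for the first workload in an empty queue, from
 * intel_vgpu_create_workload():
 *
 *	mutex_lock(&dev_priv->drm.struct_mutex);
 *	ret = intel_gvt_scan_and_shadow_workload(workload);
 *	mutex_unlock(&dev_priv->drm.struct_mutex);
 */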
362
363static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
364{
365 int ring_id = workload->ring_id;
366 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
367 struct intel_engine_cs *engine = dev_priv->engine[ring_id];
Chris Wilsone61e0f52018-02-21 09:56:36 +0000368 struct i915_request *rq;
fred gaof2880e02017-11-14 17:09:35 +0800369 struct intel_vgpu *vgpu = workload->vgpu;
370 struct intel_vgpu_submission *s = &vgpu->submission;
371 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
372 int ret;
fred gao0a53bc02017-08-18 15:41:06 +0800373
Chris Wilsone61e0f52018-02-21 09:56:36 +0000374 rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
fred gao0a53bc02017-08-18 15:41:06 +0800375 if (IS_ERR(rq)) {
376 gvt_vgpu_err("fail to allocate gem request\n");
377 ret = PTR_ERR(rq);
fred gaoa3cfdca2017-08-18 15:41:07 +0800378 goto err_unpin;
fred gao0a53bc02017-08-18 15:41:06 +0800379 }
380
381 gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
382
Chris Wilsone61e0f52018-02-21 09:56:36 +0000383 workload->req = i915_request_get(rq);
fred gao0a53bc02017-08-18 15:41:06 +0800384 ret = copy_workload_to_ring_buffer(workload);
385 if (ret)
fred gaoa3cfdca2017-08-18 15:41:07 +0800386 goto err_unpin;
fred gaoa3cfdca2017-08-18 15:41:07 +0800387 return 0;
fred gao0a53bc02017-08-18 15:41:06 +0800388
fred gaoa3cfdca2017-08-18 15:41:07 +0800389err_unpin:
390 engine->context_unpin(engine, shadow_ctx);
fred gaoa3cfdca2017-08-18 15:41:07 +0800391 release_shadow_wa_ctx(&workload->wa_ctx);
fred gao0a53bc02017-08-18 15:41:06 +0800392 return ret;
393}
394
Zhi Wangf52c3802017-09-24 21:53:03 +0800395static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
396
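/*
 * Pin each shadow batch buffer of the workload into the GGTT and
 * relocate the shadowed MI_BATCH_BUFFER_START command to point at it:
 * dword 1 gets the GGTT offset, and dword 2 is zeroed when the command
 * carries a 64-bit address. Pending CPU writes are clflushed, the
 * object is moved to the GTT domain and marked active against
 * workload->req; on failure, everything pinned so far is released.
 */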
Zhi Wangd8235b52017-09-12 22:06:39 +0800397static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
398{
399 struct intel_gvt *gvt = workload->vgpu->gvt;
400 const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
Zhi Wangf52c3802017-09-24 21:53:03 +0800401 struct intel_vgpu_shadow_bb *bb;
402 int ret;
Zhi Wangd8235b52017-09-12 22:06:39 +0800403
Zhi Wangf52c3802017-09-24 21:53:03 +0800404 list_for_each_entry(bb, &workload->shadow_bb, list) {
405 bb->vma = i915_gem_object_ggtt_pin(bb->obj, NULL, 0, 0, 0);
406 if (IS_ERR(bb->vma)) {
407 ret = PTR_ERR(bb->vma);
408 goto err;
409 }
Zhi Wangd8235b52017-09-12 22:06:39 +0800410
Zhi Wangf52c3802017-09-24 21:53:03 +0800411 /* relocate shadow batch buffer */
412 bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
Zhi Wangd8235b52017-09-12 22:06:39 +0800413 if (gmadr_bytes == 8)
Zhi Wangf52c3802017-09-24 21:53:03 +0800414 bb->bb_start_cmd_va[2] = 0;
415
416 /* No one is going to touch shadow bb from now on. */
417 if (bb->clflush & CLFLUSH_AFTER) {
418 drm_clflush_virt_range(bb->va, bb->obj->base.size);
419 bb->clflush &= ~CLFLUSH_AFTER;
420 }
421
422 ret = i915_gem_object_set_to_gtt_domain(bb->obj, false);
423 if (ret)
424 goto err;
425
426 i915_gem_obj_finish_shmem_access(bb->obj);
427 bb->accessing = false;
428
429 i915_vma_move_to_active(bb->vma, workload->req, 0);
Zhi Wangd8235b52017-09-12 22:06:39 +0800430 }
431 return 0;
Zhi Wangf52c3802017-09-24 21:53:03 +0800432err:
433 release_shadow_batch_buffer(workload);
434 return ret;
Zhi Wangd8235b52017-09-12 22:06:39 +0800435}
436
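/*
 * Point the workaround-context fields of the shadow ring context at
 * the shadow copies: bb_per_ctx_ptr keeps its low flag bits but takes
 * the shadow per-ctx GMA, and rcs_indirect_ctx likewise takes the
 * shadow indirect-ctx GMA. Called from prepare_shadow_wa_ctx() once
 * the indirect context object has been pinned into the GGTT.
 */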
437static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
438{
439 struct intel_vgpu_workload *workload = container_of(wa_ctx,
440 struct intel_vgpu_workload,
441 wa_ctx);
442 int ring_id = workload->ring_id;
443 struct intel_vgpu_submission *s = &workload->vgpu->submission;
444 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
445 struct drm_i915_gem_object *ctx_obj =
446 shadow_ctx->engine[ring_id].state->obj;
447 struct execlist_ring_context *shadow_ring_context;
448 struct page *page;
449
450 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
451 shadow_ring_context = kmap_atomic(page);
452
453 shadow_ring_context->bb_per_ctx_ptr.val =
454 (shadow_ring_context->bb_per_ctx_ptr.val &
455 (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
456 shadow_ring_context->rcs_indirect_ctx.val =
457 (shadow_ring_context->rcs_indirect_ctx.val &
458 (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
459
460 kunmap_atomic(shadow_ring_context);
461 return 0;
462}
463
464static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
465{
466 struct i915_vma *vma;
467 unsigned char *per_ctx_va =
468 (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
469 wa_ctx->indirect_ctx.size;
470
471 if (wa_ctx->indirect_ctx.size == 0)
472 return 0;
473
474 vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
475 0, CACHELINE_BYTES, 0);
476 if (IS_ERR(vma))
477 return PTR_ERR(vma);
478
479 /* FIXME: we are not tracking our pinned VMA leaving it
480 * up to the core to fix up the stray pin_count upon
481 * free.
482 */
483
484 wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
485
486 wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
487 memset(per_ctx_va, 0, CACHELINE_BYTES);
488
489 update_wa_ctx_2_shadow_ctx(wa_ctx);
490 return 0;
491}
492
493static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
494{
Zhi Wangf52c3802017-09-24 21:53:03 +0800495 struct intel_vgpu *vgpu = workload->vgpu;
496 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
497 struct intel_vgpu_shadow_bb *bb, *pos;
Zhi Wangd8235b52017-09-12 22:06:39 +0800498
Zhi Wangf52c3802017-09-24 21:53:03 +0800499 if (list_empty(&workload->shadow_bb))
500 return;
501
502 bb = list_first_entry(&workload->shadow_bb,
503 struct intel_vgpu_shadow_bb, list);
504
505 mutex_lock(&dev_priv->drm.struct_mutex);
506
507 list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
508 if (bb->obj) {
509 if (bb->accessing)
510 i915_gem_obj_finish_shmem_access(bb->obj);
511
512 if (bb->va && !IS_ERR(bb->va))
513 i915_gem_object_unpin_map(bb->obj);
514
515 if (bb->vma && !IS_ERR(bb->vma)) {
516 i915_vma_unpin(bb->vma);
517 i915_vma_close(bb->vma);
518 }
519 __i915_gem_object_release_unless_active(bb->obj);
Zhi Wangd8235b52017-09-12 22:06:39 +0800520 }
Zhi Wangf52c3802017-09-24 21:53:03 +0800521 list_del(&bb->list);
522 kfree(bb);
Zhi Wangd8235b52017-09-12 22:06:39 +0800523 }
Zhi Wangf52c3802017-09-24 21:53:03 +0800524
525 mutex_unlock(&dev_priv->drm.struct_mutex);
Zhi Wangd8235b52017-09-12 22:06:39 +0800526}
527
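/*
 * Final preparation before submission, in order: pin the shadow mm
 * and refresh the shadow PDPs, sync out-of-sync guest pages and flush
 * post-shadow entries, allocate the i915 request and copy the ring
 * buffer, pin and relocate the shadow batch buffers and the wa_ctx,
 * then run the submission backend's own prepare() hook. Each step
 * unwinds the previous ones on failure.
 */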
Zhi Wang497aa3f2017-09-12 21:51:10 +0800528static int prepare_workload(struct intel_vgpu_workload *workload)
529{
Zhi Wangd8235b52017-09-12 22:06:39 +0800530 struct intel_vgpu *vgpu = workload->vgpu;
Zhi Wang497aa3f2017-09-12 21:51:10 +0800531 int ret = 0;
532
Zhi Wangd8235b52017-09-12 22:06:39 +0800533 ret = intel_vgpu_pin_mm(workload->shadow_mm);
534 if (ret) {
535 gvt_vgpu_err("fail to vgpu pin mm\n");
536 return ret;
537 }
Zhi Wang497aa3f2017-09-12 21:51:10 +0800538
Zhi Wangb20c0d52018-02-07 18:12:15 +0800539 update_shadow_pdps(workload);
540
Zhi Wangd8235b52017-09-12 22:06:39 +0800541 ret = intel_vgpu_sync_oos_pages(workload->vgpu);
542 if (ret) {
543 gvt_vgpu_err("fail to vgpu sync oos pages\n");
544 goto err_unpin_mm;
545 }
546
547 ret = intel_vgpu_flush_post_shadow(workload->vgpu);
548 if (ret) {
549 gvt_vgpu_err("fail to flush post shadow\n");
550 goto err_unpin_mm;
551 }
552
fred gaof2880e02017-11-14 17:09:35 +0800553 ret = intel_gvt_generate_request(workload);
554 if (ret) {
555 gvt_vgpu_err("fail to generate request\n");
556 goto err_unpin_mm;
557 }
558
Zhi Wangd8235b52017-09-12 22:06:39 +0800559 ret = prepare_shadow_batch_buffer(workload);
560 if (ret) {
561 gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
562 goto err_unpin_mm;
563 }
564
565 ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
566 if (ret) {
567 gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
568 goto err_shadow_batch;
569 }
570
571 if (workload->prepare) {
572 ret = workload->prepare(workload);
573 if (ret)
574 goto err_shadow_wa_ctx;
575 }
576
577 return 0;
578err_shadow_wa_ctx:
579 release_shadow_wa_ctx(&workload->wa_ctx);
580err_shadow_batch:
581 release_shadow_batch_buffer(workload);
582err_unpin_mm:
583 intel_vgpu_unpin_mm(workload->shadow_mm);
Zhi Wang497aa3f2017-09-12 21:51:10 +0800584 return ret;
585}
586
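/*
 * Dispatch one workload to i915 under struct_mutex: scan and shadow
 * it, run prepare_workload() and, if a request was created, add it
 * with i915_request_add() and mark the workload dispatched. Any error
 * is recorded in workload->status for complete_current_workload().
 */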
fred gao0a53bc02017-08-18 15:41:06 +0800587static int dispatch_workload(struct intel_vgpu_workload *workload)
588{
Zhi Wang1406a142017-09-10 21:15:18 +0800589 struct intel_vgpu *vgpu = workload->vgpu;
590 struct intel_vgpu_submission *s = &vgpu->submission;
591 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
592 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
fred gao0a53bc02017-08-18 15:41:06 +0800593 int ring_id = workload->ring_id;
fred gao0a53bc02017-08-18 15:41:06 +0800594 struct intel_engine_cs *engine = dev_priv->engine[ring_id];
595 int ret = 0;
596
597 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
598 ring_id, workload);
599
600 mutex_lock(&dev_priv->drm.struct_mutex);
601
602 ret = intel_gvt_scan_and_shadow_workload(workload);
603 if (ret)
604 goto out;
605
Zhi Wang497aa3f2017-09-12 21:51:10 +0800606 ret = prepare_workload(workload);
607 if (ret) {
608 engine->context_unpin(engine, shadow_ctx);
609 goto out;
fred gao0a53bc02017-08-18 15:41:06 +0800610 }
611
Pei Zhang90d27a12016-11-14 18:02:57 +0800612out:
613 if (ret)
614 workload->status = ret;
Chris Wilson0eb742d2016-10-20 17:29:36 +0800615
Ping Gao89ea20b2017-06-29 12:22:42 +0800616 if (!IS_ERR_OR_NULL(workload->req)) {
617 gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
618 ring_id, workload->req);
Chris Wilsone61e0f52018-02-21 09:56:36 +0000619 i915_request_add(workload->req);
Ping Gao89ea20b2017-06-29 12:22:42 +0800620 workload->dispatched = true;
621 }
Chuanxiao Dong3cd23b82017-03-16 09:47:58 +0800622
Pei Zhang90d27a12016-11-14 18:02:57 +0800623 mutex_unlock(&dev_priv->drm.struct_mutex);
Zhi Wange4734052016-05-01 07:42:16 -0400624 return ret;
625}
626
627static struct intel_vgpu_workload *pick_next_workload(
628 struct intel_gvt *gvt, int ring_id)
629{
630 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
631 struct intel_vgpu_workload *workload = NULL;
632
633 mutex_lock(&gvt->lock);
634
635 /*
636 * no current vgpu / will be scheduled out / no workload
637 * bail out
638 */
639 if (!scheduler->current_vgpu) {
640 gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
641 goto out;
642 }
643
644 if (scheduler->need_reschedule) {
645 gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
646 goto out;
647 }
648
Zhenyu Wang954180a2017-04-12 14:22:50 +0800649 if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
Zhi Wange4734052016-05-01 07:42:16 -0400650 goto out;
Zhi Wange4734052016-05-01 07:42:16 -0400651
652 /*
 653	 * still have a current workload, maybe the workload dispatcher
 654	 * failed to submit it for some reason; resubmit it.
655 */
656 if (scheduler->current_workload[ring_id]) {
657 workload = scheduler->current_workload[ring_id];
658 gvt_dbg_sched("ring id %d still have current workload %p\n",
659 ring_id, workload);
660 goto out;
661 }
662
663 /*
 664	 * pick a workload as the current workload.
 665	 * once the current workload is set, the schedule policy routines
 666	 * will wait for it to finish when trying to
 667	 * schedule out a vgpu.
668 */
669 scheduler->current_workload[ring_id] = container_of(
670 workload_q_head(scheduler->current_vgpu, ring_id)->next,
671 struct intel_vgpu_workload, list);
672
673 workload = scheduler->current_workload[ring_id];
674
675 gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
676
Zhi Wang1406a142017-09-10 21:15:18 +0800677 atomic_inc(&workload->vgpu->submission.running_workload_num);
Zhi Wange4734052016-05-01 07:42:16 -0400678out:
679 mutex_unlock(&gvt->lock);
680 return workload;
681}
682
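/*
 * Mirror of populate_shadow_context(): once the request has completed,
 * write the shadow context pages back into guest memory, update the
 * guest's ring_header with the executed tail, and copy back selected
 * ring context registers such as ctx_ctrl and ctx_timestamp.
 */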
683static void update_guest_context(struct intel_vgpu_workload *workload)
684{
685 struct intel_vgpu *vgpu = workload->vgpu;
686 struct intel_gvt *gvt = vgpu->gvt;
Zhi Wang1406a142017-09-10 21:15:18 +0800687 struct intel_vgpu_submission *s = &vgpu->submission;
688 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
Zhi Wange4734052016-05-01 07:42:16 -0400689 int ring_id = workload->ring_id;
Zhi Wange4734052016-05-01 07:42:16 -0400690 struct drm_i915_gem_object *ctx_obj =
691 shadow_ctx->engine[ring_id].state->obj;
692 struct execlist_ring_context *shadow_ring_context;
693 struct page *page;
694 void *src;
695 unsigned long context_gpa, context_page_num;
696 int i;
697
698 gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
699 workload->ctx_desc.lrca);
700
Joonas Lahtinen63ffbcd2017-04-28 10:53:36 +0300701 context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
Zhi Wange4734052016-05-01 07:42:16 -0400702
703 context_page_num = context_page_num >> PAGE_SHIFT;
704
705 if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
706 context_page_num = 19;
707
708 i = 2;
709
710 while (i < context_page_num) {
711 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
712 (u32)((workload->ctx_desc.lrca + i) <<
Zhi Wang9556e112017-10-10 13:51:32 +0800713 I915_GTT_PAGE_SHIFT));
Zhi Wange4734052016-05-01 07:42:16 -0400714 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
Tina Zhang695fbc02017-03-10 04:26:53 -0500715 gvt_vgpu_err("invalid guest context descriptor\n");
Zhi Wange4734052016-05-01 07:42:16 -0400716 return;
717 }
718
Michel Thierry0b29c752017-09-13 09:56:00 +0100719 page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
Xiaoguang Chenc7549362016-11-03 18:38:30 +0800720 src = kmap(page);
Zhi Wange4734052016-05-01 07:42:16 -0400721 intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
Zhi Wang9556e112017-10-10 13:51:32 +0800722 I915_GTT_PAGE_SIZE);
Xiaoguang Chenc7549362016-11-03 18:38:30 +0800723 kunmap(page);
Zhi Wange4734052016-05-01 07:42:16 -0400724 i++;
725 }
726
727 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa +
728 RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
729
730 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
Xiaoguang Chenc7549362016-11-03 18:38:30 +0800731 shadow_ring_context = kmap(page);
Zhi Wange4734052016-05-01 07:42:16 -0400732
733#define COPY_REG(name) \
734 intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \
735 RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
736
737 COPY_REG(ctx_ctrl);
738 COPY_REG(ctx_timestamp);
739
740#undef COPY_REG
741
742 intel_gvt_hypervisor_write_gpa(vgpu,
743 workload->ring_context_gpa +
744 sizeof(*shadow_ring_context),
745 (void *)shadow_ring_context +
746 sizeof(*shadow_ring_context),
Zhi Wang9556e112017-10-10 13:51:32 +0800747 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
Zhi Wange4734052016-05-01 07:42:16 -0400748
Xiaoguang Chenc7549362016-11-03 18:38:30 +0800749 kunmap(page);
Zhi Wange4734052016-05-01 07:42:16 -0400750}
751
Zhi Wange2c43c02017-09-13 01:58:35 +0800752static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
753{
754 struct intel_vgpu_submission *s = &vgpu->submission;
755 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
756 struct intel_engine_cs *engine;
757 struct intel_vgpu_workload *pos, *n;
758 unsigned int tmp;
759
 760	/* free the unsubmitted workloads in the queues. */
761 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
762 list_for_each_entry_safe(pos, n,
763 &s->workload_q_head[engine->id], list) {
764 list_del_init(&pos->list);
765 intel_vgpu_destroy_workload(pos);
766 }
767 clear_bit(engine->id, s->shadow_ctx_desc_updated);
768 }
769}
770
Zhi Wange4734052016-05-01 07:42:16 -0400771static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
772{
773 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
Zhi Wang1406a142017-09-10 21:15:18 +0800774 struct intel_vgpu_workload *workload =
775 scheduler->current_workload[ring_id];
776 struct intel_vgpu *vgpu = workload->vgpu;
777 struct intel_vgpu_submission *s = &vgpu->submission;
Zhi Wangbe1da702016-05-03 18:26:57 -0400778 int event;
Zhi Wange4734052016-05-01 07:42:16 -0400779
780 mutex_lock(&gvt->lock);
781
Chuanxiao Dong8f1117a2017-03-06 13:05:24 +0800782	/* For a workload with a request, wait for the context
 783	 * switch to make sure the request is completed.
 784	 * For a workload without a request, complete the workload directly.
785 */
786 if (workload->req) {
Chuanxiao Dong3cd23b82017-03-16 09:47:58 +0800787 struct drm_i915_private *dev_priv =
788 workload->vgpu->gvt->dev_priv;
789 struct intel_engine_cs *engine =
790 dev_priv->engine[workload->ring_id];
Zhi Wange4734052016-05-01 07:42:16 -0400791 wait_event(workload->shadow_ctx_status_wq,
792 !atomic_read(&workload->shadow_ctx_active));
793
Chuanxiao Dong0cf5ec42017-06-23 13:01:11 +0800794	/* If this request caused a GPU hang, req->fence.error will
 795	 * be set to -EIO. Propagate -EIO to the workload status so
 796	 * that a request which caused a GPU hang doesn't trigger a
 797	 * context switch interrupt to the guest.
798 */
799 if (likely(workload->status == -EINPROGRESS)) {
800 if (workload->req->fence.error == -EIO)
801 workload->status = -EIO;
802 else
803 workload->status = 0;
804 }
805
Chris Wilsone61e0f52018-02-21 09:56:36 +0000806 i915_request_put(fetch_and_zero(&workload->req));
Zhi Wangbe1da702016-05-03 18:26:57 -0400807
Chuanxiao Dong6184cc82017-08-01 17:47:25 +0800808 if (!workload->status && !(vgpu->resetting_eng &
809 ENGINE_MASK(ring_id))) {
Chuanxiao Dong8f1117a2017-03-06 13:05:24 +0800810 update_guest_context(workload);
811
812 for_each_set_bit(event, workload->pending_events,
813 INTEL_GVT_EVENT_MAX)
814 intel_vgpu_trigger_virtual_event(vgpu, event);
815 }
Chuanxiao Dong3cd23b82017-03-16 09:47:58 +0800816 mutex_lock(&dev_priv->drm.struct_mutex);
817 /* unpin shadow ctx as the shadow_ctx update is done */
Zhi Wang1406a142017-09-10 21:15:18 +0800818 engine->context_unpin(engine, s->shadow_ctx);
Chuanxiao Dong3cd23b82017-03-16 09:47:58 +0800819 mutex_unlock(&dev_priv->drm.struct_mutex);
Zhi Wange4734052016-05-01 07:42:16 -0400820 }
821
822 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
823 ring_id, workload, workload->status);
824
825 scheduler->current_workload[ring_id] = NULL;
826
Zhi Wange4734052016-05-01 07:42:16 -0400827 list_del_init(&workload->list);
Zhi Wangd8235b52017-09-12 22:06:39 +0800828
829 if (!workload->status) {
830 release_shadow_batch_buffer(workload);
831 release_shadow_wa_ctx(&workload->wa_ctx);
832 }
833
Zhi Wange2c43c02017-09-13 01:58:35 +0800834 if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
 835	/* If workload->status is not successful, it means the HW GPU
 836	 * hung or something went wrong with i915/GVT, and GVT won't
 837	 * inject a context switch interrupt to the guest, so to the
 838	 * guest this error is effectively a vGPU hang. We should
 839	 * therefore emulate a vGPU hang: if there are pending
 840	 * workloads which were already submitted by the guest, we
 841	 * should clean them up like the HW GPU would.
 842	 *
 843	 * If we are in the middle of an engine reset, the pending
 844	 * workloads won't be submitted to the HW GPU and will be
 845	 * cleaned up later during the reset, so doing the workload
 846	 * cleanup here doesn't have any impact.
 847	 */
848 clean_workloads(vgpu, ENGINE_MASK(ring_id));
849 }
850
Zhi Wange4734052016-05-01 07:42:16 -0400851 workload->complete(workload);
852
Zhi Wang1406a142017-09-10 21:15:18 +0800853 atomic_dec(&s->running_workload_num);
Zhi Wange4734052016-05-01 07:42:16 -0400854 wake_up(&scheduler->workload_complete_wq);
Ping Gaof100dae2017-05-24 09:14:11 +0800855
856 if (gvt->scheduler.need_reschedule)
857 intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
858
Zhi Wange4734052016-05-01 07:42:16 -0400859 mutex_unlock(&gvt->lock);
860}
861
862struct workload_thread_param {
863 struct intel_gvt *gvt;
864 int ring_id;
865};
866
867static int workload_thread(void *priv)
868{
869 struct workload_thread_param *p = (struct workload_thread_param *)priv;
870 struct intel_gvt *gvt = p->gvt;
871 int ring_id = p->ring_id;
872 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
873 struct intel_vgpu_workload *workload = NULL;
Tina Zhang695fbc02017-03-10 04:26:53 -0500874 struct intel_vgpu *vgpu = NULL;
Zhi Wange4734052016-05-01 07:42:16 -0400875 int ret;
Xu Hane3476c02017-03-29 10:13:59 +0800876 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
877 || IS_KABYLAKE(gvt->dev_priv);
Du, Changbine45d7b72016-10-27 11:10:31 +0800878 DEFINE_WAIT_FUNC(wait, woken_wake_function);
Zhi Wange4734052016-05-01 07:42:16 -0400879
880 kfree(p);
881
882 gvt_dbg_core("workload thread for ring %d started\n", ring_id);
883
884 while (!kthread_should_stop()) {
Du, Changbine45d7b72016-10-27 11:10:31 +0800885 add_wait_queue(&scheduler->waitq[ring_id], &wait);
886 do {
887 workload = pick_next_workload(gvt, ring_id);
888 if (workload)
889 break;
890 wait_woken(&wait, TASK_INTERRUPTIBLE,
891 MAX_SCHEDULE_TIMEOUT);
892 } while (!kthread_should_stop());
893 remove_wait_queue(&scheduler->waitq[ring_id], &wait);
Zhi Wange4734052016-05-01 07:42:16 -0400894
Du, Changbine45d7b72016-10-27 11:10:31 +0800895 if (!workload)
Zhi Wange4734052016-05-01 07:42:16 -0400896 break;
897
898 gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
899 workload->ring_id, workload,
900 workload->vgpu->id);
901
902 intel_runtime_pm_get(gvt->dev_priv);
903
Zhi Wange4734052016-05-01 07:42:16 -0400904 gvt_dbg_sched("ring id %d will dispatch workload %p\n",
905 workload->ring_id, workload);
906
907 if (need_force_wake)
908 intel_uncore_forcewake_get(gvt->dev_priv,
909 FORCEWAKE_ALL);
910
Pei Zhang90d27a12016-11-14 18:02:57 +0800911 mutex_lock(&gvt->lock);
Zhi Wange4734052016-05-01 07:42:16 -0400912 ret = dispatch_workload(workload);
Pei Zhang90d27a12016-11-14 18:02:57 +0800913 mutex_unlock(&gvt->lock);
Chris Wilson66bbc3b2016-10-19 11:11:44 +0100914
Zhi Wange4734052016-05-01 07:42:16 -0400915 if (ret) {
Tina Zhang695fbc02017-03-10 04:26:53 -0500916 vgpu = workload->vgpu;
917 gvt_vgpu_err("fail to dispatch workload, skip\n");
Zhi Wange4734052016-05-01 07:42:16 -0400918 goto complete;
919 }
920
921 gvt_dbg_sched("ring id %d wait workload %p\n",
922 workload->ring_id, workload);
Chris Wilsone61e0f52018-02-21 09:56:36 +0000923 i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
Zhi Wange4734052016-05-01 07:42:16 -0400924
925complete:
Changbin Du3ce32742017-02-09 10:13:16 +0800926 gvt_dbg_sched("will complete workload %p, status: %d\n",
Zhi Wange4734052016-05-01 07:42:16 -0400927 workload, workload->status);
928
Changbin Du2e51ef32017-01-05 13:28:05 +0800929 complete_current_workload(gvt, ring_id);
930
Zhi Wange4734052016-05-01 07:42:16 -0400931 if (need_force_wake)
932 intel_uncore_forcewake_put(gvt->dev_priv,
933 FORCEWAKE_ALL);
934
Zhi Wange4734052016-05-01 07:42:16 -0400935 intel_runtime_pm_put(gvt->dev_priv);
Zhi Wang6d763032017-09-12 22:33:12 +0800936 if (ret && (vgpu_is_vm_unhealthy(ret)))
fred gaoe011c6c2017-09-19 15:11:28 +0800937 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
Zhi Wange4734052016-05-01 07:42:16 -0400938 }
939 return 0;
940}
941
942void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
943{
Zhi Wang1406a142017-09-10 21:15:18 +0800944 struct intel_vgpu_submission *s = &vgpu->submission;
Zhi Wange4734052016-05-01 07:42:16 -0400945 struct intel_gvt *gvt = vgpu->gvt;
946 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
947
Zhi Wang1406a142017-09-10 21:15:18 +0800948 if (atomic_read(&s->running_workload_num)) {
Zhi Wange4734052016-05-01 07:42:16 -0400949 gvt_dbg_sched("wait vgpu idle\n");
950
951 wait_event(scheduler->workload_complete_wq,
Zhi Wang1406a142017-09-10 21:15:18 +0800952 !atomic_read(&s->running_workload_num));
Zhi Wange4734052016-05-01 07:42:16 -0400953 }
954}
955
956void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
957{
958 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
Changbin Du3fc03062017-03-13 10:47:11 +0800959 struct intel_engine_cs *engine;
960 enum intel_engine_id i;
Zhi Wange4734052016-05-01 07:42:16 -0400961
962 gvt_dbg_core("clean workload scheduler\n");
963
Changbin Du3fc03062017-03-13 10:47:11 +0800964 for_each_engine(engine, gvt->dev_priv, i) {
965 atomic_notifier_chain_unregister(
966 &engine->context_status_notifier,
967 &gvt->shadow_ctx_notifier_block[i]);
968 kthread_stop(scheduler->thread[i]);
Zhi Wange4734052016-05-01 07:42:16 -0400969 }
970}
971
972int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
973{
974 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
975 struct workload_thread_param *param = NULL;
Changbin Du3fc03062017-03-13 10:47:11 +0800976 struct intel_engine_cs *engine;
977 enum intel_engine_id i;
Zhi Wange4734052016-05-01 07:42:16 -0400978 int ret;
Zhi Wange4734052016-05-01 07:42:16 -0400979
980 gvt_dbg_core("init workload scheduler\n");
981
982 init_waitqueue_head(&scheduler->workload_complete_wq);
983
Changbin Du3fc03062017-03-13 10:47:11 +0800984 for_each_engine(engine, gvt->dev_priv, i) {
Zhi Wange4734052016-05-01 07:42:16 -0400985 init_waitqueue_head(&scheduler->waitq[i]);
986
987 param = kzalloc(sizeof(*param), GFP_KERNEL);
988 if (!param) {
989 ret = -ENOMEM;
990 goto err;
991 }
992
993 param->gvt = gvt;
994 param->ring_id = i;
995
996 scheduler->thread[i] = kthread_run(workload_thread, param,
997 "gvt workload %d", i);
998 if (IS_ERR(scheduler->thread[i])) {
999 gvt_err("fail to create workload thread\n");
1000 ret = PTR_ERR(scheduler->thread[i]);
1001 goto err;
1002 }
Changbin Du3fc03062017-03-13 10:47:11 +08001003
1004 gvt->shadow_ctx_notifier_block[i].notifier_call =
1005 shadow_context_status_change;
1006 atomic_notifier_chain_register(&engine->context_status_notifier,
1007 &gvt->shadow_ctx_notifier_block[i]);
Zhi Wange4734052016-05-01 07:42:16 -04001008 }
1009 return 0;
1010err:
1011 intel_gvt_clean_workload_scheduler(gvt);
1012 kfree(param);
1013 param = NULL;
1014 return ret;
1015}
1016
Zhi Wang874b6a92017-09-10 20:08:18 +08001017/**
1018 * intel_vgpu_clean_submission - free submission-related resource for vGPU
1019 * @vgpu: a vGPU
1020 *
1021 * This function is called when a vGPU is being destroyed.
1022 *
1023 */
1024void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
Zhi Wange4734052016-05-01 07:42:16 -04001025{
Zhi Wang1406a142017-09-10 21:15:18 +08001026 struct intel_vgpu_submission *s = &vgpu->submission;
1027
Weinan Li7569a062018-01-26 15:09:07 +08001028 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
Zhi Wang1406a142017-09-10 21:15:18 +08001029 i915_gem_context_put(s->shadow_ctx);
1030 kmem_cache_destroy(s->workloads);
Zhi Wange4734052016-05-01 07:42:16 -04001031}
1032
Zhi Wang06bb3722017-09-13 01:41:35 +08001033
1034/**
1035 * intel_vgpu_reset_submission - reset submission-related resource for vGPU
1036 * @vgpu: a vGPU
1037 * @engine_mask: engines expected to be reset
1038 *
 1039 * This function is called when a vGPU is being reset.
1040 *
1041 */
1042void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
1043 unsigned long engine_mask)
1044{
1045 struct intel_vgpu_submission *s = &vgpu->submission;
1046
1047 if (!s->active)
1048 return;
1049
Zhi Wange2c43c02017-09-13 01:58:35 +08001050 clean_workloads(vgpu, engine_mask);
Zhi Wang06bb3722017-09-13 01:41:35 +08001051 s->ops->reset(vgpu, engine_mask);
1052}
1053
Zhi Wang874b6a92017-09-10 20:08:18 +08001054/**
1055 * intel_vgpu_setup_submission - setup submission-related resource for vGPU
1056 * @vgpu: a vGPU
1057 *
1058 * This function is called when a vGPU is being created.
1059 *
1060 * Returns:
1061 * Zero on success, negative error code if failed.
1062 *
1063 */
1064int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
Zhi Wange4734052016-05-01 07:42:16 -04001065{
Zhi Wang1406a142017-09-10 21:15:18 +08001066 struct intel_vgpu_submission *s = &vgpu->submission;
Zhi Wang9a9829e2017-09-10 20:28:09 +08001067 enum intel_engine_id i;
1068 struct intel_engine_cs *engine;
1069 int ret;
Zhi Wange4734052016-05-01 07:42:16 -04001070
Zhi Wang1406a142017-09-10 21:15:18 +08001071 s->shadow_ctx = i915_gem_context_create_gvt(
Zhi Wange4734052016-05-01 07:42:16 -04001072 &vgpu->gvt->dev_priv->drm);
Zhi Wang1406a142017-09-10 21:15:18 +08001073 if (IS_ERR(s->shadow_ctx))
1074 return PTR_ERR(s->shadow_ctx);
Zhi Wange4734052016-05-01 07:42:16 -04001075
Zhenyu Wang16036602017-12-04 10:42:58 +08001076 if (HAS_LOGICAL_RING_PREEMPTION(vgpu->gvt->dev_priv))
1077 s->shadow_ctx->priority = INT_MAX;
1078
Zhi Wang1406a142017-09-10 21:15:18 +08001079 bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
Kechen Lu9dfb8e52017-08-10 07:41:36 +08001080
Zhi Wang1406a142017-09-10 21:15:18 +08001081 s->workloads = kmem_cache_create("gvt-g_vgpu_workload",
Zhi Wang9a9829e2017-09-10 20:28:09 +08001082 sizeof(struct intel_vgpu_workload), 0,
1083 SLAB_HWCACHE_ALIGN,
1084 NULL);
1085
Zhi Wang1406a142017-09-10 21:15:18 +08001086 if (!s->workloads) {
Zhi Wang9a9829e2017-09-10 20:28:09 +08001087 ret = -ENOMEM;
1088 goto out_shadow_ctx;
1089 }
1090
1091 for_each_engine(engine, vgpu->gvt->dev_priv, i)
Zhi Wang1406a142017-09-10 21:15:18 +08001092 INIT_LIST_HEAD(&s->workload_q_head[i]);
Zhi Wang9a9829e2017-09-10 20:28:09 +08001093
Zhi Wang1406a142017-09-10 21:15:18 +08001094 atomic_set(&s->running_workload_num, 0);
Zhi Wang91d5d852017-09-10 21:33:20 +08001095 bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
Zhi Wang9a9829e2017-09-10 20:28:09 +08001096
Zhi Wange4734052016-05-01 07:42:16 -04001097 return 0;
Zhi Wang9a9829e2017-09-10 20:28:09 +08001098
1099out_shadow_ctx:
Zhi Wang1406a142017-09-10 21:15:18 +08001100 i915_gem_context_put(s->shadow_ctx);
Zhi Wang9a9829e2017-09-10 20:28:09 +08001101 return ret;
Zhi Wange4734052016-05-01 07:42:16 -04001102}
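/*
 * Expected lifecycle, pieced together from the kernel-doc comments in
 * this file (an illustrative ordering sketch rather than a contract):
 *
 *	intel_vgpu_setup_submission(vgpu);              // vGPU creation
 *	intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES,
 *		INTEL_VGPU_EXECLIST_SUBMISSION);        // guest configures
 *							// submission interface
 *	intel_vgpu_reset_submission(vgpu, engine_mask); // vGPU/engine reset
 *	intel_vgpu_clean_submission(vgpu);              // vGPU destruction
 */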
Zhi Wang21527a82017-09-12 21:42:09 +08001103
1104/**
Zhi Wangad1d3632017-09-13 00:31:29 +08001105 * intel_vgpu_select_submission_ops - select virtual submission interface
1106 * @vgpu: a vGPU
1107 * @interface: expected vGPU virtual submission interface
1108 *
1109 * This function is called when guest configures submission interface.
1110 *
1111 * Returns:
1112 * Zero on success, negative error code if failed.
1113 *
1114 */
1115int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
Weinan Li7569a062018-01-26 15:09:07 +08001116 unsigned long engine_mask,
Zhi Wangad1d3632017-09-13 00:31:29 +08001117 unsigned int interface)
1118{
1119 struct intel_vgpu_submission *s = &vgpu->submission;
1120 const struct intel_vgpu_submission_ops *ops[] = {
1121 [INTEL_VGPU_EXECLIST_SUBMISSION] =
1122 &intel_vgpu_execlist_submission_ops,
1123 };
1124 int ret;
1125
1126 if (WARN_ON(interface >= ARRAY_SIZE(ops)))
1127 return -EINVAL;
1128
Weinan Li9212b132018-01-26 15:09:08 +08001129 if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
1130 return -EINVAL;
1131
1132 if (s->active)
Weinan Li7569a062018-01-26 15:09:07 +08001133 s->ops->clean(vgpu, engine_mask);
Zhi Wangad1d3632017-09-13 00:31:29 +08001134
1135 if (interface == 0) {
1136 s->ops = NULL;
1137 s->virtual_submission_interface = 0;
Weinan Li9212b132018-01-26 15:09:08 +08001138 s->active = false;
1139 gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
Zhi Wangad1d3632017-09-13 00:31:29 +08001140 return 0;
1141 }
1142
Weinan Li7569a062018-01-26 15:09:07 +08001143 ret = ops[interface]->init(vgpu, engine_mask);
Zhi Wangad1d3632017-09-13 00:31:29 +08001144 if (ret)
1145 return ret;
1146
1147 s->ops = ops[interface];
1148 s->virtual_submission_interface = interface;
1149 s->active = true;
1150
1151 gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
1152 vgpu->id, s->ops->name);
1153
1154 return 0;
1155}
1156
1157/**
Zhi Wang21527a82017-09-12 21:42:09 +08001158 * intel_vgpu_destroy_workload - destroy a vGPU workload
 1159 * @workload: the workload to destroy
 1160 *
 1161 * This function is called when destroying a vGPU workload.
1162 *
1163 */
1164void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
1165{
1166 struct intel_vgpu_submission *s = &workload->vgpu->submission;
1167
1168 if (workload->shadow_mm)
Changbin Du1bc25852018-01-30 19:19:41 +08001169 intel_vgpu_mm_put(workload->shadow_mm);
Zhi Wang21527a82017-09-12 21:42:09 +08001170
1171 kmem_cache_free(s->workloads, workload);
1172}
1173
Zhi Wang6d763032017-09-12 22:33:12 +08001174static struct intel_vgpu_workload *
1175alloc_workload(struct intel_vgpu *vgpu)
Zhi Wang21527a82017-09-12 21:42:09 +08001176{
1177 struct intel_vgpu_submission *s = &vgpu->submission;
1178 struct intel_vgpu_workload *workload;
1179
1180 workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
1181 if (!workload)
1182 return ERR_PTR(-ENOMEM);
1183
1184 INIT_LIST_HEAD(&workload->list);
1185 INIT_LIST_HEAD(&workload->shadow_bb);
1186
1187 init_waitqueue_head(&workload->shadow_ctx_status_wq);
1188 atomic_set(&workload->shadow_ctx_active, 0);
1189
1190 workload->status = -EINPROGRESS;
1191 workload->shadowed = false;
1192 workload->vgpu = vgpu;
1193
1194 return workload;
1195}
Zhi Wang6d763032017-09-12 22:33:12 +08001196
1197#define RING_CTX_OFF(x) \
1198 offsetof(struct execlist_ring_context, x)
1199
1200static void read_guest_pdps(struct intel_vgpu *vgpu,
1201 u64 ring_context_gpa, u32 pdp[8])
1202{
1203 u64 gpa;
1204 int i;
1205
1206 gpa = ring_context_gpa + RING_CTX_OFF(pdp3_UDW.val);
1207
1208 for (i = 0; i < 8; i++)
1209 intel_gvt_hypervisor_read_gpa(vgpu,
1210 gpa + i * 8, &pdp[7 - i], 4);
1211}
1212
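/*
 * Translate the guest context's addressing mode into a shadow PPGTT:
 * mode 1 is the legacy 32-bit (3-level) layout rooted in four PDPs,
 * mode 3 is the legacy 64-bit (4-level) layout rooted in a PML4, and
 * advanced (SVM) contexts are rejected. The guest root pointers are
 * read from the ring context and handed to intel_vgpu_get_ppgtt_mm()
 * to obtain the matching shadow mm for the workload.
 */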
1213static int prepare_mm(struct intel_vgpu_workload *workload)
1214{
1215 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
1216 struct intel_vgpu_mm *mm;
1217 struct intel_vgpu *vgpu = workload->vgpu;
Changbin Duede9d0c2018-01-30 19:19:40 +08001218 intel_gvt_gtt_type_t root_entry_type;
1219 u64 pdps[GVT_RING_CTX_NR_PDPS];
Zhi Wang6d763032017-09-12 22:33:12 +08001220
Changbin Duede9d0c2018-01-30 19:19:40 +08001221 switch (desc->addressing_mode) {
1222 case 1: /* legacy 32-bit */
1223 root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
1224 break;
1225 case 3: /* legacy 64-bit */
1226 root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
1227 break;
1228 default:
Zhi Wang6d763032017-09-12 22:33:12 +08001229 gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
1230 return -EINVAL;
1231 }
1232
Changbin Duede9d0c2018-01-30 19:19:40 +08001233 read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
Zhi Wang6d763032017-09-12 22:33:12 +08001234
Changbin Due6e9c462018-01-30 19:19:46 +08001235 mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
1236 if (IS_ERR(mm))
1237 return PTR_ERR(mm);
Zhi Wang6d763032017-09-12 22:33:12 +08001238
Zhi Wang6d763032017-09-12 22:33:12 +08001239 workload->shadow_mm = mm;
1240 return 0;
1241}
1242
1243#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
1244 ((a)->lrca == (b)->lrca))
1245
1246#define get_last_workload(q) \
1247 (list_empty(q) ? NULL : container_of(q->prev, \
1248 struct intel_vgpu_workload, list))
1249/**
1250 * intel_vgpu_create_workload - create a vGPU workload
1251 * @vgpu: a vGPU
1252 * @desc: a guest context descriptor
1253 *
1254 * This function is called when creating a vGPU workload.
1255 *
1256 * Returns:
1257 * struct intel_vgpu_workload * on success, negative error code in
1258 * pointer if failed.
1259 *
1260 */
1261struct intel_vgpu_workload *
1262intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
1263 struct execlist_ctx_descriptor_format *desc)
1264{
1265 struct intel_vgpu_submission *s = &vgpu->submission;
1266 struct list_head *q = workload_q_head(vgpu, ring_id);
1267 struct intel_vgpu_workload *last_workload = get_last_workload(q);
1268 struct intel_vgpu_workload *workload = NULL;
1269 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
1270 u64 ring_context_gpa;
1271 u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
1272 int ret;
1273
1274 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
Zhi Wang9556e112017-10-10 13:51:32 +08001275 (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
Zhi Wang6d763032017-09-12 22:33:12 +08001276 if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
1277 gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
1278 return ERR_PTR(-EINVAL);
1279 }
1280
1281 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1282 RING_CTX_OFF(ring_header.val), &head, 4);
1283
1284 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1285 RING_CTX_OFF(ring_tail.val), &tail, 4);
1286
1287 head &= RB_HEAD_OFF_MASK;
1288 tail &= RB_TAIL_OFF_MASK;
1289
1290 if (last_workload && same_context(&last_workload->ctx_desc, desc)) {
1291 gvt_dbg_el("ring id %d cur workload == last\n", ring_id);
1292 gvt_dbg_el("ctx head %x real head %lx\n", head,
1293 last_workload->rb_tail);
1294 /*
1295 * cannot use guest context head pointer here,
1296 * as it might not be updated at this time
1297 */
1298 head = last_workload->rb_tail;
1299 }
1300
1301 gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
1302
1303 /* record some ring buffer register values for scan and shadow */
1304 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1305 RING_CTX_OFF(rb_start.val), &start, 4);
1306 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1307 RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
1308 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1309 RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
1310
1311 workload = alloc_workload(vgpu);
1312 if (IS_ERR(workload))
1313 return workload;
1314
1315 workload->ring_id = ring_id;
1316 workload->ctx_desc = *desc;
1317 workload->ring_context_gpa = ring_context_gpa;
1318 workload->rb_head = head;
1319 workload->rb_tail = tail;
1320 workload->rb_start = start;
1321 workload->rb_ctl = ctl;
1322
1323 if (ring_id == RCS) {
1324 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1325 RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
1326 intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
1327 RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
1328
1329 workload->wa_ctx.indirect_ctx.guest_gma =
1330 indirect_ctx & INDIRECT_CTX_ADDR_MASK;
1331 workload->wa_ctx.indirect_ctx.size =
1332 (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
1333 CACHELINE_BYTES;
1334 workload->wa_ctx.per_ctx.guest_gma =
1335 per_ctx & PER_CTX_ADDR_MASK;
1336 workload->wa_ctx.per_ctx.valid = per_ctx & 1;
1337 }
1338
1339 gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
1340 workload, ring_id, head, tail, start, ctl);
1341
1342 ret = prepare_mm(workload);
1343 if (ret) {
1344 kmem_cache_free(s->workloads, workload);
1345 return ERR_PTR(ret);
1346 }
1347
1348 /* Only scan and shadow the first workload in the queue
1349 * as there is only one pre-allocated buf-obj for shadow.
1350 */
1351 if (list_empty(workload_q_head(vgpu, ring_id))) {
1352 intel_runtime_pm_get(dev_priv);
1353 mutex_lock(&dev_priv->drm.struct_mutex);
1354 ret = intel_gvt_scan_and_shadow_workload(workload);
1355 mutex_unlock(&dev_priv->drm.struct_mutex);
1356 intel_runtime_pm_put(dev_priv);
1357 }
1358
1359 if (ret && (vgpu_is_vm_unhealthy(ret))) {
1360 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1361 intel_vgpu_destroy_workload(workload);
1362 return ERR_PTR(ret);
1363 }
1364
1365 return workload;
1366}
Changbin Du59a716c2017-11-29 15:40:06 +08001367
1368/**
 1369 * intel_vgpu_queue_workload - Queue a vGPU workload
 1370 * @workload: the workload to queue
1371 */
1372void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
1373{
1374 list_add_tail(&workload->list,
1375 workload_q_head(workload->vgpu, workload->ring_id));
Changbin Duc1304562017-11-29 15:40:07 +08001376 intel_gvt_kick_schedule(workload->vgpu->gvt);
Changbin Du59a716c2017-11-29 15:40:06 +08001377 wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
1378}