/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Anhua Xu
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"

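/*
 * Return true if the vGPU still has workloads queued on any engine, i.e.
 * there is something left for the dispatcher to run on its behalf.
 */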
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_execlist *execlist;
	int i;

	for (i = 0; i < I915_NUM_ENGINES; i++) {
		execlist = &vgpu->execlist[i];
		if (!list_empty(workload_q_head(vgpu, i)))
			return true;
	}

	return false;
}

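/*
 * Switch scheduling to scheduler->next_vgpu. While any engine still has a
 * workload in flight, only need_reschedule is set so the dispatch thread
 * stops issuing new workloads; the actual switch happens on a later call,
 * once all engines have drained, after which the per-engine wait queues are
 * woken to resume dispatching for the new vgpu.
 */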
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	int i;

	/* no target to schedule */
	if (!scheduler->next_vgpu)
		return;

	gvt_dbg_sched("try to schedule next vgpu %d\n",
			scheduler->next_vgpu->id);

	/*
	 * After this flag is set, the workload dispatch thread will
	 * stop dispatching workloads for the current vgpu.
	 */
	scheduler->need_reschedule = true;

	/* still have uncompleted workloads? */
	for (i = 0; i < I915_NUM_ENGINES; i++) {
		if (scheduler->current_workload[i]) {
			gvt_dbg_sched("still have running workload\n");
			return;
		}
	}

	gvt_dbg_sched("switch to next vgpu %d\n",
			scheduler->next_vgpu->id);

	/* switch current vgpu */
	scheduler->current_vgpu = scheduler->next_vgpu;
	scheduler->next_vgpu = NULL;

	scheduler->need_reschedule = false;

	/* wake up workload dispatch thread */
	for (i = 0; i < I915_NUM_ENGINES; i++)
		wake_up(&scheduler->waitq[i]);
}

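/*
 * A simple time-slice based scheduling policy ("tbs"): every registered
 * vGPU sits on a run queue, and a delayed work item periodically (every
 * GVT_DEFAULT_TIME_SLICE, nominally 1 ms) picks the next vGPU that has
 * pending workloads.
 */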
struct tbs_vgpu_data {
	struct list_head list;
	struct intel_vgpu *vgpu;
	/* put some per-vgpu sched stats here */
};

struct tbs_sched_data {
	struct intel_gvt *gvt;
	struct delayed_work work;
	unsigned long period;
	struct list_head runq_head;
};

#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)

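/*
 * Delayed-work handler for the tbs policy. Starting from the current vGPU's
 * position on the run queue (or from the queue head if nothing is running),
 * it scans for the next vGPU with pending workloads, marks it as
 * scheduler->next_vgpu and tries to switch to it. The work re-arms itself
 * for the next time slice as long as the run queue is non-empty or a switch
 * is still pending.
 */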
static void tbs_sched_func(struct work_struct *work)
{
	struct tbs_sched_data *sched_data = container_of(work,
			struct tbs_sched_data, work.work);
	struct tbs_vgpu_data *vgpu_data;

	struct intel_gvt *gvt = sched_data->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	struct intel_vgpu *vgpu = NULL;
	struct list_head *pos, *head;

	mutex_lock(&gvt->lock);

	/* no vgpu on the run queue, or a target has already been chosen */
	if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
		goto out;

	if (scheduler->current_vgpu) {
		vgpu_data = scheduler->current_vgpu->sched_data;
		head = &vgpu_data->list;
	} else {
		gvt_dbg_sched("no current vgpu search from q head\n");
		head = &sched_data->runq_head;
	}

	/* search for a vgpu with pending workloads */
	list_for_each(pos, head) {
		if (pos == &sched_data->runq_head)
			continue;

		vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
			continue;

		vgpu = vgpu_data->vgpu;
		break;
	}

	if (vgpu) {
		scheduler->next_vgpu = vgpu;
		gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
	}
out:
	if (scheduler->next_vgpu) {
		gvt_dbg_sched("try to schedule next vgpu %d\n",
				scheduler->next_vgpu->id);
		try_to_schedule_next_vgpu(gvt);
	}

	/*
	 * Re-arm if there is still a vgpu on the run queue, or if the last
	 * schedule hasn't finished yet due to a running workload.
	 */
	if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
		schedule_delayed_work(&sched_data->work, sched_data->period);

	mutex_unlock(&gvt->lock);
}

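/*
 * Set up the per-device tbs policy state: an empty run queue and the
 * delayed work that drives scheduling decisions every time slice.
 */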
static int tbs_sched_init(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;

	struct tbs_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->runq_head);
	INIT_DELAYED_WORK(&data->work, tbs_sched_func);
	data->period = GVT_DEFAULT_TIME_SLICE;
	data->gvt = gvt;

	scheduler->sched_data = data;
	return 0;
}

static void tbs_sched_clean(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct tbs_sched_data *data = scheduler->sched_data;

	cancel_delayed_work(&data->work);
	kfree(data);
	scheduler->sched_data = NULL;
}

static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
	struct tbs_vgpu_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->vgpu = vgpu;
	INIT_LIST_HEAD(&data->list);

	vgpu->sched_data = data;
	return 0;
}

static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
	kfree(vgpu->sched_data);
	vgpu->sched_data = NULL;
}

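/*
 * Put the vGPU on the tbs run queue (if it is not already there) and kick
 * the scheduling work so it gets considered on the next time slice.
 */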
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
	struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;

	if (!list_empty(&vgpu_data->list))
		return;

	list_add_tail(&vgpu_data->list, &sched_data->runq_head);
	schedule_delayed_work(&sched_data->work, sched_data->period);
}

static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
	struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;

	list_del_init(&vgpu_data->list);
}

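/* Policy hooks for the tbs scheduler, installed by intel_gvt_init_sched_policy(). */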
struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};

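/*
 * The intel_gvt_*_sched_policy() and intel_vgpu_* entry points below are
 * thin wrappers that forward to the installed policy's ops table.
 */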
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops = &tbs_schedule_ops;

	return gvt->scheduler.sched_ops->init(gvt);
}

void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
	gvt->scheduler.sched_ops->clean(gvt);
}

int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
	return vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
}

void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
}

void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
	gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);

	vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
}

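/*
 * Take a vGPU off the scheduler: remove it from the policy's run queue and
 * drop any next_vgpu/current_vgpu reference to it, forcing a reschedule if
 * it was the one currently being dispatched.
 */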
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;

	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

	scheduler->sched_ops->stop_schedule(vgpu);

	if (scheduler->next_vgpu == vgpu)
		scheduler->next_vgpu = NULL;

	if (scheduler->current_vgpu == vgpu) {
		/* stop workload dispatching */
		scheduler->need_reschedule = true;
		scheduler->current_vgpu = NULL;
	}
}