/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
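/*
 * Time-based scheduler (TBS) policy: vGPUs with pending workloads are
 * picked from an LRU run queue and granted GPU time in proportion to
 * their configured weight, with timeslices rebalanced every 100ms.
 */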
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		if (!list_empty(workload_q_head(vgpu, i)))
			return true;
	}

	return false;
}
struct vgpu_sched_data {
	struct list_head lru_list;
	struct intel_vgpu *vgpu;
	bool active;

	ktime_t sched_in_time;
	ktime_t sched_time;	/* total GPU time consumed so far */
	ktime_t left_ts;	/* timeslice left in the current stage */
	ktime_t allocated_ts;	/* fair timeslice allocated per stage */

	struct vgpu_sched_ctl sched_ctl;
};

struct gvt_sched_data {
	struct intel_gvt *gvt;
	struct hrtimer timer;
	unsigned long period;
	struct list_head lru_runq_head;
	ktime_t expire_time;
};
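/*
 * Timeslice accounting: vgpu_update_timeslice() below subtracts the time a
 * vGPU actually spent on the GPU from left_ts, while gvt_balance_timeslice()
 * tops left_ts up with allocated_ts each balance stage, so unused slice (or
 * overrun debt) carries over within a balance cycle.
 */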
static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
{
	ktime_t delta_ts;
	struct vgpu_sched_data *vgpu_data;

	if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
		return;

	vgpu_data = vgpu->sched_data;
	delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time);
	vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts);
	vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts);
	vgpu_data->sched_in_time = cur_time;
}
#define GVT_TS_BALANCE_PERIOD_MS 100
#define GVT_TS_BALANCE_STAGE_NUM 10
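/*
 * Timeslices are rebalanced every GVT_TS_BALANCE_PERIOD_MS; a full balance
 * cycle consists of GVT_TS_BALANCE_STAGE_NUM stages (100ms * 10 = 1s).
 * Allocation is reset at stage 0 and carried over across stages 1..9.
 */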
static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct list_head *pos;
	static uint64_t stage_check;
	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;

	/* The timeslice accumulation is reset at stage 0, where each
	 * vGPU's slice is allocated afresh without carrying over the
	 * debt from previous stages.
	 */
	if (stage == 0) {
		int total_weight = 0;
		ktime_t fair_timeslice;

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			total_weight += vgpu_data->sched_ctl.weight;
		}

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS),
						     total_weight) * vgpu_data->sched_ctl.weight;

			vgpu_data->allocated_ts = fair_timeslice;
			vgpu_data->left_ts = vgpu_data->allocated_ts;
		}
	} else {
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);

			/* The timeslice for the next 100ms should add the
			 * left/debt slice of previous stages.
			 */
			vgpu_data->left_ts += vgpu_data->allocated_ts;
		}
	}
}
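/*
 * Worked example: with two vGPUs on the run queue, weights 2 and 8,
 * stage 0 computes total_weight = 10 and allocates
 *   fair_timeslice = 100ms / 10 * 2 = 20ms for the first vGPU,
 *   fair_timeslice = 100ms / 10 * 8 = 80ms for the second,
 * so over each 100ms period GPU time is split 20/80 by weight.
 */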
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	struct vgpu_sched_data *vgpu_data;
	ktime_t cur_time;

	/* no need to schedule if next_vgpu is the same as current_vgpu,
	 * let the scheduler choose next_vgpu again by setting it to NULL.
	 */
	if (scheduler->next_vgpu == scheduler->current_vgpu) {
		scheduler->next_vgpu = NULL;
		return;
	}

	/*
	 * after the flag is set, the workload dispatch thread will
	 * stop dispatching workloads for the current vgpu
	 */
	scheduler->need_reschedule = true;

	/* still have uncompleted workloads? */
	for_each_engine(engine, gvt->dev_priv, i) {
		if (scheduler->current_workload[i])
			return;
	}

	cur_time = ktime_get();
	vgpu_update_timeslice(scheduler->current_vgpu, cur_time);
	vgpu_data = scheduler->next_vgpu->sched_data;
	vgpu_data->sched_in_time = cur_time;

	/* switch current vgpu */
	scheduler->current_vgpu = scheduler->next_vgpu;
	scheduler->next_vgpu = NULL;

	scheduler->need_reschedule = false;

	/* wake up the workload dispatch thread */
	for_each_engine(engine, gvt->dev_priv, i)
		wake_up(&scheduler->waitq[i]);
}
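/*
 * Note the two-phase handover above: if any engine still has a
 * current_workload in flight, the function returns with need_reschedule
 * set, and the switch is retried on a later scheduling pass once
 * dispatching for the outgoing vGPU has drained.
 */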
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;
	struct list_head *head = &sched_data->lru_runq_head;
	struct list_head *pos;

	/* search for a vgpu with pending workloads */
	list_for_each(pos, head) {
		vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
			continue;

		/* Return the vGPU only if it has time slice left */
		if (vgpu_data->left_ts > 0) {
			vgpu = vgpu_data->vgpu;
			break;
		}
	}

	return vgpu;
}
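/*
 * Because the run queue is kept in LRU order and tbs_sched_func() rotates
 * the chosen vGPU to the tail, busy vGPUs with timeslice left are served
 * round-robin rather than the same vGPU being picked repeatedly.
 */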
/* in nanoseconds: the scheduler timer fires every 1ms */
#define GVT_DEFAULT_TIME_SLICE 1000000
static void tbs_sched_func(struct gvt_sched_data *sched_data)
{
	struct intel_gvt *gvt = sched_data->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;

	/* no active vgpu, or a target has already been chosen */
	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
		goto out;

	vgpu = find_busy_vgpu(sched_data);
	if (vgpu) {
		scheduler->next_vgpu = vgpu;

		/* Move the last used vGPU to the tail of lru_list */
		vgpu_data = vgpu->sched_data;
		list_del_init(&vgpu_data->lru_list);
		list_add_tail(&vgpu_data->lru_list,
			      &sched_data->lru_runq_head);
	} else {
		scheduler->next_vgpu = gvt->idle_vgpu;
	}
out:
	if (scheduler->next_vgpu)
		try_to_schedule_next_vgpu(gvt);
}
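/*
 * If no busy vGPU has timeslice left, fall back to the idle vGPU so that
 * the scheduler always has a next target to switch to.
 */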
void intel_gvt_schedule(struct intel_gvt *gvt)
{
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
	ktime_t cur_time;

	mutex_lock(&gvt->sched_lock);
	cur_time = ktime_get();

	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request)) {
		if (cur_time >= sched_data->expire_time) {
			gvt_balance_timeslice(sched_data);
			sched_data->expire_time = ktime_add_ms(
				cur_time, GVT_TS_BALANCE_PERIOD_MS);
		}
	}
	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);

	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
	tbs_sched_func(sched_data);

	mutex_unlock(&gvt->sched_lock);
}
static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
{
	struct gvt_sched_data *data;

	data = container_of(timer_data, struct gvt_sched_data, timer);

	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);

	hrtimer_add_expires_ns(&data->timer, data->period);

	return HRTIMER_RESTART;
}
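/*
 * Standard periodic-hrtimer pattern: the callback runs in timer context,
 * so it only raises INTEL_GVT_REQUEST_SCHED for the GVT service thread
 * (which ends up calling intel_gvt_schedule()), then pushes its own
 * expiry forward by one period and asks to be restarted.
 */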
static int tbs_sched_init(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->lru_runq_head);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	data->timer.function = tbs_timer_fn;
	data->period = GVT_DEFAULT_TIME_SLICE;
	data->gvt = gvt;

	scheduler->sched_data = data;

	return 0;
}
static void tbs_sched_clean(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data = scheduler->sched_data;

	hrtimer_cancel(&data->timer);

	kfree(data);
	scheduler->sched_data = NULL;
}
static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->sched_ctl.weight = vgpu->sched_ctl.weight;
	data->vgpu = vgpu;
	INIT_LIST_HEAD(&data->lru_list);

	vgpu->sched_data = data;

	return 0;
}
static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;

	kfree(vgpu->sched_data);
	vgpu->sched_data = NULL;

	/* the last vgpu id has been removed; stop the scheduler timer */
	if (idr_is_empty(&gvt->vgpu_idr))
		hrtimer_cancel(&sched_data->timer);
}
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	if (!list_empty(&vgpu_data->lru_list))
		return;

	list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);

	/* start the scheduler timer lazily, with the first active vGPU */
	if (!hrtimer_active(&sched_data->timer))
		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
			sched_data->period), HRTIMER_MODE_ABS);
	vgpu_data->active = true;
}
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	list_del_init(&vgpu_data->lru_list);
	vgpu_data->active = false;
}
static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};
int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
	int ret;

	mutex_lock(&gvt->sched_lock);
	gvt->scheduler.sched_ops = &tbs_schedule_ops;
	ret = gvt->scheduler.sched_ops->init(gvt);
	mutex_unlock(&gvt->sched_lock);

	return ret;
}
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
	mutex_lock(&gvt->sched_lock);
	gvt->scheduler.sched_ops->clean(gvt);
	mutex_unlock(&gvt->sched_lock);
}
/* For a per-vgpu scheduler policy there are two pieces of per-vgpu data:
 * sched_data and sched_ctl. We treat both as part of the global scheduler,
 * protected by gvt->sched_lock. Callers must decide for themselves whether
 * vgpu_lock should be held outside.
 */
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
	int ret;

	mutex_lock(&vgpu->gvt->sched_lock);
	ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
	mutex_unlock(&vgpu->gvt->sched_lock);

	return ret;
}
void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->sched_lock);
	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
	mutex_unlock(&vgpu->gvt->sched_lock);
}
void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	mutex_lock(&vgpu->gvt->sched_lock);
	if (!vgpu_data->active) {
		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
	}
	mutex_unlock(&vgpu->gvt->sched_lock);
}
void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{
	mutex_lock(&gvt->sched_lock);
	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
	mutex_unlock(&gvt->sched_lock);
}
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
	int ring_id;

	if (!vgpu_data->active)
		return;

	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

	mutex_lock(&vgpu->gvt->sched_lock);
	scheduler->sched_ops->stop_schedule(vgpu);

	if (scheduler->next_vgpu == vgpu)
		scheduler->next_vgpu = NULL;

	if (scheduler->current_vgpu == vgpu) {
		/* stop workload dispatching */
		scheduler->need_reschedule = true;
		scheduler->current_vgpu = NULL;
	}

	spin_lock_bh(&scheduler->mmio_context_lock);
	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
		if (scheduler->engine_owner[ring_id] == vgpu) {
			/* switch the ring from this vGPU back to the host */
			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
	}
	spin_unlock_bh(&scheduler->mmio_context_lock);
	mutex_unlock(&vgpu->gvt->sched_lock);
}
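/*
 * The MMIO pass above ensures that any engine still owned by the stopping
 * vGPU has its MMIO context switched back to the host (the NULL "next"
 * vGPU) before the vGPU is torn down, so no engine is left referencing
 * freed per-vGPU state.
 */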