Home | History | Annotate | Line # | Download | only in gvt
      1  1.1  riastrad /*	$NetBSD: sched_policy.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $	*/
      2  1.1  riastrad 
      3  1.1  riastrad /*
      4  1.1  riastrad  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
      5  1.1  riastrad  *
      6  1.1  riastrad  * Permission is hereby granted, free of charge, to any person obtaining a
      7  1.1  riastrad  * copy of this software and associated documentation files (the "Software"),
      8  1.1  riastrad  * to deal in the Software without restriction, including without limitation
      9  1.1  riastrad  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  1.1  riastrad  * and/or sell copies of the Software, and to permit persons to whom the
     11  1.1  riastrad  * Software is furnished to do so, subject to the following conditions:
     12  1.1  riastrad  *
     13  1.1  riastrad  * The above copyright notice and this permission notice (including the next
     14  1.1  riastrad  * paragraph) shall be included in all copies or substantial portions of the
     15  1.1  riastrad  * Software.
     16  1.1  riastrad  *
     17  1.1  riastrad  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     18  1.1  riastrad  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     19  1.1  riastrad  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     20  1.1  riastrad  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     21  1.1  riastrad  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
     22  1.1  riastrad  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     23  1.1  riastrad  * SOFTWARE.
     24  1.1  riastrad  *
     25  1.1  riastrad  * Authors:
     26  1.1  riastrad  *    Anhua Xu
     27  1.1  riastrad  *    Kevin Tian <kevin.tian (at) intel.com>
     28  1.1  riastrad  *
     29  1.1  riastrad  * Contributors:
     30  1.1  riastrad  *    Min He <min.he (at) intel.com>
     31  1.1  riastrad  *    Bing Niu <bing.niu (at) intel.com>
     32  1.1  riastrad  *    Zhi Wang <zhi.a.wang (at) intel.com>
     33  1.1  riastrad  *
     34  1.1  riastrad  */
     35  1.1  riastrad 
     36  1.1  riastrad #include <sys/cdefs.h>
     37  1.1  riastrad __KERNEL_RCSID(0, "$NetBSD: sched_policy.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $");
     38  1.1  riastrad 
     39  1.1  riastrad #include "i915_drv.h"
     40  1.1  riastrad #include "gvt.h"
     41  1.1  riastrad 
     42  1.1  riastrad static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
     43  1.1  riastrad {
     44  1.1  riastrad 	enum intel_engine_id i;
     45  1.1  riastrad 	struct intel_engine_cs *engine;
     46  1.1  riastrad 
     47  1.1  riastrad 	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
     48  1.1  riastrad 		if (!list_empty(workload_q_head(vgpu, i)))
     49  1.1  riastrad 			return true;
     50  1.1  riastrad 	}
     51  1.1  riastrad 
     52  1.1  riastrad 	return false;
     53  1.1  riastrad }
     54  1.1  riastrad 
/* We give 2 seconds higher prio for vGPU during start */
#define GVT_SCHED_VGPU_PRI_TIME  2

/*
 * Per-vGPU scheduling state, hung off vgpu->sched_data.
 * Manipulated under gvt->sched_lock by the policy callbacks below.
 */
struct vgpu_sched_data {
	struct list_head lru_list;	/* link on gvt_sched_data.lru_runq_head */
	struct intel_vgpu *vgpu;	/* back pointer to the owning vGPU */
	bool active;			/* vGPU currently participates in scheduling */
	bool pri_sched;			/* startup-priority window is in effect */
	ktime_t pri_time;		/* when the startup-priority window expires */
	ktime_t sched_in_time;		/* last time this vGPU was switched in */
	ktime_t sched_time;		/* accumulated total run time */
	ktime_t left_ts;		/* time slice remaining in this balance window */
	ktime_t allocated_ts;		/* slice granted per balance window (by weight) */

	struct vgpu_sched_ctl sched_ctl;	/* scheduling knobs (weight) for this vGPU */
};
     71  1.1  riastrad 
/*
 * Global state of the time-based scheduling (TBS) policy, installed as
 * gvt->scheduler.sched_data by tbs_sched_init().
 */
struct gvt_sched_data {
	struct intel_gvt *gvt;		/* back pointer to the GVT device */
	struct hrtimer timer;		/* periodic scheduling tick (tbs_timer_fn) */
	unsigned long period;		/* tick period in ns (GVT_DEFAULT_TIME_SLICE) */
	struct list_head lru_runq_head;	/* LRU-ordered run queue of vgpu_sched_data */
	ktime_t expire_time;		/* deadline of the current balance window */
};
     79  1.1  riastrad 
     80  1.1  riastrad static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
     81  1.1  riastrad {
     82  1.1  riastrad 	ktime_t delta_ts;
     83  1.1  riastrad 	struct vgpu_sched_data *vgpu_data;
     84  1.1  riastrad 
     85  1.1  riastrad 	if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
     86  1.1  riastrad 		return;
     87  1.1  riastrad 
     88  1.1  riastrad 	vgpu_data = vgpu->sched_data;
     89  1.1  riastrad 	delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time);
     90  1.1  riastrad 	vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts);
     91  1.1  riastrad 	vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts);
     92  1.1  riastrad 	vgpu_data->sched_in_time = cur_time;
     93  1.1  riastrad }
     94  1.1  riastrad 
#define GVT_TS_BALANCE_PERIOD_MS 100
#define GVT_TS_BALANCE_STAGE_NUM 10

/*
 * gvt_balance_timeslice - redistribute GPU time among the vGPUs on the
 * run queue, proportional to each vGPU's sched_ctl.weight.
 *
 * Operates in stages: every GVT_TS_BALANCE_STAGE_NUM invocations
 * (stage 0) the per-vGPU allocation is recomputed and left_ts reset,
 * discarding accumulated surplus/debt; in the intermediate stages the
 * allocation is simply added on top of whatever left_ts remains.
 *
 * Called from intel_gvt_schedule() under gvt->sched_lock, which also
 * serializes the static stage counter.
 */
static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct list_head *pos;
	static u64 stage_check;
	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;

	/* The timeslice accumulation reset at stage 0, which is
	 * allocated again without adding previous debt.
	 */
	if (stage == 0) {
		int total_weight = 0;
		ktime_t fair_timeslice;

		/* Pass 1: sum the weights of all queued vGPUs. */
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			total_weight += vgpu_data->sched_ctl.weight;
		}

		/* Pass 2: grant each vGPU its weighted share of the
		 * balance period.
		 * NOTE(review): assumes total_weight > 0 whenever the run
		 * queue is non-empty (i.e. weights are positive) --
		 * otherwise ktime_divns() divides by zero; confirm how
		 * sched_ctl.weight is assigned.
		 */
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS),
						     total_weight) * vgpu_data->sched_ctl.weight;

			vgpu_data->allocated_ts = fair_timeslice;
			vgpu_data->left_ts = vgpu_data->allocated_ts;
		}
	} else {
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);

			/* timeslice for next 100ms should add the left/debt
			 * slice of previous stages.
			 */
			vgpu_data->left_ts += vgpu_data->allocated_ts;
		}
	}
}
    136  1.1  riastrad 
/*
 * try_to_schedule_next_vgpu - attempt the actual vGPU switch once a
 * next_vgpu has been chosen by the policy.
 *
 * The switch is deferred (function returns with need_reschedule still
 * set) while any engine has an uncompleted workload; a later tick will
 * retry.  On success the timeslice accounting is rolled over and the
 * dispatch threads are woken for the new current vGPU.
 */
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	struct vgpu_sched_data *vgpu_data;
	ktime_t cur_time;

	/* no need to schedule if next_vgpu is the same with current_vgpu,
	 * let scheduler chose next_vgpu again by setting it to NULL.
	 */
	if (scheduler->next_vgpu == scheduler->current_vgpu) {
		scheduler->next_vgpu = NULL;
		return;
	}

	/*
	 * after the flag is set, workload dispatch thread will
	 * stop dispatching workload for current vgpu
	 */
	scheduler->need_reschedule = true;

	/* still have uncompleted workload? bail out; need_reschedule stays
	 * set so no new work is dispatched in the meantime.
	 */
	for_each_engine(engine, gvt->dev_priv, i) {
		if (scheduler->current_workload[i])
			return;
	}

	/* charge the outgoing vGPU up to now and stamp the incoming
	 * vGPU's switch-in time with the same timestamp.
	 */
	cur_time = ktime_get();
	vgpu_update_timeslice(scheduler->current_vgpu, cur_time);
	vgpu_data = scheduler->next_vgpu->sched_data;
	vgpu_data->sched_in_time = cur_time;

	/* switch current vgpu */
	scheduler->current_vgpu = scheduler->next_vgpu;
	scheduler->next_vgpu = NULL;

	scheduler->need_reschedule = false;

	/* wake up workload dispatch thread */
	for_each_engine(engine, gvt->dev_priv, i)
		wake_up(&scheduler->waitq[i]);
}
    180  1.1  riastrad 
    181  1.1  riastrad static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
    182  1.1  riastrad {
    183  1.1  riastrad 	struct vgpu_sched_data *vgpu_data;
    184  1.1  riastrad 	struct intel_vgpu *vgpu = NULL;
    185  1.1  riastrad 	struct list_head *head = &sched_data->lru_runq_head;
    186  1.1  riastrad 	struct list_head *pos;
    187  1.1  riastrad 
    188  1.1  riastrad 	/* search a vgpu with pending workload */
    189  1.1  riastrad 	list_for_each(pos, head) {
    190  1.1  riastrad 
    191  1.1  riastrad 		vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
    192  1.1  riastrad 		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
    193  1.1  riastrad 			continue;
    194  1.1  riastrad 
    195  1.1  riastrad 		if (vgpu_data->pri_sched) {
    196  1.1  riastrad 			if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
    197  1.1  riastrad 				vgpu = vgpu_data->vgpu;
    198  1.1  riastrad 				break;
    199  1.1  riastrad 			} else
    200  1.1  riastrad 				vgpu_data->pri_sched = false;
    201  1.1  riastrad 		}
    202  1.1  riastrad 
    203  1.1  riastrad 		/* Return the vGPU only if it has time slice left */
    204  1.1  riastrad 		if (vgpu_data->left_ts > 0) {
    205  1.1  riastrad 			vgpu = vgpu_data->vgpu;
    206  1.1  riastrad 			break;
    207  1.1  riastrad 		}
    208  1.1  riastrad 	}
    209  1.1  riastrad 
    210  1.1  riastrad 	return vgpu;
    211  1.1  riastrad }
    212  1.1  riastrad 
    213  1.1  riastrad /* in nanosecond */
    214  1.1  riastrad #define GVT_DEFAULT_TIME_SLICE 1000000
    215  1.1  riastrad 
    216  1.1  riastrad static void tbs_sched_func(struct gvt_sched_data *sched_data)
    217  1.1  riastrad {
    218  1.1  riastrad 	struct intel_gvt *gvt = sched_data->gvt;
    219  1.1  riastrad 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
    220  1.1  riastrad 	struct vgpu_sched_data *vgpu_data;
    221  1.1  riastrad 	struct intel_vgpu *vgpu = NULL;
    222  1.1  riastrad 
    223  1.1  riastrad 	/* no active vgpu or has already had a target */
    224  1.1  riastrad 	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
    225  1.1  riastrad 		goto out;
    226  1.1  riastrad 
    227  1.1  riastrad 	vgpu = find_busy_vgpu(sched_data);
    228  1.1  riastrad 	if (vgpu) {
    229  1.1  riastrad 		scheduler->next_vgpu = vgpu;
    230  1.1  riastrad 		vgpu_data = vgpu->sched_data;
    231  1.1  riastrad 		if (!vgpu_data->pri_sched) {
    232  1.1  riastrad 			/* Move the last used vGPU to the tail of lru_list */
    233  1.1  riastrad 			list_del_init(&vgpu_data->lru_list);
    234  1.1  riastrad 			list_add_tail(&vgpu_data->lru_list,
    235  1.1  riastrad 				      &sched_data->lru_runq_head);
    236  1.1  riastrad 		}
    237  1.1  riastrad 	} else {
    238  1.1  riastrad 		scheduler->next_vgpu = gvt->idle_vgpu;
    239  1.1  riastrad 	}
    240  1.1  riastrad out:
    241  1.1  riastrad 	if (scheduler->next_vgpu)
    242  1.1  riastrad 		try_to_schedule_next_vgpu(gvt);
    243  1.1  riastrad }
    244  1.1  riastrad 
/*
 * intel_gvt_schedule - scheduler entry point, driven by service requests.
 *
 * If a REQUEST_SCHED tick is pending and the current balance window has
 * expired, timeslices are redistributed; then the running vGPU is charged
 * for the elapsed time and the TBS policy picks the next vGPU.
 */
void intel_gvt_schedule(struct intel_gvt *gvt)
{
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
	ktime_t cur_time;

	mutex_lock(&gvt->sched_lock);
	cur_time = ktime_get();

	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request)) {
		/* Rebalance at most once per GVT_TS_BALANCE_PERIOD_MS. */
		if (cur_time >= sched_data->expire_time) {
			gvt_balance_timeslice(sched_data);
			sched_data->expire_time = ktime_add_ms(
				cur_time, GVT_TS_BALANCE_PERIOD_MS);
		}
	}
	/* An event-triggered kick needs no rebalancing; just consume it. */
	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);

	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
	tbs_sched_func(sched_data);

	mutex_unlock(&gvt->sched_lock);
}
    268  1.1  riastrad 
    269  1.1  riastrad static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
    270  1.1  riastrad {
    271  1.1  riastrad 	struct gvt_sched_data *data;
    272  1.1  riastrad 
    273  1.1  riastrad 	data = container_of(timer_data, struct gvt_sched_data, timer);
    274  1.1  riastrad 
    275  1.1  riastrad 	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);
    276  1.1  riastrad 
    277  1.1  riastrad 	hrtimer_add_expires_ns(&data->timer, data->period);
    278  1.1  riastrad 
    279  1.1  riastrad 	return HRTIMER_RESTART;
    280  1.1  riastrad }
    281  1.1  riastrad 
    282  1.1  riastrad static int tbs_sched_init(struct intel_gvt *gvt)
    283  1.1  riastrad {
    284  1.1  riastrad 	struct intel_gvt_workload_scheduler *scheduler =
    285  1.1  riastrad 		&gvt->scheduler;
    286  1.1  riastrad 
    287  1.1  riastrad 	struct gvt_sched_data *data;
    288  1.1  riastrad 
    289  1.1  riastrad 	data = kzalloc(sizeof(*data), GFP_KERNEL);
    290  1.1  riastrad 	if (!data)
    291  1.1  riastrad 		return -ENOMEM;
    292  1.1  riastrad 
    293  1.1  riastrad 	INIT_LIST_HEAD(&data->lru_runq_head);
    294  1.1  riastrad 	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
    295  1.1  riastrad 	data->timer.function = tbs_timer_fn;
    296  1.1  riastrad 	data->period = GVT_DEFAULT_TIME_SLICE;
    297  1.1  riastrad 	data->gvt = gvt;
    298  1.1  riastrad 
    299  1.1  riastrad 	scheduler->sched_data = data;
    300  1.1  riastrad 
    301  1.1  riastrad 	return 0;
    302  1.1  riastrad }
    303  1.1  riastrad 
    304  1.1  riastrad static void tbs_sched_clean(struct intel_gvt *gvt)
    305  1.1  riastrad {
    306  1.1  riastrad 	struct intel_gvt_workload_scheduler *scheduler =
    307  1.1  riastrad 		&gvt->scheduler;
    308  1.1  riastrad 	struct gvt_sched_data *data = scheduler->sched_data;
    309  1.1  riastrad 
    310  1.1  riastrad 	hrtimer_cancel(&data->timer);
    311  1.1  riastrad 
    312  1.1  riastrad 	kfree(data);
    313  1.1  riastrad 	scheduler->sched_data = NULL;
    314  1.1  riastrad }
    315  1.1  riastrad 
    316  1.1  riastrad static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
    317  1.1  riastrad {
    318  1.1  riastrad 	struct vgpu_sched_data *data;
    319  1.1  riastrad 
    320  1.1  riastrad 	data = kzalloc(sizeof(*data), GFP_KERNEL);
    321  1.1  riastrad 	if (!data)
    322  1.1  riastrad 		return -ENOMEM;
    323  1.1  riastrad 
    324  1.1  riastrad 	data->sched_ctl.weight = vgpu->sched_ctl.weight;
    325  1.1  riastrad 	data->vgpu = vgpu;
    326  1.1  riastrad 	INIT_LIST_HEAD(&data->lru_list);
    327  1.1  riastrad 
    328  1.1  riastrad 	vgpu->sched_data = data;
    329  1.1  riastrad 
    330  1.1  riastrad 	return 0;
    331  1.1  riastrad }
    332  1.1  riastrad 
    333  1.1  riastrad static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
    334  1.1  riastrad {
    335  1.1  riastrad 	struct intel_gvt *gvt = vgpu->gvt;
    336  1.1  riastrad 	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
    337  1.1  riastrad 
    338  1.1  riastrad 	kfree(vgpu->sched_data);
    339  1.1  riastrad 	vgpu->sched_data = NULL;
    340  1.1  riastrad 
    341  1.1  riastrad 	/* this vgpu id has been removed */
    342  1.1  riastrad 	if (idr_is_empty(&gvt->vgpu_idr))
    343  1.1  riastrad 		hrtimer_cancel(&sched_data->timer);
    344  1.1  riastrad }
    345  1.1  riastrad 
    346  1.1  riastrad static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
    347  1.1  riastrad {
    348  1.1  riastrad 	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
    349  1.1  riastrad 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
    350  1.1  riastrad 	ktime_t now;
    351  1.1  riastrad 
    352  1.1  riastrad 	if (!list_empty(&vgpu_data->lru_list))
    353  1.1  riastrad 		return;
    354  1.1  riastrad 
    355  1.1  riastrad 	now = ktime_get();
    356  1.1  riastrad 	vgpu_data->pri_time = ktime_add(now,
    357  1.1  riastrad 					ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
    358  1.1  riastrad 	vgpu_data->pri_sched = true;
    359  1.1  riastrad 
    360  1.1  riastrad 	list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);
    361  1.1  riastrad 
    362  1.1  riastrad 	if (!hrtimer_active(&sched_data->timer))
    363  1.1  riastrad 		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
    364  1.1  riastrad 			sched_data->period), HRTIMER_MODE_ABS);
    365  1.1  riastrad 	vgpu_data->active = true;
    366  1.1  riastrad }
    367  1.1  riastrad 
    368  1.1  riastrad static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
    369  1.1  riastrad {
    370  1.1  riastrad 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
    371  1.1  riastrad 
    372  1.1  riastrad 	list_del_init(&vgpu_data->lru_list);
    373  1.1  riastrad 	vgpu_data->active = false;
    374  1.1  riastrad }
    375  1.1  riastrad 
/* Hook table binding the time-based scheduling policy into the core. */
static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};
    384  1.1  riastrad 
    385  1.1  riastrad int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
    386  1.1  riastrad {
    387  1.1  riastrad 	int ret;
    388  1.1  riastrad 
    389  1.1  riastrad 	mutex_lock(&gvt->sched_lock);
    390  1.1  riastrad 	gvt->scheduler.sched_ops = &tbs_schedule_ops;
    391  1.1  riastrad 	ret = gvt->scheduler.sched_ops->init(gvt);
    392  1.1  riastrad 	mutex_unlock(&gvt->sched_lock);
    393  1.1  riastrad 
    394  1.1  riastrad 	return ret;
    395  1.1  riastrad }
    396  1.1  riastrad 
/* Tear down the installed scheduling policy under the scheduler lock. */
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
	mutex_lock(&gvt->sched_lock);
	gvt->scheduler.sched_ops->clean(gvt);
	mutex_unlock(&gvt->sched_lock);
}
    403  1.1  riastrad 
/* For the per-vGPU scheduler policy there are two pieces of per-vGPU
 * data: sched_data and sched_ctl. We treat both as part of the global
 * scheduler, protected by gvt->sched_lock.
 * Callers should decide for themselves whether the vgpu_lock also needs
 * to be held outside.
 */
    410  1.1  riastrad 
    411  1.1  riastrad int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
    412  1.1  riastrad {
    413  1.1  riastrad 	int ret;
    414  1.1  riastrad 
    415  1.1  riastrad 	mutex_lock(&vgpu->gvt->sched_lock);
    416  1.1  riastrad 	ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
    417  1.1  riastrad 	mutex_unlock(&vgpu->gvt->sched_lock);
    418  1.1  riastrad 
    419  1.1  riastrad 	return ret;
    420  1.1  riastrad }
    421  1.1  riastrad 
/* Tear down per-vGPU scheduler state via the policy's clean_vgpu hook. */
void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->sched_lock);
	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
	mutex_unlock(&vgpu->gvt->sched_lock);
}
    428  1.1  riastrad 
    429  1.1  riastrad void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
    430  1.1  riastrad {
    431  1.1  riastrad 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
    432  1.1  riastrad 
    433  1.1  riastrad 	mutex_lock(&vgpu->gvt->sched_lock);
    434  1.1  riastrad 	if (!vgpu_data->active) {
    435  1.1  riastrad 		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
    436  1.1  riastrad 		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
    437  1.1  riastrad 	}
    438  1.1  riastrad 	mutex_unlock(&vgpu->gvt->sched_lock);
    439  1.1  riastrad }
    440  1.1  riastrad 
/* Request an event-triggered scheduling pass from the service thread. */
void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{
	mutex_lock(&gvt->sched_lock);
	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
	mutex_unlock(&gvt->sched_lock);
}
    447  1.1  riastrad 
/*
 * intel_vgpu_stop_schedule - remove a vGPU from scheduling.
 *
 * Pulls the vGPU out of the policy's run queue, clears it from the
 * scheduler's current/next slots, and releases its ownership of any
 * engine MMIO contexts it still holds.
 */
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;
	int ring_id;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

	/* NOTE(review): 'active' is read before sched_lock is taken --
	 * presumably benign, but confirm against concurrent start/stop
	 * callers.
	 */
	if (!vgpu_data->active)
		return;

	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

	mutex_lock(&vgpu->gvt->sched_lock);
	scheduler->sched_ops->stop_schedule(vgpu);

	if (scheduler->next_vgpu == vgpu)
		scheduler->next_vgpu = NULL;

	if (scheduler->current_vgpu == vgpu) {
		/* stop workload dispatching */
		scheduler->need_reschedule = true;
		scheduler->current_vgpu = NULL;
	}

	/* Hold a runtime-PM reference while touching engine MMIO state. */
	intel_runtime_pm_get(&dev_priv->runtime_pm);
	spin_lock_bh(&scheduler->mmio_context_lock);
	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
		if (scheduler->engine_owner[ring_id] == vgpu) {
			/* drop this vGPU's ownership of the engine */
			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
	}
	spin_unlock_bh(&scheduler->mmio_context_lock);
	intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
	mutex_unlock(&vgpu->gvt->sched_lock);
}
    485