/*	$NetBSD: intel_gt_requests.c,v 1.3 2021/12/19 11:45:01 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: intel_gt_requests.c,v 1.3 2021/12/19 11:45:01 riastradh Exp $");

#include <linux/sched/signal.h>
#include <linux/workqueue.h>

#include "i915_drv.h" /* for_each_engine() */
#include "i915_request.h"
#include "intel_engine_heartbeat.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_timeline.h"

#include <linux/nbsd-namespace.h>

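/*
 * Retire completed requests on @tl in submission order; returns true only
 * if every request was retired and nothing new has been submitted since.
 */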
static bool retire_requests(struct intel_timeline *tl)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (!i915_request_retire(rq))
			return false;

	/* And check nothing new was submitted */
	return !i915_active_fence_isset(&tl->last_request);
}

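/*
 * Flush the submission tasklets and the retire/wakeref workers on every
 * engine; returns true if any such work was still pending, i.e. the GT has
 * not yet gone idle.
 */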
static bool flush_submission(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool active = false;

	if (!intel_gt_pm_is_awake(gt))
		return false;

	for_each_engine(engine, gt, id) {
		intel_engine_flush_submission(engine);
		active |= flush_work(&engine->retire_work);
		active |= flush_work(&engine->wakeref.work);
	}

	return active;
}

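/*
 * Worker that drains the engine's retirement list built by add_retire(),
 * retiring each queued timeline and dropping the reference taken when it
 * was queued.
 */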
static void engine_retire(struct work_struct *work)
{
	struct intel_engine_cs *engine =
		container_of(work, typeof(*engine), retire_work);
	struct intel_timeline *tl = xchg(&engine->retire, NULL);

	do {
		struct intel_timeline *next = xchg(&tl->retire, NULL);

		/*
		 * Our goal here is to retire _idle_ timelines as soon as
		 * possible (as they are idle, we do not expect userspace
		 * to be cleaning up anytime soon).
		 *
		 * If the timeline is currently locked, either it is being
		 * retired elsewhere or about to be!
		 */
		if (mutex_trylock(&tl->mutex)) {
			retire_requests(tl);
			mutex_unlock(&tl->mutex);
		}
		intel_timeline_put(tl);

		GEM_BUG_ON(!next);
		tl = ptr_mask_bits(next, 1);
	} while (tl);
}

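/*
 * Queue @tl onto the engine's lock-free retirement list; returns true if
 * the list was previously empty and the retire worker needs to be kicked.
 */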
static bool add_retire(struct intel_engine_cs *engine,
		       struct intel_timeline *tl)
{
#define STUB ((struct intel_timeline *)1)
	struct intel_timeline *first;

	/*
	 * We open-code a llist here to include the additional tag [BIT(0)]
	 * so that we know when the timeline is already on a
	 * retirement queue: either this engine or another.
	 */

	if (cmpxchg(&tl->retire, NULL, STUB)) /* already queued */
		return false;

	intel_timeline_get(tl);
	first = READ_ONCE(engine->retire);
	do
		tl->retire = ptr_pack_bits(first, 1, 1);
	while (!try_cmpxchg(&engine->retire, &first, tl));

	return !first;
}

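/* Hand a timeline to the engine's retire worker, scheduling it if needed. */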
void intel_engine_add_retire(struct intel_engine_cs *engine,
			     struct intel_timeline *tl)
{
	/* We don't deal well with the engine disappearing beneath us */
	GEM_BUG_ON(intel_engine_is_virtual(engine));

	if (add_retire(engine, tl))
		schedule_work(&engine->retire_work);
}

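/* Set up the per-engine retire worker. */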
void intel_engine_init_retire(struct intel_engine_cs *engine)
{
	INIT_WORK(&engine->retire_work, engine_retire);
}

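/* Flush any queued retirement work before the engine is torn down. */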
void intel_engine_fini_retire(struct intel_engine_cs *engine)
{
	flush_work(&engine->retire_work);
	GEM_BUG_ON(engine->retire);
}

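/*
 * No-op kref release: lets kref_put() report the final reference without
 * freeing, so the actual free can be deferred until the spinlock is dropped.
 */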
static void
null_release(struct kref *kref)
{
}

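/*
 * Walk every active timeline, optionally waiting up to @timeout jiffies for
 * its last request to complete (a negative @timeout means wait
 * uninterruptibly), and retire whatever has finished.  Returns the remaining
 * timeout if any timeline is still busy, or 0 once everything is retired.
 */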
long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl, *tn;
	unsigned long active_count = 0;
	bool interruptible;
	LIST_HEAD(free);

	interruptible = true;
	if (unlikely(timeout < 0))
		timeout = -timeout, interruptible = false;

	flush_submission(gt); /* kick the ksoftirqd tasklets */
	spin_lock(&timelines->lock);
	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
		if (!mutex_trylock(&tl->mutex)) {
			active_count++; /* report busy to caller, try again? */
			continue;
		}

		intel_timeline_get(tl);
		GEM_BUG_ON(!atomic_read(&tl->active_count));
		atomic_inc(&tl->active_count); /* pin the list element */
		spin_unlock(&timelines->lock);

		if (timeout > 0) {
			struct dma_fence *fence;

			fence = i915_active_fence_get(&tl->last_request);
			if (fence) {
				timeout = dma_fence_wait_timeout(fence,
								 interruptible,
								 timeout);
				dma_fence_put(fence);
			}
		}

		if (!retire_requests(tl) || flush_submission(gt))
			active_count++;

		spin_lock(&timelines->lock);

		/* Resume iteration after dropping lock */
		list_safe_reset_next(tl, tn, link);
		if (atomic_dec_and_test(&tl->active_count))
			list_del(&tl->link);

		mutex_unlock(&tl->mutex);

		/* Defer the final release to after the spinlock */
		if (kref_put(&tl->kref, null_release)) {
			GEM_BUG_ON(atomic_read(&tl->active_count));
			list_add(&tl->link, &free);
		}
	}
	spin_unlock(&timelines->lock);

	list_for_each_entry_safe(tl, tn, &free, link)
		__intel_timeline_free(&tl->kref);

	return active_count ? timeout : 0;
}

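/*
 * Retire requests until the GT is idle, the timeout expires or a signal is
 * pending; returns 0 once idle or the timeout has expired, or a negative
 * error code (e.g. -EINTR on a pending signal).
 */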
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
	/* If the device is asleep, we have no requests outstanding */
	if (!intel_gt_pm_is_awake(gt))
		return 0;

	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return timeout;
}

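/*
 * Periodic worker: re-arms itself roughly once a second and retires any
 * requests that have completed in the meantime.
 */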
static void retire_work_handler(struct work_struct *work)
{
	struct intel_gt *gt =
		container_of(work, typeof(*gt), requests.retire_work.work);

	schedule_delayed_work(&gt->requests.retire_work,
			      round_jiffies_up_relative(HZ));
	intel_gt_retire_requests(gt);
}

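/* Set up the GT-wide periodic retirement worker. */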
void intel_gt_init_requests(struct intel_gt *gt)
{
	INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
}

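/* Stop the periodic retirement worker when the GT is parked. */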
void intel_gt_park_requests(struct intel_gt *gt)
{
	cancel_delayed_work(&gt->requests.retire_work);
}

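/* Restart the periodic retirement worker when the GT is unparked. */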
void intel_gt_unpark_requests(struct intel_gt *gt)
{
	schedule_delayed_work(&gt->requests.retire_work,
			      round_jiffies_up_relative(HZ));
}

void intel_gt_fini_requests(struct intel_gt *gt)
{
	/* Wait until the work is marked as finished before unloading! */
	cancel_delayed_work_sync(&gt->requests.retire_work);
}