/*	$NetBSD: i915_request.h,v 1.1.1.1 2021/12/18 20:15:26 riastradh Exp $	*/

/*
 * Copyright © 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/lockdep.h>

#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct i915_request;

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

#define RQ_TRACE(rq, fmt, ...) do {					\
	const struct i915_request *rq__ = (rq);				\
	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,	\
		     rq__->fence.context, rq__->fence.seqno,		\
		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
} while (0)
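
/*
 * Usage sketch (illustrative only): a call such as
 *
 *	RQ_TRACE(rq, "submitted\n");
 *
 * emits an ENGINE_TRACE() on rq->engine whose message is prefixed with the
 * request's fence id ("fence <context>:<seqno>") and the breadcrumb most
 * recently written by the HW ("current <seqno>"), per the expansion above.
 */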

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
	 *
	 * Using the scheduler, when a request is ready for execution it is put
	 * into the priority queue, and removed from that queue when transferred
	 * to the HW runlists. We want to track its membership within the
	 * priority queue so that we can easily check before rescheduling.
	 *
	 * See i915_request_in_priority_queue()
	 */
	I915_FENCE_FLAG_PQUEUE,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on the various signal_list.
	 */
	I915_FENCE_FLAG_SIGNAL,

	/*
	 * I915_FENCE_FLAG_HOLD - this request is currently on hold
	 *
	 * This request has been suspended, pending an ongoing investigation.
	 */
	I915_FENCE_FLAG_HOLD,

	/*
	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
	 *
	 * The execution of some requests should not be interrupted. This is
	 * a sensitive operation as it makes the request super important,
	 * blocking other higher priority work. Abuse of this flag will
	 * lead to quality of service issues.
	 */
	I915_FENCE_FLAG_NOPREEMPT,

	/*
	 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
	 *
	 * A high priority sentinel request may be submitted to clear the
	 * submission queue. As it will be the only request in-flight, upon
	 * execution all other active requests will have been preempted and
	 * unsubmitted. This preemptive pulse is used to re-evaluate the
	 * in-flight requests, particularly in cases where an active context
	 * is banned and those active requests need to be cancelled.
	 */
	I915_FENCE_FLAG_SENTINEL,

	/*
	 * I915_FENCE_FLAG_BOOST - upclock the GPU for this request
	 *
	 * Some requests are more important than others! In particular, a
	 * request that the user is waiting on is typically required for
	 * interactive latency, which we want to minimise by upclocking
	 * the GPU. Here we track such boost requests on a per-request basis.
	 */
	I915_FENCE_FLAG_BOOST,
};

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** On which ring this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request.
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_engine_cs *engine;
	struct intel_context *context;
	struct intel_ring *ring;
	struct intel_timeline __rcu *timeline;
	struct list_head signal_link;

	/*
	 * The rcu epoch of when this request was allocated. Used to judiciously
	 * apply backpressure on future allocations to ensure that under
	 * mempressure there are sufficient RCU ticks for us to reclaim our
	 * RCU protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	union {
		wait_queue_entry_t submitq;
		struct i915_sw_dma_fence_cb dmaq;
		struct i915_request_duration_cb {
			struct dma_fence_cb cb;
			ktime_t emitted;
		} duration;
	};
	struct list_head execute_cb;
	struct i915_sw_fence semaphore;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;

	/*
	 * If we need to access the timeline's seqno for this request in
	 * another request, we need to keep a read reference to this associated
	 * cacheline, so that we do not free and recycle it before the foreign
	 * observers have completed. Hence, we keep a pointer to the cacheline
	 * inside the timeline's HWSP vma, but it is only valid while this
	 * request has not completed and is guarded by the timeline mutex.
	 */
	struct intel_timeline_cacheline __rcu *hwsp_cacheline;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/**
	 * Batch buffer related to this request, if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;
	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** timeline->request entry for this request */
	struct list_head link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_link;

	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}
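
/*
 * Illustrative use: before downcasting an arbitrary dma_fence with
 * to_request() (declared below), check its ops so a foreign fence is
 * never misinterpreted as an i915 request.
 *
 *	if (dma_fence_is_i915(fence))
 *		rq = to_request(fence);
 */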

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr);

bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}
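
/*
 * A note on the offset-0 assertion above: with the dma_fence as the
 * first member, container_of() reduces to a pointer cast, so
 * to_request(NULL) yields NULL and fence/request pointers may be
 * NULL-checked interchangeably. Illustrative sketch (lookup_fence()
 * is a hypothetical helper, not part of this header):
 *
 *	struct dma_fence *fence = lookup_fence(); // may return NULL
 *	struct i915_request *rq = to_request(fence);
 *	if (!rq)
 *		return; // no fence, no request
 */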

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}
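
/*
 * Sketch of the lockless lookup that the struct documentation warns
 * about: a request pointer loaded under rcu_read_lock() may be
 * recycled from the slab freelist at any time, so acquire a reference
 * with i915_request_get_rcu() (which fails on a zero refcount) and
 * then confirm the slot still points at the same request. This is an
 * illustrative pattern only; "slot" is a hypothetical RCU-protected
 * pointer, not part of this header.
 *
 *	rcu_read_lock();
 *	rq = rcu_dereference(*slot);
 *	if (rq) {
 *		rq = i915_request_get_rcu(rq); // NULL if being freed
 *		if (rq && rq != rcu_access_pointer(*slot)) {
 *			i915_request_put(rq); // reallocated; retry
 *			rq = NULL;
 *		}
 *	}
 *	rcu_read_unlock();
 */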

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}
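
/*
 * Illustrative lifetime pairing: a caller that needs the request to
 * survive beyond its current (locked) scope takes its own reference
 * and releases it when done.
 *
 *	rq = i915_request_get(rq);	// +1, rq cannot be freed under us
 *	... use rq outside the lock ...
 *	i915_request_put(rq);		// drop our reference
 */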

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);
int i915_request_await_execution(struct i915_request *rq,
				 struct dma_fence *fence,
				 void (*hook)(struct i915_request *rq,
					      struct dma_fence *signal));

void i915_request_add(struct i915_request *rq);

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void i915_request_skip(struct i915_request *request, int error);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

/* Note: part of the intel_breadcrumbs family */
bool i915_request_enable_breadcrumb(struct i915_request *request);
void i915_request_cancel_breadcrumb(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */
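
/*
 * Illustrative sketch of a bounded wait: sleep interruptibly for up
 * to one second for the request to complete. The timeout is in
 * jiffies; a negative return (e.g. -EINTR on a pending signal)
 * indicates failure, otherwise the remaining time is returned. Exact
 * return-value conventions are those of i915_request_wait() itself.
 *
 *	long ret = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, HZ);
 *	if (ret < 0)
 *		return ret; // interrupted or timed out
 */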

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

/**
 * Returns true if seq1 is equal to or later than seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
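
/*
 * The signed comparison makes this robust against u32 wraparound:
 * seqnos are ordered by distance, not magnitude. Worked examples:
 *
 *	i915_seqno_passed(5, 3);           // true:  (s32)(5 - 3) == 2
 *	i915_seqno_passed(3, 5);           // false: (s32)(3 - 5) == -2
 *	i915_seqno_passed(2, 0xfffffffe);  // true:  (s32)(2 - 0xfffffffe) == 4
 *
 * Ordering holds provided two live seqnos are never more than 2^31
 * apart.
 */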

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	return READ_ONCE(*rq->hwsp_seqno);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page reports that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it begins executing the user payload. A request can therefore be active
 * on the HW and not yet started as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	/* Remember: started but may have since been preempted! */
	return __i915_request_has_started(rq);
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and it is not
 * busywaiting). Note that it may no longer be running by the time the
 * function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	if (!i915_request_is_active(rq))
		return false;

	return __i915_request_has_started(rq);
}

/**
 * i915_request_is_ready - check if the request is ready for execution
 * @rq: the request
 *
 * Upon construction, the request is instructed to wait upon various
 * signals before it is ready to be executed by the HW. That is, we do
 * not want to start execution and read data before it is written. In practice,
 * this is controlled with a mixture of interrupts and semaphores. Once
 * the submit fence is completed, the backend scheduler will place the
 * request into its queue and from there submit it for execution. So we
 * can detect when a request is eligible for execution (and is under control
 * of the scheduler) by querying where it is in any of the scheduler's lists.
 *
 * Returns true if the request is ready for execution (it may be inflight),
 * false otherwise.
 */
static inline bool i915_request_is_ready(const struct i915_request *rq)
{
	return !list_empty(&rq->sched.link);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
}
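
/*
 * Illustrative sketch: completion means the HW status page breadcrumb
 * has caught up with the request's own seqno (or the fence was
 * signaled explicitly), so a check such as
 *
 *	if (i915_request_completed(rq))
 *		free_resources(rq); // hypothetical cleanup helper
 *
 * is equivalent to testing the fence flag and then
 * i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno).
 */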

static inline void i915_request_mark_complete(struct i915_request *rq)
{
	rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
}

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
	/* Preemption should only be disabled very rarely */
	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}

static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}

static inline bool i915_request_on_hold(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
}

static inline void i915_request_set_hold(struct i915_request *rq)
{
	set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline void i915_request_clear_hold(struct i915_request *rq)
{
	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline struct intel_timeline *
i915_request_timeline(struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
}

static inline struct i915_gem_context *
i915_request_gem_context(struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->context->gem_context, true);
}

static inline struct intel_timeline *
i915_request_active_timeline(struct i915_request *rq)
{
	/*
	 * When in use during submission, we are protected by a guarantee that
	 * the context/timeline is pinned and must remain pinned until after
	 * this submission.
	 */
	return rcu_dereference_protected(rq->timeline,
					 lockdep_is_held(&rq->engine->active.lock));
}

#endif /* I915_REQUEST_H */