/*	$NetBSD: i915_request.h,v 1.4 2021/12/19 11:36:17 riastradh Exp $	*/

/*
 * Copyright 2008-2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef I915_REQUEST_H
#define I915_REQUEST_H

#include <linux/dma-fence.h>
#include <linux/lockdep.h>

#include "gem/i915_gem_context_types.h"
#include "gt/intel_context_types.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_timeline_types.h"

#include "i915_gem.h"
#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"

#include <uapi/drm/i915_drm.h>

struct drm_file;
struct drm_i915_gem_object;
struct i915_request;

struct i915_capture_list {
	struct i915_capture_list *next;
	struct i915_vma *vma;
};

#define RQ_TRACE(rq, fmt, ...) do {					\
	const struct i915_request *rq__ = (rq);				\
	ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt,	\
		     rq__->fence.context, rq__->fence.seqno,		\
		     hwsp_seqno(rq__), ##__VA_ARGS__);			\
} while (0)
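
/*
 * Illustrative use of RQ_TRACE (not part of the original header): the
 * macro forwards to ENGINE_TRACE with the request's fence context, fence
 * seqno and the breadcrumb currently visible in the HW status page, so a
 * caller only supplies the trailing message, e.g.
 *
 *	RQ_TRACE(rq, "submitted\n");
 */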

enum {
	/*
	 * I915_FENCE_FLAG_ACTIVE - this request is currently submitted to HW.
	 *
	 * Set by __i915_request_submit() on handing over to HW, and cleared
	 * by __i915_request_unsubmit() if we preempt this request.
	 *
	 * Finally cleared for consistency on retiring the request, when
	 * we know the HW is no longer running this request.
	 *
	 * See i915_request_is_active()
	 */
	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,

	/*
	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
	 *
	 * Using the scheduler, when a request is ready for execution it is
	 * put into the priority queue, and removed from that queue when
	 * transferred to the HW runlists. We want to track its membership
	 * within the priority queue so that we can easily check before
	 * rescheduling.
	 *
	 * See i915_request_in_priority_queue()
	 */
	I915_FENCE_FLAG_PQUEUE,

	/*
	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
	 *
	 * Internal bookkeeping used by the breadcrumb code to track when
	 * a request is on the various signal_list.
	 */
	I915_FENCE_FLAG_SIGNAL,

	/*
	 * I915_FENCE_FLAG_HOLD - this request is currently on hold
	 *
	 * This request has been suspended, pending an ongoing investigation.
	 */
	I915_FENCE_FLAG_HOLD,

	/*
	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
	 *
	 * The execution of some requests should not be interrupted. This is
	 * a sensitive operation as it makes the request super important,
	 * blocking other higher priority work. Abuse of this flag will
	 * lead to quality of service issues.
	 */
	I915_FENCE_FLAG_NOPREEMPT,

	/*
	 * I915_FENCE_FLAG_SENTINEL - this request should be last in the queue
	 *
	 * A high priority sentinel request may be submitted to clear the
	 * submission queue. As it will be the only request in-flight, upon
	 * execution all other active requests will have been preempted and
	 * unsubmitted. This preemptive pulse is used to re-evaluate the
	 * in-flight requests, particularly in cases where an active context
	 * is banned and those active requests need to be cancelled.
	 */
	I915_FENCE_FLAG_SENTINEL,

	/*
	 * I915_FENCE_FLAG_BOOST - upclock the gpu for this request
	 *
	 * Some requests are more important than others! In particular, a
	 * request that the user is waiting on is typically required for
	 * interactive latency, which we want to minimise by upclocking the
	 * GPU. Here we track such boost requests on a per-request basis.
	 */
	I915_FENCE_FLAG_BOOST,
};
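
/*
 * Illustrative note (not part of the original header): these flags live
 * in rq->fence.flags above the generic DMA_FENCE_FLAG_USER_BITS and are
 * queried with the usual bitops; the helpers further below boil down to
 * tests such as
 *
 *	test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags)
 *
 * to check, for example, whether preemption has been disabled for a
 * request.
 */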

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * When modifying this structure be very aware that we perform a lockless
 * RCU lookup of it that may race against reallocation of the struct
 * from the slab freelist. We intentionally do not zero the structure on
 * allocation so that the lookup can use the dangling pointers (and is
 * cognisant that those pointers may be wrong). Instead, everything that
 * needs to be initialised must be done so explicitly.
 *
 * The requests are reference counted.
 */
struct i915_request {
	struct dma_fence fence;
	spinlock_t lock;

	/** On which ring this request was generated */
	struct drm_i915_private *i915;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_engine_cs *engine;
	struct intel_context *context;
	struct intel_ring *ring;
	struct intel_timeline __rcu *timeline;
	struct list_head signal_link;

	/*
	 * The rcu epoch of when this request was allocated. Used to judiciously
	 * apply backpressure on future allocations to ensure that under
	 * mempressure there are sufficient RCU ticks for us to reclaim our
	 * RCU protected slabs.
	 */
	unsigned long rcustate;

	/*
	 * We pin the timeline->mutex while constructing the request to
	 * ensure that no caller accidentally drops it during construction.
	 * The timeline->mutex must be held to ensure that only this caller
	 * can use the ring and manipulate the associated timeline during
	 * construction.
	 */
	struct pin_cookie cookie;

	/*
	 * Fences for the various phases in the request's lifetime.
	 *
	 * The submit fence is used to await upon all of the request's
	 * dependencies. When it is signaled, the request is ready to run.
	 * It is used by the driver to then queue the request for execution.
	 */
	struct i915_sw_fence submit;
	union {
#ifdef __NetBSD__		/* XXX */
		struct i915_sw_fence_waiter submitq;
#else
		wait_queue_entry_t submitq;
#endif
		struct i915_sw_dma_fence_cb dmaq;
		struct i915_request_duration_cb {
			struct dma_fence_cb cb;
			ktime_t emitted;
		} duration;
	};
	struct list_head execute_cb;
	struct i915_sw_fence semaphore;

	/*
	 * A list of everyone we wait upon, and everyone who waits upon us.
	 * Even though we will not be submitted to the hardware before the
	 * submit fence is signaled (it waits for all external events as well
	 * as our own requests), the scheduler still needs to know the
	 * dependency tree for the lifetime of the request (from execbuf
	 * to retirement), i.e. bidirectional dependency information for the
	 * request not tied to individual fences.
	 */
	struct i915_sched_node sched;
	struct i915_dependency dep;
	intel_engine_mask_t execution_mask;

	/*
	 * A convenience pointer to the current breadcrumb value stored in
	 * the HW status page (or our timeline's local equivalent). The full
	 * path would be rq->hw_context->ring->timeline->hwsp_seqno.
	 */
	const u32 *hwsp_seqno;

	/*
	 * If we need to access the timeline's seqno for this request in
	 * another request, we need to keep a read reference to this associated
	 * cacheline, so that we do not free and recycle it before the foreign
	 * observers have completed. Hence, we keep a pointer to the cacheline
	 * inside the timeline's HWSP vma, but it is only valid while this
	 * request has not completed and guarded by the timeline mutex.
	 */
	struct intel_timeline_cacheline __rcu *hwsp_cacheline;

	/** Position in the ring of the start of the request */
	u32 head;

	/** Position in the ring of the start of the user packets */
	u32 infix;

	/**
	 * Position in the ring of the start of the postfix.
	 * This is required to calculate the maximum available ring space
	 * without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ring of the end of the whole request */
	u32 tail;

	/** Position in the ring of the end of any workarounds after the tail */
	u32 wa_tail;

	/** Preallocated space in the ring for emitting the request */
	u32 reserved_space;

	/**
	 * Batch buffer related to this request if any (used for
	 * error state dump only).
	 */
	struct i915_vma *batch;

	/**
	 * Additional buffers requested by userspace to be captured upon
	 * a GPU hang. The vma/obj on this list are protected by their
	 * active reference - all objects on this list must also be
	 * on the active_list (of their final request).
	 */
	struct i915_capture_list *capture_list;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** timeline->request entry for this request */
	struct list_head link;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_link;

	I915_SELFTEST_DECLARE(struct {
		struct list_head link;
		unsigned long delay;
	} mock;)
};
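
/*
 * Illustrative sketch (not part of the original header): the ring offsets
 * in struct i915_request mark points emitted in order - head (start of
 * the request), infix (start of the user packets), postfix (which must
 * not be overwritten when computing available ring space), tail (end of
 * the request) and wa_tail (end of any trailing workarounds).
 */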

#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

extern const struct dma_fence_ops i915_fence_ops;

static inline bool dma_fence_is_i915(const struct dma_fence *fence)
{
	return fence->ops == &i915_fence_ops;
}

struct i915_request * __must_check
__i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);

struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
			  const struct i915_sched_attr *attr);

bool i915_request_retire(struct i915_request *rq);
void i915_request_retire_upto(struct i915_request *rq);

static inline struct i915_request *
to_request(struct dma_fence *fence)
{
	/* We assume that NULL fence/request are interoperable */
	BUILD_BUG_ON(offsetof(struct i915_request, fence) != 0);
	GEM_BUG_ON(fence && !dma_fence_is_i915(fence));
	return container_of(fence, struct i915_request, fence);
}

static inline struct i915_request *
i915_request_get(struct i915_request *rq)
{
	return to_request(dma_fence_get(&rq->fence));
}

static inline struct i915_request *
i915_request_get_rcu(struct i915_request *rq)
{
	return to_request(dma_fence_get_rcu(&rq->fence));
}

static inline void
i915_request_put(struct i915_request *rq)
{
	dma_fence_put(&rq->fence);
}

int i915_request_await_object(struct i915_request *to,
			      struct drm_i915_gem_object *obj,
			      bool write);
int i915_request_await_dma_fence(struct i915_request *rq,
				 struct dma_fence *fence);
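
/*
 * Minimal usage sketch (not part of the original header), assuming the
 * caller already holds a valid pointer: take a reference before sleeping
 * on the request and drop it afterwards. i915_request_wait() and its
 * flags are declared further below.
 *
 *	rq = i915_request_get(rq);
 *	timeout = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
 *				    MAX_SCHEDULE_TIMEOUT);
 *	i915_request_put(rq);
 */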
int i915_request_await_execution(struct i915_request *rq,
				 struct dma_fence *fence,
				 void (*hook)(struct i915_request *rq,
					      struct dma_fence *signal));

void i915_request_add(struct i915_request *rq);

bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);

void i915_request_skip(struct i915_request *request, int error);

void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);

/* Note: part of the intel_breadcrumbs family */
bool i915_request_enable_breadcrumb(struct i915_request *request);
void i915_request_cancel_breadcrumb(struct i915_request *request);

long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
	__attribute__((nonnull(1)));
#define I915_WAIT_INTERRUPTIBLE	BIT(0)
#define I915_WAIT_PRIORITY	BIT(1) /* small priority bump for the request */
#define I915_WAIT_ALL		BIT(2) /* used by i915_gem_object_wait() */

static inline bool i915_request_signaled(const struct i915_request *rq)
{
	/* The request may live longer than its HWSP, so check flags first! */
	return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags);
}

static inline bool i915_request_is_active(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}

static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}

/**
 * Returns true if seq1 is later than, or the same as, seq2.
 */
static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
{
	return (s32)(seq1 - seq2) >= 0;
}
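
/*
 * Example (not part of the original header): because the difference is
 * evaluated as a signed 32-bit value, the comparison remains correct
 * across seqno wraparound, e.g.
 *
 *	i915_seqno_passed(1, 0xfffffffe)  -> true  (1 is 3 steps ahead)
 *	i915_seqno_passed(0xfffffffe, 1)  -> false
 */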

static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
	return READ_ONCE(*rq->hwsp_seqno);
}

/**
 * hwsp_seqno - the current breadcrumb value in the HW status page
 * @rq: the request, to chase the relevant HW status page
 *
 * The emphasis in naming here is that hwsp_seqno() is not a property of the
 * request, but an indication of the current HW state (associated with this
 * request). Its value will change as the GPU executes more requests.
 *
 * Returns the current breadcrumb value in the associated HW status page (or
 * the local timeline's equivalent) for this request. The request itself
 * has the associated breadcrumb value of rq->fence.seqno; when the HW
 * status page has that breadcrumb or later, this request is complete.
 */
static inline u32 hwsp_seqno(const struct i915_request *rq)
{
	u32 seqno;

	rcu_read_lock(); /* the HWSP may be freed at runtime */
	seqno = __hwsp_seqno(rq);
	rcu_read_unlock();

	return seqno;
}

static inline bool __i915_request_has_started(const struct i915_request *rq)
{
	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1);
}

/**
 * i915_request_started - check if the request has begun being executed
 * @rq: the request
 *
 * If the timeline is not using initial breadcrumbs, a request is
 * considered started if the previous request on its timeline (i.e.
 * context) has been signaled.
 *
 * If the timeline is using semaphores, it will also be emitting an
 * "initial breadcrumb" after the semaphores are complete and just before
 * it began executing the user payload. A request can therefore be active
 * on the HW and not yet started as it is still busywaiting on its
 * dependencies (via HW semaphores).
 *
 * If the request has started, its dependencies will have been signaled
 * (either by fences or by semaphores) and it will have begun processing
 * the user payload.
 *
 * However, even if a request has started, it may have been preempted and
 * so no longer active, or it may have already completed.
 *
 * See also i915_request_is_active().
 *
 * Returns true if the request has begun executing the user payload, or
 * has completed.
 */
static inline bool i915_request_started(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	/* Remember: started but may have since been preempted! */
	return __i915_request_has_started(rq);
}

/**
 * i915_request_is_running - check if the request may actually be executing
 * @rq: the request
 *
 * Returns true if the request is currently submitted to hardware and has
 * passed its start point (i.e. the context is set up and it is not
 * busywaiting). Note that it may no longer be running by the time the
 * function returns!
 */
static inline bool i915_request_is_running(const struct i915_request *rq)
{
	if (!i915_request_is_active(rq))
		return false;

	return __i915_request_has_started(rq);
}
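
/*
 * Rough lifecycle summary (not part of the original header, and only a
 * sketch of the common unpreempted path): a request becomes ready once
 * its submit fence has been signalled and it is handed to the scheduler
 * (i915_request_is_ready(), below), is then submitted to HW
 * (i915_request_is_active()), starts executing the user payload
 * (i915_request_is_running()), writes its breadcrumb
 * (i915_request_completed(), below) and is finally signaled
 * (i915_request_signaled()).
 */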

/**
 * i915_request_is_ready - check if the request is ready for execution
 * @rq: the request
 *
 * Upon construction, the request is instructed to wait upon various
 * signals before it is ready to be executed by the HW. That is, we do
 * not want to start execution and read data before it is written. In practice,
 * this is controlled with a mixture of interrupts and semaphores. Once
 * the submit fence is completed, the backend scheduler will place the
 * request into its queue and from there submit it for execution. So we
 * can detect when a request is eligible for execution (and is under control
 * of the scheduler) by querying where it is in any of the scheduler's lists.
 *
 * Returns true if the request is ready for execution (it may be inflight),
 * false otherwise.
 */
static inline bool i915_request_is_ready(const struct i915_request *rq)
{
	return !list_empty(&rq->sched.link);
}

static inline bool i915_request_completed(const struct i915_request *rq)
{
	if (i915_request_signaled(rq))
		return true;

	return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno);
}

static inline void i915_request_mark_complete(struct i915_request *rq)
{
	rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
}

static inline bool i915_request_has_waitboost(const struct i915_request *rq)
{
	return test_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags);
}

static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
{
	/* Preemption should only be disabled very rarely */
	return unlikely(test_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags));
}

static inline bool i915_request_has_sentinel(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}

static inline bool i915_request_on_hold(const struct i915_request *rq)
{
	return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
}

static inline void i915_request_set_hold(struct i915_request *rq)
{
	set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline void i915_request_clear_hold(struct i915_request *rq)
{
	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}

static inline struct intel_timeline *
i915_request_timeline(struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->timeline,
		lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex));
}
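
/*
 * Illustrative note (not part of the original header):
 * i915_request_mark_complete() points hwsp_seqno at the request's own
 * fence.seqno, so even after the HWSP is released i915_request_completed()
 * compares that seqno against itself and keeps reporting completion.
 */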

static inline struct i915_gem_context *
i915_request_gem_context(struct i915_request *rq)
{
	/* Valid only while the request is being constructed (or retired). */
	return rcu_dereference_protected(rq->context->gem_context, true);
}

static inline struct intel_timeline *
i915_request_active_timeline(struct i915_request *rq)
{
	/*
	 * When in use during submission, we are protected by a guarantee that
	 * the context/timeline is pinned and must remain pinned until after
	 * this submission.
	 */
	return rcu_dereference_protected(rq->timeline,
		lockdep_is_held(&rq->engine->active.lock));
}

#endif /* I915_REQUEST_H */