/* $NetBSD: i915_gem_wait.c,v 1.3 2021/12/19 01:34:08 riastradh Exp $ */

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2016 Intel Corporation
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_gem_wait.c,v 1.3 2021/12/19 01:34:08 riastradh Exp $");

#include <linux/dma-fence-array.h>
#include <linux/jiffies.h>

#include "gt/intel_engine.h"

#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"

/*
 * Wait for a single fence to signal, for at most @timeout jiffies.
 *
 * Returns the remaining timeout (>= 0) on success, or a negative error
 * code on failure.  An already-signaled fence is free and returns
 * @timeout unchanged.
 */
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout)
{
	/*
	 * i915_request_wait() is handed our I915_WAIT_* flags directly,
	 * so bit 0 must mean "interruptible" in both flag namespaces.
	 */
	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

	/* Fast path: nothing left to wait for. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	/* Native i915 fences understand the full flag set. */
	if (dma_fence_is_i915(fence))
		return i915_request_wait(to_request(fence), flags, timeout);

	/* Foreign fence: only interruptibility can be conveyed. */
	return dma_fence_wait_timeout(fence,
				      flags & I915_WAIT_INTERRUPTIBLE,
				      timeout);
}

/*
 * Wait upon all (I915_WAIT_ALL) or just the exclusive fence tracked by
 * the reservation object @resv.
 *
 * Returns the remaining timeout (>= 0) on success, or a negative error
 * code if a wait failed or timed out.  On a fully successful wait we
 * opportunistically clear the signaled fence array (see below) to
 * release the floating references early.
 */
static long
i915_gem_object_wait_reservation(struct dma_resv *resv,
				 unsigned int flags,
				 long timeout)
{
	struct dma_fence *excl;
	bool prune_fences = false;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		/*
		 * Snapshot the exclusive fence and all shared fences;
		 * on success we own a reference to each.
		 */
		ret = dma_resv_get_fences_rcu(resv,
					      &excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout);
			if (timeout < 0)
				break;

			dma_fence_put(shared[i]);
		}

		/*
		 * On early break above, shared[i..count-1] (including the
		 * fence that failed) still hold references; drop them.
		 */
		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);

		/*
		 * If both shared fences and an exclusive fence exist,
		 * then by construction the shared fences must be later
		 * than the exclusive fence. If we successfully wait for
		 * all the shared fences, we know that the exclusive fence
		 * must all be signaled. If all the shared fences are
		 * signaled, we can prune the array and recover the
		 * floating references on the fences/requests.
		 */
		prune_fences = count && timeout >= 0;
	} else {
		excl = dma_resv_get_excl_rcu(resv);
	}

	/* Skip the exclusive wait if a shared wait already failed. */
	if (excl && timeout >= 0)
		timeout = i915_gem_object_wait_fence(excl, flags, timeout);

	/* NOTE(review): excl may be NULL here; presumably dma_fence_put()
	 * tolerates NULL, as in upstream Linux — confirm for this port. */
	dma_fence_put(excl);

	/*
	 * Opportunistically prune the fences iff we know they have *all* been
	 * signaled.
	 */
	if (prune_fences && dma_resv_trylock(resv)) {
		/* Re-check under the lock: only prune if still all-signaled. */
		if (dma_resv_test_signaled_rcu(resv, true))
			dma_resv_add_excl_fence(resv, NULL);
		dma_resv_unlock(resv);
	}

	return timeout;
}

/*
 * Apply the scheduling attributes @attr to the request backing a single
 * i915 fence.  Signaled or foreign fences are ignored.
 */
static void __fence_set_priority(struct dma_fence *fence,
				 const struct i915_sched_attr *attr)
{
	struct i915_request *rq;
	struct intel_engine_cs *engine;
#ifdef __NetBSD__
	int s;			/* saved interrupt priority level */
#endif

	/* Only unsignaled, native i915 fences have a request to boost. */
	if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;

	/*
	 * NetBSD has no local_bh_disable(); raising to splsoftserial()
	 * stands in for blocking softirq (tasklet) processing here.
	 */
#ifdef __NetBSD__
	s = splsoftserial();
#else
	local_bh_disable();
#endif
	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
	/* engine->schedule may be NULL when the backend has no scheduler. */
	if (engine->schedule)
		engine->schedule(rq, attr);
	rcu_read_unlock();
#ifdef __NetBSD__
	splx(s);
#else
	local_bh_enable(); /* kick the tasklets if queues were reprioritised */
#endif
}

/*
 * As __fence_set_priority(), but unwraps a dma_fence_array and applies
 * @attr to each of its constituent fences (one level only).
 */
static void fence_set_priority(struct dma_fence *fence,
			       const struct i915_sched_attr *attr)
{
	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			__fence_set_priority(array->fences[i], attr);
	} else {
		__fence_set_priority(fence, attr);
	}
}

/*
 * Raise the scheduling priority of every request @obj is currently
 * waiting upon: all shared fences when I915_WAIT_ALL is set, otherwise
 * just the exclusive fence.  Always returns 0 unless the fence snapshot
 * itself fails.
 */
int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
			      const struct i915_sched_attr *attr)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = dma_resv_get_fences_rcu(obj->base.resv,
					      &excl, &count, &shared);
		if (ret)
			return ret;

		/* Boost and release each shared fence in one pass. */
		for (i = 0; i < count; i++) {
			fence_set_priority(shared[i], attr);
			dma_fence_put(shared[i]);
		}

		kfree(shared);
	} else {
		excl = dma_resv_get_excl_rcu(obj->base.resv);
	}

	if (excl) {
		fence_set_priority(excl, attr);
		dma_fence_put(excl);
	}
	return 0;
}

/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 *
 * Returns 0 on success, or the negative error/timeout code from the
 * underlying reservation wait.  Note the remaining timeout is discarded.
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout)
{
	might_sleep();
	GEM_BUG_ON(timeout < 0);

	timeout = i915_gem_object_wait_reservation(obj->base.resv,
						   flags, timeout);
	return timeout < 0 ? timeout : 0;
}

/*
 * Convert @n nanoseconds to a jiffy count suitable for a scheduler
 * timeout, rounding up by one jiffy so we never wait *less* than asked.
 */
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	/* +1 jiffy compensates for truncation in the conversion. */
	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

/*
 * Map the ioctl's signed nanosecond timeout onto scheduler jiffies:
 * negative means wait forever, zero means poll without blocking.
 */
static unsigned long to_wait_timeout(s64 timeout_ns)
{
	if (timeout_ns < 0)
		return MAX_SCHEDULE_TIMEOUT;

	if (timeout_ns == 0)
		return 0;

	return nsecs_to_jiffies_timeout(timeout_ns);
}

/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: incomplete, restart syscall
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	ktime_t start;
	long ret;

	/* No flags are currently defined for this ioctl. */
	if (args->flags != 0)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->bo_handle);
	if (!obj)
		return -ENOENT;

	start = ktime_get();

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_PRIORITY |
				   I915_WAIT_ALL,
				   to_wait_timeout(args->timeout_ns));

	/* Report the remaining time back to userspace (never negative). */
	if (args->timeout_ns > 0) {
		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
		if (args->timeout_ns < 0)
			args->timeout_ns = 0;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
			args->timeout_ns = 0;

		/* Asked to wait beyond the jiffie/scheduler precision? */
		if (ret == -ETIME && args->timeout_ns)
			ret = -EAGAIN;
	}

	i915_gem_object_put(obj);
	return ret;
}