1 1.1 riastrad /* $NetBSD: i915_request.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $ */ 2 1.1 riastrad 3 1.1 riastrad /* 4 1.1 riastrad * Copyright 2016 Intel Corporation 5 1.1 riastrad * 6 1.1 riastrad * Permission is hereby granted, free of charge, to any person obtaining a 7 1.1 riastrad * copy of this software and associated documentation files (the "Software"), 8 1.1 riastrad * to deal in the Software without restriction, including without limitation 9 1.1 riastrad * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 1.1 riastrad * and/or sell copies of the Software, and to permit persons to whom the 11 1.1 riastrad * Software is furnished to do so, subject to the following conditions: 12 1.1 riastrad * 13 1.1 riastrad * The above copyright notice and this permission notice (including the next 14 1.1 riastrad * paragraph) shall be included in all copies or substantial portions of the 15 1.1 riastrad * Software. 16 1.1 riastrad * 17 1.1 riastrad * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 1.1 riastrad * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 1.1 riastrad * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 1.1 riastrad * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 1.1 riastrad * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 22 1.1 riastrad * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 23 1.1 riastrad * IN THE SOFTWARE. 24 1.1 riastrad * 25 1.1 riastrad */ 26 1.1 riastrad 27 1.1 riastrad #include <sys/cdefs.h> 28 1.1 riastrad __KERNEL_RCSID(0, "$NetBSD: i915_request.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $"); 29 1.1 riastrad 30 1.1 riastrad #include <linux/prime_numbers.h> 31 1.1 riastrad 32 1.1 riastrad #include "gem/i915_gem_pm.h" 33 1.1 riastrad #include "gem/selftests/mock_context.h" 34 1.1 riastrad 35 1.1 riastrad #include "gt/intel_engine_pm.h" 36 1.1 riastrad #include "gt/intel_gt.h" 37 1.1 riastrad 38 1.1 riastrad #include "i915_random.h" 39 1.1 riastrad #include "i915_selftest.h" 40 1.1 riastrad #include "igt_live_test.h" 41 1.1 riastrad #include "igt_spinner.h" 42 1.1 riastrad #include "lib_sw_fence.h" 43 1.1 riastrad 44 1.1 riastrad #include "mock_drm.h" 45 1.1 riastrad #include "mock_gem_device.h" 46 1.1 riastrad 47 1.1 riastrad static unsigned int num_uabi_engines(struct drm_i915_private *i915) 48 1.1 riastrad { 49 1.1 riastrad struct intel_engine_cs *engine; 50 1.1 riastrad unsigned int count; 51 1.1 riastrad 52 1.1 riastrad count = 0; 53 1.1 riastrad for_each_uabi_engine(engine, i915) 54 1.1 riastrad count++; 55 1.1 riastrad 56 1.1 riastrad return count; 57 1.1 riastrad } 58 1.1 riastrad 59 1.1 riastrad static int igt_add_request(void *arg) 60 1.1 riastrad { 61 1.1 riastrad struct drm_i915_private *i915 = arg; 62 1.1 riastrad struct i915_request *request; 63 1.1 riastrad 64 1.1 riastrad /* Basic preliminary test to create a request and let it loose! */ 65 1.1 riastrad 66 1.1 riastrad request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10); 67 1.1 riastrad if (!request) 68 1.1 riastrad return -ENOMEM; 69 1.1 riastrad 70 1.1 riastrad i915_request_add(request); 71 1.1 riastrad 72 1.1 riastrad return 0; 73 1.1 riastrad } 74 1.1 riastrad 75 1.1 riastrad static int igt_wait_request(void *arg) 76 1.1 riastrad { 77 1.1 riastrad const long T = HZ / 4; 78 1.1 riastrad struct drm_i915_private *i915 = arg; 79 1.1 riastrad struct i915_request *request; 80 1.1 riastrad int err = -EINVAL; 81 1.1 riastrad 82 1.1 riastrad /* Submit a request, then wait upon it */ 83 1.1 riastrad 84 1.1 riastrad request = mock_request(i915->engine[RCS0]->kernel_context, T); 85 1.1 riastrad if (!request) 86 1.1 riastrad return -ENOMEM; 87 1.1 riastrad 88 1.1 riastrad i915_request_get(request); 89 1.1 riastrad 90 1.1 riastrad if (i915_request_wait(request, 0, 0) != -ETIME) { 91 1.1 riastrad pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n"); 92 1.1 riastrad goto out_request; 93 1.1 riastrad } 94 1.1 riastrad 95 1.1 riastrad if (i915_request_wait(request, 0, T) != -ETIME) { 96 1.1 riastrad pr_err("request wait succeeded (expected timeout before submit!)\n"); 97 1.1 riastrad goto out_request; 98 1.1 riastrad } 99 1.1 riastrad 100 1.1 riastrad if (i915_request_completed(request)) { 101 1.1 riastrad pr_err("request completed before submit!!\n"); 102 1.1 riastrad goto out_request; 103 1.1 riastrad } 104 1.1 riastrad 105 1.1 riastrad i915_request_add(request); 106 1.1 riastrad 107 1.1 riastrad if (i915_request_wait(request, 0, 0) != -ETIME) { 108 1.1 riastrad pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n"); 109 1.1 riastrad goto out_request; 110 1.1 riastrad } 111 1.1 riastrad 112 1.1 riastrad if (i915_request_completed(request)) { 113 1.1 riastrad pr_err("request completed immediately!\n"); 114 1.1 riastrad goto out_request; 115 1.1 riastrad } 116 1.1 riastrad 117 1.1 riastrad if (i915_request_wait(request, 0, T / 2) != -ETIME) { 118 1.1 riastrad pr_err("request wait succeeded (expected timeout!)\n"); 119 1.1 riastrad goto out_request; 120 1.1 riastrad } 121 1.1 riastrad 122 1.1 riastrad if (i915_request_wait(request, 0, T) == -ETIME) { 123 1.1 riastrad pr_err("request wait timed out!\n"); 124 1.1 riastrad goto out_request; 125 1.1 riastrad } 126 1.1 riastrad 127 1.1 riastrad if (!i915_request_completed(request)) { 128 1.1 riastrad pr_err("request not complete after waiting!\n"); 129 1.1 riastrad goto out_request; 130 1.1 riastrad } 131 1.1 riastrad 132 1.1 riastrad if (i915_request_wait(request, 0, T) == -ETIME) { 133 1.1 riastrad pr_err("request wait timed out when already complete!\n"); 134 1.1 riastrad goto out_request; 135 1.1 riastrad } 136 1.1 riastrad 137 1.1 riastrad err = 0; 138 1.1 riastrad out_request: 139 1.1 riastrad i915_request_put(request); 140 1.1 riastrad mock_device_flush(i915); 141 1.1 riastrad return err; 142 1.1 riastrad } 143 1.1 riastrad 144 1.1 riastrad static int igt_fence_wait(void *arg) 145 1.1 riastrad { 146 1.1 riastrad const long T = HZ / 4; 147 1.1 riastrad struct drm_i915_private *i915 = arg; 148 1.1 riastrad struct i915_request *request; 149 1.1 riastrad int err = -EINVAL; 150 1.1 riastrad 151 1.1 riastrad /* Submit a request, treat it as a fence and wait upon it */ 152 1.1 riastrad 153 1.1 riastrad request = mock_request(i915->engine[RCS0]->kernel_context, T); 154 1.1 riastrad if (!request) 155 1.1 riastrad return -ENOMEM; 156 1.1 riastrad 157 1.1 riastrad if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) { 158 1.1 riastrad pr_err("fence wait success before submit (expected timeout)!\n"); 159 1.1 riastrad goto out; 160 1.1 riastrad } 161 1.1 riastrad 162 1.1 riastrad i915_request_add(request); 163 1.1 riastrad 164 1.1 riastrad if (dma_fence_is_signaled(&request->fence)) { 165 1.1 riastrad pr_err("fence signaled immediately!\n"); 166 1.1 riastrad goto out; 167 1.1 riastrad } 168 1.1 riastrad 169 1.1 riastrad if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) { 170 1.1 riastrad pr_err("fence wait success after submit (expected timeout)!\n"); 171 1.1 riastrad goto out; 172 1.1 riastrad } 173 1.1 riastrad 174 1.1 riastrad if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) { 175 1.1 riastrad pr_err("fence wait timed out (expected success)!\n"); 176 1.1 riastrad goto out; 177 1.1 riastrad } 178 1.1 riastrad 179 1.1 riastrad if (!dma_fence_is_signaled(&request->fence)) { 180 1.1 riastrad pr_err("fence unsignaled after waiting!\n"); 181 1.1 riastrad goto out; 182 1.1 riastrad } 183 1.1 riastrad 184 1.1 riastrad if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) { 185 1.1 riastrad pr_err("fence wait timed out when complete (expected success)!\n"); 186 1.1 riastrad goto out; 187 1.1 riastrad } 188 1.1 riastrad 189 1.1 riastrad err = 0; 190 1.1 riastrad out: 191 1.1 riastrad mock_device_flush(i915); 192 1.1 riastrad return err; 193 1.1 riastrad } 194 1.1 riastrad 195 1.1 riastrad static int igt_request_rewind(void *arg) 196 1.1 riastrad { 197 1.1 riastrad struct drm_i915_private *i915 = arg; 198 1.1 riastrad struct i915_request *request, *vip; 199 1.1 riastrad struct i915_gem_context *ctx[2]; 200 1.1 riastrad struct intel_context *ce; 201 1.1 riastrad int err = -EINVAL; 202 1.1 riastrad 203 1.1 riastrad ctx[0] = mock_context(i915, "A"); 204 1.1 riastrad 205 1.1 riastrad ce = i915_gem_context_get_engine(ctx[0], RCS0); 206 1.1 riastrad GEM_BUG_ON(IS_ERR(ce)); 207 1.1 riastrad request = mock_request(ce, 2 * HZ); 208 1.1 riastrad intel_context_put(ce); 209 1.1 riastrad if (!request) { 210 1.1 riastrad err = -ENOMEM; 211 1.1 riastrad goto err_context_0; 212 1.1 riastrad } 213 1.1 riastrad 214 1.1 riastrad i915_request_get(request); 215 1.1 riastrad i915_request_add(request); 216 1.1 riastrad 217 1.1 riastrad ctx[1] = mock_context(i915, "B"); 218 1.1 riastrad 219 1.1 riastrad ce = i915_gem_context_get_engine(ctx[1], RCS0); 220 1.1 riastrad GEM_BUG_ON(IS_ERR(ce)); 221 1.1 riastrad vip = mock_request(ce, 0); 222 1.1 riastrad intel_context_put(ce); 223 1.1 riastrad if (!vip) { 224 1.1 riastrad err = -ENOMEM; 225 1.1 riastrad goto err_context_1; 226 1.1 riastrad } 227 1.1 riastrad 228 1.1 riastrad /* Simulate preemption by manual reordering */ 229 1.1 riastrad if (!mock_cancel_request(request)) { 230 1.1 riastrad pr_err("failed to cancel request (already executed)!\n"); 231 1.1 riastrad i915_request_add(vip); 232 1.1 riastrad goto err_context_1; 233 1.1 riastrad } 234 1.1 riastrad i915_request_get(vip); 235 1.1 riastrad i915_request_add(vip); 236 1.1 riastrad rcu_read_lock(); 237 1.1 riastrad request->engine->submit_request(request); 238 1.1 riastrad rcu_read_unlock(); 239 1.1 riastrad 240 1.1 riastrad 241 1.1 riastrad if (i915_request_wait(vip, 0, HZ) == -ETIME) { 242 1.1 riastrad pr_err("timed out waiting for high priority request\n"); 243 1.1 riastrad goto err; 244 1.1 riastrad } 245 1.1 riastrad 246 1.1 riastrad if (i915_request_completed(request)) { 247 1.1 riastrad pr_err("low priority request already completed\n"); 248 1.1 riastrad goto err; 249 1.1 riastrad } 250 1.1 riastrad 251 1.1 riastrad err = 0; 252 1.1 riastrad err: 253 1.1 riastrad i915_request_put(vip); 254 1.1 riastrad err_context_1: 255 1.1 riastrad mock_context_close(ctx[1]); 256 1.1 riastrad i915_request_put(request); 257 1.1 riastrad err_context_0: 258 1.1 riastrad mock_context_close(ctx[0]); 259 1.1 riastrad mock_device_flush(i915); 260 1.1 riastrad return err; 261 1.1 riastrad } 262 1.1 riastrad 263 1.1 riastrad struct smoketest { 264 1.1 riastrad struct intel_engine_cs *engine; 265 1.1 riastrad struct i915_gem_context **contexts; 266 1.1 riastrad atomic_long_t num_waits, num_fences; 267 1.1 riastrad int ncontexts, max_batch; 268 1.1 riastrad struct i915_request *(*request_alloc)(struct intel_context *ce); 269 1.1 riastrad }; 270 1.1 riastrad 271 1.1 riastrad static struct i915_request * 272 1.1 riastrad __mock_request_alloc(struct intel_context *ce) 273 1.1 riastrad { 274 1.1 riastrad return mock_request(ce, 0); 275 1.1 riastrad } 276 1.1 riastrad 277 1.1 riastrad static struct i915_request * 278 1.1 riastrad __live_request_alloc(struct intel_context *ce) 279 1.1 riastrad { 280 1.1 riastrad return intel_context_create_request(ce); 281 1.1 riastrad } 282 1.1 riastrad 283 1.1 riastrad static int __igt_breadcrumbs_smoketest(void *arg) 284 1.1 riastrad { 285 1.1 riastrad struct smoketest *t = arg; 286 1.1 riastrad const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1; 287 1.1 riastrad const unsigned int total = 4 * t->ncontexts + 1; 288 1.1 riastrad unsigned int num_waits = 0, num_fences = 0; 289 1.1 riastrad struct i915_request **requests; 290 1.1 riastrad I915_RND_STATE(prng); 291 1.1 riastrad unsigned int *order; 292 1.1 riastrad int err = 0; 293 1.1 riastrad 294 1.1 riastrad /* 295 1.1 riastrad * A very simple test to catch the most egregious of list handling bugs. 296 1.1 riastrad * 297 1.1 riastrad * At its heart, we simply create oodles of requests running across 298 1.1 riastrad * multiple kthreads and enable signaling on them, for the sole purpose 299 1.1 riastrad * of stressing our breadcrumb handling. The only inspection we do is 300 1.1 riastrad * that the fences were marked as signaled. 301 1.1 riastrad */ 302 1.1 riastrad 303 1.1 riastrad requests = kcalloc(total, sizeof(*requests), GFP_KERNEL); 304 1.1 riastrad if (!requests) 305 1.1 riastrad return -ENOMEM; 306 1.1 riastrad 307 1.1 riastrad order = i915_random_order(total, &prng); 308 1.1 riastrad if (!order) { 309 1.1 riastrad err = -ENOMEM; 310 1.1 riastrad goto out_requests; 311 1.1 riastrad } 312 1.1 riastrad 313 1.1 riastrad while (!kthread_should_stop()) { 314 1.1 riastrad struct i915_sw_fence *submit, *wait; 315 1.1 riastrad unsigned int n, count; 316 1.1 riastrad 317 1.1 riastrad submit = heap_fence_create(GFP_KERNEL); 318 1.1 riastrad if (!submit) { 319 1.1 riastrad err = -ENOMEM; 320 1.1 riastrad break; 321 1.1 riastrad } 322 1.1 riastrad 323 1.1 riastrad wait = heap_fence_create(GFP_KERNEL); 324 1.1 riastrad if (!wait) { 325 1.1 riastrad i915_sw_fence_commit(submit); 326 1.1 riastrad heap_fence_put(submit); 327 1.1 riastrad err = ENOMEM; 328 1.1 riastrad break; 329 1.1 riastrad } 330 1.1 riastrad 331 1.1 riastrad i915_random_reorder(order, total, &prng); 332 1.1 riastrad count = 1 + i915_prandom_u32_max_state(max_batch, &prng); 333 1.1 riastrad 334 1.1 riastrad for (n = 0; n < count; n++) { 335 1.1 riastrad struct i915_gem_context *ctx = 336 1.1 riastrad t->contexts[order[n] % t->ncontexts]; 337 1.1 riastrad struct i915_request *rq; 338 1.1 riastrad struct intel_context *ce; 339 1.1 riastrad 340 1.1 riastrad ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx); 341 1.1 riastrad GEM_BUG_ON(IS_ERR(ce)); 342 1.1 riastrad rq = t->request_alloc(ce); 343 1.1 riastrad intel_context_put(ce); 344 1.1 riastrad if (IS_ERR(rq)) { 345 1.1 riastrad err = PTR_ERR(rq); 346 1.1 riastrad count = n; 347 1.1 riastrad break; 348 1.1 riastrad } 349 1.1 riastrad 350 1.1 riastrad err = i915_sw_fence_await_sw_fence_gfp(&rq->submit, 351 1.1 riastrad submit, 352 1.1 riastrad GFP_KERNEL); 353 1.1 riastrad 354 1.1 riastrad requests[n] = i915_request_get(rq); 355 1.1 riastrad i915_request_add(rq); 356 1.1 riastrad 357 1.1 riastrad if (err >= 0) 358 1.1 riastrad err = i915_sw_fence_await_dma_fence(wait, 359 1.1 riastrad &rq->fence, 360 1.1 riastrad 0, 361 1.1 riastrad GFP_KERNEL); 362 1.1 riastrad 363 1.1 riastrad if (err < 0) { 364 1.1 riastrad i915_request_put(rq); 365 1.1 riastrad count = n; 366 1.1 riastrad break; 367 1.1 riastrad } 368 1.1 riastrad } 369 1.1 riastrad 370 1.1 riastrad i915_sw_fence_commit(submit); 371 1.1 riastrad i915_sw_fence_commit(wait); 372 1.1 riastrad 373 1.1 riastrad if (!wait_event_timeout(wait->wait, 374 1.1 riastrad i915_sw_fence_done(wait), 375 1.1 riastrad 5 * HZ)) { 376 1.1 riastrad struct i915_request *rq = requests[count - 1]; 377 1.1 riastrad 378 1.1 riastrad pr_err("waiting for %d/%d fences (last %llx:%lld) on %s timed out!\n", 379 1.1 riastrad atomic_read(&wait->pending), count, 380 1.1 riastrad rq->fence.context, rq->fence.seqno, 381 1.1 riastrad t->engine->name); 382 1.1 riastrad GEM_TRACE_DUMP(); 383 1.1 riastrad 384 1.1 riastrad intel_gt_set_wedged(t->engine->gt); 385 1.1 riastrad GEM_BUG_ON(!i915_request_completed(rq)); 386 1.1 riastrad i915_sw_fence_wait(wait); 387 1.1 riastrad err = -EIO; 388 1.1 riastrad } 389 1.1 riastrad 390 1.1 riastrad for (n = 0; n < count; n++) { 391 1.1 riastrad struct i915_request *rq = requests[n]; 392 1.1 riastrad 393 1.1 riastrad if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 394 1.1 riastrad &rq->fence.flags)) { 395 1.1 riastrad pr_err("%llu:%llu was not signaled!\n", 396 1.1 riastrad rq->fence.context, rq->fence.seqno); 397 1.1 riastrad err = -EINVAL; 398 1.1 riastrad } 399 1.1 riastrad 400 1.1 riastrad i915_request_put(rq); 401 1.1 riastrad } 402 1.1 riastrad 403 1.1 riastrad heap_fence_put(wait); 404 1.1 riastrad heap_fence_put(submit); 405 1.1 riastrad 406 1.1 riastrad if (err < 0) 407 1.1 riastrad break; 408 1.1 riastrad 409 1.1 riastrad num_fences += count; 410 1.1 riastrad num_waits++; 411 1.1 riastrad 412 1.1 riastrad cond_resched(); 413 1.1 riastrad } 414 1.1 riastrad 415 1.1 riastrad atomic_long_add(num_fences, &t->num_fences); 416 1.1 riastrad atomic_long_add(num_waits, &t->num_waits); 417 1.1 riastrad 418 1.1 riastrad kfree(order); 419 1.1 riastrad out_requests: 420 1.1 riastrad kfree(requests); 421 1.1 riastrad return err; 422 1.1 riastrad } 423 1.1 riastrad 424 1.1 riastrad static int mock_breadcrumbs_smoketest(void *arg) 425 1.1 riastrad { 426 1.1 riastrad struct drm_i915_private *i915 = arg; 427 1.1 riastrad struct smoketest t = { 428 1.1 riastrad .engine = i915->engine[RCS0], 429 1.1 riastrad .ncontexts = 1024, 430 1.1 riastrad .max_batch = 1024, 431 1.1 riastrad .request_alloc = __mock_request_alloc 432 1.1 riastrad }; 433 1.1 riastrad unsigned int ncpus = num_online_cpus(); 434 1.1 riastrad struct task_struct **threads; 435 1.1 riastrad unsigned int n; 436 1.1 riastrad int ret = 0; 437 1.1 riastrad 438 1.1 riastrad /* 439 1.1 riastrad * Smoketest our breadcrumb/signal handling for requests across multiple 440 1.1 riastrad * threads. A very simple test to only catch the most egregious of bugs. 441 1.1 riastrad * See __igt_breadcrumbs_smoketest(); 442 1.1 riastrad */ 443 1.1 riastrad 444 1.1 riastrad threads = kcalloc(ncpus, sizeof(*threads), GFP_KERNEL); 445 1.1 riastrad if (!threads) 446 1.1 riastrad return -ENOMEM; 447 1.1 riastrad 448 1.1 riastrad t.contexts = kcalloc(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL); 449 1.1 riastrad if (!t.contexts) { 450 1.1 riastrad ret = -ENOMEM; 451 1.1 riastrad goto out_threads; 452 1.1 riastrad } 453 1.1 riastrad 454 1.1 riastrad for (n = 0; n < t.ncontexts; n++) { 455 1.1 riastrad t.contexts[n] = mock_context(t.engine->i915, "mock"); 456 1.1 riastrad if (!t.contexts[n]) { 457 1.1 riastrad ret = -ENOMEM; 458 1.1 riastrad goto out_contexts; 459 1.1 riastrad } 460 1.1 riastrad } 461 1.1 riastrad 462 1.1 riastrad for (n = 0; n < ncpus; n++) { 463 1.1 riastrad threads[n] = kthread_run(__igt_breadcrumbs_smoketest, 464 1.1 riastrad &t, "igt/%d", n); 465 1.1 riastrad if (IS_ERR(threads[n])) { 466 1.1 riastrad ret = PTR_ERR(threads[n]); 467 1.1 riastrad ncpus = n; 468 1.1 riastrad break; 469 1.1 riastrad } 470 1.1 riastrad 471 1.1 riastrad get_task_struct(threads[n]); 472 1.1 riastrad } 473 1.1 riastrad 474 1.1 riastrad yield(); /* start all threads before we begin */ 475 1.1 riastrad msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies)); 476 1.1 riastrad 477 1.1 riastrad for (n = 0; n < ncpus; n++) { 478 1.1 riastrad int err; 479 1.1 riastrad 480 1.1 riastrad err = kthread_stop(threads[n]); 481 1.1 riastrad if (err < 0 && !ret) 482 1.1 riastrad ret = err; 483 1.1 riastrad 484 1.1 riastrad put_task_struct(threads[n]); 485 1.1 riastrad } 486 1.1 riastrad pr_info("Completed %lu waits for %lu fence across %d cpus\n", 487 1.1 riastrad atomic_long_read(&t.num_waits), 488 1.1 riastrad atomic_long_read(&t.num_fences), 489 1.1 riastrad ncpus); 490 1.1 riastrad 491 1.1 riastrad out_contexts: 492 1.1 riastrad for (n = 0; n < t.ncontexts; n++) { 493 1.1 riastrad if (!t.contexts[n]) 494 1.1 riastrad break; 495 1.1 riastrad mock_context_close(t.contexts[n]); 496 1.1 riastrad } 497 1.1 riastrad kfree(t.contexts); 498 1.1 riastrad out_threads: 499 1.1 riastrad kfree(threads); 500 1.1 riastrad return ret; 501 1.1 riastrad } 502 1.1 riastrad 503 1.1 riastrad int i915_request_mock_selftests(void) 504 1.1 riastrad { 505 1.1 riastrad static const struct i915_subtest tests[] = { 506 1.1 riastrad SUBTEST(igt_add_request), 507 1.1 riastrad SUBTEST(igt_wait_request), 508 1.1 riastrad SUBTEST(igt_fence_wait), 509 1.1 riastrad SUBTEST(igt_request_rewind), 510 1.1 riastrad SUBTEST(mock_breadcrumbs_smoketest), 511 1.1 riastrad }; 512 1.1 riastrad struct drm_i915_private *i915; 513 1.1 riastrad intel_wakeref_t wakeref; 514 1.1 riastrad int err = 0; 515 1.1 riastrad 516 1.1 riastrad i915 = mock_gem_device(); 517 1.1 riastrad if (!i915) 518 1.1 riastrad return -ENOMEM; 519 1.1 riastrad 520 1.1 riastrad with_intel_runtime_pm(&i915->runtime_pm, wakeref) 521 1.1 riastrad err = i915_subtests(tests, i915); 522 1.1 riastrad 523 1.1 riastrad drm_dev_put(&i915->drm); 524 1.1 riastrad 525 1.1 riastrad return err; 526 1.1 riastrad } 527 1.1 riastrad 528 1.1 riastrad static int live_nop_request(void *arg) 529 1.1 riastrad { 530 1.1 riastrad struct drm_i915_private *i915 = arg; 531 1.1 riastrad struct intel_engine_cs *engine; 532 1.1 riastrad struct igt_live_test t; 533 1.1 riastrad int err = -ENODEV; 534 1.1 riastrad 535 1.1 riastrad /* 536 1.1 riastrad * Submit various sized batches of empty requests, to each engine 537 1.1 riastrad * (individually), and wait for the batch to complete. We can check 538 1.1 riastrad * the overhead of submitting requests to the hardware. 539 1.1 riastrad */ 540 1.1 riastrad 541 1.1 riastrad for_each_uabi_engine(engine, i915) { 542 1.1 riastrad unsigned long n, prime; 543 1.1 riastrad IGT_TIMEOUT(end_time); 544 1.1 riastrad ktime_t times[2] = {}; 545 1.1 riastrad 546 1.1 riastrad err = igt_live_test_begin(&t, i915, __func__, engine->name); 547 1.1 riastrad if (err) 548 1.1 riastrad return err; 549 1.1 riastrad 550 1.1 riastrad intel_engine_pm_get(engine); 551 1.1 riastrad for_each_prime_number_from(prime, 1, 8192) { 552 1.1 riastrad struct i915_request *request = NULL; 553 1.1 riastrad 554 1.1 riastrad times[1] = ktime_get_raw(); 555 1.1 riastrad 556 1.1 riastrad for (n = 0; n < prime; n++) { 557 1.1 riastrad i915_request_put(request); 558 1.1 riastrad request = i915_request_create(engine->kernel_context); 559 1.1 riastrad if (IS_ERR(request)) 560 1.1 riastrad return PTR_ERR(request); 561 1.1 riastrad 562 1.1 riastrad /* 563 1.1 riastrad * This space is left intentionally blank. 564 1.1 riastrad * 565 1.1 riastrad * We do not actually want to perform any 566 1.1 riastrad * action with this request, we just want 567 1.1 riastrad * to measure the latency in allocation 568 1.1 riastrad * and submission of our breadcrumbs - 569 1.1 riastrad * ensuring that the bare request is sufficient 570 1.1 riastrad * for the system to work (i.e. proper HEAD 571 1.1 riastrad * tracking of the rings, interrupt handling, 572 1.1 riastrad * etc). It also gives us the lowest bounds 573 1.1 riastrad * for latency. 574 1.1 riastrad */ 575 1.1 riastrad 576 1.1 riastrad i915_request_get(request); 577 1.1 riastrad i915_request_add(request); 578 1.1 riastrad } 579 1.1 riastrad i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT); 580 1.1 riastrad i915_request_put(request); 581 1.1 riastrad 582 1.1 riastrad times[1] = ktime_sub(ktime_get_raw(), times[1]); 583 1.1 riastrad if (prime == 1) 584 1.1 riastrad times[0] = times[1]; 585 1.1 riastrad 586 1.1 riastrad if (__igt_timeout(end_time, NULL)) 587 1.1 riastrad break; 588 1.1 riastrad } 589 1.1 riastrad intel_engine_pm_put(engine); 590 1.1 riastrad 591 1.1 riastrad err = igt_live_test_end(&t); 592 1.1 riastrad if (err) 593 1.1 riastrad return err; 594 1.1 riastrad 595 1.1 riastrad pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n", 596 1.1 riastrad engine->name, 597 1.1 riastrad ktime_to_ns(times[0]), 598 1.1 riastrad prime, div64_u64(ktime_to_ns(times[1]), prime)); 599 1.1 riastrad } 600 1.1 riastrad 601 1.1 riastrad return err; 602 1.1 riastrad } 603 1.1 riastrad 604 1.1 riastrad static struct i915_vma *empty_batch(struct drm_i915_private *i915) 605 1.1 riastrad { 606 1.1 riastrad struct drm_i915_gem_object *obj; 607 1.1 riastrad struct i915_vma *vma; 608 1.1 riastrad u32 *cmd; 609 1.1 riastrad int err; 610 1.1 riastrad 611 1.1 riastrad obj = i915_gem_object_create_internal(i915, PAGE_SIZE); 612 1.1 riastrad if (IS_ERR(obj)) 613 1.1 riastrad return ERR_CAST(obj); 614 1.1 riastrad 615 1.1 riastrad cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); 616 1.1 riastrad if (IS_ERR(cmd)) { 617 1.1 riastrad err = PTR_ERR(cmd); 618 1.1 riastrad goto err; 619 1.1 riastrad } 620 1.1 riastrad 621 1.1 riastrad *cmd = MI_BATCH_BUFFER_END; 622 1.1 riastrad 623 1.1 riastrad __i915_gem_object_flush_map(obj, 0, 64); 624 1.1 riastrad i915_gem_object_unpin_map(obj); 625 1.1 riastrad 626 1.1 riastrad intel_gt_chipset_flush(&i915->gt); 627 1.1 riastrad 628 1.1 riastrad vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); 629 1.1 riastrad if (IS_ERR(vma)) { 630 1.1 riastrad err = PTR_ERR(vma); 631 1.1 riastrad goto err; 632 1.1 riastrad } 633 1.1 riastrad 634 1.1 riastrad err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL); 635 1.1 riastrad if (err) 636 1.1 riastrad goto err; 637 1.1 riastrad 638 1.1 riastrad /* Force the wait wait now to avoid including it in the benchmark */ 639 1.1 riastrad err = i915_vma_sync(vma); 640 1.1 riastrad if (err) 641 1.1 riastrad goto err_pin; 642 1.1 riastrad 643 1.1 riastrad return vma; 644 1.1 riastrad 645 1.1 riastrad err_pin: 646 1.1 riastrad i915_vma_unpin(vma); 647 1.1 riastrad err: 648 1.1 riastrad i915_gem_object_put(obj); 649 1.1 riastrad return ERR_PTR(err); 650 1.1 riastrad } 651 1.1 riastrad 652 1.1 riastrad static struct i915_request * 653 1.1 riastrad empty_request(struct intel_engine_cs *engine, 654 1.1 riastrad struct i915_vma *batch) 655 1.1 riastrad { 656 1.1 riastrad struct i915_request *request; 657 1.1 riastrad int err; 658 1.1 riastrad 659 1.1 riastrad request = i915_request_create(engine->kernel_context); 660 1.1 riastrad if (IS_ERR(request)) 661 1.1 riastrad return request; 662 1.1 riastrad 663 1.1 riastrad err = engine->emit_bb_start(request, 664 1.1 riastrad batch->node.start, 665 1.1 riastrad batch->node.size, 666 1.1 riastrad I915_DISPATCH_SECURE); 667 1.1 riastrad if (err) 668 1.1 riastrad goto out_request; 669 1.1 riastrad 670 1.1 riastrad i915_request_get(request); 671 1.1 riastrad out_request: 672 1.1 riastrad i915_request_add(request); 673 1.1 riastrad return err ? ERR_PTR(err) : request; 674 1.1 riastrad } 675 1.1 riastrad 676 1.1 riastrad static int live_empty_request(void *arg) 677 1.1 riastrad { 678 1.1 riastrad struct drm_i915_private *i915 = arg; 679 1.1 riastrad struct intel_engine_cs *engine; 680 1.1 riastrad struct igt_live_test t; 681 1.1 riastrad struct i915_vma *batch; 682 1.1 riastrad int err = 0; 683 1.1 riastrad 684 1.1 riastrad /* 685 1.1 riastrad * Submit various sized batches of empty requests, to each engine 686 1.1 riastrad * (individually), and wait for the batch to complete. We can check 687 1.1 riastrad * the overhead of submitting requests to the hardware. 688 1.1 riastrad */ 689 1.1 riastrad 690 1.1 riastrad batch = empty_batch(i915); 691 1.1 riastrad if (IS_ERR(batch)) 692 1.1 riastrad return PTR_ERR(batch); 693 1.1 riastrad 694 1.1 riastrad for_each_uabi_engine(engine, i915) { 695 1.1 riastrad IGT_TIMEOUT(end_time); 696 1.1 riastrad struct i915_request *request; 697 1.1 riastrad unsigned long n, prime; 698 1.1 riastrad ktime_t times[2] = {}; 699 1.1 riastrad 700 1.1 riastrad err = igt_live_test_begin(&t, i915, __func__, engine->name); 701 1.1 riastrad if (err) 702 1.1 riastrad goto out_batch; 703 1.1 riastrad 704 1.1 riastrad intel_engine_pm_get(engine); 705 1.1 riastrad 706 1.1 riastrad /* Warmup / preload */ 707 1.1 riastrad request = empty_request(engine, batch); 708 1.1 riastrad if (IS_ERR(request)) { 709 1.1 riastrad err = PTR_ERR(request); 710 1.1 riastrad intel_engine_pm_put(engine); 711 1.1 riastrad goto out_batch; 712 1.1 riastrad } 713 1.1 riastrad i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT); 714 1.1 riastrad 715 1.1 riastrad for_each_prime_number_from(prime, 1, 8192) { 716 1.1 riastrad times[1] = ktime_get_raw(); 717 1.1 riastrad 718 1.1 riastrad for (n = 0; n < prime; n++) { 719 1.1 riastrad i915_request_put(request); 720 1.1 riastrad request = empty_request(engine, batch); 721 1.1 riastrad if (IS_ERR(request)) { 722 1.1 riastrad err = PTR_ERR(request); 723 1.1 riastrad intel_engine_pm_put(engine); 724 1.1 riastrad goto out_batch; 725 1.1 riastrad } 726 1.1 riastrad } 727 1.1 riastrad i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT); 728 1.1 riastrad 729 1.1 riastrad times[1] = ktime_sub(ktime_get_raw(), times[1]); 730 1.1 riastrad if (prime == 1) 731 1.1 riastrad times[0] = times[1]; 732 1.1 riastrad 733 1.1 riastrad if (__igt_timeout(end_time, NULL)) 734 1.1 riastrad break; 735 1.1 riastrad } 736 1.1 riastrad i915_request_put(request); 737 1.1 riastrad intel_engine_pm_put(engine); 738 1.1 riastrad 739 1.1 riastrad err = igt_live_test_end(&t); 740 1.1 riastrad if (err) 741 1.1 riastrad goto out_batch; 742 1.1 riastrad 743 1.1 riastrad pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n", 744 1.1 riastrad engine->name, 745 1.1 riastrad ktime_to_ns(times[0]), 746 1.1 riastrad prime, div64_u64(ktime_to_ns(times[1]), prime)); 747 1.1 riastrad } 748 1.1 riastrad 749 1.1 riastrad out_batch: 750 1.1 riastrad i915_vma_unpin(batch); 751 1.1 riastrad i915_vma_put(batch); 752 1.1 riastrad return err; 753 1.1 riastrad } 754 1.1 riastrad 755 1.1 riastrad static struct i915_vma *recursive_batch(struct drm_i915_private *i915) 756 1.1 riastrad { 757 1.1 riastrad struct drm_i915_gem_object *obj; 758 1.1 riastrad const int gen = INTEL_GEN(i915); 759 1.1 riastrad struct i915_vma *vma; 760 1.1 riastrad u32 *cmd; 761 1.1 riastrad int err; 762 1.1 riastrad 763 1.1 riastrad obj = i915_gem_object_create_internal(i915, PAGE_SIZE); 764 1.1 riastrad if (IS_ERR(obj)) 765 1.1 riastrad return ERR_CAST(obj); 766 1.1 riastrad 767 1.1 riastrad vma = i915_vma_instance(obj, i915->gt.vm, NULL); 768 1.1 riastrad if (IS_ERR(vma)) { 769 1.1 riastrad err = PTR_ERR(vma); 770 1.1 riastrad goto err; 771 1.1 riastrad } 772 1.1 riastrad 773 1.1 riastrad err = i915_vma_pin(vma, 0, 0, PIN_USER); 774 1.1 riastrad if (err) 775 1.1 riastrad goto err; 776 1.1 riastrad 777 1.1 riastrad cmd = i915_gem_object_pin_map(obj, I915_MAP_WC); 778 1.1 riastrad if (IS_ERR(cmd)) { 779 1.1 riastrad err = PTR_ERR(cmd); 780 1.1 riastrad goto err; 781 1.1 riastrad } 782 1.1 riastrad 783 1.1 riastrad if (gen >= 8) { 784 1.1 riastrad *cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1; 785 1.1 riastrad *cmd++ = lower_32_bits(vma->node.start); 786 1.1 riastrad *cmd++ = upper_32_bits(vma->node.start); 787 1.1 riastrad } else if (gen >= 6) { 788 1.1 riastrad *cmd++ = MI_BATCH_BUFFER_START | 1 << 8; 789 1.1 riastrad *cmd++ = lower_32_bits(vma->node.start); 790 1.1 riastrad } else { 791 1.1 riastrad *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT; 792 1.1 riastrad *cmd++ = lower_32_bits(vma->node.start); 793 1.1 riastrad } 794 1.1 riastrad *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */ 795 1.1 riastrad 796 1.1 riastrad __i915_gem_object_flush_map(obj, 0, 64); 797 1.1 riastrad i915_gem_object_unpin_map(obj); 798 1.1 riastrad 799 1.1 riastrad intel_gt_chipset_flush(&i915->gt); 800 1.1 riastrad 801 1.1 riastrad return vma; 802 1.1 riastrad 803 1.1 riastrad err: 804 1.1 riastrad i915_gem_object_put(obj); 805 1.1 riastrad return ERR_PTR(err); 806 1.1 riastrad } 807 1.1 riastrad 808 1.1 riastrad static int recursive_batch_resolve(struct i915_vma *batch) 809 1.1 riastrad { 810 1.1 riastrad u32 *cmd; 811 1.1 riastrad 812 1.1 riastrad cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC); 813 1.1 riastrad if (IS_ERR(cmd)) 814 1.1 riastrad return PTR_ERR(cmd); 815 1.1 riastrad 816 1.1 riastrad *cmd = MI_BATCH_BUFFER_END; 817 1.1 riastrad intel_gt_chipset_flush(batch->vm->gt); 818 1.1 riastrad 819 1.1 riastrad i915_gem_object_unpin_map(batch->obj); 820 1.1 riastrad 821 1.1 riastrad return 0; 822 1.1 riastrad } 823 1.1 riastrad 824 1.1 riastrad static int live_all_engines(void *arg) 825 1.1 riastrad { 826 1.1 riastrad struct drm_i915_private *i915 = arg; 827 1.1 riastrad const unsigned int nengines = num_uabi_engines(i915); 828 1.1 riastrad struct intel_engine_cs *engine; 829 1.1 riastrad struct i915_request **request; 830 1.1 riastrad struct igt_live_test t; 831 1.1 riastrad struct i915_vma *batch; 832 1.1 riastrad unsigned int idx; 833 1.1 riastrad int err; 834 1.1 riastrad 835 1.1 riastrad /* 836 1.1 riastrad * Check we can submit requests to all engines simultaneously. We 837 1.1 riastrad * send a recursive batch to each engine - checking that we don't 838 1.1 riastrad * block doing so, and that they don't complete too soon. 839 1.1 riastrad */ 840 1.1 riastrad 841 1.1 riastrad request = kcalloc(nengines, sizeof(*request), GFP_KERNEL); 842 1.1 riastrad if (!request) 843 1.1 riastrad return -ENOMEM; 844 1.1 riastrad 845 1.1 riastrad err = igt_live_test_begin(&t, i915, __func__, ""); 846 1.1 riastrad if (err) 847 1.1 riastrad goto out_free; 848 1.1 riastrad 849 1.1 riastrad batch = recursive_batch(i915); 850 1.1 riastrad if (IS_ERR(batch)) { 851 1.1 riastrad err = PTR_ERR(batch); 852 1.1 riastrad pr_err("%s: Unable to create batch, err=%d\n", __func__, err); 853 1.1 riastrad goto out_free; 854 1.1 riastrad } 855 1.1 riastrad 856 1.1 riastrad idx = 0; 857 1.1 riastrad for_each_uabi_engine(engine, i915) { 858 1.1 riastrad request[idx] = intel_engine_create_kernel_request(engine); 859 1.1 riastrad if (IS_ERR(request[idx])) { 860 1.1 riastrad err = PTR_ERR(request[idx]); 861 1.1 riastrad pr_err("%s: Request allocation failed with err=%d\n", 862 1.1 riastrad __func__, err); 863 1.1 riastrad goto out_request; 864 1.1 riastrad } 865 1.1 riastrad 866 1.1 riastrad err = engine->emit_bb_start(request[idx], 867 1.1 riastrad batch->node.start, 868 1.1 riastrad batch->node.size, 869 1.1 riastrad 0); 870 1.1 riastrad GEM_BUG_ON(err); 871 1.1 riastrad request[idx]->batch = batch; 872 1.1 riastrad 873 1.1 riastrad i915_vma_lock(batch); 874 1.1 riastrad err = i915_request_await_object(request[idx], batch->obj, 0); 875 1.1 riastrad if (err == 0) 876 1.1 riastrad err = i915_vma_move_to_active(batch, request[idx], 0); 877 1.1 riastrad i915_vma_unlock(batch); 878 1.1 riastrad GEM_BUG_ON(err); 879 1.1 riastrad 880 1.1 riastrad i915_request_get(request[idx]); 881 1.1 riastrad i915_request_add(request[idx]); 882 1.1 riastrad idx++; 883 1.1 riastrad } 884 1.1 riastrad 885 1.1 riastrad idx = 0; 886 1.1 riastrad for_each_uabi_engine(engine, i915) { 887 1.1 riastrad if (i915_request_completed(request[idx])) { 888 1.1 riastrad pr_err("%s(%s): request completed too early!\n", 889 1.1 riastrad __func__, engine->name); 890 1.1 riastrad err = -EINVAL; 891 1.1 riastrad goto out_request; 892 1.1 riastrad } 893 1.1 riastrad idx++; 894 1.1 riastrad } 895 1.1 riastrad 896 1.1 riastrad err = recursive_batch_resolve(batch); 897 1.1 riastrad if (err) { 898 1.1 riastrad pr_err("%s: failed to resolve batch, err=%d\n", __func__, err); 899 1.1 riastrad goto out_request; 900 1.1 riastrad } 901 1.1 riastrad 902 1.1 riastrad idx = 0; 903 1.1 riastrad for_each_uabi_engine(engine, i915) { 904 1.1 riastrad long timeout; 905 1.1 riastrad 906 1.1 riastrad timeout = i915_request_wait(request[idx], 0, 907 1.1 riastrad MAX_SCHEDULE_TIMEOUT); 908 1.1 riastrad if (timeout < 0) { 909 1.1 riastrad err = timeout; 910 1.1 riastrad pr_err("%s: error waiting for request on %s, err=%d\n", 911 1.1 riastrad __func__, engine->name, err); 912 1.1 riastrad goto out_request; 913 1.1 riastrad } 914 1.1 riastrad 915 1.1 riastrad GEM_BUG_ON(!i915_request_completed(request[idx])); 916 1.1 riastrad i915_request_put(request[idx]); 917 1.1 riastrad request[idx] = NULL; 918 1.1 riastrad idx++; 919 1.1 riastrad } 920 1.1 riastrad 921 1.1 riastrad err = igt_live_test_end(&t); 922 1.1 riastrad 923 1.1 riastrad out_request: 924 1.1 riastrad idx = 0; 925 1.1 riastrad for_each_uabi_engine(engine, i915) { 926 1.1 riastrad if (request[idx]) 927 1.1 riastrad i915_request_put(request[idx]); 928 1.1 riastrad idx++; 929 1.1 riastrad } 930 1.1 riastrad i915_vma_unpin(batch); 931 1.1 riastrad i915_vma_put(batch); 932 1.1 riastrad out_free: 933 1.1 riastrad kfree(request); 934 1.1 riastrad return err; 935 1.1 riastrad } 936 1.1 riastrad 937 1.1 riastrad static int live_sequential_engines(void *arg) 938 1.1 riastrad { 939 1.1 riastrad struct drm_i915_private *i915 = arg; 940 1.1 riastrad const unsigned int nengines = num_uabi_engines(i915); 941 1.1 riastrad struct i915_request **request; 942 1.1 riastrad struct i915_request *prev = NULL; 943 1.1 riastrad struct intel_engine_cs *engine; 944 1.1 riastrad struct igt_live_test t; 945 1.1 riastrad unsigned int idx; 946 1.1 riastrad int err; 947 1.1 riastrad 948 1.1 riastrad /* 949 1.1 riastrad * Check we can submit requests to all engines sequentially, such 950 1.1 riastrad * that each successive request waits for the earlier ones. This 951 1.1 riastrad * tests that we don't execute requests out of order, even though 952 1.1 riastrad * they are running on independent engines. 953 1.1 riastrad */ 954 1.1 riastrad 955 1.1 riastrad request = kcalloc(nengines, sizeof(*request), GFP_KERNEL); 956 1.1 riastrad if (!request) 957 1.1 riastrad return -ENOMEM; 958 1.1 riastrad 959 1.1 riastrad err = igt_live_test_begin(&t, i915, __func__, ""); 960 1.1 riastrad if (err) 961 1.1 riastrad goto out_free; 962 1.1 riastrad 963 1.1 riastrad idx = 0; 964 1.1 riastrad for_each_uabi_engine(engine, i915) { 965 1.1 riastrad struct i915_vma *batch; 966 1.1 riastrad 967 1.1 riastrad batch = recursive_batch(i915); 968 1.1 riastrad if (IS_ERR(batch)) { 969 1.1 riastrad err = PTR_ERR(batch); 970 1.1 riastrad pr_err("%s: Unable to create batch for %s, err=%d\n", 971 1.1 riastrad __func__, engine->name, err); 972 1.1 riastrad goto out_free; 973 1.1 riastrad } 974 1.1 riastrad 975 1.1 riastrad request[idx] = intel_engine_create_kernel_request(engine); 976 1.1 riastrad if (IS_ERR(request[idx])) { 977 1.1 riastrad err = PTR_ERR(request[idx]); 978 1.1 riastrad pr_err("%s: Request allocation failed for %s with err=%d\n", 979 1.1 riastrad __func__, engine->name, err); 980 1.1 riastrad goto out_request; 981 1.1 riastrad } 982 1.1 riastrad 983 1.1 riastrad if (prev) { 984 1.1 riastrad err = i915_request_await_dma_fence(request[idx], 985 1.1 riastrad &prev->fence); 986 1.1 riastrad if (err) { 987 1.1 riastrad i915_request_add(request[idx]); 988 1.1 riastrad pr_err("%s: Request await failed for %s with err=%d\n", 989 1.1 riastrad __func__, engine->name, err); 990 1.1 riastrad goto out_request; 991 1.1 riastrad } 992 1.1 riastrad } 993 1.1 riastrad 994 1.1 riastrad err = engine->emit_bb_start(request[idx], 995 1.1 riastrad batch->node.start, 996 1.1 riastrad batch->node.size, 997 1.1 riastrad 0); 998 1.1 riastrad GEM_BUG_ON(err); 999 1.1 riastrad request[idx]->batch = batch; 1000 1.1 riastrad 1001 1.1 riastrad i915_vma_lock(batch); 1002 1.1 riastrad err = i915_request_await_object(request[idx], 1003 1.1 riastrad batch->obj, false); 1004 1.1 riastrad if (err == 0) 1005 1.1 riastrad err = i915_vma_move_to_active(batch, request[idx], 0); 1006 1.1 riastrad i915_vma_unlock(batch); 1007 1.1 riastrad GEM_BUG_ON(err); 1008 1.1 riastrad 1009 1.1 riastrad i915_request_get(request[idx]); 1010 1.1 riastrad i915_request_add(request[idx]); 1011 1.1 riastrad 1012 1.1 riastrad prev = request[idx]; 1013 1.1 riastrad idx++; 1014 1.1 riastrad } 1015 1.1 riastrad 1016 1.1 riastrad idx = 0; 1017 1.1 riastrad for_each_uabi_engine(engine, i915) { 1018 1.1 riastrad long timeout; 1019 1.1 riastrad 1020 1.1 riastrad if (i915_request_completed(request[idx])) { 1021 1.1 riastrad pr_err("%s(%s): request completed too early!\n", 1022 1.1 riastrad __func__, engine->name); 1023 1.1 riastrad err = -EINVAL; 1024 1.1 riastrad goto out_request; 1025 1.1 riastrad } 1026 1.1 riastrad 1027 1.1 riastrad err = recursive_batch_resolve(request[idx]->batch); 1028 1.1 riastrad if (err) { 1029 1.1 riastrad pr_err("%s: failed to resolve batch, err=%d\n", 1030 1.1 riastrad __func__, err); 1031 1.1 riastrad goto out_request; 1032 1.1 riastrad } 1033 1.1 riastrad 1034 1.1 riastrad timeout = i915_request_wait(request[idx], 0, 1035 1.1 riastrad MAX_SCHEDULE_TIMEOUT); 1036 1.1 riastrad if (timeout < 0) { 1037 1.1 riastrad err = timeout; 1038 1.1 riastrad pr_err("%s: error waiting for request on %s, err=%d\n", 1039 1.1 riastrad __func__, engine->name, err); 1040 1.1 riastrad goto out_request; 1041 1.1 riastrad } 1042 1.1 riastrad 1043 1.1 riastrad GEM_BUG_ON(!i915_request_completed(request[idx])); 1044 1.1 riastrad idx++; 1045 1.1 riastrad } 1046 1.1 riastrad 1047 1.1 riastrad err = igt_live_test_end(&t); 1048 1.1 riastrad 1049 1.1 riastrad out_request: 1050 1.1 riastrad idx = 0; 1051 1.1 riastrad for_each_uabi_engine(engine, i915) { 1052 1.1 riastrad u32 *cmd; 1053 1.1 riastrad 1054 1.1 riastrad if (!request[idx]) 1055 1.1 riastrad break; 1056 1.1 riastrad 1057 1.1 riastrad cmd = i915_gem_object_pin_map(request[idx]->batch->obj, 1058 1.1 riastrad I915_MAP_WC); 1059 1.1 riastrad if (!IS_ERR(cmd)) { 1060 1.1 riastrad *cmd = MI_BATCH_BUFFER_END; 1061 1.1 riastrad intel_gt_chipset_flush(engine->gt); 1062 1.1 riastrad 1063 1.1 riastrad i915_gem_object_unpin_map(request[idx]->batch->obj); 1064 1.1 riastrad } 1065 1.1 riastrad 1066 1.1 riastrad i915_vma_put(request[idx]->batch); 1067 1.1 riastrad i915_request_put(request[idx]); 1068 1.1 riastrad idx++; 1069 1.1 riastrad } 1070 1.1 riastrad out_free: 1071 1.1 riastrad kfree(request); 1072 1.1 riastrad return err; 1073 1.1 riastrad } 1074 1.1 riastrad 1075 1.1 riastrad static int __live_parallel_engine1(void *arg) 1076 1.1 riastrad { 1077 1.1 riastrad struct intel_engine_cs *engine = arg; 1078 1.1 riastrad IGT_TIMEOUT(end_time); 1079 1.1 riastrad unsigned long count; 1080 1.1 riastrad int err = 0; 1081 1.1 riastrad 1082 1.1 riastrad count = 0; 1083 1.1 riastrad intel_engine_pm_get(engine); 1084 1.1 riastrad do { 1085 1.1 riastrad struct i915_request *rq; 1086 1.1 riastrad 1087 1.1 riastrad rq = i915_request_create(engine->kernel_context); 1088 1.1 riastrad if (IS_ERR(rq)) { 1089 1.1 riastrad err = PTR_ERR(rq); 1090 1.1 riastrad break; 1091 1.1 riastrad } 1092 1.1 riastrad 1093 1.1 riastrad i915_request_get(rq); 1094 1.1 riastrad i915_request_add(rq); 1095 1.1 riastrad 1096 1.1 riastrad err = 0; 1097 1.1 riastrad if (i915_request_wait(rq, 0, HZ / 5) < 0) 1098 1.1 riastrad err = -ETIME; 1099 1.1 riastrad i915_request_put(rq); 1100 1.1 riastrad if (err) 1101 1.1 riastrad break; 1102 1.1 riastrad 1103 1.1 riastrad count++; 1104 1.1 riastrad } while (!__igt_timeout(end_time, NULL)); 1105 1.1 riastrad intel_engine_pm_put(engine); 1106 1.1 riastrad 1107 1.1 riastrad pr_info("%s: %lu request + sync\n", engine->name, count); 1108 1.1 riastrad return err; 1109 1.1 riastrad } 1110 1.1 riastrad 1111 1.1 riastrad static int __live_parallel_engineN(void *arg) 1112 1.1 riastrad { 1113 1.1 riastrad struct intel_engine_cs *engine = arg; 1114 1.1 riastrad IGT_TIMEOUT(end_time); 1115 1.1 riastrad unsigned long count; 1116 1.1 riastrad int err = 0; 1117 1.1 riastrad 1118 1.1 riastrad count = 0; 1119 1.1 riastrad intel_engine_pm_get(engine); 1120 1.1 riastrad do { 1121 1.1 riastrad struct i915_request *rq; 1122 1.1 riastrad 1123 1.1 riastrad rq = i915_request_create(engine->kernel_context); 1124 1.1 riastrad if (IS_ERR(rq)) { 1125 1.1 riastrad err = PTR_ERR(rq); 1126 1.1 riastrad break; 1127 1.1 riastrad } 1128 1.1 riastrad 1129 1.1 riastrad i915_request_add(rq); 1130 1.1 riastrad count++; 1131 1.1 riastrad } while (!__igt_timeout(end_time, NULL)); 1132 1.1 riastrad intel_engine_pm_put(engine); 1133 1.1 riastrad 1134 1.1 riastrad pr_info("%s: %lu requests\n", engine->name, count); 1135 1.1 riastrad return err; 1136 1.1 riastrad } 1137 1.1 riastrad 1138 1.1 riastrad static bool wake_all(struct drm_i915_private *i915) 1139 1.1 riastrad { 1140 1.1 riastrad if (atomic_dec_and_test(&i915->selftest.counter)) { 1141 1.1 riastrad wake_up_var(&i915->selftest.counter); 1142 1.1 riastrad return true; 1143 1.1 riastrad } 1144 1.1 riastrad 1145 1.1 riastrad return false; 1146 1.1 riastrad } 1147 1.1 riastrad 1148 1.1 riastrad static int wait_for_all(struct drm_i915_private *i915) 1149 1.1 riastrad { 1150 1.1 riastrad if (wake_all(i915)) 1151 1.1 riastrad return 0; 1152 1.1 riastrad 1153 1.1 riastrad if (wait_var_event_timeout(&i915->selftest.counter, 1154 1.1 riastrad !atomic_read(&i915->selftest.counter), 1155 1.1 riastrad i915_selftest.timeout_jiffies)) 1156 1.1 riastrad return 0; 1157 1.1 riastrad 1158 1.1 riastrad return -ETIME; 1159 1.1 riastrad } 1160 1.1 riastrad 1161 1.1 riastrad static int __live_parallel_spin(void *arg) 1162 1.1 riastrad { 1163 1.1 riastrad struct intel_engine_cs *engine = arg; 1164 1.1 riastrad struct igt_spinner spin; 1165 1.1 riastrad struct i915_request *rq; 1166 1.1 riastrad int err = 0; 1167 1.1 riastrad 1168 1.1 riastrad /* 1169 1.1 riastrad * Create a spinner running for eternity on each engine. If a second 1170 1.1 riastrad * spinner is incorrectly placed on the same engine, it will not be 1171 1.1 riastrad * able to start in time. 1172 1.1 riastrad */ 1173 1.1 riastrad 1174 1.1 riastrad if (igt_spinner_init(&spin, engine->gt)) { 1175 1.1 riastrad wake_all(engine->i915); 1176 1.1 riastrad return -ENOMEM; 1177 1.1 riastrad } 1178 1.1 riastrad 1179 1.1 riastrad intel_engine_pm_get(engine); 1180 1.1 riastrad rq = igt_spinner_create_request(&spin, 1181 1.1 riastrad engine->kernel_context, 1182 1.1 riastrad MI_NOOP); /* no preemption */ 1183 1.1 riastrad intel_engine_pm_put(engine); 1184 1.1 riastrad if (IS_ERR(rq)) { 1185 1.1 riastrad err = PTR_ERR(rq); 1186 1.1 riastrad if (err == -ENODEV) 1187 1.1 riastrad err = 0; 1188 1.1 riastrad wake_all(engine->i915); 1189 1.1 riastrad goto out_spin; 1190 1.1 riastrad } 1191 1.1 riastrad 1192 1.1 riastrad i915_request_get(rq); 1193 1.1 riastrad i915_request_add(rq); 1194 1.1 riastrad if (igt_wait_for_spinner(&spin, rq)) { 1195 1.1 riastrad /* Occupy this engine for the whole test */ 1196 1.1 riastrad err = wait_for_all(engine->i915); 1197 1.1 riastrad } else { 1198 1.1 riastrad pr_err("Failed to start spinner on %s\n", engine->name); 1199 1.1 riastrad err = -EINVAL; 1200 1.1 riastrad } 1201 1.1 riastrad igt_spinner_end(&spin); 1202 1.1 riastrad 1203 1.1 riastrad if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) 1204 1.1 riastrad err = -EIO; 1205 1.1 riastrad i915_request_put(rq); 1206 1.1 riastrad 1207 1.1 riastrad out_spin: 1208 1.1 riastrad igt_spinner_fini(&spin); 1209 1.1 riastrad return err; 1210 1.1 riastrad } 1211 1.1 riastrad 1212 1.1 riastrad static int live_parallel_engines(void *arg) 1213 1.1 riastrad { 1214 1.1 riastrad struct drm_i915_private *i915 = arg; 1215 1.1 riastrad static int (* const func[])(void *arg) = { 1216 1.1 riastrad __live_parallel_engine1, 1217 1.1 riastrad __live_parallel_engineN, 1218 1.1 riastrad __live_parallel_spin, 1219 1.1 riastrad NULL, 1220 1.1 riastrad }; 1221 1.1 riastrad const unsigned int nengines = num_uabi_engines(i915); 1222 1.1 riastrad struct intel_engine_cs *engine; 1223 1.1 riastrad int (* const *fn)(void *arg); 1224 1.1 riastrad struct task_struct **tsk; 1225 1.1 riastrad int err = 0; 1226 1.1 riastrad 1227 1.1 riastrad /* 1228 1.1 riastrad * Check we can submit requests to all engines concurrently. This 1229 1.1 riastrad * tests that we load up the system maximally. 1230 1.1 riastrad */ 1231 1.1 riastrad 1232 1.1 riastrad tsk = kcalloc(nengines, sizeof(*tsk), GFP_KERNEL); 1233 1.1 riastrad if (!tsk) 1234 1.1 riastrad return -ENOMEM; 1235 1.1 riastrad 1236 1.1 riastrad for (fn = func; !err && *fn; fn++) { 1237 1.1 riastrad char name[KSYM_NAME_LEN]; 1238 1.1 riastrad struct igt_live_test t; 1239 1.1 riastrad unsigned int idx; 1240 1.1 riastrad 1241 1.1 riastrad snprintf(name, sizeof(name), "%pS", fn); 1242 1.1 riastrad err = igt_live_test_begin(&t, i915, __func__, name); 1243 1.1 riastrad if (err) 1244 1.1 riastrad break; 1245 1.1 riastrad 1246 1.1 riastrad atomic_set(&i915->selftest.counter, nengines); 1247 1.1 riastrad 1248 1.1 riastrad idx = 0; 1249 1.1 riastrad for_each_uabi_engine(engine, i915) { 1250 1.1 riastrad tsk[idx] = kthread_run(*fn, engine, 1251 1.1 riastrad "igt/parallel:%s", 1252 1.1 riastrad engine->name); 1253 1.1 riastrad if (IS_ERR(tsk[idx])) { 1254 1.1 riastrad err = PTR_ERR(tsk[idx]); 1255 1.1 riastrad break; 1256 1.1 riastrad } 1257 1.1 riastrad get_task_struct(tsk[idx++]); 1258 1.1 riastrad } 1259 1.1 riastrad 1260 1.1 riastrad yield(); /* start all threads before we kthread_stop() */ 1261 1.1 riastrad 1262 1.1 riastrad idx = 0; 1263 1.1 riastrad for_each_uabi_engine(engine, i915) { 1264 1.1 riastrad int status; 1265 1.1 riastrad 1266 1.1 riastrad if (IS_ERR(tsk[idx])) 1267 1.1 riastrad break; 1268 1.1 riastrad 1269 1.1 riastrad status = kthread_stop(tsk[idx]); 1270 1.1 riastrad if (status && !err) 1271 1.1 riastrad err = status; 1272 1.1 riastrad 1273 1.1 riastrad put_task_struct(tsk[idx++]); 1274 1.1 riastrad } 1275 1.1 riastrad 1276 1.1 riastrad if (igt_live_test_end(&t)) 1277 1.1 riastrad err = -EIO; 1278 1.1 riastrad } 1279 1.1 riastrad 1280 1.1 riastrad kfree(tsk); 1281 1.1 riastrad return err; 1282 1.1 riastrad } 1283 1.1 riastrad 1284 1.1 riastrad static int 1285 1.1 riastrad max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine) 1286 1.1 riastrad { 1287 1.1 riastrad struct i915_request *rq; 1288 1.1 riastrad int ret; 1289 1.1 riastrad 1290 1.1 riastrad /* 1291 1.1 riastrad * Before execlists, all contexts share the same ringbuffer. With 1292 1.1 riastrad * execlists, each context/engine has a separate ringbuffer and 1293 1.1 riastrad * for the purposes of this test, inexhaustible. 1294 1.1 riastrad * 1295 1.1 riastrad * For the global ringbuffer though, we have to be very careful 1296 1.1 riastrad * that we do not wrap while preventing the execution of requests 1297 1.1 riastrad * with a unsignaled fence. 1298 1.1 riastrad */ 1299 1.1 riastrad if (HAS_EXECLISTS(ctx->i915)) 1300 1.1 riastrad return INT_MAX; 1301 1.1 riastrad 1302 1.1 riastrad rq = igt_request_alloc(ctx, engine); 1303 1.1 riastrad if (IS_ERR(rq)) { 1304 1.1 riastrad ret = PTR_ERR(rq); 1305 1.1 riastrad } else { 1306 1.1 riastrad int sz; 1307 1.1 riastrad 1308 1.1 riastrad ret = rq->ring->size - rq->reserved_space; 1309 1.1 riastrad i915_request_add(rq); 1310 1.1 riastrad 1311 1.1 riastrad sz = rq->ring->emit - rq->head; 1312 1.1 riastrad if (sz < 0) 1313 1.1 riastrad sz += rq->ring->size; 1314 1.1 riastrad ret /= sz; 1315 1.1 riastrad ret /= 2; /* leave half spare, in case of emergency! */ 1316 1.1 riastrad } 1317 1.1 riastrad 1318 1.1 riastrad return ret; 1319 1.1 riastrad } 1320 1.1 riastrad 1321 1.1 riastrad static int live_breadcrumbs_smoketest(void *arg) 1322 1.1 riastrad { 1323 1.1 riastrad struct drm_i915_private *i915 = arg; 1324 1.1 riastrad const unsigned int nengines = num_uabi_engines(i915); 1325 1.1 riastrad const unsigned int ncpus = num_online_cpus(); 1326 1.1 riastrad unsigned long num_waits, num_fences; 1327 1.1 riastrad struct intel_engine_cs *engine; 1328 1.1 riastrad struct task_struct **threads; 1329 1.1 riastrad struct igt_live_test live; 1330 1.1 riastrad intel_wakeref_t wakeref; 1331 1.1 riastrad struct smoketest *smoke; 1332 1.1 riastrad unsigned int n, idx; 1333 1.1 riastrad struct file *file; 1334 1.1 riastrad int ret = 0; 1335 1.1 riastrad 1336 1.1 riastrad /* 1337 1.1 riastrad * Smoketest our breadcrumb/signal handling for requests across multiple 1338 1.1 riastrad * threads. A very simple test to only catch the most egregious of bugs. 1339 1.1 riastrad * See __igt_breadcrumbs_smoketest(); 1340 1.1 riastrad * 1341 1.1 riastrad * On real hardware this time. 1342 1.1 riastrad */ 1343 1.1 riastrad 1344 1.1 riastrad wakeref = intel_runtime_pm_get(&i915->runtime_pm); 1345 1.1 riastrad 1346 1.1 riastrad file = mock_file(i915); 1347 1.1 riastrad if (IS_ERR(file)) { 1348 1.1 riastrad ret = PTR_ERR(file); 1349 1.1 riastrad goto out_rpm; 1350 1.1 riastrad } 1351 1.1 riastrad 1352 1.1 riastrad smoke = kcalloc(nengines, sizeof(*smoke), GFP_KERNEL); 1353 1.1 riastrad if (!smoke) { 1354 1.1 riastrad ret = -ENOMEM; 1355 1.1 riastrad goto out_file; 1356 1.1 riastrad } 1357 1.1 riastrad 1358 1.1 riastrad threads = kcalloc(ncpus * nengines, sizeof(*threads), GFP_KERNEL); 1359 1.1 riastrad if (!threads) { 1360 1.1 riastrad ret = -ENOMEM; 1361 1.1 riastrad goto out_smoke; 1362 1.1 riastrad } 1363 1.1 riastrad 1364 1.1 riastrad smoke[0].request_alloc = __live_request_alloc; 1365 1.1 riastrad smoke[0].ncontexts = 64; 1366 1.1 riastrad smoke[0].contexts = kcalloc(smoke[0].ncontexts, 1367 1.1 riastrad sizeof(*smoke[0].contexts), 1368 1.1 riastrad GFP_KERNEL); 1369 1.1 riastrad if (!smoke[0].contexts) { 1370 1.1 riastrad ret = -ENOMEM; 1371 1.1 riastrad goto out_threads; 1372 1.1 riastrad } 1373 1.1 riastrad 1374 1.1 riastrad for (n = 0; n < smoke[0].ncontexts; n++) { 1375 1.1 riastrad smoke[0].contexts[n] = live_context(i915, file); 1376 1.1 riastrad if (!smoke[0].contexts[n]) { 1377 1.1 riastrad ret = -ENOMEM; 1378 1.1 riastrad goto out_contexts; 1379 1.1 riastrad } 1380 1.1 riastrad } 1381 1.1 riastrad 1382 1.1 riastrad ret = igt_live_test_begin(&live, i915, __func__, ""); 1383 1.1 riastrad if (ret) 1384 1.1 riastrad goto out_contexts; 1385 1.1 riastrad 1386 1.1 riastrad idx = 0; 1387 1.1 riastrad for_each_uabi_engine(engine, i915) { 1388 1.1 riastrad smoke[idx] = smoke[0]; 1389 1.1 riastrad smoke[idx].engine = engine; 1390 1.1 riastrad smoke[idx].max_batch = 1391 1.1 riastrad max_batches(smoke[0].contexts[0], engine); 1392 1.1 riastrad if (smoke[idx].max_batch < 0) { 1393 1.1 riastrad ret = smoke[idx].max_batch; 1394 1.1 riastrad goto out_flush; 1395 1.1 riastrad } 1396 1.1 riastrad /* One ring interleaved between requests from all cpus */ 1397 1.1 riastrad smoke[idx].max_batch /= num_online_cpus() + 1; 1398 1.1 riastrad pr_debug("Limiting batches to %d requests on %s\n", 1399 1.1 riastrad smoke[idx].max_batch, engine->name); 1400 1.1 riastrad 1401 1.1 riastrad for (n = 0; n < ncpus; n++) { 1402 1.1 riastrad struct task_struct *tsk; 1403 1.1 riastrad 1404 1.1 riastrad tsk = kthread_run(__igt_breadcrumbs_smoketest, 1405 1.1 riastrad &smoke[idx], "igt/%d.%d", idx, n); 1406 1.1 riastrad if (IS_ERR(tsk)) { 1407 1.1 riastrad ret = PTR_ERR(tsk); 1408 1.1 riastrad goto out_flush; 1409 1.1 riastrad } 1410 1.1 riastrad 1411 1.1 riastrad get_task_struct(tsk); 1412 1.1 riastrad threads[idx * ncpus + n] = tsk; 1413 1.1 riastrad } 1414 1.1 riastrad 1415 1.1 riastrad idx++; 1416 1.1 riastrad } 1417 1.1 riastrad 1418 1.1 riastrad yield(); /* start all threads before we begin */ 1419 1.1 riastrad msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies)); 1420 1.1 riastrad 1421 1.1 riastrad out_flush: 1422 1.1 riastrad idx = 0; 1423 1.1 riastrad num_waits = 0; 1424 1.1 riastrad num_fences = 0; 1425 1.1 riastrad for_each_uabi_engine(engine, i915) { 1426 1.1 riastrad for (n = 0; n < ncpus; n++) { 1427 1.1 riastrad struct task_struct *tsk = threads[idx * ncpus + n]; 1428 1.1 riastrad int err; 1429 1.1 riastrad 1430 1.1 riastrad if (!tsk) 1431 1.1 riastrad continue; 1432 1.1 riastrad 1433 1.1 riastrad err = kthread_stop(tsk); 1434 1.1 riastrad if (err < 0 && !ret) 1435 1.1 riastrad ret = err; 1436 1.1 riastrad 1437 1.1 riastrad put_task_struct(tsk); 1438 1.1 riastrad } 1439 1.1 riastrad 1440 1.1 riastrad num_waits += atomic_long_read(&smoke[idx].num_waits); 1441 1.1 riastrad num_fences += atomic_long_read(&smoke[idx].num_fences); 1442 1.1 riastrad idx++; 1443 1.1 riastrad } 1444 1.1 riastrad pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n", 1445 1.1 riastrad num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus); 1446 1.1 riastrad 1447 1.1 riastrad ret = igt_live_test_end(&live) ?: ret; 1448 1.1 riastrad out_contexts: 1449 1.1 riastrad kfree(smoke[0].contexts); 1450 1.1 riastrad out_threads: 1451 1.1 riastrad kfree(threads); 1452 1.1 riastrad out_smoke: 1453 1.1 riastrad kfree(smoke); 1454 1.1 riastrad out_file: 1455 1.1 riastrad fput(file); 1456 1.1 riastrad out_rpm: 1457 1.1 riastrad intel_runtime_pm_put(&i915->runtime_pm, wakeref); 1458 1.1 riastrad 1459 1.1 riastrad return ret; 1460 1.1 riastrad } 1461 1.1 riastrad 1462 1.1 riastrad int i915_request_live_selftests(struct drm_i915_private *i915) 1463 1.1 riastrad { 1464 1.1 riastrad static const struct i915_subtest tests[] = { 1465 1.1 riastrad SUBTEST(live_nop_request), 1466 1.1 riastrad SUBTEST(live_all_engines), 1467 1.1 riastrad SUBTEST(live_sequential_engines), 1468 1.1 riastrad SUBTEST(live_parallel_engines), 1469 1.1 riastrad SUBTEST(live_empty_request), 1470 1.1 riastrad SUBTEST(live_breadcrumbs_smoketest), 1471 1.1 riastrad }; 1472 1.1 riastrad 1473 1.1 riastrad if (intel_gt_is_wedged(&i915->gt)) 1474 1.1 riastrad return 0; 1475 1.1 riastrad 1476 1.1 riastrad return i915_subtests(tests, i915); 1477 1.1 riastrad } 1478