/*	$NetBSD: i915_gem_context.c,v 1.7 2022/09/01 11:49:23 riastradh Exp $	*/

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright 2011-2012 Intel Corporation
 */

/*
 * This file implements HW context support. On gen5+ a HW context consists of an
 * opaque GPU object which is referenced at times of context saves and restores.
 * With RC6 enabled, the context is also referenced as the GPU enters and exits
 * RC6 (the GPU has its own internal power context, except on gen5). Though
 * something like a context does exist for the media ring, the code only
 * supports contexts for the render ring.
 *
 * In software, there is a distinction between contexts created by the user,
 * and the default HW context. The default HW context is used by GPU clients
 * that do not request setup of their own hardware context. The default
 * context's state is never restored to help prevent programming errors. This
 * would happen if a client ran and piggy-backed off another client's GPU state.
 * The default context only exists to give the GPU some offset to load as the
 * current one in order to invoke a save of the context we actually care about.
 * In fact, the code could likely be constructed, albeit in a more complicated
 * fashion, to never use the default context, though that limits the driver's
 * ability to swap out, and/or destroy other contexts.
 *
 * All other contexts are created as a request by the GPU client. These contexts
 * store GPU state, and thus allow GPU clients to not re-emit state (and
 * potentially query certain state) at any time. The kernel driver makes
 * certain that the appropriate commands are inserted.
 *
 * The context life cycle is semi-complicated in that context BOs may live
 * longer than the context itself because of the way the hardware and object
 * tracking work. Below is a very crude representation of the state machine
 * describing the context life.
 *                                         refcount     pincount     active
 * S0: initial state                          0            0           0
 * S1: context created                        1            0           0
 * S2: context is currently running           2            1           X
 * S3: GPU referenced, but not current        2            0           1
 * S4: context is current, but destroyed      1            1           0
 * S5: like S3, but destroyed                 1            0           1
 *
 * The most common (but not all) transitions:
 * S0->S1: client creates a context
 * S1->S2: client submits an execbuf with the context
 * S2->S3: another client submits an execbuf with the context
 * S3->S1: context object was retired
 * S3->S2: client submits another execbuf
 * S2->S4: context destroy called with current context
 * S3->S5->S0: destroy path
 * S4->S5->S0: destroy path on current context
 *
 * There are two confusing terms used above:
 * The "current context" means the context which is currently running on the
 * GPU. The GPU has loaded its state already and has stored away the gtt
 * offset of the BO. The GPU is not actively referencing the data at this
 * offset, but it will on the next context switch. The only way to avoid this
 * is to do a GPU reset.
 *
 * An "active context" is one which was previously the "current context" and is
 * on the active list waiting for the next context switch to occur. Until this
 * happens, the object must remain at the same gtt offset. It is therefore
 * possible to destroy a context while it is still active.
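 *
 * As an illustrative (not exhaustive) reading of the table above: a context
 * that is created, used for a single execbuf and then destroyed while idle
 * walks S0->S1->S2->S3->S1 and returns to S0 once the final reference is
 * dropped.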
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: i915_gem_context.c,v 1.7 2022/09/01 11:49:23 riastradh Exp $");

#include <linux/log2.h>
#include <linux/nospec.h>

#include <asm/uaccess.h>

#include <drm/i915_drm.h>

#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"

#include "i915_gem_context.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"

#include <linux/nbsd-namespace.h>

#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1

static struct i915_global_gem_context {
	struct i915_global base;
	struct kmem_cache *slab_luts;
} global;

struct i915_lut_handle *i915_lut_handle_alloc(void)
{
	return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
}

void i915_lut_handle_free(struct i915_lut_handle *lut)
{
	return kmem_cache_free(global.slab_luts, lut);
}

static void lut_close(struct i915_gem_context *ctx)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	lockdep_assert_held(&ctx->mutex);

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
		struct i915_vma *vma = rcu_dereference_raw(*slot);
		struct drm_i915_gem_object *obj = vma->obj;
		struct i915_lut_handle *lut;

		if (!kref_get_unless_zero(&obj->base.refcount))
			continue;

		rcu_read_unlock();
		i915_gem_object_lock(obj);
		list_for_each_entry(lut, &obj->lut_list, obj_link) {
			if (lut->ctx != ctx)
				continue;

			if (lut->handle != iter.index)
				continue;

			list_del(&lut->obj_link);
			break;
		}
		i915_gem_object_unlock(obj);
		rcu_read_lock();

		if (&lut->obj_link != &obj->lut_list) {
			i915_lut_handle_free(lut);
			radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
			if (atomic_dec_and_test(&vma->open_count) &&
			    !i915_vma_is_ggtt(vma))
				i915_vma_close(vma);
			i915_gem_object_put(obj);
		}

		i915_gem_object_put(obj);
	}
	rcu_read_unlock();
}

static struct intel_context *
lookup_user_engine(struct i915_gem_context *ctx,
		   unsigned long flags,
		   const struct i915_engine_class_instance *ci)
#define LOOKUP_USER_INDEX BIT(0)
{
	int idx;

	if (!!(flags & LOOKUP_USER_INDEX) != i915_gem_context_user_engines(ctx))
		return ERR_PTR(-EINVAL);

	if (!i915_gem_context_user_engines(ctx)) {
		struct intel_engine_cs *engine;

		engine = intel_engine_lookup_user(ctx->i915,
						  ci->engine_class,
						  ci->engine_instance);
		if (!engine)
			return ERR_PTR(-EINVAL);

		idx = engine->legacy_idx;
	} else {
		idx = ci->engine_instance;
	}

	return i915_gem_context_get_engine(ctx, idx);
}

static struct i915_address_space *
context_get_vm_rcu(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(!rcu_access_pointer(ctx->vm));

	do {
		struct i915_address_space *vm;

		/*
		 * We do not allow downgrading from full-ppgtt [to a shared
		 * global gtt], so ctx->vm cannot become NULL.
		 */
		vm = rcu_dereference(ctx->vm);
		if (!kref_get_unless_zero(&vm->ref))
			continue;

		/*
		 * This ppgtt may have been reallocated between
		 * the read and the kref, and reassigned to a third
		 * context.
In order to avoid inadvertent sharing 204 * of this ppgtt with that third context (and not 205 * src), we have to confirm that we have the same 206 * ppgtt after passing through the strong memory 207 * barrier implied by a successful 208 * kref_get_unless_zero(). 209 * 210 * Once we have acquired the current ppgtt of ctx, 211 * we no longer care if it is released from ctx, as 212 * it cannot be reallocated elsewhere. 213 */ 214 215 if (vm == rcu_access_pointer(ctx->vm)) 216 return rcu_pointer_handoff(vm); 217 218 i915_vm_put(vm); 219 } while (1); 220 } 221 222 static void intel_context_set_gem(struct intel_context *ce, 223 struct i915_gem_context *ctx) 224 { 225 GEM_BUG_ON(rcu_access_pointer(ce->gem_context)); 226 RCU_INIT_POINTER(ce->gem_context, ctx); 227 228 if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) 229 ce->ring = __intel_context_ring_size(SZ_16K); 230 231 if (rcu_access_pointer(ctx->vm)) { 232 struct i915_address_space *vm; 233 234 rcu_read_lock(); 235 vm = context_get_vm_rcu(ctx); /* hmm */ 236 rcu_read_unlock(); 237 238 i915_vm_put(ce->vm); 239 ce->vm = vm; 240 } 241 242 GEM_BUG_ON(ce->timeline); 243 if (ctx->timeline) 244 ce->timeline = intel_timeline_get(ctx->timeline); 245 246 if (ctx->sched.priority >= I915_PRIORITY_NORMAL && 247 intel_engine_has_semaphores(ce->engine)) 248 __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags); 249 } 250 251 static void __free_engines(struct i915_gem_engines *e, unsigned int count) 252 { 253 while (count--) { 254 if (!e->engines[count]) 255 continue; 256 257 RCU_INIT_POINTER(e->engines[count]->gem_context, NULL); 258 intel_context_put(e->engines[count]); 259 } 260 kfree(e); 261 } 262 263 static void free_engines(struct i915_gem_engines *e) 264 { 265 __free_engines(e, e->num_engines); 266 } 267 268 static void free_engines_rcu(struct rcu_head *rcu) 269 { 270 free_engines(container_of(rcu, struct i915_gem_engines, rcu)); 271 } 272 273 static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx) 274 { 275 const struct intel_gt *gt = &ctx->i915->gt; 276 struct intel_engine_cs *engine; 277 struct i915_gem_engines *e; 278 enum intel_engine_id id; 279 280 e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL); 281 if (!e) 282 return ERR_PTR(-ENOMEM); 283 284 init_rcu_head(&e->rcu); 285 for_each_engine(engine, gt, id) { 286 struct intel_context *ce; 287 288 if (engine->legacy_idx == INVALID_ENGINE) 289 continue; 290 291 GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES); 292 GEM_BUG_ON(e->engines[engine->legacy_idx]); 293 294 ce = intel_context_create(engine); 295 if (IS_ERR(ce)) { 296 __free_engines(e, e->num_engines + 1); 297 return ERR_CAST(ce); 298 } 299 300 intel_context_set_gem(ce, ctx); 301 302 e->engines[engine->legacy_idx] = ce; 303 e->num_engines = max(e->num_engines, engine->legacy_idx); 304 } 305 e->num_engines++; 306 307 return e; 308 } 309 310 static void i915_gem_context_free(struct i915_gem_context *ctx) 311 { 312 GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); 313 314 spin_lock(&ctx->i915->gem.contexts.lock); 315 list_del(&ctx->link); 316 spin_unlock(&ctx->i915->gem.contexts.lock); 317 318 free_engines(rcu_access_pointer(ctx->engines)); 319 mutex_destroy(&ctx->engines_mutex); 320 321 if (ctx->timeline) 322 intel_timeline_put(ctx->timeline); 323 324 #ifndef __NetBSD__ 325 put_pid(ctx->pid); 326 #endif 327 mutex_destroy(&ctx->mutex); 328 329 kfree_rcu(ctx, rcu); 330 } 331 332 static void contexts_free_all(struct llist_node *list) 333 { 334 struct i915_gem_context *ctx, *cn; 335 336 llist_for_each_entry_safe(ctx, cn, 
list, free_link) 337 i915_gem_context_free(ctx); 338 } 339 340 static void contexts_flush_free(struct i915_gem_contexts *gc) 341 { 342 contexts_free_all(llist_del_all(&gc->free_list)); 343 } 344 345 static void contexts_free_worker(struct work_struct *work) 346 { 347 struct i915_gem_contexts *gc = 348 container_of(work, typeof(*gc), free_work); 349 350 contexts_flush_free(gc); 351 } 352 353 void i915_gem_context_release(struct kref *ref) 354 { 355 struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); 356 struct i915_gem_contexts *gc = &ctx->i915->gem.contexts; 357 358 trace_i915_context_free(ctx); 359 if (llist_add(&ctx->free_link, &gc->free_list)) 360 schedule_work(&gc->free_work); 361 } 362 363 static inline struct i915_gem_engines * 364 __context_engines_static(const struct i915_gem_context *ctx) 365 { 366 return rcu_dereference_protected(ctx->engines, true); 367 } 368 369 static bool __reset_engine(struct intel_engine_cs *engine) 370 { 371 struct intel_gt *gt = engine->gt; 372 bool success = false; 373 374 if (!intel_has_reset_engine(gt)) 375 return false; 376 377 if (!test_and_set_bit(I915_RESET_ENGINE + engine->id, 378 >->reset.flags)) { 379 success = intel_engine_reset(engine, NULL) == 0; 380 clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, 381 >->reset.flags); 382 } 383 384 return success; 385 } 386 387 static void __reset_context(struct i915_gem_context *ctx, 388 struct intel_engine_cs *engine) 389 { 390 intel_gt_handle_error(engine->gt, engine->mask, 0, 391 "context closure in %s", ctx->name); 392 } 393 394 static bool __cancel_engine(struct intel_engine_cs *engine) 395 { 396 /* 397 * Send a "high priority pulse" down the engine to cause the 398 * current request to be momentarily preempted. (If it fails to 399 * be preempted, it will be reset). As we have marked our context 400 * as banned, any incomplete request, including any running, will 401 * be skipped following the preemption. 402 * 403 * If there is no hangchecking (one of the reasons why we try to 404 * cancel the context) and no forced preemption, there may be no 405 * means by which we reset the GPU and evict the persistent hog. 406 * Ergo if we are unable to inject a preemptive pulse that can 407 * kill the banned context, we fallback to doing a local reset 408 * instead. 409 */ 410 if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) && 411 !intel_engine_pulse(engine)) 412 return true; 413 414 /* If we are unable to send a pulse, try resetting this engine. */ 415 return __reset_engine(engine); 416 } 417 418 static struct intel_engine_cs *__active_engine(struct i915_request *rq) 419 { 420 struct intel_engine_cs *engine, *locked; 421 422 /* 423 * Serialise with __i915_request_submit() so that it sees 424 * is-banned?, or we know the request is already inflight. 
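	 *
	 * Note that rq->engine may change under us (e.g. if the request is
	 * shuffled between the siblings of a virtual engine), so the loop
	 * below re-reads rq->engine after taking each candidate engine's
	 * active.lock and only proceeds once the lock held matches the
	 * engine the request currently claims.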
425 */ 426 locked = READ_ONCE(rq->engine); 427 spin_lock_irq(&locked->active.lock); 428 while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) { 429 spin_unlock(&locked->active.lock); 430 spin_lock(&engine->active.lock); 431 locked = engine; 432 } 433 434 engine = NULL; 435 if (i915_request_is_active(rq) && !rq->fence.error) 436 engine = rq->engine; 437 438 spin_unlock_irq(&locked->active.lock); 439 440 return engine; 441 } 442 443 static struct intel_engine_cs *active_engine(struct intel_context *ce) 444 { 445 struct intel_engine_cs *engine = NULL; 446 struct i915_request *rq; 447 448 if (!ce->timeline) 449 return NULL; 450 451 mutex_lock(&ce->timeline->mutex); 452 list_for_each_entry_reverse(rq, &ce->timeline->requests, link) { 453 if (i915_request_completed(rq)) 454 break; 455 456 /* Check with the backend if the request is inflight */ 457 engine = __active_engine(rq); 458 if (engine) 459 break; 460 } 461 mutex_unlock(&ce->timeline->mutex); 462 463 return engine; 464 } 465 466 static void kill_context(struct i915_gem_context *ctx) 467 { 468 struct i915_gem_engines_iter it; 469 struct intel_context *ce; 470 471 /* 472 * Map the user's engine back to the actual engines; one virtual 473 * engine will be mapped to multiple engines, and using ctx->engine[] 474 * the same engine may be have multiple instances in the user's map. 475 * However, we only care about pending requests, so only include 476 * engines on which there are incomplete requests. 477 */ 478 for_each_gem_engine(ce, __context_engines_static(ctx), it) { 479 struct intel_engine_cs *engine; 480 481 if (intel_context_set_banned(ce)) 482 continue; 483 484 /* 485 * Check the current active state of this context; if we 486 * are currently executing on the GPU we need to evict 487 * ourselves. On the other hand, if we haven't yet been 488 * submitted to the GPU or if everything is complete, 489 * we have nothing to do. 490 */ 491 engine = active_engine(ce); 492 493 /* First attempt to gracefully cancel the context */ 494 if (engine && !__cancel_engine(engine)) 495 /* 496 * If we are unable to send a preemptive pulse to bump 497 * the context from the GPU, we have to resort to a full 498 * reset. We hope the collateral damage is worth it. 499 */ 500 __reset_context(ctx, engine); 501 } 502 } 503 504 static void set_closed_name(struct i915_gem_context *ctx) 505 { 506 char *s; 507 508 /* Replace '[]' with '<>' to indicate closed in debug prints */ 509 510 s = strrchr(ctx->name, '['); 511 if (!s) 512 return; 513 514 *s = '<'; 515 516 s = strchr(s + 1, ']'); 517 if (s) 518 *s = '>'; 519 } 520 521 static void context_close(struct i915_gem_context *ctx) 522 { 523 struct i915_address_space *vm; 524 525 i915_gem_context_set_closed(ctx); 526 set_closed_name(ctx); 527 528 mutex_lock(&ctx->mutex); 529 530 vm = i915_gem_context_vm(ctx); 531 if (vm) 532 i915_vm_close(vm); 533 534 ctx->file_priv = ERR_PTR(-EBADF); 535 536 /* 537 * The LUT uses the VMA as a backpointer to unref the object, 538 * so we need to clear the LUT before we close all the VMA (inside 539 * the ppgtt). 540 */ 541 lut_close(ctx); 542 543 mutex_unlock(&ctx->mutex); 544 545 /* 546 * If the user has disabled hangchecking, we can not be sure that 547 * the batches will ever complete after the context is closed, 548 * keeping the context and all resources pinned forever. So in this 549 * case we opt to forcibly kill off all remaining requests on 550 * context close. 
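	 *
	 * The same treatment is applied below to contexts that have been
	 * explicitly marked non-persistent, independent of the hangcheck
	 * setting.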
551 */ 552 if (!i915_gem_context_is_persistent(ctx) || 553 !i915_modparams.enable_hangcheck) 554 kill_context(ctx); 555 556 i915_gem_context_put(ctx); 557 } 558 559 static int __context_set_persistence(struct i915_gem_context *ctx, bool state) 560 { 561 if (i915_gem_context_is_persistent(ctx) == state) 562 return 0; 563 564 if (state) { 565 /* 566 * Only contexts that are short-lived [that will expire or be 567 * reset] are allowed to survive past termination. We require 568 * hangcheck to ensure that the persistent requests are healthy. 569 */ 570 if (!i915_modparams.enable_hangcheck) 571 return -EINVAL; 572 573 i915_gem_context_set_persistence(ctx); 574 } else { 575 /* To cancel a context we use "preempt-to-idle" */ 576 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) 577 return -ENODEV; 578 579 /* 580 * If the cancel fails, we then need to reset, cleanly! 581 * 582 * If the per-engine reset fails, all hope is lost! We resort 583 * to a full GPU reset in that unlikely case, but realistically 584 * if the engine could not reset, the full reset does not fare 585 * much better. The damage has been done. 586 * 587 * However, if we cannot reset an engine by itself, we cannot 588 * cleanup a hanging persistent context without causing 589 * colateral damage, and we should not pretend we can by 590 * exposing the interface. 591 */ 592 if (!intel_has_reset_engine(&ctx->i915->gt)) 593 return -ENODEV; 594 595 i915_gem_context_clear_persistence(ctx); 596 } 597 598 return 0; 599 } 600 601 static struct i915_gem_context * 602 __create_context(struct drm_i915_private *i915) 603 { 604 struct i915_gem_context *ctx; 605 struct i915_gem_engines *e; 606 int err; 607 int i; 608 609 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 610 if (!ctx) 611 return ERR_PTR(-ENOMEM); 612 613 kref_init(&ctx->ref); 614 ctx->i915 = i915; 615 ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL); 616 mutex_init(&ctx->mutex); 617 618 mutex_init(&ctx->engines_mutex); 619 e = default_engines(ctx); 620 if (IS_ERR(e)) { 621 err = PTR_ERR(e); 622 goto err_free; 623 } 624 RCU_INIT_POINTER(ctx->engines, e); 625 626 INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); 627 628 /* NB: Mark all slices as needing a remap so that when the context first 629 * loads it will restore whatever remap state already exists. If there 630 * is no remap info, it will be a NOP. */ 631 ctx->remap_slice = ALL_L3_SLICES(i915); 632 633 i915_gem_context_set_bannable(ctx); 634 i915_gem_context_set_recoverable(ctx); 635 __context_set_persistence(ctx, true /* cgroup hook? 
*/); 636 637 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) 638 ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; 639 640 spin_lock(&i915->gem.contexts.lock); 641 list_add_tail(&ctx->link, &i915->gem.contexts.list); 642 spin_unlock(&i915->gem.contexts.lock); 643 644 return ctx; 645 646 err_free: 647 kfree(ctx); 648 return ERR_PTR(err); 649 } 650 651 static void 652 context_apply_all(struct i915_gem_context *ctx, 653 void (*fn)(struct intel_context *ce, void *data), 654 void *data) 655 { 656 struct i915_gem_engines_iter it; 657 struct intel_context *ce; 658 659 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) 660 fn(ce, data); 661 i915_gem_context_unlock_engines(ctx); 662 } 663 664 static void __apply_ppgtt(struct intel_context *ce, void *vm) 665 { 666 i915_vm_put(ce->vm); 667 ce->vm = i915_vm_get(vm); 668 } 669 670 static struct i915_address_space * 671 __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm) 672 { 673 struct i915_address_space *old = i915_gem_context_vm(ctx); 674 675 GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old)); 676 677 rcu_assign_pointer(ctx->vm, i915_vm_open(vm)); 678 context_apply_all(ctx, __apply_ppgtt, vm); 679 680 return old; 681 } 682 683 static void __assign_ppgtt(struct i915_gem_context *ctx, 684 struct i915_address_space *vm) 685 { 686 if (vm == rcu_access_pointer(ctx->vm)) 687 return; 688 689 vm = __set_ppgtt(ctx, vm); 690 if (vm) 691 i915_vm_close(vm); 692 } 693 694 static void __set_timeline(struct intel_timeline **dst, 695 struct intel_timeline *src) 696 { 697 struct intel_timeline *old = *dst; 698 699 *dst = src ? intel_timeline_get(src) : NULL; 700 701 if (old) 702 intel_timeline_put(old); 703 } 704 705 static void __apply_timeline(struct intel_context *ce, void *timeline) 706 { 707 __set_timeline(&ce->timeline, timeline); 708 } 709 710 static void __assign_timeline(struct i915_gem_context *ctx, 711 struct intel_timeline *timeline) 712 { 713 __set_timeline(&ctx->timeline, timeline); 714 context_apply_all(ctx, __apply_timeline, timeline); 715 } 716 717 static struct i915_gem_context * 718 i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) 719 { 720 struct i915_gem_context *ctx; 721 722 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE && 723 !HAS_EXECLISTS(i915)) 724 return ERR_PTR(-EINVAL); 725 726 /* Reap the stale contexts */ 727 contexts_flush_free(&i915->gem.contexts); 728 729 ctx = __create_context(i915); 730 if (IS_ERR(ctx)) 731 return ctx; 732 733 if (HAS_FULL_PPGTT(i915)) { 734 struct i915_ppgtt *ppgtt; 735 736 ppgtt = i915_ppgtt_create(&i915->gt); 737 if (IS_ERR(ppgtt)) { 738 DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", 739 PTR_ERR(ppgtt)); 740 context_close(ctx); 741 return ERR_CAST(ppgtt); 742 } 743 744 mutex_lock(&ctx->mutex); 745 __assign_ppgtt(ctx, &ppgtt->vm); 746 mutex_unlock(&ctx->mutex); 747 748 i915_vm_put(&ppgtt->vm); 749 } 750 751 if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { 752 struct intel_timeline *timeline; 753 754 timeline = intel_timeline_create(&i915->gt, NULL); 755 if (IS_ERR(timeline)) { 756 context_close(ctx); 757 return ERR_CAST(timeline); 758 } 759 760 __assign_timeline(ctx, timeline); 761 intel_timeline_put(timeline); 762 } 763 764 trace_i915_context_create(ctx); 765 766 return ctx; 767 } 768 769 static void init_contexts(struct i915_gem_contexts *gc) 770 { 771 spin_lock_init(&gc->lock); 772 INIT_LIST_HEAD(&gc->list); 773 774 INIT_WORK(&gc->free_work, contexts_free_worker); 775 init_llist_head(&gc->free_list); 776 
} 777 778 void i915_gem_init__contexts(struct drm_i915_private *i915) 779 { 780 init_contexts(&i915->gem.contexts); 781 DRM_DEBUG_DRIVER("%s context support initialized\n", 782 DRIVER_CAPS(i915)->has_logical_contexts ? 783 "logical" : "fake"); 784 } 785 786 void i915_gem_driver_release__contexts(struct drm_i915_private *i915) 787 { 788 flush_work(&i915->gem.contexts.free_work); 789 spin_lock_destroy(&i915->gem.contexts.lock); 790 } 791 792 static int vm_idr_cleanup(int id, void *p, void *data) 793 { 794 i915_vm_put(p); 795 return 0; 796 } 797 798 static int gem_context_register(struct i915_gem_context *ctx, 799 struct drm_i915_file_private *fpriv, 800 u32 *id) 801 { 802 struct i915_address_space *vm; 803 int ret; 804 805 ctx->file_priv = fpriv; 806 807 mutex_lock(&ctx->mutex); 808 vm = i915_gem_context_vm(ctx); 809 if (vm) 810 WRITE_ONCE(vm->file, fpriv); /* XXX */ 811 mutex_unlock(&ctx->mutex); 812 813 #ifdef __NetBSD__ 814 ctx->pid = NULL; 815 #else 816 ctx->pid = get_task_pid(current, PIDTYPE_PID); 817 #endif 818 #ifdef __NetBSD__ 819 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]", 820 curproc->p_comm, (int)curproc->p_pid); 821 #else 822 snprintf(ctx->name, sizeof(ctx->name), "%s[%d]", 823 current->comm, pid_nr(ctx->pid)); 824 #endif 825 826 /* And finally expose ourselves to userspace via the idr */ 827 ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL); 828 #ifndef __NetBSD__ 829 if (ret) 830 put_pid(fetch_and_zero(&ctx->pid)); 831 #endif 832 833 return ret; 834 } 835 836 int i915_gem_context_open(struct drm_i915_private *i915, 837 struct drm_file *file) 838 { 839 struct drm_i915_file_private *file_priv = file->driver_priv; 840 struct i915_gem_context *ctx; 841 int err; 842 u32 id; 843 844 xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC); 845 846 mutex_init(&file_priv->vm_idr_lock); 847 idr_init_base(&file_priv->vm_idr, 1); 848 849 ctx = i915_gem_create_context(i915, 0); 850 if (IS_ERR(ctx)) { 851 err = PTR_ERR(ctx); 852 goto err; 853 } 854 855 err = gem_context_register(ctx, file_priv, &id); 856 if (err < 0) 857 goto err_ctx; 858 859 GEM_BUG_ON(id); 860 return 0; 861 862 err_ctx: 863 context_close(ctx); 864 err: 865 idr_destroy(&file_priv->vm_idr); 866 xa_destroy(&file_priv->context_xa); 867 mutex_destroy(&file_priv->vm_idr_lock); 868 return err; 869 } 870 871 void i915_gem_context_close(struct drm_file *file) 872 { 873 struct drm_i915_file_private *file_priv = file->driver_priv; 874 struct drm_i915_private *i915 = file_priv->dev_priv; 875 struct i915_gem_context *ctx; 876 unsigned long idx; 877 878 xa_for_each(&file_priv->context_xa, idx, ctx) 879 context_close(ctx); 880 xa_destroy(&file_priv->context_xa); 881 882 idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL); 883 idr_destroy(&file_priv->vm_idr); 884 mutex_destroy(&file_priv->vm_idr_lock); 885 886 contexts_flush_free(&i915->gem.contexts); 887 } 888 889 int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, 890 struct drm_file *file) 891 { 892 struct drm_i915_private *i915 = to_i915(dev); 893 struct drm_i915_gem_vm_control *args = data; 894 struct drm_i915_file_private *file_priv = file->driver_priv; 895 struct i915_ppgtt *ppgtt; 896 int err; 897 898 if (!HAS_FULL_PPGTT(i915)) 899 return -ENODEV; 900 901 if (args->flags) 902 return -EINVAL; 903 904 ppgtt = i915_ppgtt_create(&i915->gt); 905 if (IS_ERR(ppgtt)) 906 return PTR_ERR(ppgtt); 907 908 ppgtt->vm.file = file_priv; 909 910 if (args->extensions) { 911 err = i915_user_extensions(u64_to_user_ptr(args->extensions), 912 NULL, 0, 913 ppgtt); 
914 if (err) 915 goto err_put; 916 } 917 918 idr_preload(GFP_KERNEL); 919 err = mutex_lock_interruptible(&file_priv->vm_idr_lock); 920 if (err) 921 goto err_put; 922 923 err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL); 924 if (err < 0) 925 goto err_unlock; 926 927 GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */ 928 929 mutex_unlock(&file_priv->vm_idr_lock); 930 idr_preload_end(); 931 932 args->vm_id = err; 933 return 0; 934 935 err_unlock: 936 mutex_unlock(&file_priv->vm_idr_lock); 937 idr_preload_end(); 938 err_put: 939 i915_vm_put(&ppgtt->vm); 940 return err; 941 } 942 943 int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data, 944 struct drm_file *file) 945 { 946 struct drm_i915_file_private *file_priv = file->driver_priv; 947 struct drm_i915_gem_vm_control *args = data; 948 struct i915_address_space *vm; 949 int err; 950 u32 id; 951 952 if (args->flags) 953 return -EINVAL; 954 955 if (args->extensions) 956 return -EINVAL; 957 958 id = args->vm_id; 959 if (!id) 960 return -ENOENT; 961 962 err = mutex_lock_interruptible(&file_priv->vm_idr_lock); 963 if (err) 964 return err; 965 966 vm = idr_remove(&file_priv->vm_idr, id); 967 968 mutex_unlock(&file_priv->vm_idr_lock); 969 if (!vm) 970 return -ENOENT; 971 972 i915_vm_put(vm); 973 return 0; 974 } 975 976 struct context_barrier_task { 977 struct i915_active base; 978 void (*task)(void *data); 979 void *data; 980 }; 981 982 __i915_active_call 983 static void cb_retire(struct i915_active *base) 984 { 985 struct context_barrier_task *cb = container_of(base, typeof(*cb), base); 986 987 if (cb->task) 988 cb->task(cb->data); 989 990 i915_active_fini(&cb->base); 991 kfree(cb); 992 } 993 994 I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault); 995 static int context_barrier_task(struct i915_gem_context *ctx, 996 intel_engine_mask_t engines, 997 bool (*skip)(struct intel_context *ce, void *data), 998 int (*emit)(struct i915_request *rq, void *data), 999 void (*task)(void *data), 1000 void *data) 1001 { 1002 struct context_barrier_task *cb; 1003 struct i915_gem_engines_iter it; 1004 struct intel_context *ce; 1005 int err = 0; 1006 1007 GEM_BUG_ON(!task); 1008 1009 cb = kmalloc(sizeof(*cb), GFP_KERNEL); 1010 if (!cb) 1011 return -ENOMEM; 1012 1013 i915_active_init(&cb->base, NULL, cb_retire); 1014 err = i915_active_acquire(&cb->base); 1015 if (err) { 1016 kfree(cb); 1017 return err; 1018 } 1019 1020 for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { 1021 struct i915_request *rq; 1022 1023 if (I915_SELFTEST_ONLY(context_barrier_inject_fault & 1024 ce->engine->mask)) { 1025 err = -ENXIO; 1026 break; 1027 } 1028 1029 if (!(ce->engine->mask & engines)) 1030 continue; 1031 1032 if (skip && skip(ce, data)) 1033 continue; 1034 1035 rq = intel_context_create_request(ce); 1036 if (IS_ERR(rq)) { 1037 err = PTR_ERR(rq); 1038 break; 1039 } 1040 1041 err = 0; 1042 if (emit) 1043 err = emit(rq, data); 1044 if (err == 0) 1045 err = i915_active_add_request(&cb->base, rq); 1046 1047 i915_request_add(rq); 1048 if (err) 1049 break; 1050 } 1051 i915_gem_context_unlock_engines(ctx); 1052 1053 cb->task = err ? 
NULL : task; /* caller needs to unwind instead */ 1054 cb->data = data; 1055 1056 i915_active_release(&cb->base); 1057 1058 return err; 1059 } 1060 1061 static int get_ppgtt(struct drm_i915_file_private *file_priv, 1062 struct i915_gem_context *ctx, 1063 struct drm_i915_gem_context_param *args) 1064 { 1065 struct i915_address_space *vm; 1066 int ret; 1067 1068 if (!rcu_access_pointer(ctx->vm)) 1069 return -ENODEV; 1070 1071 rcu_read_lock(); 1072 vm = context_get_vm_rcu(ctx); 1073 rcu_read_unlock(); 1074 1075 idr_preload(GFP_KERNEL); 1076 ret = mutex_lock_interruptible(&file_priv->vm_idr_lock); 1077 if (ret) 1078 goto err_put; 1079 1080 ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL); 1081 GEM_BUG_ON(!ret); 1082 if (ret < 0) 1083 goto err_unlock; 1084 1085 i915_vm_open(vm); 1086 1087 args->size = 0; 1088 args->value = ret; 1089 1090 ret = 0; 1091 err_unlock: 1092 mutex_unlock(&file_priv->vm_idr_lock); 1093 idr_preload_end(); 1094 err_put: 1095 i915_vm_put(vm); 1096 return ret; 1097 } 1098 1099 static void set_ppgtt_barrier(void *data) 1100 { 1101 struct i915_address_space *old = data; 1102 1103 if (INTEL_GEN(old->i915) < 8) 1104 gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old)); 1105 1106 i915_vm_close(old); 1107 } 1108 1109 static int emit_ppgtt_update(struct i915_request *rq, void *data) 1110 { 1111 struct i915_address_space *vm = rq->context->vm; 1112 struct intel_engine_cs *engine = rq->engine; 1113 u32 base = engine->mmio_base; 1114 u32 *cs; 1115 int i; 1116 1117 if (i915_vm_is_4lvl(vm)) { 1118 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1119 const dma_addr_t pd_daddr = px_dma(ppgtt->pd); 1120 1121 cs = intel_ring_begin(rq, 6); 1122 if (IS_ERR(cs)) 1123 return PTR_ERR(cs); 1124 1125 *cs++ = MI_LOAD_REGISTER_IMM(2); 1126 1127 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0)); 1128 *cs++ = upper_32_bits(pd_daddr); 1129 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0)); 1130 *cs++ = lower_32_bits(pd_daddr); 1131 1132 *cs++ = MI_NOOP; 1133 intel_ring_advance(rq, cs); 1134 } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) { 1135 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1136 int err; 1137 1138 /* Magic required to prevent forcewake errors! 
*/ 1139 err = engine->emit_flush(rq, EMIT_INVALIDATE); 1140 if (err) 1141 return err; 1142 1143 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); 1144 if (IS_ERR(cs)) 1145 return PTR_ERR(cs); 1146 1147 *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; 1148 for (i = GEN8_3LVL_PDPES; i--; ) { 1149 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); 1150 1151 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); 1152 *cs++ = upper_32_bits(pd_daddr); 1153 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); 1154 *cs++ = lower_32_bits(pd_daddr); 1155 } 1156 *cs++ = MI_NOOP; 1157 intel_ring_advance(rq, cs); 1158 } 1159 1160 return 0; 1161 } 1162 1163 static bool skip_ppgtt_update(struct intel_context *ce, void *data) 1164 { 1165 if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) 1166 return true; 1167 1168 if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915)) 1169 return false; 1170 1171 if (!atomic_read(&ce->pin_count)) 1172 return true; 1173 1174 /* ppGTT is not part of the legacy context image */ 1175 if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm))) 1176 return true; 1177 1178 return false; 1179 } 1180 1181 static int set_ppgtt(struct drm_i915_file_private *file_priv, 1182 struct i915_gem_context *ctx, 1183 struct drm_i915_gem_context_param *args) 1184 { 1185 struct i915_address_space *vm, *old; 1186 int err; 1187 1188 if (args->size) 1189 return -EINVAL; 1190 1191 if (!rcu_access_pointer(ctx->vm)) 1192 return -ENODEV; 1193 1194 if (upper_32_bits(args->value)) 1195 return -ENOENT; 1196 1197 rcu_read_lock(); 1198 vm = idr_find(&file_priv->vm_idr, args->value); 1199 if (vm && !kref_get_unless_zero(&vm->ref)) 1200 vm = NULL; 1201 rcu_read_unlock(); 1202 if (!vm) 1203 return -ENOENT; 1204 1205 err = mutex_lock_interruptible(&ctx->mutex); 1206 if (err) 1207 goto out; 1208 1209 if (i915_gem_context_is_closed(ctx)) { 1210 err = -ENOENT; 1211 goto unlock; 1212 } 1213 1214 if (vm == rcu_access_pointer(ctx->vm)) 1215 goto unlock; 1216 1217 /* Teardown the existing obj:vma cache, it will have to be rebuilt. */ 1218 lut_close(ctx); 1219 1220 old = __set_ppgtt(ctx, vm); 1221 1222 /* 1223 * We need to flush any requests using the current ppgtt before 1224 * we release it as the requests do not hold a reference themselves, 1225 * only indirectly through the context. 
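	 *
	 * context_barrier_task() below emits a request on every engine on
	 * which this context still has live state, and set_ppgtt_barrier()
	 * (which closes the old vm, and on gen6/7 unpins it) only runs once
	 * all of those requests have retired.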
1226 */ 1227 err = context_barrier_task(ctx, ALL_ENGINES, 1228 skip_ppgtt_update, 1229 emit_ppgtt_update, 1230 set_ppgtt_barrier, 1231 old); 1232 if (err) { 1233 i915_vm_close(__set_ppgtt(ctx, old)); 1234 i915_vm_close(old); 1235 } 1236 1237 unlock: 1238 mutex_unlock(&ctx->mutex); 1239 out: 1240 i915_vm_put(vm); 1241 return err; 1242 } 1243 1244 static int gen8_emit_rpcs_config(struct i915_request *rq, 1245 struct intel_context *ce, 1246 struct intel_sseu sseu) 1247 { 1248 u64 offset; 1249 u32 *cs; 1250 1251 cs = intel_ring_begin(rq, 4); 1252 if (IS_ERR(cs)) 1253 return PTR_ERR(cs); 1254 1255 offset = i915_ggtt_offset(ce->state) + 1256 LRC_STATE_PN * PAGE_SIZE + 1257 CTX_R_PWR_CLK_STATE * 4; 1258 1259 *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; 1260 *cs++ = lower_32_bits(offset); 1261 *cs++ = upper_32_bits(offset); 1262 *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu); 1263 1264 intel_ring_advance(rq, cs); 1265 1266 return 0; 1267 } 1268 1269 static int 1270 gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) 1271 { 1272 struct i915_request *rq; 1273 int ret; 1274 1275 lockdep_assert_held(&ce->pin_mutex); 1276 1277 /* 1278 * If the context is not idle, we have to submit an ordered request to 1279 * modify its context image via the kernel context (writing to our own 1280 * image, or into the registers directory, does not stick). Pristine 1281 * and idle contexts will be configured on pinning. 1282 */ 1283 if (!intel_context_pin_if_active(ce)) 1284 return 0; 1285 1286 rq = intel_engine_create_kernel_request(ce->engine); 1287 if (IS_ERR(rq)) { 1288 ret = PTR_ERR(rq); 1289 goto out_unpin; 1290 } 1291 1292 /* Serialise with the remote context */ 1293 ret = intel_context_prepare_remote_request(ce, rq); 1294 if (ret == 0) 1295 ret = gen8_emit_rpcs_config(rq, ce, sseu); 1296 1297 i915_request_add(rq); 1298 out_unpin: 1299 intel_context_unpin(ce); 1300 return ret; 1301 } 1302 1303 static int 1304 intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu) 1305 { 1306 int ret; 1307 1308 GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8); 1309 1310 ret = intel_context_lock_pinned(ce); 1311 if (ret) 1312 return ret; 1313 1314 /* Nothing to do if unmodified. */ 1315 if (!memcmp(&ce->sseu, &sseu, sizeof(sseu))) 1316 goto unlock; 1317 1318 ret = gen8_modify_rpcs(ce, sseu); 1319 if (!ret) 1320 ce->sseu = sseu; 1321 1322 unlock: 1323 intel_context_unlock_pinned(ce); 1324 return ret; 1325 } 1326 1327 static int 1328 user_to_context_sseu(struct drm_i915_private *i915, 1329 const struct drm_i915_gem_context_param_sseu *user, 1330 struct intel_sseu *context) 1331 { 1332 const struct sseu_dev_info *device = &RUNTIME_INFO(i915)->sseu; 1333 1334 /* No zeros in any field. */ 1335 if (!user->slice_mask || !user->subslice_mask || 1336 !user->min_eus_per_subslice || !user->max_eus_per_subslice) 1337 return -EINVAL; 1338 1339 /* Max > min. */ 1340 if (user->max_eus_per_subslice < user->min_eus_per_subslice) 1341 return -EINVAL; 1342 1343 /* 1344 * Some future proofing on the types since the uAPI is wider than the 1345 * current internal implementation. 1346 */ 1347 if (overflows_type(user->slice_mask, context->slice_mask) || 1348 overflows_type(user->subslice_mask, context->subslice_mask) || 1349 overflows_type(user->min_eus_per_subslice, 1350 context->min_eus_per_subslice) || 1351 overflows_type(user->max_eus_per_subslice, 1352 context->max_eus_per_subslice)) 1353 return -EINVAL; 1354 1355 /* Check validity against hardware. 
*/ 1356 if (user->slice_mask & ~device->slice_mask) 1357 return -EINVAL; 1358 1359 if (user->subslice_mask & ~device->subslice_mask[0]) 1360 return -EINVAL; 1361 1362 if (user->max_eus_per_subslice > device->max_eus_per_subslice) 1363 return -EINVAL; 1364 1365 context->slice_mask = user->slice_mask; 1366 context->subslice_mask = user->subslice_mask; 1367 context->min_eus_per_subslice = user->min_eus_per_subslice; 1368 context->max_eus_per_subslice = user->max_eus_per_subslice; 1369 1370 /* Part specific restrictions. */ 1371 if (IS_GEN(i915, 11)) { 1372 unsigned int hw_s = hweight8(device->slice_mask); 1373 unsigned int hw_ss_per_s = hweight8(device->subslice_mask[0]); 1374 unsigned int req_s = hweight8(context->slice_mask); 1375 unsigned int req_ss = hweight8(context->subslice_mask); 1376 1377 /* 1378 * Only full subslice enablement is possible if more than one 1379 * slice is turned on. 1380 */ 1381 if (req_s > 1 && req_ss != hw_ss_per_s) 1382 return -EINVAL; 1383 1384 /* 1385 * If more than four (SScount bitfield limit) subslices are 1386 * requested then the number has to be even. 1387 */ 1388 if (req_ss > 4 && (req_ss & 1)) 1389 return -EINVAL; 1390 1391 /* 1392 * If only one slice is enabled and subslice count is below the 1393 * device full enablement, it must be at most half of the all 1394 * available subslices. 1395 */ 1396 if (req_s == 1 && req_ss < hw_ss_per_s && 1397 req_ss > (hw_ss_per_s / 2)) 1398 return -EINVAL; 1399 1400 /* ABI restriction - VME use case only. */ 1401 1402 /* All slices or one slice only. */ 1403 if (req_s != 1 && req_s != hw_s) 1404 return -EINVAL; 1405 1406 /* 1407 * Half subslices or full enablement only when one slice is 1408 * enabled. 1409 */ 1410 if (req_s == 1 && 1411 (req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2))) 1412 return -EINVAL; 1413 1414 /* No EU configuration changes. */ 1415 if ((user->min_eus_per_subslice != 1416 device->max_eus_per_subslice) || 1417 (user->max_eus_per_subslice != 1418 device->max_eus_per_subslice)) 1419 return -EINVAL; 1420 } 1421 1422 return 0; 1423 } 1424 1425 static int set_sseu(struct i915_gem_context *ctx, 1426 struct drm_i915_gem_context_param *args) 1427 { 1428 struct drm_i915_private *i915 = ctx->i915; 1429 struct drm_i915_gem_context_param_sseu user_sseu; 1430 struct intel_context *ce; 1431 struct intel_sseu sseu; 1432 unsigned long lookup; 1433 int ret; 1434 1435 if (args->size < sizeof(user_sseu)) 1436 return -EINVAL; 1437 1438 if (!IS_GEN(i915, 11)) 1439 return -ENODEV; 1440 1441 if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), 1442 sizeof(user_sseu))) 1443 return -EFAULT; 1444 1445 if (user_sseu.rsvd) 1446 return -EINVAL; 1447 1448 if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) 1449 return -EINVAL; 1450 1451 lookup = 0; 1452 if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) 1453 lookup |= LOOKUP_USER_INDEX; 1454 1455 ce = lookup_user_engine(ctx, lookup, &user_sseu.engine); 1456 if (IS_ERR(ce)) 1457 return PTR_ERR(ce); 1458 1459 /* Only render engine supports RPCS configuration. 
*/ 1460 if (ce->engine->class != RENDER_CLASS) { 1461 ret = -ENODEV; 1462 goto out_ce; 1463 } 1464 1465 ret = user_to_context_sseu(i915, &user_sseu, &sseu); 1466 if (ret) 1467 goto out_ce; 1468 1469 ret = intel_context_reconfigure_sseu(ce, sseu); 1470 if (ret) 1471 goto out_ce; 1472 1473 args->size = sizeof(user_sseu); 1474 1475 out_ce: 1476 intel_context_put(ce); 1477 return ret; 1478 } 1479 1480 struct set_engines { 1481 struct i915_gem_context *ctx; 1482 struct i915_gem_engines *engines; 1483 }; 1484 1485 static int 1486 set_engines__load_balance(struct i915_user_extension __user *base, void *data) 1487 { 1488 struct i915_context_engines_load_balance __user *ext = 1489 container_of_user(base, typeof(*ext), base); 1490 const struct set_engines *set = data; 1491 struct intel_engine_cs *stack[16]; 1492 struct intel_engine_cs **siblings; 1493 struct intel_context *ce; 1494 u16 num_siblings, idx; 1495 unsigned int n; 1496 int err; 1497 1498 if (!HAS_EXECLISTS(set->ctx->i915)) 1499 return -ENODEV; 1500 1501 if (USES_GUC_SUBMISSION(set->ctx->i915)) 1502 return -ENODEV; /* not implement yet */ 1503 1504 if (get_user(idx, &ext->engine_index)) 1505 return -EFAULT; 1506 1507 if (idx >= set->engines->num_engines) { 1508 DRM_DEBUG("Invalid placement value, %d >= %d\n", 1509 idx, set->engines->num_engines); 1510 return -EINVAL; 1511 } 1512 1513 idx = array_index_nospec(idx, set->engines->num_engines); 1514 if (set->engines->engines[idx]) { 1515 DRM_DEBUG("Invalid placement[%d], already occupied\n", idx); 1516 return -EEXIST; 1517 } 1518 1519 if (get_user(num_siblings, &ext->num_siblings)) 1520 return -EFAULT; 1521 1522 err = check_user_mbz(&ext->flags); 1523 if (err) 1524 return err; 1525 1526 err = check_user_mbz(&ext->mbz64); 1527 if (err) 1528 return err; 1529 1530 siblings = stack; 1531 if (num_siblings > ARRAY_SIZE(stack)) { 1532 siblings = kmalloc_array(num_siblings, 1533 sizeof(*siblings), 1534 GFP_KERNEL); 1535 if (!siblings) 1536 return -ENOMEM; 1537 } 1538 1539 for (n = 0; n < num_siblings; n++) { 1540 struct i915_engine_class_instance ci; 1541 1542 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) { 1543 err = -EFAULT; 1544 goto out_siblings; 1545 } 1546 1547 siblings[n] = intel_engine_lookup_user(set->ctx->i915, 1548 ci.engine_class, 1549 ci.engine_instance); 1550 if (!siblings[n]) { 1551 DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n", 1552 n, ci.engine_class, ci.engine_instance); 1553 err = -EINVAL; 1554 goto out_siblings; 1555 } 1556 } 1557 1558 ce = intel_execlists_create_virtual(siblings, n); 1559 if (IS_ERR(ce)) { 1560 err = PTR_ERR(ce); 1561 goto out_siblings; 1562 } 1563 1564 intel_context_set_gem(ce, set->ctx); 1565 1566 if (cmpxchg(&set->engines->engines[idx], NULL, ce)) { 1567 intel_context_put(ce); 1568 err = -EEXIST; 1569 goto out_siblings; 1570 } 1571 1572 out_siblings: 1573 if (siblings != stack) 1574 kfree(siblings); 1575 1576 return err; 1577 } 1578 1579 static int 1580 set_engines__bond(struct i915_user_extension __user *base, void *data) 1581 { 1582 struct i915_context_engines_bond __user *ext = 1583 container_of_user(base, typeof(*ext), base); 1584 const struct set_engines *set = data; 1585 struct i915_engine_class_instance ci; 1586 struct intel_engine_cs *virtual; 1587 struct intel_engine_cs *master; 1588 u16 idx, num_bonds; 1589 int err, n; 1590 1591 if (get_user(idx, &ext->virtual_index)) 1592 return -EFAULT; 1593 1594 if (idx >= set->engines->num_engines) { 1595 DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n", 1596 idx, 
set->engines->num_engines); 1597 return -EINVAL; 1598 } 1599 1600 idx = array_index_nospec(idx, set->engines->num_engines); 1601 if (!set->engines->engines[idx]) { 1602 DRM_DEBUG("Invalid engine at %d\n", idx); 1603 return -EINVAL; 1604 } 1605 virtual = set->engines->engines[idx]->engine; 1606 1607 err = check_user_mbz(&ext->flags); 1608 if (err) 1609 return err; 1610 1611 for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) { 1612 err = check_user_mbz(&ext->mbz64[n]); 1613 if (err) 1614 return err; 1615 } 1616 1617 if (copy_from_user(&ci, &ext->master, sizeof(ci))) 1618 return -EFAULT; 1619 1620 master = intel_engine_lookup_user(set->ctx->i915, 1621 ci.engine_class, ci.engine_instance); 1622 if (!master) { 1623 DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n", 1624 ci.engine_class, ci.engine_instance); 1625 return -EINVAL; 1626 } 1627 1628 if (get_user(num_bonds, &ext->num_bonds)) 1629 return -EFAULT; 1630 1631 for (n = 0; n < num_bonds; n++) { 1632 struct intel_engine_cs *bond; 1633 1634 if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) 1635 return -EFAULT; 1636 1637 bond = intel_engine_lookup_user(set->ctx->i915, 1638 ci.engine_class, 1639 ci.engine_instance); 1640 if (!bond) { 1641 DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n", 1642 n, ci.engine_class, ci.engine_instance); 1643 return -EINVAL; 1644 } 1645 1646 /* 1647 * A non-virtual engine has no siblings to choose between; and 1648 * a submit fence will always be directed to the one engine. 1649 */ 1650 if (intel_engine_is_virtual(virtual)) { 1651 err = intel_virtual_engine_attach_bond(virtual, 1652 master, 1653 bond); 1654 if (err) 1655 return err; 1656 } 1657 } 1658 1659 return 0; 1660 } 1661 1662 static const i915_user_extension_fn set_engines__extensions[] = { 1663 [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance, 1664 [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond, 1665 }; 1666 1667 static int 1668 set_engines(struct i915_gem_context *ctx, 1669 const struct drm_i915_gem_context_param *args) 1670 { 1671 struct i915_context_param_engines __user *user = 1672 u64_to_user_ptr(args->value); 1673 struct set_engines set = { .ctx = ctx }; 1674 unsigned int num_engines, n; 1675 u64 extensions; 1676 int err; 1677 1678 if (!args->size) { /* switch back to legacy user_ring_map */ 1679 if (!i915_gem_context_user_engines(ctx)) 1680 return 0; 1681 1682 set.engines = default_engines(ctx); 1683 if (IS_ERR(set.engines)) 1684 return PTR_ERR(set.engines); 1685 1686 goto replace; 1687 } 1688 1689 BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines))); 1690 if (args->size < sizeof(*user) || 1691 !IS_ALIGNED(args->size, sizeof(*user->engines))) { 1692 DRM_DEBUG("Invalid size for engine array: %d\n", 1693 args->size); 1694 return -EINVAL; 1695 } 1696 1697 /* 1698 * Note that I915_EXEC_RING_MASK limits execbuf to only using the 1699 * first 64 engines defined here. 
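	 *
	 * The array length itself is derived purely from args->size, so more
	 * than 64 engines may be defined here; any extra entries are simply
	 * not reachable from execbuf.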
1700 */ 1701 num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); 1702 1703 set.engines = kmalloc(struct_size(set.engines, engines, num_engines), 1704 GFP_KERNEL); 1705 if (!set.engines) 1706 return -ENOMEM; 1707 1708 init_rcu_head(&set.engines->rcu); 1709 for (n = 0; n < num_engines; n++) { 1710 struct i915_engine_class_instance ci; 1711 struct intel_engine_cs *engine; 1712 struct intel_context *ce; 1713 1714 if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) { 1715 __free_engines(set.engines, n); 1716 return -EFAULT; 1717 } 1718 1719 if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID && 1720 ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) { 1721 set.engines->engines[n] = NULL; 1722 continue; 1723 } 1724 1725 engine = intel_engine_lookup_user(ctx->i915, 1726 ci.engine_class, 1727 ci.engine_instance); 1728 if (!engine) { 1729 DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n", 1730 n, ci.engine_class, ci.engine_instance); 1731 __free_engines(set.engines, n); 1732 return -ENOENT; 1733 } 1734 1735 ce = intel_context_create(engine); 1736 if (IS_ERR(ce)) { 1737 __free_engines(set.engines, n); 1738 return PTR_ERR(ce); 1739 } 1740 1741 intel_context_set_gem(ce, ctx); 1742 1743 set.engines->engines[n] = ce; 1744 } 1745 set.engines->num_engines = num_engines; 1746 1747 err = -EFAULT; 1748 if (!get_user(extensions, &user->extensions)) 1749 err = i915_user_extensions(u64_to_user_ptr(extensions), 1750 set_engines__extensions, 1751 ARRAY_SIZE(set_engines__extensions), 1752 &set); 1753 if (err) { 1754 free_engines(set.engines); 1755 return err; 1756 } 1757 1758 replace: 1759 mutex_lock(&ctx->engines_mutex); 1760 if (args->size) 1761 i915_gem_context_set_user_engines(ctx); 1762 else 1763 i915_gem_context_clear_user_engines(ctx); 1764 set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1); 1765 mutex_unlock(&ctx->engines_mutex); 1766 1767 call_rcu(&set.engines->rcu, free_engines_rcu); 1768 1769 return 0; 1770 } 1771 1772 static struct i915_gem_engines * 1773 __copy_engines(struct i915_gem_engines *e) 1774 { 1775 struct i915_gem_engines *copy; 1776 unsigned int n; 1777 1778 copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL); 1779 if (!copy) 1780 return ERR_PTR(-ENOMEM); 1781 1782 init_rcu_head(©->rcu); 1783 for (n = 0; n < e->num_engines; n++) { 1784 if (e->engines[n]) 1785 copy->engines[n] = intel_context_get(e->engines[n]); 1786 else 1787 copy->engines[n] = NULL; 1788 } 1789 copy->num_engines = n; 1790 1791 return copy; 1792 } 1793 1794 static int 1795 get_engines(struct i915_gem_context *ctx, 1796 struct drm_i915_gem_context_param *args) 1797 { 1798 struct i915_context_param_engines __user *user; 1799 struct i915_gem_engines *e; 1800 size_t n, count, size; 1801 int err = 0; 1802 1803 err = mutex_lock_interruptible(&ctx->engines_mutex); 1804 if (err) 1805 return err; 1806 1807 e = NULL; 1808 if (i915_gem_context_user_engines(ctx)) 1809 e = __copy_engines(i915_gem_context_engines(ctx)); 1810 mutex_unlock(&ctx->engines_mutex); 1811 if (IS_ERR_OR_NULL(e)) { 1812 args->size = 0; 1813 return PTR_ERR_OR_ZERO(e); 1814 } 1815 1816 count = e->num_engines; 1817 1818 /* Be paranoid in case we have an impedance mismatch */ 1819 if (!check_struct_size(user, engines, count, &size)) { 1820 err = -EINVAL; 1821 goto err_free; 1822 } 1823 if (overflows_type(size, args->size)) { 1824 err = -EINVAL; 1825 goto err_free; 1826 } 1827 1828 if (!args->size) { 1829 args->size = size; 1830 goto err_free; 1831 } 1832 1833 if (args->size < size) { 1834 err = 
-EINVAL; 1835 goto err_free; 1836 } 1837 1838 user = u64_to_user_ptr(args->value); 1839 if (!access_ok(user, size)) { 1840 err = -EFAULT; 1841 goto err_free; 1842 } 1843 1844 if (put_user(0, &user->extensions)) { 1845 err = -EFAULT; 1846 goto err_free; 1847 } 1848 1849 for (n = 0; n < count; n++) { 1850 struct i915_engine_class_instance ci = { 1851 .engine_class = I915_ENGINE_CLASS_INVALID, 1852 .engine_instance = I915_ENGINE_CLASS_INVALID_NONE, 1853 }; 1854 1855 if (e->engines[n]) { 1856 ci.engine_class = e->engines[n]->engine->uabi_class; 1857 ci.engine_instance = e->engines[n]->engine->uabi_instance; 1858 } 1859 1860 if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) { 1861 err = -EFAULT; 1862 goto err_free; 1863 } 1864 } 1865 1866 args->size = size; 1867 1868 err_free: 1869 free_engines(e); 1870 return err; 1871 } 1872 1873 static int 1874 set_persistence(struct i915_gem_context *ctx, 1875 const struct drm_i915_gem_context_param *args) 1876 { 1877 if (args->size) 1878 return -EINVAL; 1879 1880 return __context_set_persistence(ctx, args->value); 1881 } 1882 1883 static void __apply_priority(struct intel_context *ce, void *arg) 1884 { 1885 struct i915_gem_context *ctx = arg; 1886 1887 if (!intel_engine_has_semaphores(ce->engine)) 1888 return; 1889 1890 if (ctx->sched.priority >= I915_PRIORITY_NORMAL) 1891 intel_context_set_use_semaphores(ce); 1892 else 1893 intel_context_clear_use_semaphores(ce); 1894 } 1895 1896 static int set_priority(struct i915_gem_context *ctx, 1897 const struct drm_i915_gem_context_param *args) 1898 { 1899 s64 priority = args->value; 1900 1901 if (args->size) 1902 return -EINVAL; 1903 1904 if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) 1905 return -ENODEV; 1906 1907 if (priority > I915_CONTEXT_MAX_USER_PRIORITY || 1908 priority < I915_CONTEXT_MIN_USER_PRIORITY) 1909 return -EINVAL; 1910 1911 if (priority > I915_CONTEXT_DEFAULT_PRIORITY && 1912 !capable(CAP_SYS_NICE)) 1913 return -EPERM; 1914 1915 ctx->sched.priority = I915_USER_PRIORITY(priority); 1916 context_apply_all(ctx, __apply_priority, ctx); 1917 1918 return 0; 1919 } 1920 1921 static int ctx_setparam(struct drm_i915_file_private *fpriv, 1922 struct i915_gem_context *ctx, 1923 struct drm_i915_gem_context_param *args) 1924 { 1925 int ret = 0; 1926 1927 switch (args->param) { 1928 case I915_CONTEXT_PARAM_NO_ZEROMAP: 1929 if (args->size) 1930 ret = -EINVAL; 1931 else if (args->value) 1932 set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); 1933 else 1934 clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags); 1935 break; 1936 1937 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: 1938 if (args->size) 1939 ret = -EINVAL; 1940 else if (args->value) 1941 i915_gem_context_set_no_error_capture(ctx); 1942 else 1943 i915_gem_context_clear_no_error_capture(ctx); 1944 break; 1945 1946 case I915_CONTEXT_PARAM_BANNABLE: 1947 if (args->size) 1948 ret = -EINVAL; 1949 else if (!capable(CAP_SYS_ADMIN) && !args->value) 1950 ret = -EPERM; 1951 else if (args->value) 1952 i915_gem_context_set_bannable(ctx); 1953 else 1954 i915_gem_context_clear_bannable(ctx); 1955 break; 1956 1957 case I915_CONTEXT_PARAM_RECOVERABLE: 1958 if (args->size) 1959 ret = -EINVAL; 1960 else if (args->value) 1961 i915_gem_context_set_recoverable(ctx); 1962 else 1963 i915_gem_context_clear_recoverable(ctx); 1964 break; 1965 1966 case I915_CONTEXT_PARAM_PRIORITY: 1967 ret = set_priority(ctx, args); 1968 break; 1969 1970 case I915_CONTEXT_PARAM_SSEU: 1971 ret = set_sseu(ctx, args); 1972 break; 1973 1974 case I915_CONTEXT_PARAM_VM: 1975 ret = 
set_ppgtt(fpriv, ctx, args); 1976 break; 1977 1978 case I915_CONTEXT_PARAM_ENGINES: 1979 ret = set_engines(ctx, args); 1980 break; 1981 1982 case I915_CONTEXT_PARAM_PERSISTENCE: 1983 ret = set_persistence(ctx, args); 1984 break; 1985 1986 case I915_CONTEXT_PARAM_BAN_PERIOD: 1987 default: 1988 ret = -EINVAL; 1989 break; 1990 } 1991 1992 return ret; 1993 } 1994 1995 struct create_ext { 1996 struct i915_gem_context *ctx; 1997 struct drm_i915_file_private *fpriv; 1998 }; 1999 2000 static int create_setparam(struct i915_user_extension __user *ext, void *data) 2001 { 2002 struct drm_i915_gem_context_create_ext_setparam local; 2003 const struct create_ext *arg = data; 2004 2005 if (copy_from_user(&local, ext, sizeof(local))) 2006 return -EFAULT; 2007 2008 if (local.param.ctx_id) 2009 return -EINVAL; 2010 2011 return ctx_setparam(arg->fpriv, arg->ctx, &local.param); 2012 } 2013 2014 static int clone_engines(struct i915_gem_context *dst, 2015 struct i915_gem_context *src) 2016 { 2017 struct i915_gem_engines *e = i915_gem_context_lock_engines(src); 2018 struct i915_gem_engines *clone; 2019 bool user_engines; 2020 unsigned long n; 2021 2022 clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL); 2023 if (!clone) 2024 goto err_unlock; 2025 2026 init_rcu_head(&clone->rcu); 2027 for (n = 0; n < e->num_engines; n++) { 2028 struct intel_engine_cs *engine; 2029 2030 if (!e->engines[n]) { 2031 clone->engines[n] = NULL; 2032 continue; 2033 } 2034 engine = e->engines[n]->engine; 2035 2036 /* 2037 * Virtual engines are singletons; they can only exist 2038 * inside a single context, because they embed their 2039 * HW context... As each virtual context implies a single 2040 * timeline (each engine can only dequeue a single request 2041 * at any time), it would be surprising for two contexts 2042 * to use the same engine. So let's create a copy of 2043 * the virtual engine instead. 
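		 *
		 * The copy is expected to span the same set of physical
		 * siblings, so the cloned context ends up with an equivalent
		 * but independent virtual engine.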
2044 */ 2045 if (intel_engine_is_virtual(engine)) 2046 clone->engines[n] = 2047 intel_execlists_clone_virtual(engine); 2048 else 2049 clone->engines[n] = intel_context_create(engine); 2050 if (IS_ERR_OR_NULL(clone->engines[n])) { 2051 __free_engines(clone, n); 2052 goto err_unlock; 2053 } 2054 2055 intel_context_set_gem(clone->engines[n], dst); 2056 } 2057 clone->num_engines = n; 2058 2059 user_engines = i915_gem_context_user_engines(src); 2060 i915_gem_context_unlock_engines(src); 2061 2062 /* Serialised by constructor */ 2063 free_engines(__context_engines_static(dst)); 2064 RCU_INIT_POINTER(dst->engines, clone); 2065 if (user_engines) 2066 i915_gem_context_set_user_engines(dst); 2067 else 2068 i915_gem_context_clear_user_engines(dst); 2069 return 0; 2070 2071 err_unlock: 2072 i915_gem_context_unlock_engines(src); 2073 return -ENOMEM; 2074 } 2075 2076 static int clone_flags(struct i915_gem_context *dst, 2077 struct i915_gem_context *src) 2078 { 2079 dst->user_flags = src->user_flags; 2080 return 0; 2081 } 2082 2083 static int clone_schedattr(struct i915_gem_context *dst, 2084 struct i915_gem_context *src) 2085 { 2086 dst->sched = src->sched; 2087 return 0; 2088 } 2089 2090 static int clone_sseu(struct i915_gem_context *dst, 2091 struct i915_gem_context *src) 2092 { 2093 struct i915_gem_engines *e = i915_gem_context_lock_engines(src); 2094 struct i915_gem_engines *clone; 2095 unsigned long n; 2096 int err; 2097 2098 /* no locking required; sole access under constructor*/ 2099 clone = __context_engines_static(dst); 2100 if (e->num_engines != clone->num_engines) { 2101 err = -EINVAL; 2102 goto unlock; 2103 } 2104 2105 for (n = 0; n < e->num_engines; n++) { 2106 struct intel_context *ce = e->engines[n]; 2107 2108 if (clone->engines[n]->engine->class != ce->engine->class) { 2109 /* Must have compatible engine maps! 
static int clone_sseu(struct i915_gem_context *dst,
		      struct i915_gem_context *src)
{
	struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
	struct i915_gem_engines *clone;
	unsigned long n;
	int err;

	/* no locking required; sole access under constructor */
	clone = __context_engines_static(dst);
	if (e->num_engines != clone->num_engines) {
		err = -EINVAL;
		goto unlock;
	}

	for (n = 0; n < e->num_engines; n++) {
		struct intel_context *ce = e->engines[n];

		if (clone->engines[n]->engine->class != ce->engine->class) {
			/* Must have compatible engine maps! */
			err = -EINVAL;
			goto unlock;
		}

		/* serialises with set_sseu */
		err = intel_context_lock_pinned(ce);
		if (err)
			goto unlock;

		clone->engines[n]->sseu = ce->sseu;
		intel_context_unlock_pinned(ce);
	}

	err = 0;
unlock:
	i915_gem_context_unlock_engines(src);
	return err;
}

static int clone_timeline(struct i915_gem_context *dst,
			  struct i915_gem_context *src)
{
	if (src->timeline)
		__assign_timeline(dst, src->timeline);

	return 0;
}

static int clone_vm(struct i915_gem_context *dst,
		    struct i915_gem_context *src)
{
	struct i915_address_space *vm;
	int err = 0;

	if (!rcu_access_pointer(src->vm))
		return 0;

	rcu_read_lock();
	vm = context_get_vm_rcu(src);
	rcu_read_unlock();

	if (!mutex_lock_interruptible(&dst->mutex)) {
		__assign_ppgtt(dst, vm);
		mutex_unlock(&dst->mutex);
	} else {
		err = -EINTR;
	}

	i915_vm_put(vm);
	return err;
}

static int create_clone(struct i915_user_extension __user *ext, void *data)
{
	static int (* const fn[])(struct i915_gem_context *dst,
				  struct i915_gem_context *src) = {
#define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
		MAP(ENGINES, clone_engines),
		MAP(FLAGS, clone_flags),
		MAP(SCHEDATTR, clone_schedattr),
		MAP(SSEU, clone_sseu),
		MAP(TIMELINE, clone_timeline),
		MAP(VM, clone_vm),
#undef MAP
	};
	struct drm_i915_gem_context_create_ext_clone local;
	const struct create_ext *arg = data;
	struct i915_gem_context *dst = arg->ctx;
	struct i915_gem_context *src;
	int err, bit;

	if (copy_from_user(&local, ext, sizeof(local)))
		return -EFAULT;

	/*
	 * Every clone flag defined in the uAPI must have an entry in the
	 * fn[] table above; the remaining bits are exactly
	 * I915_CONTEXT_CLONE_UNKNOWN and are rejected below.
	 */
	BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
		     I915_CONTEXT_CLONE_UNKNOWN);

	if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
		return -EINVAL;

	if (local.rsvd)
		return -EINVAL;

	rcu_read_lock();
	src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
	rcu_read_unlock();
	if (!src)
		return -ENOENT;

	GEM_BUG_ON(src == dst);

	for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
		if (!(local.flags & BIT(bit)))
			continue;

		err = fn[bit](dst, src);
		if (err)
			return err;
	}

	return 0;
}
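/*
 * Illustrative only (not part of the driver): a minimal userspace sketch of
 * the CLONE extension handled by create_clone() above, copying the VM,
 * engine map and scheduler attributes from an existing context.  Error
 * handling is omitted; "fd" (open render node) and "parent_id" (an existing
 * context handle) are assumed for the example.
 *
 *	struct drm_i915_gem_context_create_ext_clone clone = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_CLONE },
 *		.clone_id = parent_id,
 *		.flags = I915_CONTEXT_CLONE_VM |
 *			 I915_CONTEXT_CLONE_ENGINES |
 *			 I915_CONTEXT_CLONE_SCHEDATTR,
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (__u64)(uintptr_t)&clone,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 */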
static const i915_user_extension_fn create_extensions[] = {
	[I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
	[I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
};

static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_context_create_ext *args = data;
	struct create_ext ext_data;
	int ret;
	u32 id;

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return -ENODEV;

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
		return -EINVAL;

	ret = intel_gt_terminally_wedged(&i915->gt);
	if (ret)
		return ret;

	ext_data.fpriv = file->driver_priv;
	if (client_is_banned(ext_data.fpriv)) {
#ifdef __NetBSD__
		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
			  curproc->p_comm, (int)curproc->p_pid);
#else
		DRM_DEBUG("client %s[%d] banned from creating ctx\n",
			  current->comm, task_pid_nr(current));
#endif
		return -EIO;
	}

	ext_data.ctx = i915_gem_create_context(i915, args->flags);
	if (IS_ERR(ext_data.ctx))
		return PTR_ERR(ext_data.ctx);

	if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
		ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
					   create_extensions,
					   ARRAY_SIZE(create_extensions),
					   &ext_data);
		if (ret)
			goto err_ctx;
	}

	ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
	if (ret < 0)
		goto err_ctx;

	args->ctx_id = id;
	DRM_DEBUG("HW context %d created\n", args->ctx_id);

	return 0;

err_ctx:
	context_close(ext_data.ctx);
	return ret;
}

int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file)
{
	struct drm_i915_gem_context_destroy *args = data;
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_gem_context *ctx;

	if (args->pad != 0)
		return -EINVAL;

	if (!args->ctx_id)
		return -ENOENT;

	ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	context_close(ctx);
	return 0;
}

static int get_sseu(struct i915_gem_context *ctx,
		    struct drm_i915_gem_context_param *args)
{
	struct drm_i915_gem_context_param_sseu user_sseu;
	struct intel_context *ce;
	unsigned long lookup;
	int err;

	if (args->size == 0)
		goto out;
	else if (args->size < sizeof(user_sseu))
		return -EINVAL;

	if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
			   sizeof(user_sseu)))
		return -EFAULT;

	if (user_sseu.rsvd)
		return -EINVAL;

	if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
		return -EINVAL;

	lookup = 0;
	if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
		lookup |= LOOKUP_USER_INDEX;

	ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
	if (err) {
		intel_context_put(ce);
		return err;
	}

	user_sseu.slice_mask = ce->sseu.slice_mask;
	user_sseu.subslice_mask = ce->sseu.subslice_mask;
	user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
	user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;

	intel_context_unlock_pinned(ce);
	intel_context_put(ce);

	if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
			 sizeof(user_sseu)))
		return -EFAULT;

out:
	args->size = sizeof(user_sseu);

	return 0;
}
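/*
 * Illustrative only (not part of the driver): a minimal userspace sketch of
 * querying the per-context SSEU configuration of the render engine through
 * GETPARAM, corresponding to get_sseu() above.  Error handling is omitted
 * and "fd"/"ctx_id" are assumed.
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = {
 *			.engine_class = I915_ENGINE_CLASS_RENDER,
 *			.engine_instance = 0,
 *		},
 *	};
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (__u64)(uintptr_t)&sseu,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
 *	// sseu.slice_mask, sseu.subslice_mask, etc. now reflect the context
 */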
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	switch (args->param) {
	case I915_CONTEXT_PARAM_NO_ZEROMAP:
		args->size = 0;
		args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
		break;

	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		rcu_read_lock();
		if (rcu_access_pointer(ctx->vm))
			args->value = rcu_dereference(ctx->vm)->total;
		else
			args->value = to_i915(dev)->ggtt.vm.total;
		rcu_read_unlock();
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_ENGINES:
		ret = get_engines(ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		args->size = 0;
		args->value = i915_gem_context_is_persistent(ctx);
		break;

	case I915_CONTEXT_PARAM_BAN_PERIOD:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}

int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	int ret;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (!ctx)
		return -ENOENT;

	ret = ctx_setparam(file_priv, ctx, args);

	i915_gem_context_put(ctx);
	return ret;
}
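/*
 * Illustrative only (not part of the driver): a minimal userspace sketch of
 * the GETPARAM/SETPARAM pair above -- reading the context's GTT size and
 * then opting the context out of persistence.  Error handling is omitted
 * and "fd"/"ctx_id" are assumed; whether a particular parameter may be set
 * still depends on the platform checks in ctx_setparam().
 *
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_GTT_SIZE,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
 *	// arg.value now holds the usable GTT size in bytes
 *
 *	arg.param = I915_CONTEXT_PARAM_PERSISTENCE;
 *	arg.value = 0;	// cancel outstanding work when the context is closed
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
 */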
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
				       void *data, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_reset_stats *args = data;
	struct i915_gem_context *ctx;
	int ret;

	if (args->flags || args->pad)
		return -EINVAL;

	ret = -ENOENT;
	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
	if (!ctx)
		goto out;

	/*
	 * We opt for unserialised reads here. This may result in tearing
	 * in the extremely unlikely event of a GPU hang on this context
	 * as we are querying them. If we need that extra layer of protection,
	 * we should wrap the hangstats with a seqlock.
	 */

	if (capable(CAP_SYS_ADMIN))
		args->reset_count = i915_reset_count(&i915->gpu_error);
	else
		args->reset_count = 0;

	args->batch_active = atomic_read(&ctx->guilty_count);
	args->batch_pending = atomic_read(&ctx->active_count);

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}

/*
 * GEM context-engines iterator: for_each_gem_engine()
 *
 * Walks the non-NULL intel_contexts of an i915_gem_engines array; the
 * caller must keep the array alive (e.g. via i915_gem_context_lock_engines())
 * for the duration of the walk.
 */
struct intel_context *
i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
{
	const struct i915_gem_engines *e = it->engines;
	struct intel_context *ctx;

	do {
		if (it->idx >= e->num_engines)
			return NULL;

		ctx = e->engines[it->idx++];
	} while (!ctx);

	return ctx;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif

static void i915_global_gem_context_shrink(void)
{
	kmem_cache_shrink(global.slab_luts);
}

static void i915_global_gem_context_exit(void)
{
	kmem_cache_destroy(global.slab_luts);
}

static struct i915_global_gem_context global = { {
	.shrink = i915_global_gem_context_shrink,
	.exit = i915_global_gem_context_exit,
} };

int __init i915_global_gem_context_init(void)
{
	global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
	if (!global.slab_luts)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}