intel_bufmgr_gem.c revision a884aba1
/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007-2012 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <xf86drm.h>
#include <xf86atomic.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
#include <pthread.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <stdbool.h>

#include "errno.h"
#ifndef ETIME
#define ETIME ETIMEDOUT
#endif
#include "libdrm.h"
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
#include "intel_aub.h"
#include "string.h"

#include "i915_drm.h"

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif

#define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))

#define DBG(...)
do { \ 81 if (bufmgr_gem->bufmgr.debug) \ 82 fprintf(stderr, __VA_ARGS__); \ 83} while (0) 84 85#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) 86 87typedef struct _drm_intel_bo_gem drm_intel_bo_gem; 88 89struct drm_intel_gem_bo_bucket { 90 drmMMListHead head; 91 unsigned long size; 92}; 93 94typedef struct _drm_intel_bufmgr_gem { 95 drm_intel_bufmgr bufmgr; 96 97 atomic_t refcount; 98 99 int fd; 100 101 int max_relocs; 102 103 pthread_mutex_t lock; 104 105 struct drm_i915_gem_exec_object *exec_objects; 106 struct drm_i915_gem_exec_object2 *exec2_objects; 107 drm_intel_bo **exec_bos; 108 int exec_size; 109 int exec_count; 110 111 /** Array of lists of cached gem objects of power-of-two sizes */ 112 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4]; 113 int num_buckets; 114 time_t time; 115 116 drmMMListHead managers; 117 118 drmMMListHead named; 119 drmMMListHead vma_cache; 120 int vma_count, vma_open, vma_max; 121 122 uint64_t gtt_size; 123 int available_fences; 124 int pci_device; 125 int gen; 126 unsigned int has_bsd : 1; 127 unsigned int has_blt : 1; 128 unsigned int has_relaxed_fencing : 1; 129 unsigned int has_llc : 1; 130 unsigned int has_wait_timeout : 1; 131 unsigned int bo_reuse : 1; 132 unsigned int no_exec : 1; 133 unsigned int has_vebox : 1; 134 bool fenced_relocs; 135 136 char *aub_filename; 137 FILE *aub_file; 138 uint32_t aub_offset; 139} drm_intel_bufmgr_gem; 140 141#define DRM_INTEL_RELOC_FENCE (1<<0) 142 143typedef struct _drm_intel_reloc_target_info { 144 drm_intel_bo *bo; 145 int flags; 146} drm_intel_reloc_target; 147 148struct _drm_intel_bo_gem { 149 drm_intel_bo bo; 150 151 atomic_t refcount; 152 uint32_t gem_handle; 153 const char *name; 154 155 /** 156 * Kenel-assigned global name for this object 157 * 158 * List contains both flink named and prime fd'd objects 159 */ 160 unsigned int global_name; 161 drmMMListHead name_list; 162 163 /** 164 * Index of the buffer within the validation list while preparing a 165 * batchbuffer execution. 166 */ 167 int validate_index; 168 169 /** 170 * Current tiling mode 171 */ 172 uint32_t tiling_mode; 173 uint32_t swizzle_mode; 174 unsigned long stride; 175 176 time_t free_time; 177 178 /** Array passed to the DRM containing relocation information. */ 179 struct drm_i915_gem_relocation_entry *relocs; 180 /** 181 * Array of info structs corresponding to relocs[i].target_handle etc 182 */ 183 drm_intel_reloc_target *reloc_target_info; 184 /** Number of entries in relocs */ 185 int reloc_count; 186 /** Mapped address for the buffer, saved across map/unmap cycles */ 187 void *mem_virtual; 188 /** GTT virtual address for the buffer, saved across map/unmap cycles */ 189 void *gtt_virtual; 190 /** 191 * Virtual address of the buffer allocated by user, used for userptr 192 * objects only. 193 */ 194 void *user_virtual; 195 int map_count; 196 drmMMListHead vma_list; 197 198 /** BO cache list */ 199 drmMMListHead head; 200 201 /** 202 * Boolean of whether this BO and its children have been included in 203 * the current drm_intel_bufmgr_check_aperture_space() total. 204 */ 205 bool included_in_check_aperture; 206 207 /** 208 * Boolean of whether this buffer has been used as a relocation 209 * target and had its size accounted for, and thus can't have any 210 * further relocations added to it. 211 */ 212 bool used_as_reloc_target; 213 214 /** 215 * Boolean of whether we have encountered an error whilst building the relocation tree. 
216 */ 217 bool has_error; 218 219 /** 220 * Boolean of whether this buffer can be re-used 221 */ 222 bool reusable; 223 224 /** 225 * Boolean of whether the GPU is definitely not accessing the buffer. 226 * 227 * This is only valid when reusable, since non-reusable 228 * buffers are those that have been shared wth other 229 * processes, so we don't know their state. 230 */ 231 bool idle; 232 233 /** 234 * Boolean of whether this buffer was allocated with userptr 235 */ 236 bool is_userptr; 237 238 /** 239 * Size in bytes of this buffer and its relocation descendents. 240 * 241 * Used to avoid costly tree walking in 242 * drm_intel_bufmgr_check_aperture in the common case. 243 */ 244 int reloc_tree_size; 245 246 /** 247 * Number of potential fence registers required by this buffer and its 248 * relocations. 249 */ 250 int reloc_tree_fences; 251 252 /** Flags that we may need to do the SW_FINSIH ioctl on unmap. */ 253 bool mapped_cpu_write; 254 255 uint32_t aub_offset; 256 257 drm_intel_aub_annotation *aub_annotations; 258 unsigned aub_annotation_count; 259}; 260 261static unsigned int 262drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count); 263 264static unsigned int 265drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count); 266 267static int 268drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode, 269 uint32_t * swizzle_mode); 270 271static int 272drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo, 273 uint32_t tiling_mode, 274 uint32_t stride); 275 276static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo, 277 time_t time); 278 279static void drm_intel_gem_bo_unreference(drm_intel_bo *bo); 280 281static void drm_intel_gem_bo_free(drm_intel_bo *bo); 282 283static unsigned long 284drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size, 285 uint32_t *tiling_mode) 286{ 287 unsigned long min_size, max_size; 288 unsigned long i; 289 290 if (*tiling_mode == I915_TILING_NONE) 291 return size; 292 293 /* 965+ just need multiples of page size for tiling */ 294 if (bufmgr_gem->gen >= 4) 295 return ROUND_UP_TO(size, 4096); 296 297 /* Older chips need powers of two, of at least 512k or 1M */ 298 if (bufmgr_gem->gen == 3) { 299 min_size = 1024*1024; 300 max_size = 128*1024*1024; 301 } else { 302 min_size = 512*1024; 303 max_size = 64*1024*1024; 304 } 305 306 if (size > max_size) { 307 *tiling_mode = I915_TILING_NONE; 308 return size; 309 } 310 311 /* Do we need to allocate every page for the fence? */ 312 if (bufmgr_gem->has_relaxed_fencing) 313 return ROUND_UP_TO(size, 4096); 314 315 for (i = min_size; i < size; i <<= 1) 316 ; 317 318 return i; 319} 320 321/* 322 * Round a given pitch up to the minimum required for X tiling on a 323 * given chip. We use 512 as the minimum to allow for a later tiling 324 * change. 325 */ 326static unsigned long 327drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem, 328 unsigned long pitch, uint32_t *tiling_mode) 329{ 330 unsigned long tile_width; 331 unsigned long i; 332 333 /* If untiled, then just align it so that we can do rendering 334 * to it with the 3D engine. 
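 *
 * (For example, an untiled 1280-pixel-wide 32bpp surface already has a
 * 64-byte-aligned pitch of 5120 bytes and is returned unchanged, while a
 * 1366-pixel-wide one rounds up from 5464 to 5504 bytes.)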
335 */ 336 if (*tiling_mode == I915_TILING_NONE) 337 return ALIGN(pitch, 64); 338 339 if (*tiling_mode == I915_TILING_X 340 || (IS_915(bufmgr_gem->pci_device) 341 && *tiling_mode == I915_TILING_Y)) 342 tile_width = 512; 343 else 344 tile_width = 128; 345 346 /* 965 is flexible */ 347 if (bufmgr_gem->gen >= 4) 348 return ROUND_UP_TO(pitch, tile_width); 349 350 /* The older hardware has a maximum pitch of 8192 with tiled 351 * surfaces, so fallback to untiled if it's too large. 352 */ 353 if (pitch > 8192) { 354 *tiling_mode = I915_TILING_NONE; 355 return ALIGN(pitch, 64); 356 } 357 358 /* Pre-965 needs power of two tile width */ 359 for (i = tile_width; i < pitch; i <<= 1) 360 ; 361 362 return i; 363} 364 365static struct drm_intel_gem_bo_bucket * 366drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem, 367 unsigned long size) 368{ 369 int i; 370 371 for (i = 0; i < bufmgr_gem->num_buckets; i++) { 372 struct drm_intel_gem_bo_bucket *bucket = 373 &bufmgr_gem->cache_bucket[i]; 374 if (bucket->size >= size) { 375 return bucket; 376 } 377 } 378 379 return NULL; 380} 381 382static void 383drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem) 384{ 385 int i, j; 386 387 for (i = 0; i < bufmgr_gem->exec_count; i++) { 388 drm_intel_bo *bo = bufmgr_gem->exec_bos[i]; 389 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 390 391 if (bo_gem->relocs == NULL) { 392 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, 393 bo_gem->name); 394 continue; 395 } 396 397 for (j = 0; j < bo_gem->reloc_count; j++) { 398 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo; 399 drm_intel_bo_gem *target_gem = 400 (drm_intel_bo_gem *) target_bo; 401 402 DBG("%2d: %d (%s)@0x%08llx -> " 403 "%d (%s)@0x%08llx + 0x%08x\n", 404 i, 405 bo_gem->gem_handle, bo_gem->name, 406 (unsigned long long)bo_gem->relocs[j].offset, 407 target_gem->gem_handle, 408 target_gem->name, 409 (unsigned long long)target_bo->offset64, 410 bo_gem->relocs[j].delta); 411 } 412 } 413} 414 415static inline void 416drm_intel_gem_bo_reference(drm_intel_bo *bo) 417{ 418 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 419 420 atomic_inc(&bo_gem->refcount); 421} 422 423/** 424 * Adds the given buffer to the list of buffers to be validated (moved into the 425 * appropriate memory type) with the next batch submission. 426 * 427 * If a buffer is validated multiple times in a batch submission, it ends up 428 * with the intersection of the memory type flags and the union of the 429 * access flags. 430 */ 431static void 432drm_intel_add_validate_buffer(drm_intel_bo *bo) 433{ 434 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 435 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 436 int index; 437 438 if (bo_gem->validate_index != -1) 439 return; 440 441 /* Extend the array of validation entries as necessary. 
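 * The exec_objects/exec_bos arrays grow geometrically: each reallocation
 * doubles the capacity (starting from 5 entries), so adding a buffer is
 * amortized O(1).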
*/ 442 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) { 443 int new_size = bufmgr_gem->exec_size * 2; 444 445 if (new_size == 0) 446 new_size = 5; 447 448 bufmgr_gem->exec_objects = 449 realloc(bufmgr_gem->exec_objects, 450 sizeof(*bufmgr_gem->exec_objects) * new_size); 451 bufmgr_gem->exec_bos = 452 realloc(bufmgr_gem->exec_bos, 453 sizeof(*bufmgr_gem->exec_bos) * new_size); 454 bufmgr_gem->exec_size = new_size; 455 } 456 457 index = bufmgr_gem->exec_count; 458 bo_gem->validate_index = index; 459 /* Fill in array entry */ 460 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle; 461 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count; 462 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs; 463 bufmgr_gem->exec_objects[index].alignment = 0; 464 bufmgr_gem->exec_objects[index].offset = 0; 465 bufmgr_gem->exec_bos[index] = bo; 466 bufmgr_gem->exec_count++; 467} 468 469static void 470drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence) 471{ 472 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; 473 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; 474 int index; 475 476 if (bo_gem->validate_index != -1) { 477 if (need_fence) 478 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= 479 EXEC_OBJECT_NEEDS_FENCE; 480 return; 481 } 482 483 /* Extend the array of validation entries as necessary. */ 484 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) { 485 int new_size = bufmgr_gem->exec_size * 2; 486 487 if (new_size == 0) 488 new_size = 5; 489 490 bufmgr_gem->exec2_objects = 491 realloc(bufmgr_gem->exec2_objects, 492 sizeof(*bufmgr_gem->exec2_objects) * new_size); 493 bufmgr_gem->exec_bos = 494 realloc(bufmgr_gem->exec_bos, 495 sizeof(*bufmgr_gem->exec_bos) * new_size); 496 bufmgr_gem->exec_size = new_size; 497 } 498 499 index = bufmgr_gem->exec_count; 500 bo_gem->validate_index = index; 501 /* Fill in array entry */ 502 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle; 503 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count; 504 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs; 505 bufmgr_gem->exec2_objects[index].alignment = 0; 506 bufmgr_gem->exec2_objects[index].offset = 0; 507 bufmgr_gem->exec_bos[index] = bo; 508 bufmgr_gem->exec2_objects[index].flags = 0; 509 bufmgr_gem->exec2_objects[index].rsvd1 = 0; 510 bufmgr_gem->exec2_objects[index].rsvd2 = 0; 511 if (need_fence) { 512 bufmgr_gem->exec2_objects[index].flags |= 513 EXEC_OBJECT_NEEDS_FENCE; 514 } 515 bufmgr_gem->exec_count++; 516} 517 518#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \ 519 sizeof(uint32_t)) 520 521static void 522drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem, 523 drm_intel_bo_gem *bo_gem) 524{ 525 int size; 526 527 assert(!bo_gem->used_as_reloc_target); 528 529 /* The older chipsets are far-less flexible in terms of tiling, 530 * and require tiled buffer to be size aligned in the aperture. 531 * This means that in the worst possible case we will need a hole 532 * twice as large as the object in order for it to fit into the 533 * aperture. Optimal packing is for wimps. 
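 *
 * Worked example (gen3, X-tiled, relaxed fencing): a 1.5MiB buffer is
 * rounded up to the next fence-sized region of 2MiB, and the factor of
 * two for worst-case alignment makes us budget 4MiB of aperture for it.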
534 */ 535 size = bo_gem->bo.size; 536 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) { 537 int min_size; 538 539 if (bufmgr_gem->has_relaxed_fencing) { 540 if (bufmgr_gem->gen == 3) 541 min_size = 1024*1024; 542 else 543 min_size = 512*1024; 544 545 while (min_size < size) 546 min_size *= 2; 547 } else 548 min_size = size; 549 550 /* Account for worst-case alignment. */ 551 size = 2 * min_size; 552 } 553 554 bo_gem->reloc_tree_size = size; 555} 556 557static int 558drm_intel_setup_reloc_list(drm_intel_bo *bo) 559{ 560 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 561 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 562 unsigned int max_relocs = bufmgr_gem->max_relocs; 563 564 if (bo->size / 4 < max_relocs) 565 max_relocs = bo->size / 4; 566 567 bo_gem->relocs = malloc(max_relocs * 568 sizeof(struct drm_i915_gem_relocation_entry)); 569 bo_gem->reloc_target_info = malloc(max_relocs * 570 sizeof(drm_intel_reloc_target)); 571 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) { 572 bo_gem->has_error = true; 573 574 free (bo_gem->relocs); 575 bo_gem->relocs = NULL; 576 577 free (bo_gem->reloc_target_info); 578 bo_gem->reloc_target_info = NULL; 579 580 return 1; 581 } 582 583 return 0; 584} 585 586static int 587drm_intel_gem_bo_busy(drm_intel_bo *bo) 588{ 589 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 590 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 591 struct drm_i915_gem_busy busy; 592 int ret; 593 594 if (bo_gem->reusable && bo_gem->idle) 595 return false; 596 597 VG_CLEAR(busy); 598 busy.handle = bo_gem->gem_handle; 599 600 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy); 601 if (ret == 0) { 602 bo_gem->idle = !busy.busy; 603 return busy.busy; 604 } else { 605 return false; 606 } 607 return (ret == 0 && busy.busy); 608} 609 610static int 611drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem, 612 drm_intel_bo_gem *bo_gem, int state) 613{ 614 struct drm_i915_gem_madvise madv; 615 616 VG_CLEAR(madv); 617 madv.handle = bo_gem->gem_handle; 618 madv.madv = state; 619 madv.retained = 1; 620 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv); 621 622 return madv.retained; 623} 624 625static int 626drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv) 627{ 628 return drm_intel_gem_bo_madvise_internal 629 ((drm_intel_bufmgr_gem *) bo->bufmgr, 630 (drm_intel_bo_gem *) bo, 631 madv); 632} 633 634/* drop the oldest entries that have been purged by the kernel */ 635static void 636drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem, 637 struct drm_intel_gem_bo_bucket *bucket) 638{ 639 while (!DRMLISTEMPTY(&bucket->head)) { 640 drm_intel_bo_gem *bo_gem; 641 642 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, 643 bucket->head.next, head); 644 if (drm_intel_gem_bo_madvise_internal 645 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED)) 646 break; 647 648 DRMLISTDEL(&bo_gem->head); 649 drm_intel_gem_bo_free(&bo_gem->bo); 650 } 651} 652 653static drm_intel_bo * 654drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, 655 const char *name, 656 unsigned long size, 657 unsigned long flags, 658 uint32_t tiling_mode, 659 unsigned long stride) 660{ 661 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; 662 drm_intel_bo_gem *bo_gem; 663 unsigned int page_size = getpagesize(); 664 int ret; 665 struct drm_intel_gem_bo_bucket *bucket; 666 bool alloc_from_cache; 667 unsigned long bo_size; 668 bool for_render = false; 669 670 if (flags & BO_ALLOC_FOR_RENDER) 671 
for_render = true; 672 673 /* Round the allocated size up to a power of two number of pages. */ 674 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size); 675 676 /* If we don't have caching at this size, don't actually round the 677 * allocation up. 678 */ 679 if (bucket == NULL) { 680 bo_size = size; 681 if (bo_size < page_size) 682 bo_size = page_size; 683 } else { 684 bo_size = bucket->size; 685 } 686 687 pthread_mutex_lock(&bufmgr_gem->lock); 688 /* Get a buffer out of the cache if available */ 689retry: 690 alloc_from_cache = false; 691 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) { 692 if (for_render) { 693 /* Allocate new render-target BOs from the tail (MRU) 694 * of the list, as it will likely be hot in the GPU 695 * cache and in the aperture for us. 696 */ 697 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, 698 bucket->head.prev, head); 699 DRMLISTDEL(&bo_gem->head); 700 alloc_from_cache = true; 701 } else { 702 /* For non-render-target BOs (where we're probably 703 * going to map it first thing in order to fill it 704 * with data), check if the last BO in the cache is 705 * unbusy, and only reuse in that case. Otherwise, 706 * allocating a new buffer is probably faster than 707 * waiting for the GPU to finish. 708 */ 709 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, 710 bucket->head.next, head); 711 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) { 712 alloc_from_cache = true; 713 DRMLISTDEL(&bo_gem->head); 714 } 715 } 716 717 if (alloc_from_cache) { 718 if (!drm_intel_gem_bo_madvise_internal 719 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) { 720 drm_intel_gem_bo_free(&bo_gem->bo); 721 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem, 722 bucket); 723 goto retry; 724 } 725 726 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo, 727 tiling_mode, 728 stride)) { 729 drm_intel_gem_bo_free(&bo_gem->bo); 730 goto retry; 731 } 732 } 733 } 734 pthread_mutex_unlock(&bufmgr_gem->lock); 735 736 if (!alloc_from_cache) { 737 struct drm_i915_gem_create create; 738 739 bo_gem = calloc(1, sizeof(*bo_gem)); 740 if (!bo_gem) 741 return NULL; 742 743 bo_gem->bo.size = bo_size; 744 745 VG_CLEAR(create); 746 create.size = bo_size; 747 748 ret = drmIoctl(bufmgr_gem->fd, 749 DRM_IOCTL_I915_GEM_CREATE, 750 &create); 751 bo_gem->gem_handle = create.handle; 752 bo_gem->bo.handle = bo_gem->gem_handle; 753 if (ret != 0) { 754 free(bo_gem); 755 return NULL; 756 } 757 bo_gem->bo.bufmgr = bufmgr; 758 759 bo_gem->tiling_mode = I915_TILING_NONE; 760 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 761 bo_gem->stride = 0; 762 763 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo, 764 tiling_mode, 765 stride)) { 766 drm_intel_gem_bo_free(&bo_gem->bo); 767 return NULL; 768 } 769 770 DRMINITLISTHEAD(&bo_gem->name_list); 771 DRMINITLISTHEAD(&bo_gem->vma_list); 772 } 773 774 bo_gem->name = name; 775 atomic_set(&bo_gem->refcount, 1); 776 bo_gem->validate_index = -1; 777 bo_gem->reloc_tree_fences = 0; 778 bo_gem->used_as_reloc_target = false; 779 bo_gem->has_error = false; 780 bo_gem->reusable = true; 781 bo_gem->aub_annotations = NULL; 782 bo_gem->aub_annotation_count = 0; 783 784 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem); 785 786 DBG("bo_create: buf %d (%s) %ldb\n", 787 bo_gem->gem_handle, bo_gem->name, size); 788 789 return &bo_gem->bo; 790} 791 792static drm_intel_bo * 793drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, 794 const char *name, 795 unsigned long size, 796 unsigned int alignment) 797{ 798 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 799 BO_ALLOC_FOR_RENDER, 800 
I915_TILING_NONE, 0); 801} 802 803static drm_intel_bo * 804drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, 805 const char *name, 806 unsigned long size, 807 unsigned int alignment) 808{ 809 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0, 810 I915_TILING_NONE, 0); 811} 812 813static drm_intel_bo * 814drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name, 815 int x, int y, int cpp, uint32_t *tiling_mode, 816 unsigned long *pitch, unsigned long flags) 817{ 818 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; 819 unsigned long size, stride; 820 uint32_t tiling; 821 822 do { 823 unsigned long aligned_y, height_alignment; 824 825 tiling = *tiling_mode; 826 827 /* If we're tiled, our allocations are in 8 or 32-row blocks, 828 * so failure to align our height means that we won't allocate 829 * enough pages. 830 * 831 * If we're untiled, we still have to align to 2 rows high 832 * because the data port accesses 2x2 blocks even if the 833 * bottom row isn't to be rendered, so failure to align means 834 * we could walk off the end of the GTT and fault. This is 835 * documented on 965, and may be the case on older chipsets 836 * too so we try to be careful. 837 */ 838 aligned_y = y; 839 height_alignment = 2; 840 841 if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE) 842 height_alignment = 16; 843 else if (tiling == I915_TILING_X 844 || (IS_915(bufmgr_gem->pci_device) 845 && tiling == I915_TILING_Y)) 846 height_alignment = 8; 847 else if (tiling == I915_TILING_Y) 848 height_alignment = 32; 849 aligned_y = ALIGN(y, height_alignment); 850 851 stride = x * cpp; 852 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode); 853 size = stride * aligned_y; 854 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode); 855 } while (*tiling_mode != tiling); 856 *pitch = stride; 857 858 if (tiling == I915_TILING_NONE) 859 stride = 0; 860 861 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags, 862 tiling, stride); 863} 864 865static drm_intel_bo * 866drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr, 867 const char *name, 868 void *addr, 869 uint32_t tiling_mode, 870 uint32_t stride, 871 unsigned long size, 872 unsigned long flags) 873{ 874 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; 875 drm_intel_bo_gem *bo_gem; 876 int ret; 877 struct drm_i915_gem_userptr userptr; 878 879 /* Tiling with userptr surfaces is not supported 880 * on all hardware so refuse it for time being. 
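 *
 * A hypothetical caller sketch (not part of this file), going through the
 * public drm_intel_bo_alloc_userptr() wrapper: the memory should be
 * page-aligned and must stay valid for the lifetime of the BO, e.g.
 *
 *   void *ptr = NULL;
 *   if (posix_memalign(&ptr, getpagesize(), size) == 0)
 *           bo = drm_intel_bo_alloc_userptr(bufmgr, "userptr data", ptr,
 *                                           I915_TILING_NONE, 0, size, 0);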
881 */ 882 if (tiling_mode != I915_TILING_NONE) 883 return NULL; 884 885 bo_gem = calloc(1, sizeof(*bo_gem)); 886 if (!bo_gem) 887 return NULL; 888 889 bo_gem->bo.size = size; 890 891 VG_CLEAR(userptr); 892 userptr.user_ptr = (__u64)((unsigned long)addr); 893 userptr.user_size = size; 894 userptr.flags = flags; 895 896 ret = drmIoctl(bufmgr_gem->fd, 897 DRM_IOCTL_I915_GEM_USERPTR, 898 &userptr); 899 if (ret != 0) { 900 DBG("bo_create_userptr: " 901 "ioctl failed with user ptr %p size 0x%lx, " 902 "user flags 0x%lx\n", addr, size, flags); 903 free(bo_gem); 904 return NULL; 905 } 906 907 bo_gem->gem_handle = userptr.handle; 908 bo_gem->bo.handle = bo_gem->gem_handle; 909 bo_gem->bo.bufmgr = bufmgr; 910 bo_gem->is_userptr = true; 911 bo_gem->bo.virtual = addr; 912 /* Save the address provided by user */ 913 bo_gem->user_virtual = addr; 914 bo_gem->tiling_mode = I915_TILING_NONE; 915 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 916 bo_gem->stride = 0; 917 918 DRMINITLISTHEAD(&bo_gem->name_list); 919 DRMINITLISTHEAD(&bo_gem->vma_list); 920 921 bo_gem->name = name; 922 atomic_set(&bo_gem->refcount, 1); 923 bo_gem->validate_index = -1; 924 bo_gem->reloc_tree_fences = 0; 925 bo_gem->used_as_reloc_target = false; 926 bo_gem->has_error = false; 927 bo_gem->reusable = false; 928 929 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem); 930 931 DBG("bo_create_userptr: " 932 "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n", 933 addr, bo_gem->gem_handle, bo_gem->name, 934 size, stride, tiling_mode); 935 936 return &bo_gem->bo; 937} 938 939/** 940 * Returns a drm_intel_bo wrapping the given buffer object handle. 941 * 942 * This can be used when one application needs to pass a buffer object 943 * to another. 944 */ 945drm_public drm_intel_bo * 946drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, 947 const char *name, 948 unsigned int handle) 949{ 950 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; 951 drm_intel_bo_gem *bo_gem; 952 int ret; 953 struct drm_gem_open open_arg; 954 struct drm_i915_gem_get_tiling get_tiling; 955 drmMMListHead *list; 956 957 /* At the moment most applications only have a few named bo. 958 * For instance, in a DRI client only the render buffers passed 959 * between X and the client are named. And since X returns the 960 * alternating names for the front/back buffer a linear search 961 * provides a sufficiently fast match. 
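 *
 * (The "handle" argument is the global flink name, i.e. what the
 * exporting process got back from drm_intel_bo_flink(); it is not a
 * local GEM handle or a prime fd.)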
962 */ 963 pthread_mutex_lock(&bufmgr_gem->lock); 964 for (list = bufmgr_gem->named.next; 965 list != &bufmgr_gem->named; 966 list = list->next) { 967 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list); 968 if (bo_gem->global_name == handle) { 969 drm_intel_gem_bo_reference(&bo_gem->bo); 970 pthread_mutex_unlock(&bufmgr_gem->lock); 971 return &bo_gem->bo; 972 } 973 } 974 975 VG_CLEAR(open_arg); 976 open_arg.name = handle; 977 ret = drmIoctl(bufmgr_gem->fd, 978 DRM_IOCTL_GEM_OPEN, 979 &open_arg); 980 if (ret != 0) { 981 DBG("Couldn't reference %s handle 0x%08x: %s\n", 982 name, handle, strerror(errno)); 983 pthread_mutex_unlock(&bufmgr_gem->lock); 984 return NULL; 985 } 986 /* Now see if someone has used a prime handle to get this 987 * object from the kernel before by looking through the list 988 * again for a matching gem_handle 989 */ 990 for (list = bufmgr_gem->named.next; 991 list != &bufmgr_gem->named; 992 list = list->next) { 993 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list); 994 if (bo_gem->gem_handle == open_arg.handle) { 995 drm_intel_gem_bo_reference(&bo_gem->bo); 996 pthread_mutex_unlock(&bufmgr_gem->lock); 997 return &bo_gem->bo; 998 } 999 } 1000 1001 bo_gem = calloc(1, sizeof(*bo_gem)); 1002 if (!bo_gem) { 1003 pthread_mutex_unlock(&bufmgr_gem->lock); 1004 return NULL; 1005 } 1006 1007 bo_gem->bo.size = open_arg.size; 1008 bo_gem->bo.offset = 0; 1009 bo_gem->bo.offset64 = 0; 1010 bo_gem->bo.virtual = NULL; 1011 bo_gem->bo.bufmgr = bufmgr; 1012 bo_gem->name = name; 1013 atomic_set(&bo_gem->refcount, 1); 1014 bo_gem->validate_index = -1; 1015 bo_gem->gem_handle = open_arg.handle; 1016 bo_gem->bo.handle = open_arg.handle; 1017 bo_gem->global_name = handle; 1018 bo_gem->reusable = false; 1019 1020 VG_CLEAR(get_tiling); 1021 get_tiling.handle = bo_gem->gem_handle; 1022 ret = drmIoctl(bufmgr_gem->fd, 1023 DRM_IOCTL_I915_GEM_GET_TILING, 1024 &get_tiling); 1025 if (ret != 0) { 1026 drm_intel_gem_bo_unreference(&bo_gem->bo); 1027 pthread_mutex_unlock(&bufmgr_gem->lock); 1028 return NULL; 1029 } 1030 bo_gem->tiling_mode = get_tiling.tiling_mode; 1031 bo_gem->swizzle_mode = get_tiling.swizzle_mode; 1032 /* XXX stride is unknown */ 1033 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem); 1034 1035 DRMINITLISTHEAD(&bo_gem->vma_list); 1036 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named); 1037 pthread_mutex_unlock(&bufmgr_gem->lock); 1038 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name); 1039 1040 return &bo_gem->bo; 1041} 1042 1043static void 1044drm_intel_gem_bo_free(drm_intel_bo *bo) 1045{ 1046 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 1047 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1048 struct drm_gem_close close; 1049 int ret; 1050 1051 DRMLISTDEL(&bo_gem->vma_list); 1052 if (bo_gem->mem_virtual) { 1053 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0)); 1054 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size); 1055 bufmgr_gem->vma_count--; 1056 } 1057 if (bo_gem->gtt_virtual) { 1058 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size); 1059 bufmgr_gem->vma_count--; 1060 } 1061 1062 /* Close this object */ 1063 VG_CLEAR(close); 1064 close.handle = bo_gem->gem_handle; 1065 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close); 1066 if (ret != 0) { 1067 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n", 1068 bo_gem->gem_handle, bo_gem->name, strerror(errno)); 1069 } 1070 free(bo_gem->aub_annotations); 1071 free(bo); 1072} 1073 1074static void 1075drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo) 1076{ 
1077#if HAVE_VALGRIND 1078 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1079 1080 if (bo_gem->mem_virtual) 1081 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size); 1082 1083 if (bo_gem->gtt_virtual) 1084 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size); 1085#endif 1086} 1087 1088/** Frees all cached buffers significantly older than @time. */ 1089static void 1090drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time) 1091{ 1092 int i; 1093 1094 if (bufmgr_gem->time == time) 1095 return; 1096 1097 for (i = 0; i < bufmgr_gem->num_buckets; i++) { 1098 struct drm_intel_gem_bo_bucket *bucket = 1099 &bufmgr_gem->cache_bucket[i]; 1100 1101 while (!DRMLISTEMPTY(&bucket->head)) { 1102 drm_intel_bo_gem *bo_gem; 1103 1104 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, 1105 bucket->head.next, head); 1106 if (time - bo_gem->free_time <= 1) 1107 break; 1108 1109 DRMLISTDEL(&bo_gem->head); 1110 1111 drm_intel_gem_bo_free(&bo_gem->bo); 1112 } 1113 } 1114 1115 bufmgr_gem->time = time; 1116} 1117 1118static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem) 1119{ 1120 int limit; 1121 1122 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__, 1123 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max); 1124 1125 if (bufmgr_gem->vma_max < 0) 1126 return; 1127 1128 /* We may need to evict a few entries in order to create new mmaps */ 1129 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open; 1130 if (limit < 0) 1131 limit = 0; 1132 1133 while (bufmgr_gem->vma_count > limit) { 1134 drm_intel_bo_gem *bo_gem; 1135 1136 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, 1137 bufmgr_gem->vma_cache.next, 1138 vma_list); 1139 assert(bo_gem->map_count == 0); 1140 DRMLISTDELINIT(&bo_gem->vma_list); 1141 1142 if (bo_gem->mem_virtual) { 1143 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size); 1144 bo_gem->mem_virtual = NULL; 1145 bufmgr_gem->vma_count--; 1146 } 1147 if (bo_gem->gtt_virtual) { 1148 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size); 1149 bo_gem->gtt_virtual = NULL; 1150 bufmgr_gem->vma_count--; 1151 } 1152 } 1153} 1154 1155static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem, 1156 drm_intel_bo_gem *bo_gem) 1157{ 1158 bufmgr_gem->vma_open--; 1159 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache); 1160 if (bo_gem->mem_virtual) 1161 bufmgr_gem->vma_count++; 1162 if (bo_gem->gtt_virtual) 1163 bufmgr_gem->vma_count++; 1164 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem); 1165} 1166 1167static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem, 1168 drm_intel_bo_gem *bo_gem) 1169{ 1170 bufmgr_gem->vma_open++; 1171 DRMLISTDEL(&bo_gem->vma_list); 1172 if (bo_gem->mem_virtual) 1173 bufmgr_gem->vma_count--; 1174 if (bo_gem->gtt_virtual) 1175 bufmgr_gem->vma_count--; 1176 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem); 1177} 1178 1179static void 1180drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time) 1181{ 1182 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 1183 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1184 struct drm_intel_gem_bo_bucket *bucket; 1185 int i; 1186 1187 /* Unreference all the target buffers */ 1188 for (i = 0; i < bo_gem->reloc_count; i++) { 1189 if (bo_gem->reloc_target_info[i].bo != bo) { 1190 drm_intel_gem_bo_unreference_locked_timed(bo_gem-> 1191 reloc_target_info[i].bo, 1192 time); 1193 } 1194 } 1195 bo_gem->reloc_count = 0; 1196 bo_gem->used_as_reloc_target = false; 1197 1198 DBG("bo_unreference final: %d (%s)\n", 1199 bo_gem->gem_handle, bo_gem->name); 
1200 1201 /* release memory associated with this object */ 1202 if (bo_gem->reloc_target_info) { 1203 free(bo_gem->reloc_target_info); 1204 bo_gem->reloc_target_info = NULL; 1205 } 1206 if (bo_gem->relocs) { 1207 free(bo_gem->relocs); 1208 bo_gem->relocs = NULL; 1209 } 1210 1211 /* Clear any left-over mappings */ 1212 if (bo_gem->map_count) { 1213 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count); 1214 bo_gem->map_count = 0; 1215 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem); 1216 drm_intel_gem_bo_mark_mmaps_incoherent(bo); 1217 } 1218 1219 DRMLISTDEL(&bo_gem->name_list); 1220 1221 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size); 1222 /* Put the buffer into our internal cache for reuse if we can. */ 1223 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL && 1224 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem, 1225 I915_MADV_DONTNEED)) { 1226 bo_gem->free_time = time; 1227 1228 bo_gem->name = NULL; 1229 bo_gem->validate_index = -1; 1230 1231 DRMLISTADDTAIL(&bo_gem->head, &bucket->head); 1232 } else { 1233 drm_intel_gem_bo_free(bo); 1234 } 1235} 1236 1237static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo, 1238 time_t time) 1239{ 1240 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1241 1242 assert(atomic_read(&bo_gem->refcount) > 0); 1243 if (atomic_dec_and_test(&bo_gem->refcount)) 1244 drm_intel_gem_bo_unreference_final(bo, time); 1245} 1246 1247static void drm_intel_gem_bo_unreference(drm_intel_bo *bo) 1248{ 1249 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1250 1251 assert(atomic_read(&bo_gem->refcount) > 0); 1252 1253 if (atomic_add_unless(&bo_gem->refcount, -1, 1)) { 1254 drm_intel_bufmgr_gem *bufmgr_gem = 1255 (drm_intel_bufmgr_gem *) bo->bufmgr; 1256 struct timespec time; 1257 1258 clock_gettime(CLOCK_MONOTONIC, &time); 1259 1260 pthread_mutex_lock(&bufmgr_gem->lock); 1261 1262 if (atomic_dec_and_test(&bo_gem->refcount)) { 1263 drm_intel_gem_bo_unreference_final(bo, time.tv_sec); 1264 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec); 1265 } 1266 1267 pthread_mutex_unlock(&bufmgr_gem->lock); 1268 } 1269} 1270 1271static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable) 1272{ 1273 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 1274 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1275 struct drm_i915_gem_set_domain set_domain; 1276 int ret; 1277 1278 if (bo_gem->is_userptr) { 1279 /* Return the same user ptr */ 1280 bo->virtual = bo_gem->user_virtual; 1281 return 0; 1282 } 1283 1284 pthread_mutex_lock(&bufmgr_gem->lock); 1285 1286 if (bo_gem->map_count++ == 0) 1287 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem); 1288 1289 if (!bo_gem->mem_virtual) { 1290 struct drm_i915_gem_mmap mmap_arg; 1291 1292 DBG("bo_map: %d (%s), map_count=%d\n", 1293 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count); 1294 1295 VG_CLEAR(mmap_arg); 1296 mmap_arg.handle = bo_gem->gem_handle; 1297 mmap_arg.offset = 0; 1298 mmap_arg.size = bo->size; 1299 ret = drmIoctl(bufmgr_gem->fd, 1300 DRM_IOCTL_I915_GEM_MMAP, 1301 &mmap_arg); 1302 if (ret != 0) { 1303 ret = -errno; 1304 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n", 1305 __FILE__, __LINE__, bo_gem->gem_handle, 1306 bo_gem->name, strerror(errno)); 1307 if (--bo_gem->map_count == 0) 1308 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem); 1309 pthread_mutex_unlock(&bufmgr_gem->lock); 1310 return ret; 1311 } 1312 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1)); 1313 bo_gem->mem_virtual = (void *)(uintptr_t) 
mmap_arg.addr_ptr; 1314 } 1315 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, 1316 bo_gem->mem_virtual); 1317 bo->virtual = bo_gem->mem_virtual; 1318 1319 VG_CLEAR(set_domain); 1320 set_domain.handle = bo_gem->gem_handle; 1321 set_domain.read_domains = I915_GEM_DOMAIN_CPU; 1322 if (write_enable) 1323 set_domain.write_domain = I915_GEM_DOMAIN_CPU; 1324 else 1325 set_domain.write_domain = 0; 1326 ret = drmIoctl(bufmgr_gem->fd, 1327 DRM_IOCTL_I915_GEM_SET_DOMAIN, 1328 &set_domain); 1329 if (ret != 0) { 1330 DBG("%s:%d: Error setting to CPU domain %d: %s\n", 1331 __FILE__, __LINE__, bo_gem->gem_handle, 1332 strerror(errno)); 1333 } 1334 1335 if (write_enable) 1336 bo_gem->mapped_cpu_write = true; 1337 1338 drm_intel_gem_bo_mark_mmaps_incoherent(bo); 1339 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size)); 1340 pthread_mutex_unlock(&bufmgr_gem->lock); 1341 1342 return 0; 1343} 1344 1345static int 1346map_gtt(drm_intel_bo *bo) 1347{ 1348 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 1349 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1350 int ret; 1351 1352 if (bo_gem->is_userptr) 1353 return -EINVAL; 1354 1355 if (bo_gem->map_count++ == 0) 1356 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem); 1357 1358 /* Get a mapping of the buffer if we haven't before. */ 1359 if (bo_gem->gtt_virtual == NULL) { 1360 struct drm_i915_gem_mmap_gtt mmap_arg; 1361 1362 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n", 1363 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count); 1364 1365 VG_CLEAR(mmap_arg); 1366 mmap_arg.handle = bo_gem->gem_handle; 1367 1368 /* Get the fake offset back... */ 1369 ret = drmIoctl(bufmgr_gem->fd, 1370 DRM_IOCTL_I915_GEM_MMAP_GTT, 1371 &mmap_arg); 1372 if (ret != 0) { 1373 ret = -errno; 1374 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n", 1375 __FILE__, __LINE__, 1376 bo_gem->gem_handle, bo_gem->name, 1377 strerror(errno)); 1378 if (--bo_gem->map_count == 0) 1379 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem); 1380 return ret; 1381 } 1382 1383 /* and mmap it */ 1384 ret = drmMap(bufmgr_gem->fd, mmap_arg.offset, bo->size, 1385 &bo_gem->gtt_virtual); 1386 if (ret) { 1387 bo_gem->gtt_virtual = NULL; 1388 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n", 1389 __FILE__, __LINE__, 1390 bo_gem->gem_handle, bo_gem->name, 1391 strerror(errno)); 1392 if (--bo_gem->map_count == 0) 1393 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem); 1394 return ret; 1395 } 1396 } 1397 1398 bo->virtual = bo_gem->gtt_virtual; 1399 1400 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, 1401 bo_gem->gtt_virtual); 1402 1403 return 0; 1404} 1405 1406drm_public int 1407drm_intel_gem_bo_map_gtt(drm_intel_bo *bo) 1408{ 1409 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 1410 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1411 struct drm_i915_gem_set_domain set_domain; 1412 int ret; 1413 1414 pthread_mutex_lock(&bufmgr_gem->lock); 1415 1416 ret = map_gtt(bo); 1417 if (ret) { 1418 pthread_mutex_unlock(&bufmgr_gem->lock); 1419 return ret; 1420 } 1421 1422 /* Now move it to the GTT domain so that the GPU and CPU 1423 * caches are flushed and the GPU isn't actively using the 1424 * buffer. 1425 * 1426 * The pagefault handler does this domain change for us when 1427 * it has unbound the BO from the GTT, but it's up to us to 1428 * tell it when we're about to use things if we had done 1429 * rendering and it still happens to be bound to the GTT. 
1430 */ 1431 VG_CLEAR(set_domain); 1432 set_domain.handle = bo_gem->gem_handle; 1433 set_domain.read_domains = I915_GEM_DOMAIN_GTT; 1434 set_domain.write_domain = I915_GEM_DOMAIN_GTT; 1435 ret = drmIoctl(bufmgr_gem->fd, 1436 DRM_IOCTL_I915_GEM_SET_DOMAIN, 1437 &set_domain); 1438 if (ret != 0) { 1439 DBG("%s:%d: Error setting domain %d: %s\n", 1440 __FILE__, __LINE__, bo_gem->gem_handle, 1441 strerror(errno)); 1442 } 1443 1444 drm_intel_gem_bo_mark_mmaps_incoherent(bo); 1445 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size)); 1446 pthread_mutex_unlock(&bufmgr_gem->lock); 1447 1448 return 0; 1449} 1450 1451/** 1452 * Performs a mapping of the buffer object like the normal GTT 1453 * mapping, but avoids waiting for the GPU to be done reading from or 1454 * rendering to the buffer. 1455 * 1456 * This is used in the implementation of GL_ARB_map_buffer_range: The 1457 * user asks to create a buffer, then does a mapping, fills some 1458 * space, runs a drawing command, then asks to map it again without 1459 * synchronizing because it guarantees that it won't write over the 1460 * data that the GPU is busy using (or, more specifically, that if it 1461 * does write over the data, it acknowledges that rendering is 1462 * undefined). 1463 */ 1464 1465drm_public int 1466drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo) 1467{ 1468 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 1469#ifdef HAVE_VALGRIND 1470 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1471#endif 1472 int ret; 1473 1474 /* If the CPU cache isn't coherent with the GTT, then use a 1475 * regular synchronized mapping. The problem is that we don't 1476 * track where the buffer was last used on the CPU side in 1477 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so 1478 * we would potentially corrupt the buffer even when the user 1479 * does reasonable things. 1480 */ 1481 if (!bufmgr_gem->has_llc) 1482 return drm_intel_gem_bo_map_gtt(bo); 1483 1484 pthread_mutex_lock(&bufmgr_gem->lock); 1485 1486 ret = map_gtt(bo); 1487 if (ret == 0) { 1488 drm_intel_gem_bo_mark_mmaps_incoherent(bo); 1489 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size)); 1490 } 1491 1492 pthread_mutex_unlock(&bufmgr_gem->lock); 1493 1494 return ret; 1495} 1496 1497static int drm_intel_gem_bo_unmap(drm_intel_bo *bo) 1498{ 1499 drm_intel_bufmgr_gem *bufmgr_gem; 1500 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1501 int ret = 0; 1502 1503 if (bo == NULL) 1504 return 0; 1505 1506 if (bo_gem->is_userptr) 1507 return 0; 1508 1509 bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 1510 1511 pthread_mutex_lock(&bufmgr_gem->lock); 1512 1513 if (bo_gem->map_count <= 0) { 1514 DBG("attempted to unmap an unmapped bo\n"); 1515 pthread_mutex_unlock(&bufmgr_gem->lock); 1516 /* Preserve the old behaviour of just treating this as a 1517 * no-op rather than reporting the error. 1518 */ 1519 return 0; 1520 } 1521 1522 if (bo_gem->mapped_cpu_write) { 1523 struct drm_i915_gem_sw_finish sw_finish; 1524 1525 /* Cause a flush to happen if the buffer's pinned for 1526 * scanout, so the results show up in a timely manner. 1527 * Unlike GTT set domains, this only does work if the 1528 * buffer should be scanout-related. 1529 */ 1530 VG_CLEAR(sw_finish); 1531 sw_finish.handle = bo_gem->gem_handle; 1532 ret = drmIoctl(bufmgr_gem->fd, 1533 DRM_IOCTL_I915_GEM_SW_FINISH, 1534 &sw_finish); 1535 ret = ret == -1 ? 
-errno : 0; 1536 1537 bo_gem->mapped_cpu_write = false; 1538 } 1539 1540 /* We need to unmap after every innovation as we cannot track 1541 * an open vma for every bo as that will exhaasut the system 1542 * limits and cause later failures. 1543 */ 1544 if (--bo_gem->map_count == 0) { 1545 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem); 1546 drm_intel_gem_bo_mark_mmaps_incoherent(bo); 1547 bo->virtual = NULL; 1548 } 1549 pthread_mutex_unlock(&bufmgr_gem->lock); 1550 1551 return ret; 1552} 1553 1554drm_public int 1555drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo) 1556{ 1557 return drm_intel_gem_bo_unmap(bo); 1558} 1559 1560static int 1561drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset, 1562 unsigned long size, const void *data) 1563{ 1564 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 1565 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1566 struct drm_i915_gem_pwrite pwrite; 1567 int ret; 1568 1569 if (bo_gem->is_userptr) 1570 return -EINVAL; 1571 1572 VG_CLEAR(pwrite); 1573 pwrite.handle = bo_gem->gem_handle; 1574 pwrite.offset = offset; 1575 pwrite.size = size; 1576 pwrite.data_ptr = (uint64_t) (uintptr_t) data; 1577 ret = drmIoctl(bufmgr_gem->fd, 1578 DRM_IOCTL_I915_GEM_PWRITE, 1579 &pwrite); 1580 if (ret != 0) { 1581 ret = -errno; 1582 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n", 1583 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset, 1584 (int)size, strerror(errno)); 1585 } 1586 1587 return ret; 1588} 1589 1590static int 1591drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id) 1592{ 1593 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; 1594 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id; 1595 int ret; 1596 1597 VG_CLEAR(get_pipe_from_crtc_id); 1598 get_pipe_from_crtc_id.crtc_id = crtc_id; 1599 ret = drmIoctl(bufmgr_gem->fd, 1600 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID, 1601 &get_pipe_from_crtc_id); 1602 if (ret != 0) { 1603 /* We return -1 here to signal that we don't 1604 * know which pipe is associated with this crtc. 1605 * This lets the caller know that this information 1606 * isn't available; using the wrong pipe for 1607 * vblank waiting can cause the chipset to lock up 1608 */ 1609 return -1; 1610 } 1611 1612 return get_pipe_from_crtc_id.pipe; 1613} 1614 1615static int 1616drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset, 1617 unsigned long size, void *data) 1618{ 1619 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 1620 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1621 struct drm_i915_gem_pread pread; 1622 int ret; 1623 1624 if (bo_gem->is_userptr) 1625 return -EINVAL; 1626 1627 VG_CLEAR(pread); 1628 pread.handle = bo_gem->gem_handle; 1629 pread.offset = offset; 1630 pread.size = size; 1631 pread.data_ptr = (uint64_t) (uintptr_t) data; 1632 ret = drmIoctl(bufmgr_gem->fd, 1633 DRM_IOCTL_I915_GEM_PREAD, 1634 &pread); 1635 if (ret != 0) { 1636 ret = -errno; 1637 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n", 1638 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset, 1639 (int)size, strerror(errno)); 1640 } 1641 1642 return ret; 1643} 1644 1645/** Waits for all GPU rendering with the object to have completed. */ 1646static void 1647drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo) 1648{ 1649 drm_intel_gem_bo_start_gtt_access(bo, 1); 1650} 1651 1652/** 1653 * Waits on a BO for the given amount of time. 1654 * 1655 * @bo: buffer object to wait for 1656 * @timeout_ns: amount of time to wait in nanoseconds. 
1657 * If value is less than 0, an infinite wait will occur. 1658 * 1659 * Returns 0 if the wait was successful ie. the last batch referencing the 1660 * object has completed within the allotted time. Otherwise some negative return 1661 * value describes the error. Of particular interest is -ETIME when the wait has 1662 * failed to yield the desired result. 1663 * 1664 * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows 1665 * the operation to give up after a certain amount of time. Another subtle 1666 * difference is the internal locking semantics are different (this variant does 1667 * not hold the lock for the duration of the wait). This makes the wait subject 1668 * to a larger userspace race window. 1669 * 1670 * The implementation shall wait until the object is no longer actively 1671 * referenced within a batch buffer at the time of the call. The wait will 1672 * not guarantee that the buffer is re-issued via another thread, or an flinked 1673 * handle. Userspace must make sure this race does not occur if such precision 1674 * is important. 1675 */ 1676drm_public int 1677drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns) 1678{ 1679 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 1680 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1681 struct drm_i915_gem_wait wait; 1682 int ret; 1683 1684 if (!bufmgr_gem->has_wait_timeout) { 1685 DBG("%s:%d: Timed wait is not supported. Falling back to " 1686 "infinite wait\n", __FILE__, __LINE__); 1687 if (timeout_ns) { 1688 drm_intel_gem_bo_wait_rendering(bo); 1689 return 0; 1690 } else { 1691 return drm_intel_gem_bo_busy(bo) ? -ETIME : 0; 1692 } 1693 } 1694 1695 wait.bo_handle = bo_gem->gem_handle; 1696 wait.timeout_ns = timeout_ns; 1697 wait.flags = 0; 1698 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait); 1699 if (ret == -1) 1700 return -errno; 1701 1702 return ret; 1703} 1704 1705/** 1706 * Sets the object to the GTT read and possibly write domain, used by the X 1707 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt(). 1708 * 1709 * In combination with drm_intel_gem_bo_pin() and manual fence management, we 1710 * can do tiled pixmaps this way. 1711 */ 1712drm_public void 1713drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable) 1714{ 1715 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 1716 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1717 struct drm_i915_gem_set_domain set_domain; 1718 int ret; 1719 1720 VG_CLEAR(set_domain); 1721 set_domain.handle = bo_gem->gem_handle; 1722 set_domain.read_domains = I915_GEM_DOMAIN_GTT; 1723 set_domain.write_domain = write_enable ? 
I915_GEM_DOMAIN_GTT : 0; 1724 ret = drmIoctl(bufmgr_gem->fd, 1725 DRM_IOCTL_I915_GEM_SET_DOMAIN, 1726 &set_domain); 1727 if (ret != 0) { 1728 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n", 1729 __FILE__, __LINE__, bo_gem->gem_handle, 1730 set_domain.read_domains, set_domain.write_domain, 1731 strerror(errno)); 1732 } 1733} 1734 1735static void 1736drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr) 1737{ 1738 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; 1739 int i; 1740 1741 free(bufmgr_gem->exec2_objects); 1742 free(bufmgr_gem->exec_objects); 1743 free(bufmgr_gem->exec_bos); 1744 free(bufmgr_gem->aub_filename); 1745 1746 pthread_mutex_destroy(&bufmgr_gem->lock); 1747 1748 /* Free any cached buffer objects we were going to reuse */ 1749 for (i = 0; i < bufmgr_gem->num_buckets; i++) { 1750 struct drm_intel_gem_bo_bucket *bucket = 1751 &bufmgr_gem->cache_bucket[i]; 1752 drm_intel_bo_gem *bo_gem; 1753 1754 while (!DRMLISTEMPTY(&bucket->head)) { 1755 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, 1756 bucket->head.next, head); 1757 DRMLISTDEL(&bo_gem->head); 1758 1759 drm_intel_gem_bo_free(&bo_gem->bo); 1760 } 1761 } 1762 1763 free(bufmgr); 1764} 1765 1766/** 1767 * Adds the target buffer to the validation list and adds the relocation 1768 * to the reloc_buffer's relocation list. 1769 * 1770 * The relocation entry at the given offset must already contain the 1771 * precomputed relocation value, because the kernel will optimize out 1772 * the relocation entry write when the buffer hasn't moved from the 1773 * last known offset in target_bo. 1774 */ 1775static int 1776do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, 1777 drm_intel_bo *target_bo, uint32_t target_offset, 1778 uint32_t read_domains, uint32_t write_domain, 1779 bool need_fence) 1780{ 1781 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 1782 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1783 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo; 1784 bool fenced_command; 1785 1786 if (bo_gem->has_error) 1787 return -ENOMEM; 1788 1789 if (target_bo_gem->has_error) { 1790 bo_gem->has_error = true; 1791 return -ENOMEM; 1792 } 1793 1794 /* We never use HW fences for rendering on 965+ */ 1795 if (bufmgr_gem->gen >= 4) 1796 need_fence = false; 1797 1798 fenced_command = need_fence; 1799 if (target_bo_gem->tiling_mode == I915_TILING_NONE) 1800 need_fence = false; 1801 1802 /* Create a new relocation list if needed */ 1803 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo)) 1804 return -ENOMEM; 1805 1806 /* Check overflow */ 1807 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs); 1808 1809 /* Check args */ 1810 assert(offset <= bo->size - 4); 1811 assert((write_domain & (write_domain - 1)) == 0); 1812 1813 /* Make sure that we're not adding a reloc to something whose size has 1814 * already been accounted for. 1815 */ 1816 assert(!bo_gem->used_as_reloc_target); 1817 if (target_bo_gem != bo_gem) { 1818 target_bo_gem->used_as_reloc_target = true; 1819 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size; 1820 } 1821 /* An object needing a fence is a tiled buffer, so it won't have 1822 * relocs to other buffers. 
1823 */ 1824 if (need_fence) 1825 target_bo_gem->reloc_tree_fences = 1; 1826 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences; 1827 1828 bo_gem->relocs[bo_gem->reloc_count].offset = offset; 1829 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset; 1830 bo_gem->relocs[bo_gem->reloc_count].target_handle = 1831 target_bo_gem->gem_handle; 1832 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains; 1833 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain; 1834 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64; 1835 1836 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo; 1837 if (target_bo != bo) 1838 drm_intel_gem_bo_reference(target_bo); 1839 if (fenced_command) 1840 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 1841 DRM_INTEL_RELOC_FENCE; 1842 else 1843 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0; 1844 1845 bo_gem->reloc_count++; 1846 1847 return 0; 1848} 1849 1850static int 1851drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset, 1852 drm_intel_bo *target_bo, uint32_t target_offset, 1853 uint32_t read_domains, uint32_t write_domain) 1854{ 1855 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; 1856 1857 return do_bo_emit_reloc(bo, offset, target_bo, target_offset, 1858 read_domains, write_domain, 1859 !bufmgr_gem->fenced_relocs); 1860} 1861 1862static int 1863drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset, 1864 drm_intel_bo *target_bo, 1865 uint32_t target_offset, 1866 uint32_t read_domains, uint32_t write_domain) 1867{ 1868 return do_bo_emit_reloc(bo, offset, target_bo, target_offset, 1869 read_domains, write_domain, true); 1870} 1871 1872drm_public int 1873drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo) 1874{ 1875 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1876 1877 return bo_gem->reloc_count; 1878} 1879 1880/** 1881 * Removes existing relocation entries in the BO after "start". 1882 * 1883 * This allows a user to avoid a two-step process for state setup with 1884 * counting up all the buffer objects and doing a 1885 * drm_intel_bufmgr_check_aperture_space() before emitting any of the 1886 * relocations for the state setup. Instead, save the state of the 1887 * batchbuffer including drm_intel_gem_get_reloc_count(), emit all the 1888 * state, and then check if it still fits in the aperture. 1889 * 1890 * Any further drm_intel_bufmgr_check_aperture_space() queries 1891 * involving this buffer in the tree are undefined after this call. 
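 *
 * A sketch of that two-step pattern (hypothetical caller code):
 *
 *   int start = drm_intel_gem_bo_get_reloc_count(batch_bo);
 *   ... emit state and its relocations into batch_bo ...
 *   if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0) {
 *           drm_intel_gem_bo_clear_relocs(batch_bo, start);
 *           ... flush the batch and re-emit the state afterwards ...
 *   }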
1892 */ 1893drm_public void 1894drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start) 1895{ 1896 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 1897 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1898 int i; 1899 struct timespec time; 1900 1901 clock_gettime(CLOCK_MONOTONIC, &time); 1902 1903 assert(bo_gem->reloc_count >= start); 1904 1905 /* Unreference the cleared target buffers */ 1906 pthread_mutex_lock(&bufmgr_gem->lock); 1907 1908 for (i = start; i < bo_gem->reloc_count; i++) { 1909 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo; 1910 if (&target_bo_gem->bo != bo) { 1911 bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences; 1912 drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, 1913 time.tv_sec); 1914 } 1915 } 1916 bo_gem->reloc_count = start; 1917 1918 pthread_mutex_unlock(&bufmgr_gem->lock); 1919 1920} 1921 1922/** 1923 * Walk the tree of relocations rooted at BO and accumulate the list of 1924 * validations to be performed and update the relocation buffers with 1925 * index values into the validation list. 1926 */ 1927static void 1928drm_intel_gem_bo_process_reloc(drm_intel_bo *bo) 1929{ 1930 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1931 int i; 1932 1933 if (bo_gem->relocs == NULL) 1934 return; 1935 1936 for (i = 0; i < bo_gem->reloc_count; i++) { 1937 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo; 1938 1939 if (target_bo == bo) 1940 continue; 1941 1942 drm_intel_gem_bo_mark_mmaps_incoherent(bo); 1943 1944 /* Continue walking the tree depth-first. */ 1945 drm_intel_gem_bo_process_reloc(target_bo); 1946 1947 /* Add the target to the validate list */ 1948 drm_intel_add_validate_buffer(target_bo); 1949 } 1950} 1951 1952static void 1953drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo) 1954{ 1955 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; 1956 int i; 1957 1958 if (bo_gem->relocs == NULL) 1959 return; 1960 1961 for (i = 0; i < bo_gem->reloc_count; i++) { 1962 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo; 1963 int need_fence; 1964 1965 if (target_bo == bo) 1966 continue; 1967 1968 drm_intel_gem_bo_mark_mmaps_incoherent(bo); 1969 1970 /* Continue walking the tree depth-first. */ 1971 drm_intel_gem_bo_process_reloc2(target_bo); 1972 1973 need_fence = (bo_gem->reloc_target_info[i].flags & 1974 DRM_INTEL_RELOC_FENCE); 1975 1976 /* Add the target to the validate list */ 1977 drm_intel_add_validate_buffer2(target_bo, need_fence); 1978 } 1979} 1980 1981 1982static void 1983drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem) 1984{ 1985 int i; 1986 1987 for (i = 0; i < bufmgr_gem->exec_count; i++) { 1988 drm_intel_bo *bo = bufmgr_gem->exec_bos[i]; 1989 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 1990 1991 /* Update the buffer offset */ 1992 if (bufmgr_gem->exec_objects[i].offset != bo->offset64) { 1993 DBG("BO %d (%s) migrated: 0x%08llx -> 0x%08llx\n", 1994 bo_gem->gem_handle, bo_gem->name, 1995 (unsigned long long)bo->offset64, 1996 (unsigned long long)bufmgr_gem->exec_objects[i]. 
1997 offset); 1998 bo->offset64 = bufmgr_gem->exec_objects[i].offset; 1999 bo->offset = bufmgr_gem->exec_objects[i].offset; 2000 } 2001 } 2002} 2003 2004static void 2005drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem) 2006{ 2007 int i; 2008 2009 for (i = 0; i < bufmgr_gem->exec_count; i++) { 2010 drm_intel_bo *bo = bufmgr_gem->exec_bos[i]; 2011 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; 2012 2013 /* Update the buffer offset */ 2014 if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) { 2015 DBG("BO %d (%s) migrated: 0x%08llx -> 0x%08llx\n", 2016 bo_gem->gem_handle, bo_gem->name, 2017 (unsigned long long)bo->offset64, 2018 (unsigned long long)bufmgr_gem->exec2_objects[i].offset); 2019 bo->offset64 = bufmgr_gem->exec2_objects[i].offset; 2020 bo->offset = bufmgr_gem->exec2_objects[i].offset; 2021 } 2022 } 2023} 2024 2025static void 2026aub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data) 2027{ 2028 fwrite(&data, 1, 4, bufmgr_gem->aub_file); 2029} 2030 2031static void 2032aub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size) 2033{ 2034 fwrite(data, 1, size, bufmgr_gem->aub_file); 2035} 2036 2037static void 2038aub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size) 2039{ 2040 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 2041 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2042 uint32_t *data; 2043 unsigned int i; 2044 2045 data = malloc(bo->size); 2046 drm_intel_bo_get_subdata(bo, offset, size, data); 2047 2048 /* Easy mode: write out bo with no relocations */ 2049 if (!bo_gem->reloc_count) { 2050 aub_out_data(bufmgr_gem, data, size); 2051 free(data); 2052 return; 2053 } 2054 2055 /* Otherwise, handle the relocations while writing. */ 2056 for (i = 0; i < size / 4; i++) { 2057 int r; 2058 for (r = 0; r < bo_gem->reloc_count; r++) { 2059 struct drm_i915_gem_relocation_entry *reloc; 2060 drm_intel_reloc_target *info; 2061 2062 reloc = &bo_gem->relocs[r]; 2063 info = &bo_gem->reloc_target_info[r]; 2064 2065 if (reloc->offset == offset + i * 4) { 2066 drm_intel_bo_gem *target_gem; 2067 uint32_t val; 2068 2069 target_gem = (drm_intel_bo_gem *)info->bo; 2070 2071 val = reloc->delta; 2072 val += target_gem->aub_offset; 2073 2074 aub_out(bufmgr_gem, val); 2075 data[i] = val; 2076 break; 2077 } 2078 } 2079 if (r == bo_gem->reloc_count) { 2080 /* no relocation, just the data */ 2081 aub_out(bufmgr_gem, data[i]); 2082 } 2083 } 2084 2085 free(data); 2086} 2087 2088static void 2089aub_bo_get_address(drm_intel_bo *bo) 2090{ 2091 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 2092 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2093 2094 /* Give the object a graphics address in the AUB file. We 2095 * don't just use the GEM object address because we do AUB 2096 * dumping before execution -- we want to successfully log 2097 * when the hardware might hang, and we might even want to aub 2098 * capture for a driver trying to execute on a different 2099 * generation of hardware by disabling the actual kernel exec 2100 * call. 2101 */ 2102 bo_gem->aub_offset = bufmgr_gem->aub_offset; 2103 bufmgr_gem->aub_offset += bo->size; 2104 /* XXX: Handle aperture overflow. 
*/ 2105 assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024); 2106} 2107 2108static void 2109aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype, 2110 uint32_t offset, uint32_t size) 2111{ 2112 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 2113 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2114 2115 aub_out(bufmgr_gem, 2116 CMD_AUB_TRACE_HEADER_BLOCK | 2117 ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2)); 2118 aub_out(bufmgr_gem, 2119 AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE); 2120 aub_out(bufmgr_gem, subtype); 2121 aub_out(bufmgr_gem, bo_gem->aub_offset + offset); 2122 aub_out(bufmgr_gem, size); 2123 if (bufmgr_gem->gen >= 8) 2124 aub_out(bufmgr_gem, 0); 2125 aub_write_bo_data(bo, offset, size); 2126} 2127 2128/** 2129 * Break up large objects into multiple writes. Otherwise a 128kb VBO 2130 * would overflow the 16 bits of size field in the packet header and 2131 * everything goes badly after that. 2132 */ 2133static void 2134aub_write_large_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype, 2135 uint32_t offset, uint32_t size) 2136{ 2137 uint32_t block_size; 2138 uint32_t sub_offset; 2139 2140 for (sub_offset = 0; sub_offset < size; sub_offset += block_size) { 2141 block_size = size - sub_offset; 2142 2143 if (block_size > 8 * 4096) 2144 block_size = 8 * 4096; 2145 2146 aub_write_trace_block(bo, type, subtype, offset + sub_offset, 2147 block_size); 2148 } 2149} 2150 2151static void 2152aub_write_bo(drm_intel_bo *bo) 2153{ 2154 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2155 uint32_t offset = 0; 2156 unsigned i; 2157 2158 aub_bo_get_address(bo); 2159 2160 /* Write out each annotated section separately. */ 2161 for (i = 0; i < bo_gem->aub_annotation_count; ++i) { 2162 drm_intel_aub_annotation *annotation = 2163 &bo_gem->aub_annotations[i]; 2164 uint32_t ending_offset = annotation->ending_offset; 2165 if (ending_offset > bo->size) 2166 ending_offset = bo->size; 2167 if (ending_offset > offset) { 2168 aub_write_large_trace_block(bo, annotation->type, 2169 annotation->subtype, 2170 offset, 2171 ending_offset - offset); 2172 offset = ending_offset; 2173 } 2174 } 2175 2176 /* Write out any remaining unannotated data */ 2177 if (offset < bo->size) { 2178 aub_write_large_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0, 2179 offset, bo->size - offset); 2180 } 2181} 2182 2183/* 2184 * Make a ringbuffer on fly and dump it 2185 */ 2186static void 2187aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem, 2188 uint32_t batch_buffer, int ring_flag) 2189{ 2190 uint32_t ringbuffer[4096]; 2191 int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */ 2192 int ring_count = 0; 2193 2194 if (ring_flag == I915_EXEC_BSD) 2195 ring = AUB_TRACE_TYPE_RING_PRB1; 2196 else if (ring_flag == I915_EXEC_BLT) 2197 ring = AUB_TRACE_TYPE_RING_PRB2; 2198 2199 /* Make a ring buffer to execute our batchbuffer. */ 2200 memset(ringbuffer, 0, sizeof(ringbuffer)); 2201 if (bufmgr_gem->gen >= 8) { 2202 ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START | (3 - 2); 2203 ringbuffer[ring_count++] = batch_buffer; 2204 ringbuffer[ring_count++] = 0; 2205 } else { 2206 ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START; 2207 ringbuffer[ring_count++] = batch_buffer; 2208 } 2209 2210 /* Write out the ring. This appears to trigger execution of 2211 * the ring in the simulator. 2212 */ 2213 aub_out(bufmgr_gem, 2214 CMD_AUB_TRACE_HEADER_BLOCK | 2215 ((bufmgr_gem->gen >= 8 ? 
6 : 5) - 2)); 2216 aub_out(bufmgr_gem, 2217 AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE); 2218 aub_out(bufmgr_gem, 0); /* general/surface subtype */ 2219 aub_out(bufmgr_gem, bufmgr_gem->aub_offset); 2220 aub_out(bufmgr_gem, ring_count * 4); 2221 if (bufmgr_gem->gen >= 8) 2222 aub_out(bufmgr_gem, 0); 2223 2224 /* FIXME: Need some flush operations here? */ 2225 aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4); 2226 2227 /* Update offset pointer */ 2228 bufmgr_gem->aub_offset += 4096; 2229} 2230 2231drm_public void 2232drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo, 2233 int x1, int y1, int width, int height, 2234 enum aub_dump_bmp_format format, 2235 int pitch, int offset) 2236{ 2237 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 2238 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; 2239 uint32_t cpp; 2240 2241 switch (format) { 2242 case AUB_DUMP_BMP_FORMAT_8BIT: 2243 cpp = 1; 2244 break; 2245 case AUB_DUMP_BMP_FORMAT_ARGB_4444: 2246 cpp = 2; 2247 break; 2248 case AUB_DUMP_BMP_FORMAT_ARGB_0888: 2249 case AUB_DUMP_BMP_FORMAT_ARGB_8888: 2250 cpp = 4; 2251 break; 2252 default: 2253 printf("Unknown AUB dump format %d\n", format); 2254 return; 2255 } 2256 2257 if (!bufmgr_gem->aub_file) 2258 return; 2259 2260 aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4); 2261 aub_out(bufmgr_gem, (y1 << 16) | x1); 2262 aub_out(bufmgr_gem, 2263 (format << 24) | 2264 (cpp << 19) | 2265 pitch / 4); 2266 aub_out(bufmgr_gem, (height << 16) | width); 2267 aub_out(bufmgr_gem, bo_gem->aub_offset + offset); 2268 aub_out(bufmgr_gem, 2269 ((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) | 2270 ((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0)); 2271} 2272 2273static void 2274aub_exec(drm_intel_bo *bo, int ring_flag, int used) 2275{ 2276 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 2277 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2278 int i; 2279 bool batch_buffer_needs_annotations; 2280 2281 if (!bufmgr_gem->aub_file) 2282 return; 2283 2284 /* If batch buffer is not annotated, annotate it the best we 2285 * can. 2286 */ 2287 batch_buffer_needs_annotations = bo_gem->aub_annotation_count == 0; 2288 if (batch_buffer_needs_annotations) { 2289 drm_intel_aub_annotation annotations[2] = { 2290 { AUB_TRACE_TYPE_BATCH, 0, used }, 2291 { AUB_TRACE_TYPE_NOTYPE, 0, bo->size } 2292 }; 2293 drm_intel_bufmgr_gem_set_aub_annotations(bo, annotations, 2); 2294 } 2295 2296 /* Write out all buffers to AUB memory */ 2297 for (i = 0; i < bufmgr_gem->exec_count; i++) { 2298 aub_write_bo(bufmgr_gem->exec_bos[i]); 2299 } 2300 2301 /* Remove any annotations we added */ 2302 if (batch_buffer_needs_annotations) 2303 drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0); 2304 2305 /* Dump ring buffer */ 2306 aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag); 2307 2308 fflush(bufmgr_gem->aub_file); 2309 2310 /* 2311 * One frame has been dumped. So reset the aub_offset for the next frame. 2312 * 2313 * FIXME: Can we do this? 
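 *
 * (The 0x10000 reset value matches the gtt_size written out by
 * drm_intel_bufmgr_gem_set_aub_dump(), so objects for the next frame are
 * again placed just after the AUB GTT.)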
2314 */ 2315 bufmgr_gem->aub_offset = 0x10000; 2316} 2317 2318static int 2319drm_intel_gem_bo_exec(drm_intel_bo *bo, int used, 2320 drm_clip_rect_t * cliprects, int num_cliprects, int DR4) 2321{ 2322 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 2323 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2324 struct drm_i915_gem_execbuffer execbuf; 2325 int ret, i; 2326 2327 if (bo_gem->has_error) 2328 return -ENOMEM; 2329 2330 pthread_mutex_lock(&bufmgr_gem->lock); 2331 /* Update indices and set up the validate list. */ 2332 drm_intel_gem_bo_process_reloc(bo); 2333 2334 /* Add the batch buffer to the validation list. There are no 2335 * relocations pointing to it. 2336 */ 2337 drm_intel_add_validate_buffer(bo); 2338 2339 VG_CLEAR(execbuf); 2340 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects; 2341 execbuf.buffer_count = bufmgr_gem->exec_count; 2342 execbuf.batch_start_offset = 0; 2343 execbuf.batch_len = used; 2344 execbuf.cliprects_ptr = (uintptr_t) cliprects; 2345 execbuf.num_cliprects = num_cliprects; 2346 execbuf.DR1 = 0; 2347 execbuf.DR4 = DR4; 2348 2349 ret = drmIoctl(bufmgr_gem->fd, 2350 DRM_IOCTL_I915_GEM_EXECBUFFER, 2351 &execbuf); 2352 if (ret != 0) { 2353 ret = -errno; 2354 if (errno == ENOSPC) { 2355 DBG("Execbuffer fails to pin. " 2356 "Estimate: %u. Actual: %u. Available: %u\n", 2357 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos, 2358 bufmgr_gem-> 2359 exec_count), 2360 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos, 2361 bufmgr_gem-> 2362 exec_count), 2363 (unsigned int)bufmgr_gem->gtt_size); 2364 } 2365 } 2366 drm_intel_update_buffer_offsets(bufmgr_gem); 2367 2368 if (bufmgr_gem->bufmgr.debug) 2369 drm_intel_gem_dump_validation_list(bufmgr_gem); 2370 2371 for (i = 0; i < bufmgr_gem->exec_count; i++) { 2372 drm_intel_bo *bo = bufmgr_gem->exec_bos[i]; 2373 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2374 2375 bo_gem->idle = false; 2376 2377 /* Disconnect the buffer from the validate list */ 2378 bo_gem->validate_index = -1; 2379 bufmgr_gem->exec_bos[i] = NULL; 2380 } 2381 bufmgr_gem->exec_count = 0; 2382 pthread_mutex_unlock(&bufmgr_gem->lock); 2383 2384 return ret; 2385} 2386 2387static int 2388do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx, 2389 drm_clip_rect_t *cliprects, int num_cliprects, int DR4, 2390 unsigned int flags) 2391{ 2392 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr; 2393 struct drm_i915_gem_execbuffer2 execbuf; 2394 int ret = 0; 2395 int i; 2396 2397 switch (flags & 0x7) { 2398 default: 2399 return -EINVAL; 2400 case I915_EXEC_BLT: 2401 if (!bufmgr_gem->has_blt) 2402 return -EINVAL; 2403 break; 2404 case I915_EXEC_BSD: 2405 if (!bufmgr_gem->has_bsd) 2406 return -EINVAL; 2407 break; 2408 case I915_EXEC_VEBOX: 2409 if (!bufmgr_gem->has_vebox) 2410 return -EINVAL; 2411 break; 2412 case I915_EXEC_RENDER: 2413 case I915_EXEC_DEFAULT: 2414 break; 2415 } 2416 2417 pthread_mutex_lock(&bufmgr_gem->lock); 2418 /* Update indices and set up the validate list. */ 2419 drm_intel_gem_bo_process_reloc2(bo); 2420 2421 /* Add the batch buffer to the validation list. There are no relocations 2422 * pointing to it. 
2423 */ 2424 drm_intel_add_validate_buffer2(bo, 0); 2425 2426 VG_CLEAR(execbuf); 2427 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects; 2428 execbuf.buffer_count = bufmgr_gem->exec_count; 2429 execbuf.batch_start_offset = 0; 2430 execbuf.batch_len = used; 2431 execbuf.cliprects_ptr = (uintptr_t)cliprects; 2432 execbuf.num_cliprects = num_cliprects; 2433 execbuf.DR1 = 0; 2434 execbuf.DR4 = DR4; 2435 execbuf.flags = flags; 2436 if (ctx == NULL) 2437 i915_execbuffer2_set_context_id(execbuf, 0); 2438 else 2439 i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id); 2440 execbuf.rsvd2 = 0; 2441 2442 aub_exec(bo, flags, used); 2443 2444 if (bufmgr_gem->no_exec) 2445 goto skip_execution; 2446 2447 ret = drmIoctl(bufmgr_gem->fd, 2448 DRM_IOCTL_I915_GEM_EXECBUFFER2, 2449 &execbuf); 2450 if (ret != 0) { 2451 ret = -errno; 2452 if (ret == -ENOSPC) { 2453 DBG("Execbuffer fails to pin. " 2454 "Estimate: %u. Actual: %u. Available: %u\n", 2455 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos, 2456 bufmgr_gem->exec_count), 2457 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos, 2458 bufmgr_gem->exec_count), 2459 (unsigned int) bufmgr_gem->gtt_size); 2460 } 2461 } 2462 drm_intel_update_buffer_offsets2(bufmgr_gem); 2463 2464skip_execution: 2465 if (bufmgr_gem->bufmgr.debug) 2466 drm_intel_gem_dump_validation_list(bufmgr_gem); 2467 2468 for (i = 0; i < bufmgr_gem->exec_count; i++) { 2469 drm_intel_bo *bo = bufmgr_gem->exec_bos[i]; 2470 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; 2471 2472 bo_gem->idle = false; 2473 2474 /* Disconnect the buffer from the validate list */ 2475 bo_gem->validate_index = -1; 2476 bufmgr_gem->exec_bos[i] = NULL; 2477 } 2478 bufmgr_gem->exec_count = 0; 2479 pthread_mutex_unlock(&bufmgr_gem->lock); 2480 2481 return ret; 2482} 2483 2484static int 2485drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used, 2486 drm_clip_rect_t *cliprects, int num_cliprects, 2487 int DR4) 2488{ 2489 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4, 2490 I915_EXEC_RENDER); 2491} 2492 2493static int 2494drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used, 2495 drm_clip_rect_t *cliprects, int num_cliprects, int DR4, 2496 unsigned int flags) 2497{ 2498 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4, 2499 flags); 2500} 2501 2502drm_public int 2503drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx, 2504 int used, unsigned int flags) 2505{ 2506 return do_exec2(bo, used, ctx, NULL, 0, 0, flags); 2507} 2508 2509static int 2510drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment) 2511{ 2512 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 2513 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2514 struct drm_i915_gem_pin pin; 2515 int ret; 2516 2517 VG_CLEAR(pin); 2518 pin.handle = bo_gem->gem_handle; 2519 pin.alignment = alignment; 2520 2521 ret = drmIoctl(bufmgr_gem->fd, 2522 DRM_IOCTL_I915_GEM_PIN, 2523 &pin); 2524 if (ret != 0) 2525 return -errno; 2526 2527 bo->offset64 = pin.offset; 2528 bo->offset = pin.offset; 2529 return 0; 2530} 2531 2532static int 2533drm_intel_gem_bo_unpin(drm_intel_bo *bo) 2534{ 2535 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 2536 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2537 struct drm_i915_gem_unpin unpin; 2538 int ret; 2539 2540 VG_CLEAR(unpin); 2541 unpin.handle = bo_gem->gem_handle; 2542 2543 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin); 2544 if (ret != 0) 2545 return -errno; 2546 2547 return 0; 2548} 2549 2550static 
int 2551drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo, 2552 uint32_t tiling_mode, 2553 uint32_t stride) 2554{ 2555 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 2556 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2557 struct drm_i915_gem_set_tiling set_tiling; 2558 int ret; 2559 2560 if (bo_gem->global_name == 0 && 2561 tiling_mode == bo_gem->tiling_mode && 2562 stride == bo_gem->stride) 2563 return 0; 2564 2565 memset(&set_tiling, 0, sizeof(set_tiling)); 2566 do { 2567 /* set_tiling is slightly broken and overwrites the 2568 * input on the error path, so we have to open code 2569 * rmIoctl. 2570 */ 2571 set_tiling.handle = bo_gem->gem_handle; 2572 set_tiling.tiling_mode = tiling_mode; 2573 set_tiling.stride = stride; 2574 2575 ret = ioctl(bufmgr_gem->fd, 2576 DRM_IOCTL_I915_GEM_SET_TILING, 2577 &set_tiling); 2578 } while (ret == -1 && (errno == EINTR || errno == EAGAIN)); 2579 if (ret == -1) 2580 return -errno; 2581 2582 bo_gem->tiling_mode = set_tiling.tiling_mode; 2583 bo_gem->swizzle_mode = set_tiling.swizzle_mode; 2584 bo_gem->stride = set_tiling.stride; 2585 return 0; 2586} 2587 2588static int 2589drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode, 2590 uint32_t stride) 2591{ 2592 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 2593 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2594 int ret; 2595 2596 /* Tiling with userptr surfaces is not supported 2597 * on all hardware so refuse it for time being. 2598 */ 2599 if (bo_gem->is_userptr) 2600 return -EINVAL; 2601 2602 /* Linear buffers have no stride. By ensuring that we only ever use 2603 * stride 0 with linear buffers, we simplify our code. 2604 */ 2605 if (*tiling_mode == I915_TILING_NONE) 2606 stride = 0; 2607 2608 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride); 2609 if (ret == 0) 2610 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem); 2611 2612 *tiling_mode = bo_gem->tiling_mode; 2613 return ret; 2614} 2615 2616static int 2617drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode, 2618 uint32_t * swizzle_mode) 2619{ 2620 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2621 2622 *tiling_mode = bo_gem->tiling_mode; 2623 *swizzle_mode = bo_gem->swizzle_mode; 2624 return 0; 2625} 2626 2627drm_public drm_intel_bo * 2628drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size) 2629{ 2630 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; 2631 int ret; 2632 uint32_t handle; 2633 drm_intel_bo_gem *bo_gem; 2634 struct drm_i915_gem_get_tiling get_tiling; 2635 drmMMListHead *list; 2636 2637 ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle); 2638 2639 /* 2640 * See if the kernel has already returned this buffer to us. 
Just as 2641 * for named buffers, we must not create two bo's pointing at the same 2642 * kernel object 2643 */ 2644 pthread_mutex_lock(&bufmgr_gem->lock); 2645 for (list = bufmgr_gem->named.next; 2646 list != &bufmgr_gem->named; 2647 list = list->next) { 2648 bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list); 2649 if (bo_gem->gem_handle == handle) { 2650 drm_intel_gem_bo_reference(&bo_gem->bo); 2651 pthread_mutex_unlock(&bufmgr_gem->lock); 2652 return &bo_gem->bo; 2653 } 2654 } 2655 2656 if (ret) { 2657 fprintf(stderr,"ret is %d %d\n", ret, errno); 2658 pthread_mutex_unlock(&bufmgr_gem->lock); 2659 return NULL; 2660 } 2661 2662 bo_gem = calloc(1, sizeof(*bo_gem)); 2663 if (!bo_gem) { 2664 pthread_mutex_unlock(&bufmgr_gem->lock); 2665 return NULL; 2666 } 2667 /* Determine size of bo. The fd-to-handle ioctl really should 2668 * return the size, but it doesn't. If we have kernel 3.12 or 2669 * later, we can lseek on the prime fd to get the size. Older 2670 * kernels will just fail, in which case we fall back to the 2671 * provided (estimated or guess size). */ 2672 ret = lseek(prime_fd, 0, SEEK_END); 2673 if (ret != -1) 2674 bo_gem->bo.size = ret; 2675 else 2676 bo_gem->bo.size = size; 2677 2678 bo_gem->bo.handle = handle; 2679 bo_gem->bo.bufmgr = bufmgr; 2680 2681 bo_gem->gem_handle = handle; 2682 2683 atomic_set(&bo_gem->refcount, 1); 2684 2685 bo_gem->name = "prime"; 2686 bo_gem->validate_index = -1; 2687 bo_gem->reloc_tree_fences = 0; 2688 bo_gem->used_as_reloc_target = false; 2689 bo_gem->has_error = false; 2690 bo_gem->reusable = false; 2691 2692 DRMINITLISTHEAD(&bo_gem->vma_list); 2693 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named); 2694 pthread_mutex_unlock(&bufmgr_gem->lock); 2695 2696 VG_CLEAR(get_tiling); 2697 get_tiling.handle = bo_gem->gem_handle; 2698 ret = drmIoctl(bufmgr_gem->fd, 2699 DRM_IOCTL_I915_GEM_GET_TILING, 2700 &get_tiling); 2701 if (ret != 0) { 2702 drm_intel_gem_bo_unreference(&bo_gem->bo); 2703 return NULL; 2704 } 2705 bo_gem->tiling_mode = get_tiling.tiling_mode; 2706 bo_gem->swizzle_mode = get_tiling.swizzle_mode; 2707 /* XXX stride is unknown */ 2708 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem); 2709 2710 return &bo_gem->bo; 2711} 2712 2713drm_public int 2714drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd) 2715{ 2716 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 2717 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2718 2719 pthread_mutex_lock(&bufmgr_gem->lock); 2720 if (DRMLISTEMPTY(&bo_gem->name_list)) 2721 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named); 2722 pthread_mutex_unlock(&bufmgr_gem->lock); 2723 2724 if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle, 2725 DRM_CLOEXEC, prime_fd) != 0) 2726 return -errno; 2727 2728 bo_gem->reusable = false; 2729 2730 return 0; 2731} 2732 2733static int 2734drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name) 2735{ 2736 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; 2737 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2738 int ret; 2739 2740 if (!bo_gem->global_name) { 2741 struct drm_gem_flink flink; 2742 2743 VG_CLEAR(flink); 2744 flink.handle = bo_gem->gem_handle; 2745 2746 pthread_mutex_lock(&bufmgr_gem->lock); 2747 2748 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink); 2749 if (ret != 0) { 2750 pthread_mutex_unlock(&bufmgr_gem->lock); 2751 return -errno; 2752 } 2753 2754 bo_gem->global_name = flink.name; 2755 bo_gem->reusable = false; 2756 2757 if (DRMLISTEMPTY(&bo_gem->name_list)) 
2758 DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named); 2759 pthread_mutex_unlock(&bufmgr_gem->lock); 2760 } 2761 2762 *name = bo_gem->global_name; 2763 return 0; 2764} 2765 2766/** 2767 * Enables unlimited caching of buffer objects for reuse. 2768 * 2769 * This is potentially very memory expensive, as the cache at each bucket 2770 * size is only bounded by how many buffers of that size we've managed to have 2771 * in flight at once. 2772 */ 2773drm_public void 2774drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr) 2775{ 2776 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr; 2777 2778 bufmgr_gem->bo_reuse = true; 2779} 2780 2781/** 2782 * Enable use of fenced reloc type. 2783 * 2784 * New code should enable this to avoid unnecessary fence register 2785 * allocation. If this option is not enabled, all relocs will have fence 2786 * register allocated. 2787 */ 2788drm_public void 2789drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr) 2790{ 2791 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; 2792 2793 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2) 2794 bufmgr_gem->fenced_relocs = true; 2795} 2796 2797/** 2798 * Return the additional aperture space required by the tree of buffer objects 2799 * rooted at bo. 2800 */ 2801static int 2802drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo) 2803{ 2804 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2805 int i; 2806 int total = 0; 2807 2808 if (bo == NULL || bo_gem->included_in_check_aperture) 2809 return 0; 2810 2811 total += bo->size; 2812 bo_gem->included_in_check_aperture = true; 2813 2814 for (i = 0; i < bo_gem->reloc_count; i++) 2815 total += 2816 drm_intel_gem_bo_get_aperture_space(bo_gem-> 2817 reloc_target_info[i].bo); 2818 2819 return total; 2820} 2821 2822/** 2823 * Count the number of buffers in this list that need a fence reg 2824 * 2825 * If the count is greater than the number of available regs, we'll have 2826 * to ask the caller to resubmit a batch with fewer tiled buffers. 2827 * 2828 * This function over-counts if the same buffer is used multiple times. 2829 */ 2830static unsigned int 2831drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count) 2832{ 2833 int i; 2834 unsigned int total = 0; 2835 2836 for (i = 0; i < count; i++) { 2837 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i]; 2838 2839 if (bo_gem == NULL) 2840 continue; 2841 2842 total += bo_gem->reloc_tree_fences; 2843 } 2844 return total; 2845} 2846 2847/** 2848 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready 2849 * for the next drm_intel_bufmgr_check_aperture_space() call. 2850 */ 2851static void 2852drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo) 2853{ 2854 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2855 int i; 2856 2857 if (bo == NULL || !bo_gem->included_in_check_aperture) 2858 return; 2859 2860 bo_gem->included_in_check_aperture = false; 2861 2862 for (i = 0; i < bo_gem->reloc_count; i++) 2863 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem-> 2864 reloc_target_info[i].bo); 2865} 2866 2867/** 2868 * Return a conservative estimate for the amount of aperture required 2869 * for a collection of buffers. This may double-count some buffers. 
2870 */ 2871static unsigned int 2872drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count) 2873{ 2874 int i; 2875 unsigned int total = 0; 2876 2877 for (i = 0; i < count; i++) { 2878 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i]; 2879 if (bo_gem != NULL) 2880 total += bo_gem->reloc_tree_size; 2881 } 2882 return total; 2883} 2884 2885/** 2886 * Return the amount of aperture needed for a collection of buffers. 2887 * This avoids double counting any buffers, at the cost of looking 2888 * at every buffer in the set. 2889 */ 2890static unsigned int 2891drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count) 2892{ 2893 int i; 2894 unsigned int total = 0; 2895 2896 for (i = 0; i < count; i++) { 2897 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]); 2898 /* For the first buffer object in the array, we get an 2899 * accurate count back for its reloc_tree size (since nothing 2900 * had been flagged as being counted yet). We can save that 2901 * value out as a more conservative reloc_tree_size that 2902 * avoids double-counting target buffers. Since the first 2903 * buffer happens to usually be the batch buffer in our 2904 * callers, this can pull us back from doing the tree 2905 * walk on every new batch emit. 2906 */ 2907 if (i == 0) { 2908 drm_intel_bo_gem *bo_gem = 2909 (drm_intel_bo_gem *) bo_array[i]; 2910 bo_gem->reloc_tree_size = total; 2911 } 2912 } 2913 2914 for (i = 0; i < count; i++) 2915 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]); 2916 return total; 2917} 2918 2919/** 2920 * Return -1 if the batchbuffer should be flushed before attempting to 2921 * emit rendering referencing the buffers pointed to by bo_array. 2922 * 2923 * This is required because if we try to emit a batchbuffer with relocations 2924 * to a tree of buffers that won't simultaneously fit in the aperture, 2925 * the rendering will return an error at a point where the software is not 2926 * prepared to recover from it. 2927 * 2928 * However, we also want to emit the batchbuffer significantly before we reach 2929 * the limit, as a series of batchbuffers each of which references buffers 2930 * covering almost all of the aperture means that at each emit we end up 2931 * waiting to evict a buffer from the last rendering, and we get synchronous 2932 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to 2933 * get better parallelism. 
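 *
 * A minimal caller-side sketch (flush_my_batch() is a hypothetical
 * stand-in for the caller's flush-and-restart path, not part of libdrm):
 *
 *   if (drm_intel_bufmgr_check_aperture_space(bo_array, count) != 0)
 *           flush_my_batch();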
2934 */ 2935static int 2936drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count) 2937{ 2938 drm_intel_bufmgr_gem *bufmgr_gem = 2939 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr; 2940 unsigned int total = 0; 2941 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4; 2942 int total_fences; 2943 2944 /* Check for fence reg constraints if necessary */ 2945 if (bufmgr_gem->available_fences) { 2946 total_fences = drm_intel_gem_total_fences(bo_array, count); 2947 if (total_fences > bufmgr_gem->available_fences) 2948 return -ENOSPC; 2949 } 2950 2951 total = drm_intel_gem_estimate_batch_space(bo_array, count); 2952 2953 if (total > threshold) 2954 total = drm_intel_gem_compute_batch_space(bo_array, count); 2955 2956 if (total > threshold) { 2957 DBG("check_space: overflowed available aperture, " 2958 "%dkb vs %dkb\n", 2959 total / 1024, (int)bufmgr_gem->gtt_size / 1024); 2960 return -ENOSPC; 2961 } else { 2962 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024, 2963 (int)bufmgr_gem->gtt_size / 1024); 2964 return 0; 2965 } 2966} 2967 2968/* 2969 * Disable buffer reuse for objects which are shared with the kernel 2970 * as scanout buffers 2971 */ 2972static int 2973drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo) 2974{ 2975 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2976 2977 bo_gem->reusable = false; 2978 return 0; 2979} 2980 2981static int 2982drm_intel_gem_bo_is_reusable(drm_intel_bo *bo) 2983{ 2984 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2985 2986 return bo_gem->reusable; 2987} 2988 2989static int 2990_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo) 2991{ 2992 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 2993 int i; 2994 2995 for (i = 0; i < bo_gem->reloc_count; i++) { 2996 if (bo_gem->reloc_target_info[i].bo == target_bo) 2997 return 1; 2998 if (bo == bo_gem->reloc_target_info[i].bo) 2999 continue; 3000 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo, 3001 target_bo)) 3002 return 1; 3003 } 3004 3005 return 0; 3006} 3007 3008/** Return true if target_bo is referenced by bo's relocation tree. */ 3009static int 3010drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo) 3011{ 3012 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo; 3013 3014 if (bo == NULL || target_bo == NULL) 3015 return 0; 3016 if (target_bo_gem->used_as_reloc_target) 3017 return _drm_intel_gem_bo_references(bo, target_bo); 3018 return 0; 3019} 3020 3021static void 3022add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size) 3023{ 3024 unsigned int i = bufmgr_gem->num_buckets; 3025 3026 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket)); 3027 3028 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head); 3029 bufmgr_gem->cache_bucket[i].size = size; 3030 bufmgr_gem->num_buckets++; 3031} 3032 3033static void 3034init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem) 3035{ 3036 unsigned long size, cache_max_size = 64 * 1024 * 1024; 3037 3038 /* OK, so power of two buckets was too wasteful of memory. 3039 * Give 3 other sizes between each power of two, to hopefully 3040 * cover things accurately enough. 
(The alternative is 3041 * probably to just go for exact matching of sizes, and assume 3042 * that for things like composited window resize the tiled 3043 * width/height alignment and rounding of sizes to pages will 3044 * get us useful cache hit rates anyway) 3045 */ 3046 add_bucket(bufmgr_gem, 4096); 3047 add_bucket(bufmgr_gem, 4096 * 2); 3048 add_bucket(bufmgr_gem, 4096 * 3); 3049 3050 /* Initialize the linked lists for BO reuse cache. */ 3051 for (size = 4 * 4096; size <= cache_max_size; size *= 2) { 3052 add_bucket(bufmgr_gem, size); 3053 3054 add_bucket(bufmgr_gem, size + size * 1 / 4); 3055 add_bucket(bufmgr_gem, size + size * 2 / 4); 3056 add_bucket(bufmgr_gem, size + size * 3 / 4); 3057 } 3058} 3059 3060drm_public void 3061drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit) 3062{ 3063 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; 3064 3065 bufmgr_gem->vma_max = limit; 3066 3067 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem); 3068} 3069 3070/** 3071 * Get the PCI ID for the device. This can be overridden by setting the 3072 * INTEL_DEVID_OVERRIDE environment variable to the desired ID. 3073 */ 3074static int 3075get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem) 3076{ 3077 char *devid_override; 3078 int devid; 3079 int ret; 3080 drm_i915_getparam_t gp; 3081 3082 if (geteuid() == getuid()) { 3083 devid_override = getenv("INTEL_DEVID_OVERRIDE"); 3084 if (devid_override) { 3085 bufmgr_gem->no_exec = true; 3086 return strtod(devid_override, NULL); 3087 } 3088 } 3089 3090 VG_CLEAR(devid); 3091 VG_CLEAR(gp); 3092 gp.param = I915_PARAM_CHIPSET_ID; 3093 gp.value = &devid; 3094 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); 3095 if (ret) { 3096 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno); 3097 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value); 3098 } 3099 return devid; 3100} 3101 3102drm_public int 3103drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr) 3104{ 3105 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; 3106 3107 return bufmgr_gem->pci_device; 3108} 3109 3110/** 3111 * Sets the AUB filename. 3112 * 3113 * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump() 3114 * for it to have any effect. 3115 */ 3116drm_public void 3117drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr, 3118 const char *filename) 3119{ 3120 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; 3121 3122 free(bufmgr_gem->aub_filename); 3123 if (filename) 3124 bufmgr_gem->aub_filename = strdup(filename); 3125} 3126 3127/** 3128 * Sets up AUB dumping. 3129 * 3130 * This is a trace file format that can be used with the simulator. 3131 * Packets are emitted in a format somewhat like GPU command packets. 3132 * You can set up a GTT and upload your objects into the referenced 3133 * space, then send off batchbuffers and get BMPs out the other end. 
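 *
 * A minimal usage sketch (the bufmgr is assumed to come from
 * drm_intel_bufmgr_gem_init(), and "trace.aub" is only an example name):
 *
 *   drm_intel_bufmgr_gem_set_aub_filename(bufmgr, "trace.aub");
 *   drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 1);
 *   ... submit batchbuffers as usual ...
 *   drm_intel_bufmgr_gem_set_aub_dump(bufmgr, 0);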
3134 */ 3135drm_public void 3136drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable) 3137{ 3138 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; 3139 int entry = 0x200003; 3140 int i; 3141 int gtt_size = 0x10000; 3142 const char *filename; 3143 3144 if (!enable) { 3145 if (bufmgr_gem->aub_file) { 3146 fclose(bufmgr_gem->aub_file); 3147 bufmgr_gem->aub_file = NULL; 3148 } 3149 return; 3150 } 3151 3152 if (geteuid() != getuid()) 3153 return; 3154 3155 if (bufmgr_gem->aub_filename) 3156 filename = bufmgr_gem->aub_filename; 3157 else 3158 filename = "intel.aub"; 3159 bufmgr_gem->aub_file = fopen(filename, "w+"); 3160 if (!bufmgr_gem->aub_file) 3161 return; 3162 3163 /* Start allocating objects from just after the GTT. */ 3164 bufmgr_gem->aub_offset = gtt_size; 3165 3166 /* Start with a (required) version packet. */ 3167 aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2)); 3168 aub_out(bufmgr_gem, 3169 (4 << AUB_HEADER_MAJOR_SHIFT) | 3170 (0 << AUB_HEADER_MINOR_SHIFT)); 3171 for (i = 0; i < 8; i++) { 3172 aub_out(bufmgr_gem, 0); /* app name */ 3173 } 3174 aub_out(bufmgr_gem, 0); /* timestamp */ 3175 aub_out(bufmgr_gem, 0); /* timestamp */ 3176 aub_out(bufmgr_gem, 0); /* comment len */ 3177 3178 /* Set up the GTT. The max we can handle is 256M */ 3179 aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2)); 3180 aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE); 3181 aub_out(bufmgr_gem, 0); /* subtype */ 3182 aub_out(bufmgr_gem, 0); /* offset */ 3183 aub_out(bufmgr_gem, gtt_size); /* size */ 3184 if (bufmgr_gem->gen >= 8) 3185 aub_out(bufmgr_gem, 0); 3186 for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) { 3187 aub_out(bufmgr_gem, entry); 3188 } 3189} 3190 3191drm_public drm_intel_context * 3192drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr) 3193{ 3194 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; 3195 struct drm_i915_gem_context_create create; 3196 drm_intel_context *context = NULL; 3197 int ret; 3198 3199 context = calloc(1, sizeof(*context)); 3200 if (!context) 3201 return NULL; 3202 3203 VG_CLEAR(create); 3204 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create); 3205 if (ret != 0) { 3206 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", 3207 strerror(errno)); 3208 free(context); 3209 return NULL; 3210 } 3211 3212 context->ctx_id = create.ctx_id; 3213 context->bufmgr = bufmgr; 3214 3215 return context; 3216} 3217 3218drm_public void 3219drm_intel_gem_context_destroy(drm_intel_context *ctx) 3220{ 3221 drm_intel_bufmgr_gem *bufmgr_gem; 3222 struct drm_i915_gem_context_destroy destroy; 3223 int ret; 3224 3225 if (ctx == NULL) 3226 return; 3227 3228 VG_CLEAR(destroy); 3229 3230 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr; 3231 destroy.ctx_id = ctx->ctx_id; 3232 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, 3233 &destroy); 3234 if (ret != 0) 3235 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n", 3236 strerror(errno)); 3237 3238 free(ctx); 3239} 3240 3241drm_public int 3242drm_intel_get_reset_stats(drm_intel_context *ctx, 3243 uint32_t *reset_count, 3244 uint32_t *active, 3245 uint32_t *pending) 3246{ 3247 drm_intel_bufmgr_gem *bufmgr_gem; 3248 struct drm_i915_reset_stats stats; 3249 int ret; 3250 3251 if (ctx == NULL) 3252 return -EINVAL; 3253 3254 memset(&stats, 0, sizeof(stats)); 3255 3256 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr; 3257 stats.ctx_id = ctx->ctx_id; 3258 ret = 
drmIoctl(bufmgr_gem->fd, 3259 DRM_IOCTL_I915_GET_RESET_STATS, 3260 &stats); 3261 if (ret == 0) { 3262 if (reset_count != NULL) 3263 *reset_count = stats.reset_count; 3264 3265 if (active != NULL) 3266 *active = stats.batch_active; 3267 3268 if (pending != NULL) 3269 *pending = stats.batch_pending; 3270 } 3271 3272 return ret; 3273} 3274 3275drm_public int 3276drm_intel_reg_read(drm_intel_bufmgr *bufmgr, 3277 uint32_t offset, 3278 uint64_t *result) 3279{ 3280 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; 3281 struct drm_i915_reg_read reg_read; 3282 int ret; 3283 3284 VG_CLEAR(reg_read); 3285 reg_read.offset = offset; 3286 3287 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read); 3288 3289 *result = reg_read.val; 3290 return ret; 3291} 3292 3293 3294/** 3295 * Annotate the given bo for use in aub dumping. 3296 * 3297 * \param annotations is an array of drm_intel_aub_annotation objects 3298 * describing the type of data in various sections of the bo. Each 3299 * element of the array specifies the type and subtype of a section of 3300 * the bo, and the past-the-end offset of that section. The elements 3301 * of \c annotations must be sorted so that ending_offset is 3302 * increasing. 3303 * 3304 * \param count is the number of elements in the \c annotations array. 3305 * If \c count is zero, then \c annotations will not be dereferenced. 3306 * 3307 * Annotations are copied into a private data structure, so caller may 3308 * re-use the memory pointed to by \c annotations after the call 3309 * returns. 3310 * 3311 * Annotations are stored for the lifetime of the bo; to reset to the 3312 * default state (no annotations), call this function with a \c count 3313 * of zero. 3314 */ 3315drm_public void 3316drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo, 3317 drm_intel_aub_annotation *annotations, 3318 unsigned count) 3319{ 3320 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; 3321 unsigned size = sizeof(*annotations) * count; 3322 drm_intel_aub_annotation *new_annotations = 3323 count > 0 ?
realloc(bo_gem->aub_annotations, size) : NULL; 3324 if (new_annotations == NULL) { 3325 free(bo_gem->aub_annotations); 3326 bo_gem->aub_annotations = NULL; 3327 bo_gem->aub_annotation_count = 0; 3328 return; 3329 } 3330 memcpy(new_annotations, annotations, size); 3331 bo_gem->aub_annotations = new_annotations; 3332 bo_gem->aub_annotation_count = count; 3333} 3334 3335static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER; 3336static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list }; 3337 3338static drm_intel_bufmgr_gem * 3339drm_intel_bufmgr_gem_find(int fd) 3340{ 3341 drm_intel_bufmgr_gem *bufmgr_gem; 3342 3343 DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) { 3344 if (bufmgr_gem->fd == fd) { 3345 atomic_inc(&bufmgr_gem->refcount); 3346 return bufmgr_gem; 3347 } 3348 } 3349 3350 return NULL; 3351} 3352 3353static void 3354drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr) 3355{ 3356 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr; 3357 3358 if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) { 3359 pthread_mutex_lock(&bufmgr_list_mutex); 3360 3361 if (atomic_dec_and_test(&bufmgr_gem->refcount)) { 3362 DRMLISTDEL(&bufmgr_gem->managers); 3363 drm_intel_bufmgr_gem_destroy(bufmgr); 3364 } 3365 3366 pthread_mutex_unlock(&bufmgr_list_mutex); 3367 } 3368} 3369 3370static bool 3371has_userptr(drm_intel_bufmgr_gem *bufmgr_gem) 3372{ 3373 int ret; 3374 void *ptr; 3375 long pgsz; 3376 struct drm_i915_gem_userptr userptr; 3377 struct drm_gem_close close_bo; 3378 3379 pgsz = sysconf(_SC_PAGESIZE); 3380 assert(pgsz > 0); 3381 3382 ret = posix_memalign(&ptr, pgsz, pgsz); 3383 if (ret) { 3384 DBG("Failed to get a page (%ld) for userptr detection!\n", 3385 pgsz); 3386 return false; 3387 } 3388 3389 memset(&userptr, 0, sizeof(userptr)); 3390 userptr.user_ptr = (__u64)(unsigned long)ptr; 3391 userptr.user_size = pgsz; 3392 3393retry: 3394 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr); 3395 if (ret) { 3396 if (errno == ENODEV && userptr.flags == 0) { 3397 userptr.flags = I915_USERPTR_UNSYNCHRONIZED; 3398 goto retry; 3399 } 3400 free(ptr); 3401 return false; 3402 } 3403 3404 close_bo.handle = userptr.handle; 3405 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo); 3406 free(ptr); 3407 if (ret) { 3408 fprintf(stderr, "Failed to release test userptr object! (%d) " 3409 "i915 kernel driver may not be sane!\n", errno); 3410 return false; 3411 } 3412 3413 return true; 3414} 3415 3416/** 3417 * Initializes the GEM buffer manager, which uses the kernel to allocate, map, 3418 * and manage buffer objects. 3419 * 3420 * \param fd File descriptor of the opened DRM device.
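 *
 * Typical setup, as an illustrative sketch only (the device path and the
 * 4096-byte batch size are example values, not requirements):
 *
 *   int fd = open("/dev/dri/card0", O_RDWR);
 *   drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *   if (bufmgr != NULL)
 *           drm_intel_bufmgr_gem_enable_reuse(bufmgr);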
3421 */ 3422drm_public drm_intel_bufmgr * 3423drm_intel_bufmgr_gem_init(int fd, int batch_size) 3424{ 3425 drm_intel_bufmgr_gem *bufmgr_gem; 3426 struct drm_i915_gem_get_aperture aperture; 3427 drm_i915_getparam_t gp; 3428 int ret, tmp; 3429 bool exec2 = false; 3430 3431 pthread_mutex_lock(&bufmgr_list_mutex); 3432 3433 bufmgr_gem = drm_intel_bufmgr_gem_find(fd); 3434 if (bufmgr_gem) 3435 goto exit; 3436 3437 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem)); 3438 if (bufmgr_gem == NULL) 3439 goto exit; 3440 3441 bufmgr_gem->fd = fd; 3442 atomic_set(&bufmgr_gem->refcount, 1); 3443 3444 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) { 3445 free(bufmgr_gem); 3446 bufmgr_gem = NULL; 3447 goto exit; 3448 } 3449 3450 ret = drmIoctl(bufmgr_gem->fd, 3451 DRM_IOCTL_I915_GEM_GET_APERTURE, 3452 &aperture); 3453 3454 if (ret == 0) 3455 bufmgr_gem->gtt_size = aperture.aper_available_size; 3456 else { 3457 fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n", 3458 strerror(errno)); 3459 bufmgr_gem->gtt_size = 128 * 1024 * 1024; 3460 fprintf(stderr, "Assuming %dkB available aperture size.\n" 3461 "May lead to reduced performance or incorrect " 3462 "rendering.\n", 3463 (int)bufmgr_gem->gtt_size / 1024); 3464 } 3465 3466 bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem); 3467 3468 if (IS_GEN2(bufmgr_gem->pci_device)) 3469 bufmgr_gem->gen = 2; 3470 else if (IS_GEN3(bufmgr_gem->pci_device)) 3471 bufmgr_gem->gen = 3; 3472 else if (IS_GEN4(bufmgr_gem->pci_device)) 3473 bufmgr_gem->gen = 4; 3474 else if (IS_GEN5(bufmgr_gem->pci_device)) 3475 bufmgr_gem->gen = 5; 3476 else if (IS_GEN6(bufmgr_gem->pci_device)) 3477 bufmgr_gem->gen = 6; 3478 else if (IS_GEN7(bufmgr_gem->pci_device)) 3479 bufmgr_gem->gen = 7; 3480 else if (IS_GEN8(bufmgr_gem->pci_device)) 3481 bufmgr_gem->gen = 8; 3482 else { 3483 free(bufmgr_gem); 3484 bufmgr_gem = NULL; 3485 goto exit; 3486 } 3487 3488 if (IS_GEN3(bufmgr_gem->pci_device) && 3489 bufmgr_gem->gtt_size > 256*1024*1024) { 3490 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't 3491 * be used for tiled blits. To simplify the accounting, just 3492 * subtract the unmappable part (fixed to 256MB on all known 3493 * gen3 devices) if the kernel advertises it.
*/ 3494 bufmgr_gem->gtt_size -= 256*1024*1024; 3495 } 3496 3497 VG_CLEAR(gp); 3498 gp.value = &tmp; 3499 3500 gp.param = I915_PARAM_HAS_EXECBUF2; 3501 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); 3502 if (!ret) 3503 exec2 = true; 3504 3505 gp.param = I915_PARAM_HAS_BSD; 3506 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); 3507 bufmgr_gem->has_bsd = ret == 0; 3508 3509 gp.param = I915_PARAM_HAS_BLT; 3510 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); 3511 bufmgr_gem->has_blt = ret == 0; 3512 3513 gp.param = I915_PARAM_HAS_RELAXED_FENCING; 3514 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); 3515 bufmgr_gem->has_relaxed_fencing = ret == 0; 3516 3517 if (has_userptr(bufmgr_gem)) 3518 bufmgr_gem->bufmgr.bo_alloc_userptr = 3519 drm_intel_gem_bo_alloc_userptr; 3520 3521 gp.param = I915_PARAM_HAS_WAIT_TIMEOUT; 3522 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); 3523 bufmgr_gem->has_wait_timeout = ret == 0; 3524 3525 gp.param = I915_PARAM_HAS_LLC; 3526 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); 3527 if (ret != 0) { 3528 /* Kernel does not support the HAS_LLC query; fall back to GPU 3529 * generation detection and assume that we have LLC on GEN6/7 3530 */ 3531 bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) | 3532 IS_GEN7(bufmgr_gem->pci_device)); 3533 } else 3534 bufmgr_gem->has_llc = *gp.value; 3535 3536 gp.param = I915_PARAM_HAS_VEBOX; 3537 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); 3538 bufmgr_gem->has_vebox = (ret == 0) & (*gp.value > 0); 3539 3540 if (bufmgr_gem->gen < 4) { 3541 gp.param = I915_PARAM_NUM_FENCES_AVAIL; 3542 gp.value = &bufmgr_gem->available_fences; 3543 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp); 3544 if (ret) { 3545 fprintf(stderr, "get fences failed: %d [%d]\n", ret, 3546 errno); 3547 fprintf(stderr, "param: %d, val: %d\n", gp.param, 3548 *gp.value); 3549 bufmgr_gem->available_fences = 0; 3550 } else { 3551 /* XXX The kernel reports the total number of fences, 3552 * including any that may be pinned. 3553 * 3554 * We presume that there will be at least one pinned 3555 * fence for the scanout buffer, but there may be more 3556 * than one scanout and the user may be manually 3557 * pinning buffers. Let's move to execbuffer2 and 3558 * thereby forget the insanity of using fences... 3559 */ 3560 bufmgr_gem->available_fences -= 2; 3561 if (bufmgr_gem->available_fences < 0) 3562 bufmgr_gem->available_fences = 0; 3563 } 3564 } 3565 3566 /* Let's go with one relocation for every 2 dwords (but round down a bit 3567 * since a power of two will mean an extra page allocation for the reloc 3568 * buffer). 3569 * 3570 * Every 4 was too few for the blender benchmark.
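 *
 * For example, with a hypothetical 16384-byte batch this comes out to
 * 16384 / sizeof(uint32_t) / 2 - 2 = 2046 relocation slots.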
3571 */ 3572 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2; 3573 3574 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc; 3575 bufmgr_gem->bufmgr.bo_alloc_for_render = 3576 drm_intel_gem_bo_alloc_for_render; 3577 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled; 3578 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference; 3579 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference; 3580 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map; 3581 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap; 3582 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata; 3583 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata; 3584 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering; 3585 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc; 3586 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence; 3587 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin; 3588 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin; 3589 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling; 3590 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling; 3591 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink; 3592 /* Use the new one if available */ 3593 if (exec2) { 3594 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2; 3595 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2; 3596 } else 3597 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec; 3598 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy; 3599 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise; 3600 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref; 3601 bufmgr_gem->bufmgr.debug = 0; 3602 bufmgr_gem->bufmgr.check_aperture_space = 3603 drm_intel_gem_check_aperture_space; 3604 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse; 3605 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable; 3606 bufmgr_gem->bufmgr.get_pipe_from_crtc_id = 3607 drm_intel_gem_get_pipe_from_crtc_id; 3608 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references; 3609 3610 DRMINITLISTHEAD(&bufmgr_gem->named); 3611 init_cache_buckets(bufmgr_gem); 3612 3613 DRMINITLISTHEAD(&bufmgr_gem->vma_cache); 3614 bufmgr_gem->vma_max = -1; /* unlimited by default */ 3615 3616 DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list); 3617 3618exit: 3619 pthread_mutex_unlock(&bufmgr_list_mutex); 3620 3621 return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL; 3622} 3623