radeon_cs_gem.c revision 69dda199
/*
 * Copyright © 2008 Jérôme Glisse
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Aapo Tahkola <aet@rasterburn.org>
 *      Nicolai Haehnle <prefect_@gmx.net>
 *      Jérôme Glisse <glisse@freedesktop.org>
 */
#include <assert.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include "radeon_cs.h"
#include "radeon_cs_int.h"
#include "radeon_bo_int.h"
#include "radeon_cs_gem.h"
#include "radeon_bo_gem.h"
#include "drm.h"
#include "xf86drm.h"
#include "xf86atomic.h"
#include "radeon_drm.h"
#include "bof.h"

#define CS_BOF_DUMP 0

struct radeon_cs_manager_gem {
    struct radeon_cs_manager base;
    uint32_t device_id;
    unsigned nbof;
};

#pragma pack(1)
struct cs_reloc_gem {
    uint32_t handle;
    uint32_t read_domain;
    uint32_t write_domain;
    uint32_t flags;
};
#pragma pack()

#define RELOC_SIZE (sizeof(struct cs_reloc_gem) / sizeof(uint32_t))

struct cs_gem {
    struct radeon_cs_int base;
    struct drm_radeon_cs cs;
    struct drm_radeon_cs_chunk chunks[2];
    unsigned nrelocs;
    uint32_t *relocs;
    struct radeon_bo_int **relocs_bo;
};

static pthread_mutex_t id_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint32_t cs_id_source = 0;

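/*
 * CS ids come out of the shared bitmask cs_id_source: every live command
 * stream owns exactly one bit, so at most 32 streams can exist at once and
 * generate_id() returns 0 when no bit is free.  The same bit is later used
 * as a membership tag in each BO's reloc_in_cs word (see
 * cs_gem_write_reloc), which is why an id has to be a power of two.
 *
 * Worked example for get_first_zero(): with n == 0x00000007 (bits 0-2
 * taken), ~n == 0xFFFFFFF8, __builtin_ctz(~n) == 3, and the function
 * returns 1 << 3 == 0x8, i.e. the first free id.
 */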
/**
 * The result is undefined if called with ~0.
 */
static uint32_t get_first_zero(const uint32_t n)
{
    /* __builtin_ctz returns the number of trailing zeros. */
    return 1 << __builtin_ctz(~n);
}

/**
 * Returns a free id for cs.
 * If there is no free id we return zero.
 **/
static uint32_t generate_id(void)
{
    uint32_t r = 0;
    pthread_mutex_lock(&id_mutex);
    /* check for free ids */
    if (cs_id_source != ~r) {
        /* find first zero bit */
        r = get_first_zero(cs_id_source);

        /* mark the id as reserved */
        cs_id_source |= r;
    }
    pthread_mutex_unlock(&id_mutex);
    return r;
}

/**
 * Free the id for later reuse.
 **/
static void free_id(uint32_t id)
{
    pthread_mutex_lock(&id_mutex);

    cs_id_source &= ~id;

    pthread_mutex_unlock(&id_mutex);
}

static struct radeon_cs_int *cs_gem_create(struct radeon_cs_manager *csm,
                                           uint32_t ndw)
{
    struct cs_gem *csg;

    /* max command buffer size is 64KB */
    if (ndw > (64 * 1024 / 4)) {
        return NULL;
    }
    csg = (struct cs_gem*)calloc(1, sizeof(struct cs_gem));
    if (csg == NULL) {
        return NULL;
    }
    csg->base.csm = csm;
    csg->base.ndw = 64 * 1024 / 4;
    csg->base.packets = (uint32_t*)calloc(1, 64 * 1024);
    if (csg->base.packets == NULL) {
        free(csg);
        return NULL;
    }
    csg->base.relocs_total_size = 0;
    csg->base.crelocs = 0;
    csg->base.id = generate_id();
    csg->nrelocs = 4096 / (4 * 4);
    csg->relocs_bo = (struct radeon_bo_int**)calloc(1,
                                                    csg->nrelocs*sizeof(void*));
    if (csg->relocs_bo == NULL) {
        free(csg->base.packets);
        free(csg);
        return NULL;
    }
    csg->base.relocs = csg->relocs = (uint32_t*)calloc(1, 4096);
    if (csg->relocs == NULL) {
        free(csg->relocs_bo);
        free(csg->base.packets);
        free(csg);
        return NULL;
    }
    csg->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
    csg->chunks[0].length_dw = 0;
    csg->chunks[0].chunk_data = (uint64_t)(uintptr_t)csg->base.packets;
    csg->chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
    csg->chunks[1].length_dw = 0;
    csg->chunks[1].chunk_data = (uint64_t)(uintptr_t)csg->relocs;
    return (struct radeon_cs_int*)csg;
}

static int cs_gem_write_reloc(struct radeon_cs_int *cs,
                              struct radeon_bo *bo,
                              uint32_t read_domain,
                              uint32_t write_domain,
                              uint32_t flags)
{
    struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
    struct cs_gem *csg = (struct cs_gem*)cs;
    struct cs_reloc_gem *reloc;
    uint32_t idx;
    unsigned i;

    assert(boi->space_accounted);

    /* check domains */
    if ((read_domain && write_domain) || (!read_domain && !write_domain)) {
        /* in one CS a bo can only be in the read or the write domain but
         * not in both at the same time
         */
        return -EINVAL;
    }
    if (read_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
    if (write_domain == RADEON_GEM_DOMAIN_CPU) {
        return -EINVAL;
    }
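    /*
     * How the quick membership test below works: every BO carries a
     * per-object word (reached via radeon_gem_get_reloc_in_cs()) into which
     * each command stream OR-s its one-bit id when it first adds the BO as
     * a relocation (see the atomic_add further down) and removes it again
     * on emit/erase.  If our bit is clear the BO is definitely not in this
     * CS and the linear scan of the reloc table can be skipped; if it is
     * set the BO is almost certainly here, and the loop still confirms it
     * by comparing GEM handles.
     */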
    /* use the bit field hash function to determine
     * if this bo is for sure not in this cs. */
    if ((atomic_read((atomic_t *)radeon_gem_get_reloc_in_cs(bo)) & cs->id)) {
        /* check if bo is already referenced.
         * Scanning from end to begin reduces cycles with mesa because
         * it often relocates the same shared dma bo again. */
        for (i = cs->crelocs; i != 0;) {
            --i;
            idx = i * RELOC_SIZE;
            reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
            if (reloc->handle == bo->handle) {
                /* The domains must be either read or write. As we already
                 * checked that exactly one of the read or write domains was
                 * set in the arguments, we only need to check that if the
                 * previous reloc has the read domain set then the read
                 * domain should also be set for this new relocation.
                 */
                /* the DDX expects to read and write from the same pixmap */
                if (write_domain && (reloc->read_domain & write_domain)) {
                    reloc->read_domain = 0;
                    reloc->write_domain = write_domain;
                } else if (read_domain & reloc->write_domain) {
                    reloc->read_domain = 0;
                } else {
                    if (write_domain != reloc->write_domain)
                        return -EINVAL;
                    if (read_domain != reloc->read_domain)
                        return -EINVAL;
                }

                reloc->read_domain |= read_domain;
                reloc->write_domain |= write_domain;
                /* update flags */
                reloc->flags |= (flags & reloc->flags);
                /* write relocation packet */
                radeon_cs_write_dword((struct radeon_cs *)cs, 0xc0001000);
                radeon_cs_write_dword((struct radeon_cs *)cs, idx);
                return 0;
            }
        }
    }
    /* new relocation */
    if (csg->base.crelocs >= csg->nrelocs) {
        /* allocate more memory (TODO: should maybe use a slab allocator) */
        uint32_t *tmp, size;
        size = ((csg->nrelocs + 1) * sizeof(struct radeon_bo*));
        tmp = (uint32_t*)realloc(csg->relocs_bo, size);
        if (tmp == NULL) {
            return -ENOMEM;
        }
        csg->relocs_bo = (struct radeon_bo_int **)tmp;
        size = ((csg->nrelocs + 1) * RELOC_SIZE * 4);
        tmp = (uint32_t*)realloc(csg->relocs, size);
        if (tmp == NULL) {
            return -ENOMEM;
        }
        cs->relocs = csg->relocs = tmp;
        csg->nrelocs += 1;
        csg->chunks[1].chunk_data = (uint64_t)(uintptr_t)csg->relocs;
    }
    csg->relocs_bo[csg->base.crelocs] = boi;
    idx = (csg->base.crelocs++) * RELOC_SIZE;
    reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
    reloc->handle = bo->handle;
    reloc->read_domain = read_domain;
    reloc->write_domain = write_domain;
    reloc->flags = flags;
    csg->chunks[1].length_dw += RELOC_SIZE;
    radeon_bo_ref(bo);
    /* the bo might be referenced from another context so we have to use atomic operations */
    atomic_add((atomic_t *)radeon_gem_get_reloc_in_cs(bo), cs->id);
    cs->relocs_total_size += boi->size;
    radeon_cs_write_dword((struct radeon_cs *)cs, 0xc0001000);
    radeon_cs_write_dword((struct radeon_cs *)cs, idx);
    return 0;
}

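/*
 * Section bookkeeping: cs_gem_begin() records how many dwords the caller
 * promises to write (section_ndw) and cs_gem_end() checks that exactly
 * that many were written (section_cdw), reporting the file/function/line
 * captured at begin time when the two disagree.  begin() also grows the
 * packet buffer on demand, rounding the required size up to a multiple of
 * 1024 dwords; e.g. with cs->cdw == 100 and ndw == 30, the new size is
 * (130 + 0x3FF) & ~0x3FF == 1024 dwords.
 */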
static int cs_gem_begin(struct radeon_cs_int *cs,
                        uint32_t ndw,
                        const char *file,
                        const char *func,
                        int line)
{
    if (cs->section_ndw) {
        fprintf(stderr, "CS already in a section(%s,%s,%d)\n",
                cs->section_file, cs->section_func, cs->section_line);
        fprintf(stderr, "CS can't start section(%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }
    cs->section_ndw = ndw;
    cs->section_cdw = 0;
    cs->section_file = file;
    cs->section_func = func;
    cs->section_line = line;

    if (cs->cdw + ndw > cs->ndw) {
        uint32_t tmp, *ptr;

        /* round up the required size to a multiple of 1024 */
        tmp = (cs->cdw + ndw + 0x3FF) & (~0x3FF);
        ptr = (uint32_t*)realloc(cs->packets, 4 * tmp);
        if (ptr == NULL) {
            return -ENOMEM;
        }
        cs->packets = ptr;
        cs->ndw = tmp;
    }
    return 0;
}

static int cs_gem_end(struct radeon_cs_int *cs,
                      const char *file,
                      const char *func,
                      int line)
{
    if (!cs->section_ndw) {
        fprintf(stderr, "CS has no section to end at (%s,%s,%d)\n",
                file, func, line);
        return -EPIPE;
    }
    if (cs->section_ndw != cs->section_cdw) {
        fprintf(stderr, "CS section size mismatch start at (%s,%s,%d) %d vs %d\n",
                cs->section_file, cs->section_func, cs->section_line,
                cs->section_ndw, cs->section_cdw);
        fprintf(stderr, "CS section end at (%s,%s,%d)\n",
                file, func, line);

        /* We must reset the section even when there is an error. */
        cs->section_ndw = 0;
        return -EPIPE;
    }
    cs->section_ndw = 0;
    return 0;
}

static void cs_gem_dump_bof(struct radeon_cs_int *cs)
{
    struct cs_gem *csg = (struct cs_gem*)cs;
    struct radeon_cs_manager_gem *csm;
    bof_t *bcs, *blob, *array, *bo, *size, *handle, *device_id, *root;
    char tmp[256];
    unsigned i;

    csm = (struct radeon_cs_manager_gem *)cs->csm;
    root = device_id = bcs = blob = array = bo = size = handle = NULL;
    root = bof_object();
    if (root == NULL)
        goto out_err;
    device_id = bof_int32(csm->device_id);
    if (device_id == NULL)
        goto out_err;
    if (bof_object_set(root, "device_id", device_id))
        goto out_err;
    bof_decref(device_id);
    device_id = NULL;
    /* dump relocs */
    blob = bof_blob(csg->nrelocs * 16, csg->relocs);
    if (blob == NULL)
        goto out_err;
    if (bof_object_set(root, "reloc", blob))
        goto out_err;
    bof_decref(blob);
    blob = NULL;
    /* dump cs */
    blob = bof_blob(cs->cdw * 4, cs->packets);
    if (blob == NULL)
        goto out_err;
    if (bof_object_set(root, "pm4", blob))
        goto out_err;
    bof_decref(blob);
    blob = NULL;
    /* dump bo */
    array = bof_array();
    if (array == NULL)
        goto out_err;
    for (i = 0; i < csg->base.crelocs; i++) {
        bo = bof_object();
        if (bo == NULL)
            goto out_err;
        size = bof_int32(csg->relocs_bo[i]->size);
        if (size == NULL)
            goto out_err;
        if (bof_object_set(bo, "size", size))
            goto out_err;
        bof_decref(size);
        size = NULL;
        handle = bof_int32(csg->relocs_bo[i]->handle);
        if (handle == NULL)
            goto out_err;
        if (bof_object_set(bo, "handle", handle))
            goto out_err;
        bof_decref(handle);
        handle = NULL;
        radeon_bo_map((struct radeon_bo*)csg->relocs_bo[i], 0);
        blob = bof_blob(csg->relocs_bo[i]->size, csg->relocs_bo[i]->ptr);
        radeon_bo_unmap((struct radeon_bo*)csg->relocs_bo[i]);
        if (blob == NULL)
            goto out_err;
        if (bof_object_set(bo, "data", blob))
            goto out_err;
        bof_decref(blob);
        blob = NULL;
        if (bof_array_append(array, bo))
            goto out_err;
        bof_decref(bo);
        bo = NULL;
    }
    if (bof_object_set(root, "bo", array))
        goto out_err;
    sprintf(tmp, "d-0x%04X-%08d.bof", csm->device_id, csm->nbof++);
    bof_dump_file(root, tmp);
out_err:
    bof_decref(blob);
    bof_decref(array);
    bof_decref(bo);
    bof_decref(size);
    bof_decref(handle);
    bof_decref(device_id);
    bof_decref(root);
}

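/*
 * Submission layout: the DRM_RADEON_CS ioctl receives an array of chunk
 * descriptors.  Chunk 0 (RADEON_CHUNK_ID_IB) points at the PM4 packet
 * buffer and chunk 1 (RADEON_CHUNK_ID_RELOCS) at the cs_reloc_gem table;
 * both are passed as user-space pointers cast through uintptr_t to
 * uint64_t, so the kernel CS checker can validate the packets and patch in
 * the final buffer addresses described by the relocation entries.
 */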
static int cs_gem_emit(struct radeon_cs_int *cs)
{
    struct cs_gem *csg = (struct cs_gem*)cs;
    uint64_t chunk_array[2];
    unsigned i;
    int r;

#if CS_BOF_DUMP
    cs_gem_dump_bof(cs);
#endif
    csg->chunks[0].length_dw = cs->cdw;

    chunk_array[0] = (uint64_t)(uintptr_t)&csg->chunks[0];
    chunk_array[1] = (uint64_t)(uintptr_t)&csg->chunks[1];

    csg->cs.num_chunks = 2;
    csg->cs.chunks = (uint64_t)(uintptr_t)chunk_array;

    r = drmCommandWriteRead(cs->csm->fd, DRM_RADEON_CS,
                            &csg->cs, sizeof(struct drm_radeon_cs));
    for (i = 0; i < csg->base.crelocs; i++) {
        csg->relocs_bo[i]->space_accounted = 0;
        /* the bo might be referenced from another context so we have to use atomic operations */
        atomic_dec((atomic_t *)radeon_gem_get_reloc_in_cs((struct radeon_bo*)csg->relocs_bo[i]), cs->id);
        radeon_bo_unref((struct radeon_bo *)csg->relocs_bo[i]);
        csg->relocs_bo[i] = NULL;
    }

    cs->csm->read_used = 0;
    cs->csm->vram_write_used = 0;
    cs->csm->gart_write_used = 0;
    return r;
}

static int cs_gem_destroy(struct radeon_cs_int *cs)
{
    struct cs_gem *csg = (struct cs_gem*)cs;

    free_id(cs->id);
    free(csg->relocs_bo);
    free(cs->relocs);
    free(cs->packets);
    free(cs);
    return 0;
}

static int cs_gem_erase(struct radeon_cs_int *cs)
{
    struct cs_gem *csg = (struct cs_gem*)cs;
    unsigned i;

    if (csg->relocs_bo) {
        for (i = 0; i < csg->base.crelocs; i++) {
            if (csg->relocs_bo[i]) {
                /* the bo might be referenced from another context so we have to use atomic operations */
                atomic_dec((atomic_t *)radeon_gem_get_reloc_in_cs((struct radeon_bo*)csg->relocs_bo[i]), cs->id);
                radeon_bo_unref((struct radeon_bo *)csg->relocs_bo[i]);
                csg->relocs_bo[i] = NULL;
            }
        }
    }
    cs->relocs_total_size = 0;
    cs->cdw = 0;
    cs->section_ndw = 0;
    cs->crelocs = 0;
    csg->chunks[0].length_dw = 0;
    csg->chunks[1].length_dw = 0;
    return 0;
}

static int cs_gem_need_flush(struct radeon_cs_int *cs)
{
    return 0; //(cs->relocs_total_size > (32*1024*1024));
}

static void cs_gem_print(struct radeon_cs_int *cs, FILE *file)
{
    struct radeon_cs_manager_gem *csm;
    unsigned int i;

    csm = (struct radeon_cs_manager_gem *)cs->csm;
    fprintf(file, "VENDORID:DEVICEID 0x%04X:0x%04X\n", 0x1002, csm->device_id);
    for (i = 0; i < cs->cdw; i++) {
        fprintf(file, "0x%08X\n", cs->packets[i]);
    }
}

static struct radeon_cs_funcs radeon_cs_gem_funcs = {
    cs_gem_create,
    cs_gem_write_reloc,
    cs_gem_begin,
    cs_gem_end,
    cs_gem_emit,
    cs_gem_destroy,
    cs_gem_erase,
    cs_gem_need_flush,
    cs_gem_print,
};

static int radeon_get_device_id(int fd, uint32_t *device_id)
{
    struct drm_radeon_info info = {};
    int r;

    *device_id = 0;
    info.request = RADEON_INFO_DEVICE_ID;
    info.value = (uintptr_t)device_id;
    r = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info,
                            sizeof(struct drm_radeon_info));
    return r;
}

struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd)
{
    struct radeon_cs_manager_gem *csm;

    csm = calloc(1, sizeof(struct radeon_cs_manager_gem));
    if (csm == NULL) {
        return NULL;
    }
    csm->base.funcs = &radeon_cs_gem_funcs;
    csm->base.fd = fd;
    radeon_get_device_id(fd, &csm->device_id);
    return &csm->base;
}

void radeon_cs_manager_gem_dtor(struct radeon_cs_manager *csm)
{
    free(csm);
}
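
/*
 * Minimal usage sketch (illustrative only): the manager returned by
 * radeon_cs_manager_gem_ctor() is normally driven through the radeon_cs.h
 * wrappers, which dispatch into the radeon_cs_gem_funcs table installed
 * above.  The wrapper names and signatures below are assumptions based on
 * the public libdrm_radeon API; the authoritative declarations live in
 * radeon_cs.h.
 *
 *     struct radeon_cs_manager *csm = radeon_cs_manager_gem_ctor(fd);
 *     struct radeon_cs *cs = radeon_cs_create(csm, 1024);
 *     radeon_cs_begin(cs, 2, __FILE__, __func__, __LINE__);
 *     radeon_cs_write_dword(cs, ...);   // driver-specific PM4 packets
 *     radeon_cs_write_dword(cs, ...);
 *     radeon_cs_end(cs, __FILE__, __func__, __LINE__);
 *     radeon_cs_emit(cs);               // issues the DRM_RADEON_CS ioctl
 *     radeon_cs_erase(cs);              // recycle the stream for reuse
 *     radeon_cs_destroy(cs);
 *     radeon_cs_manager_gem_dtor(csm);
 */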