nouveau.c revision 4babd585
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <stdbool.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>

#include <xf86drm.h>
#include <xf86atomic.h>
#include "libdrm_macros.h"
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"

#include "nvif/class.h"
#include "nvif/cl0080.h"
#include "nvif/ioctl.h"
#include "nvif/unpack.h"

drm_private FILE *nouveau_out = NULL;
drm_private uint32_t nouveau_debug = 0;

static void
debug_init(void)
{
	static bool once = false;
	char *debug, *out;

	if (once)
		return;
	once = true;

	debug = getenv("NOUVEAU_LIBDRM_DEBUG");
	if (debug) {
		int n = strtol(debug, NULL, 0);
		if (n >= 0)
			nouveau_debug = n;
	}

	nouveau_out = stderr;
	out = getenv("NOUVEAU_LIBDRM_OUT");
	if (out) {
		FILE *fout = fopen(out, "w");
		if (fout)
			nouveau_out = fout;
	}
}

static int
nouveau_object_ioctl(struct nouveau_object *obj, void *data, uint32_t size)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	union {
		struct nvif_ioctl_v0 v0;
	} *args = data;
	uint32_t argc = size;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
		if (!obj->length) {
			if (obj != &drm->client)
				args->v0.object = (unsigned long)(void *)obj;
			else
				args->v0.object = 0;
			args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
			args->v0.route = 0x00;
		} else {
			args->v0.route = 0xff;
			args->v0.token = obj->handle;
		}
	} else
		return ret;

	return drmCommandWriteRead(drm->fd, DRM_NOUVEAU_NVIF, args, argc);
}

drm_public int
nouveau_object_mthd(struct nouveau_object *obj,
		    uint32_t mthd, void *data, uint32_t size)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_mthd_v0 mthd;
	} *args;
	uint32_t argc = sizeof(*args) + size;
	uint8_t stack[128];
	int ret;

	if (!drm->nvif)
		return -ENOSYS;

	if (argc > sizeof(stack)) {
		if (!(args = malloc(argc)))
			return -ENOMEM;
	} else {
		args = (void *)stack;
	}
	args->ioctl.version = 0;
	args->ioctl.type = NVIF_IOCTL_V0_MTHD;
	args->mthd.version = 0;
	args->mthd.method = mthd;

	memcpy(args->mthd.data, data, size);
	ret = nouveau_object_ioctl(obj, args, argc);
	memcpy(data, args->mthd.data, size);
	if (args != (void *)stack)
		free(args);
	return ret;
}
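
/*
 * Illustrative sketch: a typical nouveau_object_mthd() call is the
 * NV_DEVICE_V0_INFO query that nouveau_device_new() below performs on the
 * device object, roughly:
 *
 *	struct nv_device_info_v0 info = { .version = 0 };
 *	int ret = nouveau_object_mthd(&dev->object, NV_DEVICE_V0_INFO,
 *				      &info, sizeof(info));
 *
 * On success the method payload is copied back into the caller's buffer,
 * so fields such as info.chipset are valid afterwards.
 */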

drm_public void
nouveau_object_sclass_put(struct nouveau_sclass **psclass)
{
	free(*psclass);
	*psclass = NULL;
}

drm_public int
nouveau_object_sclass_get(struct nouveau_object *obj,
			  struct nouveau_sclass **psclass)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_sclass_v0 sclass;
	} *args = NULL;
	struct nouveau_sclass *sclass;
	int ret, cnt = 0, i;
	uint32_t size;

	if (!drm->nvif)
		return abi16_sclass(obj, psclass);

	while (1) {
		size = sizeof(*args) + cnt * sizeof(args->sclass.oclass[0]);
		if (!(args = malloc(size)))
			return -ENOMEM;
		args->ioctl.version = 0;
		args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
		args->sclass.version = 0;
		args->sclass.count = cnt;

		ret = nouveau_object_ioctl(obj, args, size);
		if (ret == 0 && args->sclass.count <= cnt)
			break;
		cnt = args->sclass.count;
		free(args);
		if (ret != 0)
			return ret;
	}

	if ((sclass = calloc(args->sclass.count, sizeof(*sclass)))) {
		for (i = 0; i < args->sclass.count; i++) {
			sclass[i].oclass = args->sclass.oclass[i].oclass;
			sclass[i].minver = args->sclass.oclass[i].minver;
			sclass[i].maxver = args->sclass.oclass[i].maxver;
		}
		*psclass = sclass;
		ret = args->sclass.count;
	} else {
		ret = -ENOMEM;
	}

	free(args);
	return ret;
}

drm_public int
nouveau_object_mclass(struct nouveau_object *obj,
		      const struct nouveau_mclass *mclass)
{
	struct nouveau_sclass *sclass;
	int ret = -ENODEV;
	int cnt, i, j;

	cnt = nouveau_object_sclass_get(obj, &sclass);
	if (cnt < 0)
		return cnt;

	for (i = 0; ret < 0 && mclass[i].oclass; i++) {
		for (j = 0; j < cnt; j++) {
			if (mclass[i].oclass == sclass[j].oclass &&
			    mclass[i].version >= sclass[j].minver &&
			    mclass[i].version <= sclass[j].maxver) {
				ret = i;
				break;
			}
		}
	}

	nouveau_object_sclass_put(&sclass);
	return ret;
}
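
/*
 * Illustrative sketch: callers pass a zero-terminated table of candidate
 * classes (the class ids below are hypothetical placeholders) and use the
 * returned index to pick the newest class the object's parent supports:
 *
 *	static const struct nouveau_mclass classes[] = {
 *		{ .oclass = 0x9297, .version = 0 },
 *		{ .oclass = 0x8297, .version = 0 },
 *		{}
 *	};
 *	int i = nouveau_object_mclass(parent, classes);
 *	if (i >= 0)
 *		oclass = classes[i].oclass;
 *
 * A negative return means no listed class matched (-ENODEV) or the class
 * list could not be queried.
 */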

static void
nouveau_object_fini(struct nouveau_object *obj)
{
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_del del;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_DEL,
	};

	if (obj->data) {
		abi16_delete(obj);
		free(obj->data);
		obj->data = NULL;
		return;
	}

	nouveau_object_ioctl(obj, &args, sizeof(args));
}

static int
nouveau_object_init(struct nouveau_object *parent, uint32_t handle,
		    int32_t oclass, void *data, uint32_t size,
		    struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(parent);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_new_v0 new;
	} *args;
	uint32_t argc = sizeof(*args) + size;
	int (*func)(struct nouveau_object *);
	int ret = -ENOSYS;

	obj->parent = parent;
	obj->handle = handle;
	obj->oclass = oclass;
	obj->length = 0;
	obj->data = NULL;

	if (!abi16_object(obj, &func) && drm->nvif) {
		if (!(args = malloc(argc)))
			return -ENOMEM;
		args->ioctl.version = 0;
		args->ioctl.type = NVIF_IOCTL_V0_NEW;
		args->new.version = 0;
		args->new.route = NVIF_IOCTL_V0_ROUTE_NVIF;
		args->new.token = (unsigned long)(void *)obj;
		args->new.object = (unsigned long)(void *)obj;
		args->new.handle = handle;
		args->new.oclass = oclass;
		memcpy(args->new.data, data, size);
		ret = nouveau_object_ioctl(parent, args, argc);
		memcpy(data, args->new.data, size);
		free(args);
	} else
	if (func) {
		obj->length = size ? size : sizeof(struct nouveau_object *);
		if (!(obj->data = malloc(obj->length)))
			return -ENOMEM;
		if (data)
			memcpy(obj->data, data, obj->length);
		*(struct nouveau_object **)obj->data = obj;

		ret = func(obj);
	}

	if (ret) {
		nouveau_object_fini(obj);
		return ret;
	}

	return 0;
}

drm_public int
nouveau_object_new(struct nouveau_object *parent, uint64_t handle,
		   uint32_t oclass, void *data, uint32_t length,
		   struct nouveau_object **pobj)
{
	struct nouveau_object *obj;
	int ret;

	if (!(obj = malloc(sizeof(*obj))))
		return -ENOMEM;

	ret = nouveau_object_init(parent, handle, oclass, data, length, obj);
	if (ret) {
		free(obj);
		return ret;
	}

	*pobj = obj;
	return 0;
}

drm_public void
nouveau_object_del(struct nouveau_object **pobj)
{
	struct nouveau_object *obj = *pobj;
	if (obj) {
		nouveau_object_fini(obj);
		free(obj);
		*pobj = NULL;
	}
}
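
/*
 * Illustrative sketch: the usual pairing is nouveau_object_new() followed by
 * nouveau_object_del(); parent, handle, oclass and args here are placeholders
 * for whatever class-specific arguments the caller supplies:
 *
 *	struct nouveau_object *obj = NULL;
 *
 *	ret = nouveau_object_new(parent, handle, oclass,
 *				 &args, sizeof(args), &obj);
 *	if (ret == 0) {
 *		... use obj ...
 *		nouveau_object_del(&obj);
 *	}
 *
 * nouveau_object_del() is a no-op when *pobj is NULL and always leaves the
 * pointer NULL afterwards.
 */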

drm_public void
nouveau_drm_del(struct nouveau_drm **pdrm)
{
	free(*pdrm);
	*pdrm = NULL;
}

drm_public int
nouveau_drm_new(int fd, struct nouveau_drm **pdrm)
{
	struct nouveau_drm *drm;
	drmVersionPtr ver;

	debug_init();

	if (!(drm = calloc(1, sizeof(*drm))))
		return -ENOMEM;
	drm->fd = fd;

	if (!(ver = drmGetVersion(fd))) {
		nouveau_drm_del(&drm);
		return -EINVAL;
	}
	*pdrm = drm;

	drm->version = (ver->version_major << 24) |
		       (ver->version_minor << 8) |
			ver->version_patchlevel;
	drm->nvif = (drm->version >= 0x01000301);
	drmFreeVersion(ver);
	return 0;
}

/* this is the old libdrm's version of nouveau_device_wrap(), the symbol
 * is kept here to prevent AIGLX from crashing if the DDX is linked against
 * the new libdrm, but the DRI driver against the old
 */
drm_public int
nouveau_device_open_existing(struct nouveau_device **pdev, int close, int fd,
			     drm_context_t ctx)
{
	return -EACCES;
}

drm_public int
nouveau_device_new(struct nouveau_object *parent, int32_t oclass,
		   void *data, uint32_t size, struct nouveau_device **pdev)
{
	struct nv_device_info_v0 info = {};
	union {
		struct nv_device_v0 v0;
	} *args = data;
	uint32_t argc = size;
	struct nouveau_drm *drm = nouveau_drm(parent);
	struct nouveau_device_priv *nvdev;
	struct nouveau_device *dev;
	uint64_t v;
	char *tmp;
	int ret = -ENOSYS;

	if (oclass != NV_DEVICE ||
	    nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))
		return ret;

	if (!(nvdev = calloc(1, sizeof(*nvdev))))
		return -ENOMEM;
	dev = *pdev = &nvdev->base;
	dev->fd = -1;

	if (drm->nvif) {
		ret = nouveau_object_init(parent, 0, oclass, args, argc,
					  &dev->object);
		if (ret)
			goto done;

		info.version = 0;

		ret = nouveau_object_mthd(&dev->object, NV_DEVICE_V0_INFO,
					  &info, sizeof(info));
		if (ret)
			goto done;

		nvdev->base.chipset = info.chipset;
		nvdev->have_bo_usage = true;
	} else
	if (args->v0.device == ~0ULL) {
		nvdev->base.object.parent = &drm->client;
		nvdev->base.object.handle = ~0ULL;
		nvdev->base.object.oclass = NOUVEAU_DEVICE_CLASS;
		nvdev->base.object.length = ~0;

		ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_CHIPSET_ID, &v);
		if (ret)
			goto done;
		nvdev->base.chipset = v;

		ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_HAS_BO_USAGE, &v);
		if (ret == 0)
			nvdev->have_bo_usage = (v != 0);
	} else
		return -ENOSYS;

	ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_FB_SIZE, &v);
	if (ret)
		goto done;
	nvdev->base.vram_size = v;

	ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_AGP_SIZE, &v);
	if (ret)
		goto done;
	nvdev->base.gart_size = v;

	tmp = getenv("NOUVEAU_LIBDRM_VRAM_LIMIT_PERCENT");
	if (tmp)
		nvdev->vram_limit_percent = atoi(tmp);
	else
		nvdev->vram_limit_percent = 80;

	nvdev->base.vram_limit =
		(nvdev->base.vram_size * nvdev->vram_limit_percent) / 100;

	tmp = getenv("NOUVEAU_LIBDRM_GART_LIMIT_PERCENT");
	if (tmp)
		nvdev->gart_limit_percent = atoi(tmp);
	else
		nvdev->gart_limit_percent = 80;

	nvdev->base.gart_limit =
		(nvdev->base.gart_size * nvdev->gart_limit_percent) / 100;

	ret = pthread_mutex_init(&nvdev->lock, NULL);
	DRMINITLISTHEAD(&nvdev->bo_list);
done:
	if (ret)
		nouveau_device_del(pdev);
	return ret;
}
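
/*
 * Tuning note: nouveau_device_new() reads NOUVEAU_LIBDRM_VRAM_LIMIT_PERCENT
 * and NOUVEAU_LIBDRM_GART_LIMIT_PERCENT from the environment and defaults
 * both to 80, so vram_limit/gart_limit are 80% of the reported sizes unless
 * overridden, e.g.
 *
 *	NOUVEAU_LIBDRM_VRAM_LIMIT_PERCENT=50 ./app
 *
 * NOUVEAU_LIBDRM_DEBUG and NOUVEAU_LIBDRM_OUT (see debug_init() above)
 * select the debug level and where debug output is written.
 */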

drm_public int
nouveau_device_wrap(int fd, int close, struct nouveau_device **pdev)
{
	struct nouveau_drm *drm;
	struct nouveau_device_priv *nvdev;
	int ret;

	ret = nouveau_drm_new(fd, &drm);
	if (ret)
		return ret;
	drm->nvif = false;

	ret = nouveau_device_new(&drm->client, NV_DEVICE,
				 &(struct nv_device_v0) {
					.device = ~0ULL,
				 }, sizeof(struct nv_device_v0), pdev);
	if (ret) {
		nouveau_drm_del(&drm);
		return ret;
	}

	nvdev = nouveau_device(*pdev);
	nvdev->base.fd = drm->fd;
	nvdev->base.drm_version = drm->version;
	nvdev->close = close;
	return 0;
}

drm_public int
nouveau_device_open(const char *busid, struct nouveau_device **pdev)
{
	int ret = -ENODEV, fd = drmOpen("nouveau", busid);
	if (fd >= 0) {
		ret = nouveau_device_wrap(fd, 1, pdev);
		if (ret)
			drmClose(fd);
	}
	return ret;
}

drm_public void
nouveau_device_del(struct nouveau_device **pdev)
{
	struct nouveau_device_priv *nvdev = nouveau_device(*pdev);
	if (nvdev) {
		free(nvdev->client);
		pthread_mutex_destroy(&nvdev->lock);
		if (nvdev->base.fd >= 0) {
			struct nouveau_drm *drm =
				nouveau_drm(&nvdev->base.object);
			nouveau_drm_del(&drm);
			if (nvdev->close)
				drmClose(nvdev->base.fd);
		}
		free(nvdev);
		*pdev = NULL;
	}
}

drm_public int
nouveau_getparam(struct nouveau_device *dev, uint64_t param, uint64_t *value)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_getparam r = { .param = param };
	int fd = drm->fd, ret =
		drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &r, sizeof(r));
	*value = r.value;
	return ret;
}

drm_public int
nouveau_setparam(struct nouveau_device *dev, uint64_t param, uint64_t value)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_setparam r = { .param = param, .value = value };
	return drmCommandWrite(drm->fd, DRM_NOUVEAU_SETPARAM, &r, sizeof(r));
}

drm_public int
nouveau_client_new(struct nouveau_device *dev, struct nouveau_client **pclient)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_client_priv *pcli;
	int id = 0, i, ret = -ENOMEM;
	uint32_t *clients;

	pthread_mutex_lock(&nvdev->lock);

	for (i = 0; i < nvdev->nr_client; i++) {
		id = ffs(nvdev->client[i]) - 1;
		if (id >= 0)
			goto out;
	}

	clients = realloc(nvdev->client, sizeof(uint32_t) * (i + 1));
	if (!clients)
		goto unlock;
	nvdev->client = clients;
	nvdev->client[i] = 0;
	nvdev->nr_client++;

out:
	pcli = calloc(1, sizeof(*pcli));
	if (pcli) {
		nvdev->client[i] |= (1 << id);
		pcli->base.device = dev;
		pcli->base.id = (i * 32) + id;
		ret = 0;
	}

	*pclient = &pcli->base;

unlock:
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

drm_public void
nouveau_client_del(struct nouveau_client **pclient)
{
	struct nouveau_client_priv *pcli = nouveau_client(*pclient);
	struct nouveau_device_priv *nvdev;
	if (pcli) {
		int id = pcli->base.id;
		nvdev = nouveau_device(pcli->base.device);
		pthread_mutex_lock(&nvdev->lock);
		nvdev->client[id / 32] &= ~(1 << (id % 32));
		pthread_mutex_unlock(&nvdev->lock);
		free(pcli->kref);
		free(pcli);
	}
}
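
/*
 * Minimal bring-up sketch using the entry points above (illustrative only;
 * busid is whatever bus id the caller wants to pass to drmOpen()):
 *
 *	struct nouveau_device *dev = NULL;
 *	struct nouveau_client *client = NULL;
 *
 *	if (nouveau_device_open(busid, &dev))
 *		return;
 *	if (nouveau_client_new(dev, &client) == 0) {
 *		... allocate buffers, channels, etc. ...
 *		nouveau_client_del(&client);
 *	}
 *	nouveau_device_del(&dev);
 *
 * nouveau_device_open() owns the fd it opened (close == 1), so
 * nouveau_device_del() will drmClose() it.
 */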

static void
nouveau_bo_del(struct nouveau_bo *bo)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_gem_close req = { .handle = bo->handle };

	if (nvbo->head.next) {
		pthread_mutex_lock(&nvdev->lock);
		if (atomic_read(&nvbo->refcnt) == 0) {
			DRMLISTDEL(&nvbo->head);
			/*
			 * This bo has to be closed with the lock held because
			 * gem handles are not refcounted. If a shared bo is
			 * closed and re-opened in another thread a race
			 * against DRM_IOCTL_GEM_OPEN or drmPrimeFDToHandle
			 * might cause the bo to be closed accidentally while
			 * re-importing.
			 */
			drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &req);
		}
		pthread_mutex_unlock(&nvdev->lock);
	} else {
		drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}
	if (bo->map)
		drm_munmap(bo->map, bo->size);
	free(nvbo);
}

drm_public int
nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, uint32_t align,
	       uint64_t size, union nouveau_bo_config *config,
	       struct nouveau_bo **pbo)
{
	struct nouveau_bo_priv *nvbo = calloc(1, sizeof(*nvbo));
	struct nouveau_bo *bo = &nvbo->base;
	int ret;

	if (!nvbo)
		return -ENOMEM;
	atomic_set(&nvbo->refcnt, 1);
	bo->device = dev;
	bo->flags = flags;
	bo->size = size;

	ret = abi16_bo_init(bo, align, config);
	if (ret) {
		free(nvbo);
		return ret;
	}

	*pbo = bo;
	return 0;
}

static int
nouveau_bo_wrap_locked(struct nouveau_device *dev, uint32_t handle,
		       struct nouveau_bo **pbo, int name)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct drm_nouveau_gem_info req = { .handle = handle };
	struct nouveau_bo_priv *nvbo;
	int ret;

	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
		if (nvbo->base.handle == handle) {
			if (atomic_inc_return(&nvbo->refcnt) == 1) {
				/*
				 * Uh oh, this bo is dead and someone else
				 * will free it, but because refcnt is
				 * now non-zero fortunately they won't
				 * call the ioctl to close the bo.
				 *
				 * Remove this bo from the list so other
				 * calls to nouveau_bo_wrap_locked will
				 * see our replacement nvbo.
				 */
				DRMLISTDEL(&nvbo->head);
				if (!name)
					name = nvbo->name;
				break;
			}

			*pbo = &nvbo->base;
			return 0;
		}
	}

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_INFO,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvbo = calloc(1, sizeof(*nvbo));
	if (nvbo) {
		atomic_set(&nvbo->refcnt, 1);
		nvbo->base.device = dev;
		abi16_bo_info(&nvbo->base, &req);
		nvbo->name = name;
		DRMLISTADD(&nvbo->head, &nvdev->bo_list);
		*pbo = &nvbo->base;
		return 0;
	}

	return -ENOMEM;
}

static void
nouveau_bo_make_global(struct nouveau_bo_priv *nvbo)
{
	if (!nvbo->head.next) {
		struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
		pthread_mutex_lock(&nvdev->lock);
		if (!nvbo->head.next)
			DRMLISTADD(&nvbo->head, &nvdev->bo_list);
		pthread_mutex_unlock(&nvdev->lock);
	}
}
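
/*
 * Shared-handle bookkeeping: buffers that have been flinked or exported via
 * PRIME are placed on nvdev->bo_list (nouveau_bo_make_global() above), so
 * nouveau_bo_wrap_locked() can hand back the existing nouveau_bo instead of
 * creating a second wrapper around the same GEM handle. Purely local buffers
 * created by nouveau_bo_new() never enter the list and are closed directly
 * in nouveau_bo_del().
 */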

drm_public int
nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle,
		struct nouveau_bo **pbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	int ret;
	pthread_mutex_lock(&nvdev->lock);
	ret = nouveau_bo_wrap_locked(dev, handle, pbo, 0);
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

drm_public int
nouveau_bo_name_ref(struct nouveau_device *dev, uint32_t name,
		    struct nouveau_bo **pbo)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_bo_priv *nvbo;
	struct drm_gem_open req = { .name = name };
	int ret;

	pthread_mutex_lock(&nvdev->lock);
	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
		if (nvbo->name == name) {
			ret = nouveau_bo_wrap_locked(dev, nvbo->base.handle,
						     pbo, name);
			pthread_mutex_unlock(&nvdev->lock);
			return ret;
		}
	}

	ret = drmIoctl(drm->fd, DRM_IOCTL_GEM_OPEN, &req);
	if (ret == 0) {
		ret = nouveau_bo_wrap_locked(dev, req.handle, pbo, name);
	}

	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

drm_public int
nouveau_bo_name_get(struct nouveau_bo *bo, uint32_t *name)
{
	struct drm_gem_flink req = { .handle = bo->handle };
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	*name = nvbo->name;
	if (!*name) {
		int ret = drmIoctl(drm->fd, DRM_IOCTL_GEM_FLINK, &req);

		if (ret) {
			*name = 0;
			return ret;
		}
		nvbo->name = *name = req.name;

		nouveau_bo_make_global(nvbo);
	}
	return 0;
}

drm_public void
nouveau_bo_ref(struct nouveau_bo *bo, struct nouveau_bo **pref)
{
	struct nouveau_bo *ref = *pref;
	if (bo) {
		atomic_inc(&nouveau_bo(bo)->refcnt);
	}
	if (ref) {
		if (atomic_dec_and_test(&nouveau_bo(ref)->refcnt))
			nouveau_bo_del(ref);
	}
	*pref = bo;
}

drm_public int
nouveau_bo_prime_handle_ref(struct nouveau_device *dev, int prime_fd,
			    struct nouveau_bo **bo)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	int ret;
	unsigned int handle;

	nouveau_bo_ref(NULL, bo);

	pthread_mutex_lock(&nvdev->lock);
	ret = drmPrimeFDToHandle(drm->fd, prime_fd, &handle);
	if (ret == 0) {
		ret = nouveau_bo_wrap_locked(dev, handle, bo, 0);
	}
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

drm_public int
nouveau_bo_set_prime(struct nouveau_bo *bo, int *prime_fd)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	int ret;

	ret = drmPrimeHandleToFD(drm->fd, nvbo->base.handle, DRM_CLOEXEC, prime_fd);
	if (ret)
		return ret;

	nouveau_bo_make_global(nvbo);
	return 0;
}

drm_public int
nouveau_bo_wait(struct nouveau_bo *bo, uint32_t access,
		struct nouveau_client *client)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_cpu_prep req;
	struct nouveau_pushbuf *push;
	int ret = 0;

	if (!(access & NOUVEAU_BO_RDWR))
		return 0;

	push = cli_push_get(client, bo);
	if (push && push->channel)
		nouveau_pushbuf_kick(push, push->channel);

	if (!nvbo->head.next && !(nvbo->access & NOUVEAU_BO_WR) &&
	    !(access & NOUVEAU_BO_WR))
		return 0;

	req.handle = bo->handle;
	req.flags = 0;
	if (access & NOUVEAU_BO_WR)
		req.flags |= NOUVEAU_GEM_CPU_PREP_WRITE;
	if (access & NOUVEAU_BO_NOBLOCK)
		req.flags |= NOUVEAU_GEM_CPU_PREP_NOWAIT;

	ret = drmCommandWrite(drm->fd, DRM_NOUVEAU_GEM_CPU_PREP,
			      &req, sizeof(req));
	if (ret == 0)
		nvbo->access = 0;
	return ret;
}

drm_public int
nouveau_bo_map(struct nouveau_bo *bo, uint32_t access,
	       struct nouveau_client *client)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	if (bo->map == NULL) {
		bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, drm->fd, nvbo->map_handle);
		if (bo->map == MAP_FAILED) {
			bo->map = NULL;
			return -errno;
		}
	}
	return nouveau_bo_wait(bo, access, client);
}
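
/*
 * Putting the buffer-object entry points together, a minimal allocation and
 * CPU-mapping sketch. The placement/usage flags below are the usual nouveau.h
 * bits and the sizes are illustrative only:
 *
 *	struct nouveau_bo *bo = NULL;
 *
 *	if (nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP, 0,
 *			   4096, NULL, &bo) == 0) {
 *		if (nouveau_bo_map(bo, NOUVEAU_BO_WR, client) == 0)
 *			memset(bo->map, 0, bo->size);
 *		nouveau_bo_ref(NULL, &bo);
 *	}
 *
 * nouveau_bo_map() synchronises with pending GPU access via nouveau_bo_wait()
 * and leaves the mapping in place; it is torn down in nouveau_bo_del() when
 * the last reference is dropped with nouveau_bo_ref(NULL, &bo).
 */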