1 /* $NetBSD: nouveau_bo.c,v 1.20 2025/03/23 17:04:09 joe Exp $ */ 2 3 /* 4 * Copyright 2007 Dave Airlied 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the "Software"), 9 * to deal in the Software without restriction, including without limitation 10 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 * and/or sell copies of the Software, and to permit persons to whom the 12 * Software is furnished to do so, subject to the following conditions: 13 * 14 * The above copyright notice and this permission notice (including the next 15 * paragraph) shall be included in all copies or substantial portions of the 16 * Software. 17 * 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 21 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR 22 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 23 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 24 * OTHER DEALINGS IN THE SOFTWARE. 25 */ 26 /* 27 * Authors: Dave Airlied <airlied@linux.ie> 28 * Ben Skeggs <darktama@iinet.net.au> 29 * Jeremy Kolb <jkolb@brandeis.edu> 30 */ 31 32 #include <sys/cdefs.h> 33 __KERNEL_RCSID(0, "$NetBSD: nouveau_bo.c,v 1.20 2025/03/23 17:04:09 joe Exp $"); 34 35 #include <linux/dma-mapping.h> 36 #include <linux/swiotlb.h> 37 38 #include "nouveau_drv.h" 39 #include "nouveau_dma.h" 40 #include "nouveau_fence.h" 41 42 #include "nouveau_bo.h" 43 #include "nouveau_ttm.h" 44 #include "nouveau_gem.h" 45 #include "nouveau_mem.h" 46 #include "nouveau_vmm.h" 47 48 #include <nvif/class.h> 49 #include <nvif/if500b.h> 50 #include <nvif/if900b.h> 51 52 /* 53 * NV10-NV40 tiling helpers 54 */ 55 56 static void 57 nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg, 58 u32 addr, u32 size, u32 pitch, u32 flags) 59 { 60 struct nouveau_drm *drm = nouveau_drm(dev); 61 int i = reg - drm->tile.reg; 62 struct nvkm_fb *fb = nvxx_fb(&drm->client.device); 63 struct nvkm_fb_tile *tile = &fb->tile.region[i]; 64 65 nouveau_fence_unref(&reg->fence); 66 67 if (tile->pitch) 68 nvkm_fb_tile_fini(fb, i, tile); 69 70 if (pitch) 71 nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile); 72 73 nvkm_fb_tile_prog(fb, i, tile); 74 } 75 76 static struct nouveau_drm_tile * 77 nv10_bo_get_tile_region(struct drm_device *dev, int i) 78 { 79 struct nouveau_drm *drm = nouveau_drm(dev); 80 struct nouveau_drm_tile *tile = &drm->tile.reg[i]; 81 82 spin_lock(&drm->tile.lock); 83 84 if (!tile->used && 85 (!tile->fence || nouveau_fence_done(tile->fence))) 86 tile->used = true; 87 else 88 tile = NULL; 89 90 spin_unlock(&drm->tile.lock); 91 return tile; 92 } 93 94 static void 95 nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile, 96 struct dma_fence *fence) 97 { 98 struct nouveau_drm *drm = nouveau_drm(dev); 99 100 if (tile) { 101 spin_lock(&drm->tile.lock); 102 tile->fence = (struct nouveau_fence *)dma_fence_get(fence); 103 tile->used = false; 104 spin_unlock(&drm->tile.lock); 105 } 106 } 107 108 static struct nouveau_drm_tile * 109 nv10_bo_set_tiling(struct drm_device *dev, u32 addr, 110 u32 size, u32 pitch, u32 zeta) 111 { 112 struct nouveau_drm *drm = nouveau_drm(dev); 113 struct nvkm_fb *fb = 
nvxx_fb(&drm->client.device); 114 struct nouveau_drm_tile *tile, *found = NULL; 115 int i; 116 117 for (i = 0; i < fb->tile.regions; i++) { 118 tile = nv10_bo_get_tile_region(dev, i); 119 120 if (pitch && !found) { 121 found = tile; 122 continue; 123 124 } else if (tile && fb->tile.region[i].pitch) { 125 /* Kill an unused tile region. */ 126 nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0); 127 } 128 129 nv10_bo_put_tile_region(dev, tile, NULL); 130 } 131 132 if (found) 133 nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta); 134 return found; 135 } 136 137 static void 138 nouveau_bo_del_ttm(struct ttm_buffer_object *bo) 139 { 140 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 141 struct drm_device *dev = drm->dev; 142 struct nouveau_bo *nvbo = nouveau_bo(bo); 143 144 WARN_ON(nvbo->pin_refcnt > 0); 145 nv10_bo_put_tile_region(dev, nvbo->tile, NULL); 146 147 /* 148 * If nouveau_bo_new() allocated this buffer, the GEM object was never 149 * initialized, so don't attempt to release it. 150 */ 151 if (bo->base.dev) 152 drm_gem_object_release(&bo->base); 153 154 kfree(nvbo); 155 } 156 157 static inline u64 158 roundup_64(u64 x, u32 y) 159 { 160 x += y - 1; 161 do_div(x, y); 162 return x * y; 163 } 164 165 static void 166 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, 167 int *align, u64 *size) 168 { 169 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 170 struct nvif_device *device = &drm->client.device; 171 172 if (device->info.family < NV_DEVICE_INFO_V0_TESLA) { 173 if (nvbo->mode) { 174 if (device->info.chipset >= 0x40) { 175 *align = 65536; 176 *size = roundup_64(*size, 64 * nvbo->mode); 177 178 } else if (device->info.chipset >= 0x30) { 179 *align = 32768; 180 *size = roundup_64(*size, 64 * nvbo->mode); 181 182 } else if (device->info.chipset >= 0x20) { 183 *align = 16384; 184 *size = roundup_64(*size, 64 * nvbo->mode); 185 186 } else if (device->info.chipset >= 0x10) { 187 *align = 16384; 188 *size = roundup_64(*size, 32 * nvbo->mode); 189 } 190 } 191 } else { 192 *size = roundup_64(*size, (1 << nvbo->page)); 193 *align = max((1 << nvbo->page), *align); 194 } 195 196 *size = roundup_64(*size, PAGE_SIZE); 197 } 198 199 struct nouveau_bo * 200 nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags, 201 u32 tile_mode, u32 tile_flags) 202 { 203 struct nouveau_drm *drm = cli->drm; 204 struct nouveau_bo *nvbo; 205 struct nvif_mmu *mmu = &cli->mmu; 206 struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm; 207 int i, pi = -1; 208 209 if (!*size) { 210 NV_WARN(drm, "skipped size %016"PRIx64"\n", *size); 211 return ERR_PTR(-EINVAL); 212 } 213 214 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); 215 if (!nvbo) 216 return ERR_PTR(-ENOMEM); 217 INIT_LIST_HEAD(&nvbo->head); 218 INIT_LIST_HEAD(&nvbo->entry); 219 INIT_LIST_HEAD(&nvbo->vma_list); 220 nvbo->bo.bdev = &drm->ttm.bdev; 221 222 /* This is confusing, and doesn't actually mean we want an uncached 223 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated 224 * into in nouveau_gem_new(). 225 */ 226 if (flags & TTM_PL_FLAG_UNCACHED) { 227 /* Determine if we can get a cache-coherent map, forcing 228 * uncached mapping if we can't. 
229 */ 230 if (!nouveau_drm_use_coherent_gpu_mapping(drm)) 231 nvbo->force_coherent = true; 232 } 233 234 if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) { 235 nvbo->kind = (tile_flags & 0x0000ff00) >> 8; 236 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { 237 goto err; 238 } 239 240 nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind; 241 } else 242 if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { 243 nvbo->kind = (tile_flags & 0x00007f00) >> 8; 244 nvbo->comp = (tile_flags & 0x00030000) >> 16; 245 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) { 246 goto err; 247 } 248 } else { 249 nvbo->zeta = (tile_flags & 0x00000007); 250 } 251 nvbo->mode = tile_mode; 252 nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG); 253 254 /* Determine the desirable target GPU page size for the buffer. */ 255 for (i = 0; i < vmm->page_nr; i++) { 256 /* Because we cannot currently allow VMM maps to fail 257 * during buffer migration, we need to determine page 258 * size for the buffer up-front, and pre-allocate its 259 * page tables. 260 * 261 * Skip page sizes that can't support needed domains. 262 */ 263 if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE && 264 (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram) 265 continue; 266 if ((flags & TTM_PL_FLAG_TT) && 267 (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT)) 268 continue; 269 270 /* Select this page size if it's the first that supports 271 * the potential memory domains, or when it's compatible 272 * with the requested compression settings. 273 */ 274 if (pi < 0 || !nvbo->comp || vmm->page[i].comp) 275 pi = i; 276 277 /* Stop once the buffer is larger than the current page size. */ 278 if (*size >= 1ULL << vmm->page[i].shift) 279 break; 280 } 281 282 if (WARN_ON(pi < 0)) 283 goto err; 284 285 /* Disable compression if suitable settings couldn't be found. */ 286 if (nvbo->comp && !vmm->page[pi].comp) { 287 if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100) 288 nvbo->kind = mmu->kind[nvbo->kind]; 289 nvbo->comp = 0; 290 } 291 nvbo->page = vmm->page[pi].shift; 292 293 nouveau_bo_fixup_align(nvbo, flags, align, size); 294 295 return nvbo; 296 297 err: 298 kfree(nvbo); 299 return ERR_PTR(-EINVAL); 300 } 301 302 int 303 nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags, 304 struct sg_table *sg, struct dma_resv *robj) 305 { 306 int type = sg ? ttm_bo_type_sg : ttm_bo_type_device; 307 size_t acc_size; 308 int ret; 309 310 acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo)); 311 312 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; 313 nouveau_bo_placement_set(nvbo, flags, 0); 314 315 ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type, 316 &nvbo->placement, align >> PAGE_SHIFT, false, 317 acc_size, sg, robj, nouveau_bo_del_ttm); 318 if (ret) { 319 /* ttm will call nouveau_bo_del_ttm if it fails.. 
*/ 320 return ret; 321 } 322 323 return 0; 324 } 325 326 int 327 nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, 328 uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, 329 struct sg_table *sg, struct dma_resv *robj, 330 struct nouveau_bo **pnvbo) 331 { 332 struct nouveau_bo *nvbo; 333 int ret; 334 335 nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode, 336 tile_flags); 337 if (IS_ERR(nvbo)) 338 return PTR_ERR(nvbo); 339 340 ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj); 341 if (ret) 342 return ret; 343 344 *pnvbo = nvbo; 345 return 0; 346 } 347 348 static void 349 set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags) 350 { 351 *n = 0; 352 353 if (type & TTM_PL_FLAG_VRAM) 354 pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags; 355 if (type & TTM_PL_FLAG_TT) 356 pl[(*n)++].flags = TTM_PL_FLAG_TT | flags; 357 if (type & TTM_PL_FLAG_SYSTEM) 358 pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags; 359 } 360 361 static void 362 set_placement_range(struct nouveau_bo *nvbo, uint32_t type) 363 { 364 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 365 u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT; 366 unsigned i, fpfn, lpfn; 367 368 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && 369 nvbo->mode && (type & TTM_PL_FLAG_VRAM) && 370 nvbo->bo.mem.num_pages < vram_pages / 4) { 371 /* 372 * Make sure that the color and depth buffers are handled 373 * by independent memory controller units. Up to a 9x 374 * speed up when alpha-blending and depth-test are enabled 375 * at the same time. 376 */ 377 if (nvbo->zeta) { 378 fpfn = vram_pages / 2; 379 lpfn = ~0; 380 } else { 381 fpfn = 0; 382 lpfn = vram_pages / 2; 383 } 384 for (i = 0; i < nvbo->placement.num_placement; ++i) { 385 nvbo->placements[i].fpfn = fpfn; 386 nvbo->placements[i].lpfn = lpfn; 387 } 388 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { 389 nvbo->busy_placements[i].fpfn = fpfn; 390 nvbo->busy_placements[i].lpfn = lpfn; 391 } 392 } 393 } 394 395 void 396 nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) 397 { 398 struct ttm_placement *pl = &nvbo->placement; 399 uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED : 400 TTM_PL_MASK_CACHING) | 401 (nvbo->pin_refcnt ? 
TTM_PL_FLAG_NO_EVICT : 0); 402 403 pl->placement = nvbo->placements; 404 set_placement_list(nvbo->placements, &pl->num_placement, 405 type, flags); 406 407 pl->busy_placement = nvbo->busy_placements; 408 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, 409 type | busy, flags); 410 411 set_placement_range(nvbo, type); 412 } 413 414 int 415 nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig) 416 { 417 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 418 struct ttm_buffer_object *bo = &nvbo->bo; 419 bool force = false, evict = false; 420 int ret; 421 422 ret = ttm_bo_reserve(bo, false, false, NULL); 423 if (ret) 424 return ret; 425 426 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && 427 memtype == TTM_PL_FLAG_VRAM && contig) { 428 if (!nvbo->contig) { 429 nvbo->contig = true; 430 force = true; 431 evict = true; 432 } 433 } 434 435 if (nvbo->pin_refcnt) { 436 if (!(memtype & (1 << bo->mem.mem_type)) || evict) { 437 NV_ERROR(drm, "bo %p pinned elsewhere: " 438 "0x%08x vs 0x%08x\n", bo, 439 1 << bo->mem.mem_type, memtype); 440 ret = -EBUSY; 441 } 442 nvbo->pin_refcnt++; 443 goto out; 444 } 445 446 if (evict) { 447 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0); 448 ret = nouveau_bo_validate(nvbo, false, false); 449 if (ret) 450 goto out; 451 } 452 453 nvbo->pin_refcnt++; 454 nouveau_bo_placement_set(nvbo, memtype, 0); 455 456 /* drop pin_refcnt temporarily, so we don't trip the assertion 457 * in nouveau_bo_move() that makes sure we're not trying to 458 * move a pinned buffer 459 */ 460 nvbo->pin_refcnt--; 461 ret = nouveau_bo_validate(nvbo, false, false); 462 if (ret) 463 goto out; 464 nvbo->pin_refcnt++; 465 466 switch (bo->mem.mem_type) { 467 case TTM_PL_VRAM: 468 drm->gem.vram_available -= bo->mem.size; 469 break; 470 case TTM_PL_TT: 471 drm->gem.gart_available -= bo->mem.size; 472 break; 473 default: 474 break; 475 } 476 477 out: 478 if (force && ret) 479 nvbo->contig = false; 480 ttm_bo_unreserve(bo); 481 return ret; 482 } 483 484 int 485 nouveau_bo_unpin(struct nouveau_bo *nvbo) 486 { 487 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 488 struct ttm_buffer_object *bo = &nvbo->bo; 489 int ret, ref; 490 491 ret = ttm_bo_reserve(bo, false, false, NULL); 492 if (ret) 493 return ret; 494 495 ref = --nvbo->pin_refcnt; 496 WARN_ON_ONCE(ref < 0); 497 if (ref) 498 goto out; 499 500 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); 501 502 ret = nouveau_bo_validate(nvbo, false, false); 503 if (ret == 0) { 504 switch (bo->mem.mem_type) { 505 case TTM_PL_VRAM: 506 drm->gem.vram_available += bo->mem.size; 507 break; 508 case TTM_PL_TT: 509 drm->gem.gart_available += bo->mem.size; 510 break; 511 default: 512 break; 513 } 514 } 515 516 out: 517 ttm_bo_unreserve(bo); 518 return ret; 519 } 520 521 int 522 nouveau_bo_map(struct nouveau_bo *nvbo) 523 { 524 int ret; 525 526 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL); 527 if (ret) 528 return ret; 529 530 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap); 531 532 ttm_bo_unreserve(&nvbo->bo); 533 return ret; 534 } 535 536 void 537 nouveau_bo_unmap(struct nouveau_bo *nvbo) 538 { 539 if (!nvbo) 540 return; 541 542 ttm_bo_kunmap(&nvbo->kmap); 543 } 544 545 void 546 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) 547 { 548 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 549 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; 550 #ifndef __NetBSD__ 551 int i; 552 #endif 553 554 if (!ttm_dma) 555 return; 556 557 /* Don't waste time looping if 
the object is coherent */ 558 if (nvbo->force_coherent) 559 return; 560 561 #ifdef __NetBSD__ 562 bus_dma_tag_t dmat = drm->dev->dmat; 563 bus_dmamap_sync(dmat, ttm_dma->dma_address, 0, 564 PAGE_SIZE*ttm_dma->ttm.num_pages, 565 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 566 #else 567 for (i = 0; i < ttm_dma->ttm.num_pages; i++) 568 dma_sync_single_for_device(drm->dev->dev, 569 ttm_dma->dma_address[i], 570 PAGE_SIZE, DMA_TO_DEVICE); 571 #endif 572 } 573 574 void 575 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) 576 { 577 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 578 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; 579 #ifndef __NetBSD__ 580 int i; 581 #endif 582 583 if (!ttm_dma) 584 return; 585 586 /* Don't waste time looping if the object is coherent */ 587 if (nvbo->force_coherent) 588 return; 589 590 #ifdef __NetBSD__ 591 bus_dma_tag_t dmat = drm->dev->dmat; 592 bus_dmamap_sync(dmat, ttm_dma->dma_address, 0, 593 PAGE_SIZE*ttm_dma->ttm.num_pages, 594 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 595 #else 596 for (i = 0; i < ttm_dma->ttm.num_pages; i++) 597 dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i], 598 PAGE_SIZE, DMA_FROM_DEVICE); 599 #endif 600 } 601 602 int 603 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, 604 bool no_wait_gpu) 605 { 606 struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu }; 607 int ret; 608 609 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx); 610 if (ret) 611 return ret; 612 613 nouveau_bo_sync_for_device(nvbo); 614 615 return 0; 616 } 617 618 #ifdef __NetBSD__ 619 /* 620 * XXX Can't use bus_space here because this is all mapped through the 621 * nouveau_bo abstraction. Can't assume we're x86 because this is 622 * Nouveau, not Intel. 623 */ 624 625 # define __iomem volatile 626 # define __force 627 # define ioread16_native fake_ioread16_native 628 # define ioread32_native fake_ioread32_native 629 # define iowrite16_native fake_iowrite16_native 630 # define iowrite32_native fake_iowrite32_native 631 632 #ifdef notdef 633 static inline uint16_t 634 ioread16_native(const void __iomem *ptr) 635 { 636 uint16_t v; 637 638 v = *(const uint16_t __iomem *)ptr; 639 membar_consumer(); 640 641 return v; 642 } 643 #endif 644 645 static inline uint32_t 646 ioread32_native(const void __iomem *ptr) 647 { 648 uint32_t v; 649 650 v = *(const uint32_t __iomem *)ptr; 651 membar_consumer(); 652 653 return v; 654 } 655 656 static inline void 657 iowrite16_native(uint16_t v, void __iomem *ptr) 658 { 659 660 membar_producer(); 661 *(uint16_t __iomem *)ptr = v; 662 } 663 664 static inline void 665 iowrite32_native(uint32_t v, void __iomem *ptr) 666 { 667 668 membar_producer(); 669 *(uint32_t __iomem *)ptr = v; 670 } 671 #endif 672 673 void 674 nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val) 675 { 676 bool is_iomem; 677 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); 678 679 mem += index; 680 681 if (is_iomem) 682 iowrite16_native(val, (void __force __iomem *)mem); 683 else 684 *mem = val; 685 } 686 687 u32 688 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index) 689 { 690 bool is_iomem; 691 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); 692 693 mem += index; 694 695 if (is_iomem) 696 return ioread32_native((void __force __iomem *)mem); 697 else 698 return *mem; 699 } 700 701 void 702 nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val) 703 { 704 bool is_iomem; 705 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); 706 707 mem += index; 708 709 if 
(is_iomem) 710 iowrite32_native(val, (void __force __iomem *)mem); 711 else 712 *mem = val; 713 } 714 715 #ifdef __NetBSD__ 716 # undef __iomem 717 # undef __force 718 # undef ioread16_native 719 # undef ioread32_native 720 # undef iowrite16_native 721 # undef iowrite32_native 722 #endif 723 724 static struct ttm_tt * 725 nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) 726 { 727 #if IS_ENABLED(CONFIG_AGP) 728 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 729 730 if (drm->agp.bridge) { 731 return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags); 732 } 733 #endif 734 735 return nouveau_sgdma_create_ttm(bo, page_flags); 736 } 737 738 static int 739 nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) 740 { 741 /* We'll do this from user space. */ 742 return 0; 743 } 744 745 static int 746 nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, 747 struct ttm_mem_type_manager *man) 748 { 749 struct nouveau_drm *drm = nouveau_bdev(bdev); 750 struct nvif_mmu *mmu = &drm->client.mmu; 751 752 switch (type) { 753 case TTM_PL_SYSTEM: 754 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; 755 man->available_caching = TTM_PL_MASK_CACHING; 756 man->default_caching = TTM_PL_FLAG_CACHED; 757 break; 758 case TTM_PL_VRAM: 759 man->flags = TTM_MEMTYPE_FLAG_FIXED | 760 TTM_MEMTYPE_FLAG_MAPPABLE; 761 man->available_caching = TTM_PL_FLAG_UNCACHED | 762 TTM_PL_FLAG_WC; 763 man->default_caching = TTM_PL_FLAG_WC; 764 765 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { 766 /* Some BARs do not support being ioremapped WC */ 767 const u8 type = mmu->type[drm->ttm.type_vram].type; 768 if (type & NVIF_MEM_UNCACHED) { 769 man->available_caching = TTM_PL_FLAG_UNCACHED; 770 man->default_caching = TTM_PL_FLAG_UNCACHED; 771 } 772 773 man->func = &nouveau_vram_manager; 774 man->io_reserve_fastpath = false; 775 man->use_io_reserve_lru = true; 776 } else { 777 man->func = &ttm_bo_manager_func; 778 } 779 break; 780 case TTM_PL_TT: 781 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) 782 man->func = &nouveau_gart_manager; 783 else 784 if (!drm->agp.bridge) 785 man->func = &nv04_gart_manager; 786 else 787 man->func = &ttm_bo_manager_func; 788 789 if (drm->agp.bridge) { 790 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; 791 man->available_caching = TTM_PL_FLAG_UNCACHED | 792 TTM_PL_FLAG_WC; 793 man->default_caching = TTM_PL_FLAG_WC; 794 } else { 795 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | 796 TTM_MEMTYPE_FLAG_CMA; 797 man->available_caching = TTM_PL_MASK_CACHING; 798 man->default_caching = TTM_PL_FLAG_CACHED; 799 } 800 801 break; 802 default: 803 return -EINVAL; 804 } 805 return 0; 806 } 807 808 static void 809 nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) 810 { 811 struct nouveau_bo *nvbo = nouveau_bo(bo); 812 813 switch (bo->mem.mem_type) { 814 case TTM_PL_VRAM: 815 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 816 TTM_PL_FLAG_SYSTEM); 817 break; 818 default: 819 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0); 820 break; 821 } 822 823 *pl = nvbo->placement; 824 } 825 826 827 static int 828 nve0_bo_move_init(struct nouveau_channel *chan, u32 handle) 829 { 830 int ret = RING_SPACE(chan, 2); 831 if (ret == 0) { 832 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); 833 OUT_RING (chan, handle & 0x0000ffff); 834 FIRE_RING (chan); 835 } 836 return ret; 837 } 838 839 static int 840 nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 841 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) 842 { 843 
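/* Roughly what the ring commands below do: nouveau_bo_move_prep() has mapped the old backing store at mem->vma[0] and the new one at mem->vma[1], so the methods starting at 0x0400 describe a PAGE_SIZE-pitch copy of new_reg->num_pages lines from vma[0] to vma[1], and the 0x0300 method launches it on the copy engine. */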
struct nouveau_mem *mem = nouveau_mem(old_reg); 844 int ret = RING_SPACE(chan, 10); 845 if (ret == 0) { 846 BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8); 847 OUT_RING (chan, upper_32_bits(mem->vma[0].addr)); 848 OUT_RING (chan, lower_32_bits(mem->vma[0].addr)); 849 OUT_RING (chan, upper_32_bits(mem->vma[1].addr)); 850 OUT_RING (chan, lower_32_bits(mem->vma[1].addr)); 851 OUT_RING (chan, PAGE_SIZE); 852 OUT_RING (chan, PAGE_SIZE); 853 OUT_RING (chan, PAGE_SIZE); 854 OUT_RING (chan, new_reg->num_pages); 855 BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386); 856 } 857 return ret; 858 } 859 860 static int 861 nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle) 862 { 863 int ret = RING_SPACE(chan, 2); 864 if (ret == 0) { 865 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); 866 OUT_RING (chan, handle); 867 } 868 return ret; 869 } 870 871 static int 872 nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 873 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) 874 { 875 struct nouveau_mem *mem = nouveau_mem(old_reg); 876 u64 src_offset = mem->vma[0].addr; 877 u64 dst_offset = mem->vma[1].addr; 878 u32 page_count = new_reg->num_pages; 879 int ret; 880 881 page_count = new_reg->num_pages; 882 while (page_count) { 883 int line_count = (page_count > 8191) ? 8191 : page_count; 884 885 ret = RING_SPACE(chan, 11); 886 if (ret) 887 return ret; 888 889 BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8); 890 OUT_RING (chan, upper_32_bits(src_offset)); 891 OUT_RING (chan, lower_32_bits(src_offset)); 892 OUT_RING (chan, upper_32_bits(dst_offset)); 893 OUT_RING (chan, lower_32_bits(dst_offset)); 894 OUT_RING (chan, PAGE_SIZE); 895 OUT_RING (chan, PAGE_SIZE); 896 OUT_RING (chan, PAGE_SIZE); 897 OUT_RING (chan, line_count); 898 BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1); 899 OUT_RING (chan, 0x00000110); 900 901 page_count -= line_count; 902 src_offset += (PAGE_SIZE * line_count); 903 dst_offset += (PAGE_SIZE * line_count); 904 } 905 906 return 0; 907 } 908 909 static int 910 nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 911 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) 912 { 913 struct nouveau_mem *mem = nouveau_mem(old_reg); 914 u64 src_offset = mem->vma[0].addr; 915 u64 dst_offset = mem->vma[1].addr; 916 u32 page_count = new_reg->num_pages; 917 int ret; 918 919 page_count = new_reg->num_pages; 920 while (page_count) { 921 int line_count = (page_count > 2047) ? 
2047 : page_count; 922 923 ret = RING_SPACE(chan, 12); 924 if (ret) 925 return ret; 926 927 BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2); 928 OUT_RING (chan, upper_32_bits(dst_offset)); 929 OUT_RING (chan, lower_32_bits(dst_offset)); 930 BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6); 931 OUT_RING (chan, upper_32_bits(src_offset)); 932 OUT_RING (chan, lower_32_bits(src_offset)); 933 OUT_RING (chan, PAGE_SIZE); /* src_pitch */ 934 OUT_RING (chan, PAGE_SIZE); /* dst_pitch */ 935 OUT_RING (chan, PAGE_SIZE); /* line_length */ 936 OUT_RING (chan, line_count); 937 BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1); 938 OUT_RING (chan, 0x00100110); 939 940 page_count -= line_count; 941 src_offset += (PAGE_SIZE * line_count); 942 dst_offset += (PAGE_SIZE * line_count); 943 } 944 945 return 0; 946 } 947 948 static int 949 nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 950 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) 951 { 952 struct nouveau_mem *mem = nouveau_mem(old_reg); 953 u64 src_offset = mem->vma[0].addr; 954 u64 dst_offset = mem->vma[1].addr; 955 u32 page_count = new_reg->num_pages; 956 int ret; 957 958 page_count = new_reg->num_pages; 959 while (page_count) { 960 int line_count = (page_count > 8191) ? 8191 : page_count; 961 962 ret = RING_SPACE(chan, 11); 963 if (ret) 964 return ret; 965 966 BEGIN_NV04(chan, NvSubCopy, 0x030c, 8); 967 OUT_RING (chan, upper_32_bits(src_offset)); 968 OUT_RING (chan, lower_32_bits(src_offset)); 969 OUT_RING (chan, upper_32_bits(dst_offset)); 970 OUT_RING (chan, lower_32_bits(dst_offset)); 971 OUT_RING (chan, PAGE_SIZE); 972 OUT_RING (chan, PAGE_SIZE); 973 OUT_RING (chan, PAGE_SIZE); 974 OUT_RING (chan, line_count); 975 BEGIN_NV04(chan, NvSubCopy, 0x0300, 1); 976 OUT_RING (chan, 0x00000110); 977 978 page_count -= line_count; 979 src_offset += (PAGE_SIZE * line_count); 980 dst_offset += (PAGE_SIZE * line_count); 981 } 982 983 return 0; 984 } 985 986 static int 987 nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 988 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) 989 { 990 struct nouveau_mem *mem = nouveau_mem(old_reg); 991 int ret = RING_SPACE(chan, 7); 992 if (ret == 0) { 993 BEGIN_NV04(chan, NvSubCopy, 0x0320, 6); 994 OUT_RING (chan, upper_32_bits(mem->vma[0].addr)); 995 OUT_RING (chan, lower_32_bits(mem->vma[0].addr)); 996 OUT_RING (chan, upper_32_bits(mem->vma[1].addr)); 997 OUT_RING (chan, lower_32_bits(mem->vma[1].addr)); 998 OUT_RING (chan, 0x00000000 /* COPY */); 999 OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT); 1000 } 1001 return ret; 1002 } 1003 1004 static int 1005 nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 1006 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) 1007 { 1008 struct nouveau_mem *mem = nouveau_mem(old_reg); 1009 int ret = RING_SPACE(chan, 7); 1010 if (ret == 0) { 1011 BEGIN_NV04(chan, NvSubCopy, 0x0304, 6); 1012 OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT); 1013 OUT_RING (chan, upper_32_bits(mem->vma[0].addr)); 1014 OUT_RING (chan, lower_32_bits(mem->vma[0].addr)); 1015 OUT_RING (chan, upper_32_bits(mem->vma[1].addr)); 1016 OUT_RING (chan, lower_32_bits(mem->vma[1].addr)); 1017 OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */); 1018 } 1019 return ret; 1020 } 1021 1022 static int 1023 nv50_bo_move_init(struct nouveau_channel *chan, u32 handle) 1024 { 1025 int ret = RING_SPACE(chan, 6); 1026 if (ret == 0) { 1027 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); 1028 OUT_RING (chan, handle); 1029 BEGIN_NV04(chan, NvSubCopy, 0x0180, 3); 
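/* The three writes that follow appear to bind, in order, the notifier ctxdma and the VRAM ctxdmas used as source and destination for the copy (the 0x0180-0x0188 methods of these NV50-era classes). */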
1030 OUT_RING (chan, chan->drm->ntfy.handle); 1031 OUT_RING (chan, chan->vram.handle); 1032 OUT_RING (chan, chan->vram.handle); 1033 } 1034 1035 return ret; 1036 } 1037 1038 static int 1039 nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 1040 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) 1041 { 1042 struct nouveau_mem *mem = nouveau_mem(old_reg); 1043 u64 length = (new_reg->num_pages << PAGE_SHIFT); 1044 u64 src_offset = mem->vma[0].addr; 1045 u64 dst_offset = mem->vma[1].addr; 1046 int src_tiled = !!mem->kind; 1047 int dst_tiled = !!nouveau_mem(new_reg)->kind; 1048 int ret; 1049 1050 while (length) { 1051 u32 amount, stride, height; 1052 1053 ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled)); 1054 if (ret) 1055 return ret; 1056 1057 amount = min(length, (u64)(4 * 1024 * 1024)); 1058 stride = 16 * 4; 1059 height = amount / stride; 1060 1061 if (src_tiled) { 1062 BEGIN_NV04(chan, NvSubCopy, 0x0200, 7); 1063 OUT_RING (chan, 0); 1064 OUT_RING (chan, 0); 1065 OUT_RING (chan, stride); 1066 OUT_RING (chan, height); 1067 OUT_RING (chan, 1); 1068 OUT_RING (chan, 0); 1069 OUT_RING (chan, 0); 1070 } else { 1071 BEGIN_NV04(chan, NvSubCopy, 0x0200, 1); 1072 OUT_RING (chan, 1); 1073 } 1074 if (dst_tiled) { 1075 BEGIN_NV04(chan, NvSubCopy, 0x021c, 7); 1076 OUT_RING (chan, 0); 1077 OUT_RING (chan, 0); 1078 OUT_RING (chan, stride); 1079 OUT_RING (chan, height); 1080 OUT_RING (chan, 1); 1081 OUT_RING (chan, 0); 1082 OUT_RING (chan, 0); 1083 } else { 1084 BEGIN_NV04(chan, NvSubCopy, 0x021c, 1); 1085 OUT_RING (chan, 1); 1086 } 1087 1088 BEGIN_NV04(chan, NvSubCopy, 0x0238, 2); 1089 OUT_RING (chan, upper_32_bits(src_offset)); 1090 OUT_RING (chan, upper_32_bits(dst_offset)); 1091 BEGIN_NV04(chan, NvSubCopy, 0x030c, 8); 1092 OUT_RING (chan, lower_32_bits(src_offset)); 1093 OUT_RING (chan, lower_32_bits(dst_offset)); 1094 OUT_RING (chan, stride); 1095 OUT_RING (chan, stride); 1096 OUT_RING (chan, stride); 1097 OUT_RING (chan, height); 1098 OUT_RING (chan, 0x00000101); 1099 OUT_RING (chan, 0x00000000); 1100 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); 1101 OUT_RING (chan, 0); 1102 1103 length -= amount; 1104 src_offset += amount; 1105 dst_offset += amount; 1106 } 1107 1108 return 0; 1109 } 1110 1111 static int 1112 nv04_bo_move_init(struct nouveau_channel *chan, u32 handle) 1113 { 1114 int ret = RING_SPACE(chan, 4); 1115 if (ret == 0) { 1116 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); 1117 OUT_RING (chan, handle); 1118 BEGIN_NV04(chan, NvSubCopy, 0x0180, 1); 1119 OUT_RING (chan, chan->drm->ntfy.handle); 1120 } 1121 1122 return ret; 1123 } 1124 1125 static inline uint32_t 1126 nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, 1127 struct nouveau_channel *chan, struct ttm_mem_reg *reg) 1128 { 1129 if (reg->mem_type == TTM_PL_TT) 1130 return NvDmaTT; 1131 return chan->vram.handle; 1132 } 1133 1134 static int 1135 nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, 1136 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg) 1137 { 1138 u32 src_offset = old_reg->start << PAGE_SHIFT; 1139 u32 dst_offset = new_reg->start << PAGE_SHIFT; 1140 u32 page_count = new_reg->num_pages; 1141 int ret; 1142 1143 ret = RING_SPACE(chan, 3); 1144 if (ret) 1145 return ret; 1146 1147 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2); 1148 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg)); 1149 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg)); 1150 1151 page_count = new_reg->num_pages; 1152 while (page_count) 
{ 1153 int line_count = (page_count > 2047) ? 2047 : page_count; 1154 1155 ret = RING_SPACE(chan, 11); 1156 if (ret) 1157 return ret; 1158 1159 BEGIN_NV04(chan, NvSubCopy, 1160 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); 1161 OUT_RING (chan, src_offset); 1162 OUT_RING (chan, dst_offset); 1163 OUT_RING (chan, PAGE_SIZE); /* src_pitch */ 1164 OUT_RING (chan, PAGE_SIZE); /* dst_pitch */ 1165 OUT_RING (chan, PAGE_SIZE); /* line_length */ 1166 OUT_RING (chan, line_count); 1167 OUT_RING (chan, 0x00000101); 1168 OUT_RING (chan, 0x00000000); 1169 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); 1170 OUT_RING (chan, 0); 1171 1172 page_count -= line_count; 1173 src_offset += (PAGE_SIZE * line_count); 1174 dst_offset += (PAGE_SIZE * line_count); 1175 } 1176 1177 return 0; 1178 } 1179 1180 static int 1181 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, 1182 struct ttm_mem_reg *reg) 1183 { 1184 struct nouveau_mem *old_mem = nouveau_mem(&bo->mem); 1185 struct nouveau_mem *new_mem = nouveau_mem(reg); 1186 struct nvif_vmm *vmm = &drm->client.vmm.vmm; 1187 int ret; 1188 1189 ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0, 1190 old_mem->mem.size, &old_mem->vma[0]); 1191 if (ret) 1192 return ret; 1193 1194 ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0, 1195 new_mem->mem.size, &old_mem->vma[1]); 1196 if (ret) 1197 goto done; 1198 1199 ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]); 1200 if (ret) 1201 goto done; 1202 1203 ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]); 1204 done: 1205 if (ret) { 1206 nvif_vmm_put(vmm, &old_mem->vma[1]); 1207 nvif_vmm_put(vmm, &old_mem->vma[0]); 1208 } 1209 return 0; 1210 } 1211 1212 static int 1213 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, 1214 bool no_wait_gpu, struct ttm_mem_reg *new_reg) 1215 { 1216 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1217 struct nouveau_channel *chan = drm->ttm.chan; 1218 struct nouveau_cli *cli = (void *)chan->user.client; 1219 struct nouveau_fence *fence; 1220 int ret; 1221 1222 /* create temporary vmas for the transfer and attach them to the 1223 * old nvkm_mem node, these will get cleaned up after ttm has 1224 * destroyed the ttm_mem_reg 1225 */ 1226 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { 1227 ret = nouveau_bo_move_prep(drm, bo, new_reg); 1228 if (ret) 1229 return ret; 1230 } 1231 1232 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); 1233 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr); 1234 if (ret == 0) { 1235 ret = drm->ttm.move(chan, bo, &bo->mem, new_reg); 1236 if (ret == 0) { 1237 ret = nouveau_fence_new(chan, false, &fence); 1238 if (ret == 0) { 1239 ret = ttm_bo_move_accel_cleanup(bo, 1240 &fence->base, 1241 evict, 1242 new_reg); 1243 nouveau_fence_unref(&fence); 1244 } 1245 } 1246 } 1247 mutex_unlock(&cli->mutex); 1248 return ret; 1249 } 1250 1251 void 1252 nouveau_bo_move_init(struct nouveau_drm *drm) 1253 { 1254 static const struct _method_table { 1255 const char *name; 1256 int engine; 1257 s32 oclass; 1258 int (*exec)(struct nouveau_channel *, 1259 struct ttm_buffer_object *, 1260 struct ttm_mem_reg *, struct ttm_mem_reg *); 1261 int (*init)(struct nouveau_channel *, u32 handle); 1262 } _methods[] = { 1263 { "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init }, 1264 { "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init }, 1265 { "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init }, 1266 { "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init }, 1267 { "COPY", 4, 
0xc1b5, nve0_bo_move_copy, nve0_bo_move_init }, 1268 { "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init }, 1269 { "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init }, 1270 { "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init }, 1271 { "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init }, 1272 { "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init }, 1273 { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, 1274 { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, 1275 { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, 1276 { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, 1277 { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init }, 1278 { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init }, 1279 { "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init }, 1280 { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init }, 1281 { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init }, 1282 {}, 1283 { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init }, 1284 }; 1285 const struct _method_table *mthd = _methods; 1286 const char *name = "CPU"; 1287 int ret; 1288 1289 do { 1290 struct nouveau_channel *chan; 1291 1292 if (mthd->engine) 1293 chan = drm->cechan; 1294 else 1295 chan = drm->channel; 1296 if (chan == NULL) 1297 continue; 1298 1299 ret = nvif_object_init(&chan->user, 1300 mthd->oclass | (mthd->engine << 16), 1301 mthd->oclass, NULL, 0, 1302 &drm->ttm.copy); 1303 if (ret == 0) { 1304 ret = mthd->init(chan, drm->ttm.copy.handle); 1305 if (ret) { 1306 nvif_object_fini(&drm->ttm.copy); 1307 continue; 1308 } 1309 1310 drm->ttm.move = mthd->exec; 1311 drm->ttm.chan = chan; 1312 name = mthd->name; 1313 break; 1314 } 1315 } while ((++mthd)->exec); 1316 1317 NV_INFO(drm, "MM: using %s for buffer copies\n", name); 1318 } 1319 1320 static int 1321 nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, 1322 bool no_wait_gpu, struct ttm_mem_reg *new_reg) 1323 { 1324 struct ttm_operation_ctx ctx = { intr, no_wait_gpu }; 1325 struct ttm_place placement_memtype = { 1326 .fpfn = 0, 1327 .lpfn = 0, 1328 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING 1329 }; 1330 struct ttm_placement placement; 1331 struct ttm_mem_reg tmp_reg; 1332 int ret; 1333 1334 placement.num_placement = placement.num_busy_placement = 1; 1335 placement.placement = placement.busy_placement = &placement_memtype; 1336 1337 tmp_reg = *new_reg; 1338 tmp_reg.mm_node = NULL; 1339 ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx); 1340 if (ret) 1341 return ret; 1342 1343 ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx); 1344 if (ret) 1345 goto out; 1346 1347 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg); 1348 if (ret) 1349 goto out; 1350 1351 ret = ttm_bo_move_ttm(bo, &ctx, new_reg); 1352 out: 1353 ttm_bo_mem_put(bo, &tmp_reg); 1354 return ret; 1355 } 1356 1357 static int 1358 nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, 1359 bool no_wait_gpu, struct ttm_mem_reg *new_reg) 1360 { 1361 struct ttm_operation_ctx ctx = { intr, no_wait_gpu }; 1362 struct ttm_place placement_memtype = { 1363 .fpfn = 0, 1364 .lpfn = 0, 1365 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING 1366 }; 1367 struct ttm_placement placement; 1368 struct ttm_mem_reg tmp_reg; 1369 int ret; 1370 1371 placement.num_placement = placement.num_busy_placement = 1; 1372 placement.placement = placement.busy_placement = &placement_memtype; 1373 1374 tmp_reg = *new_reg; 1375 tmp_reg.mm_node = NULL; 1376 ret = ttm_bo_mem_space(bo, 
&placement, &tmp_reg, &ctx); 1377 if (ret) 1378 return ret; 1379 1380 ret = ttm_bo_move_ttm(bo, &ctx, &tmp_reg); 1381 if (ret) 1382 goto out; 1383 1384 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg); 1385 if (ret) 1386 goto out; 1387 1388 out: 1389 ttm_bo_mem_put(bo, &tmp_reg); 1390 return ret; 1391 } 1392 1393 static void 1394 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict, 1395 struct ttm_mem_reg *new_reg) 1396 { 1397 struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL; 1398 struct nouveau_bo *nvbo = nouveau_bo(bo); 1399 struct nouveau_vma *vma; 1400 1401 /* ttm can now (stupidly) pass the driver bos it didn't create... */ 1402 if (bo->destroy != nouveau_bo_del_ttm) 1403 return; 1404 1405 if (mem && new_reg->mem_type != TTM_PL_SYSTEM && 1406 mem->mem.page == nvbo->page) { 1407 list_for_each_entry(vma, &nvbo->vma_list, head) { 1408 nouveau_vma_map(vma, mem); 1409 } 1410 } else { 1411 list_for_each_entry(vma, &nvbo->vma_list, head) { 1412 WARN_ON(ttm_bo_wait(bo, false, false)); 1413 nouveau_vma_unmap(vma); 1414 } 1415 } 1416 } 1417 1418 static int 1419 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg, 1420 struct nouveau_drm_tile **new_tile) 1421 { 1422 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1423 struct drm_device *dev = drm->dev; 1424 struct nouveau_bo *nvbo = nouveau_bo(bo); 1425 u64 offset = new_reg->start << PAGE_SHIFT; 1426 1427 *new_tile = NULL; 1428 if (new_reg->mem_type != TTM_PL_VRAM) 1429 return 0; 1430 1431 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { 1432 *new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size, 1433 nvbo->mode, nvbo->zeta); 1434 } 1435 1436 return 0; 1437 } 1438 1439 static void 1440 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, 1441 struct nouveau_drm_tile *new_tile, 1442 struct nouveau_drm_tile **old_tile) 1443 { 1444 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1445 struct drm_device *dev = drm->dev; 1446 struct dma_fence *fence = dma_resv_get_excl(bo->base.resv); 1447 1448 nv10_bo_put_tile_region(dev, *old_tile, fence); 1449 *old_tile = new_tile; 1450 } 1451 1452 static int 1453 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, 1454 struct ttm_operation_ctx *ctx, 1455 struct ttm_mem_reg *new_reg) 1456 { 1457 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1458 struct nouveau_bo *nvbo = nouveau_bo(bo); 1459 struct ttm_mem_reg *old_reg = &bo->mem; 1460 struct nouveau_drm_tile *new_tile = NULL; 1461 int ret = 0; 1462 1463 ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); 1464 if (ret) 1465 return ret; 1466 1467 if (nvbo->pin_refcnt) 1468 NV_WARN(drm, "Moving pinned object %p!\n", nvbo); 1469 1470 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { 1471 ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile); 1472 if (ret) 1473 return ret; 1474 } 1475 1476 /* Fake bo copy. */ 1477 if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) { 1478 BUG_ON(bo->mem.mm_node != NULL); 1479 bo->mem = *new_reg; 1480 new_reg->mm_node = NULL; 1481 goto out; 1482 } 1483 1484 /* Hardware assisted copy. 
*/ 1485 if (drm->ttm.move) { 1486 if (new_reg->mem_type == TTM_PL_SYSTEM) 1487 ret = nouveau_bo_move_flipd(bo, evict, 1488 ctx->interruptible, 1489 ctx->no_wait_gpu, new_reg); 1490 else if (old_reg->mem_type == TTM_PL_SYSTEM) 1491 ret = nouveau_bo_move_flips(bo, evict, 1492 ctx->interruptible, 1493 ctx->no_wait_gpu, new_reg); 1494 else 1495 ret = nouveau_bo_move_m2mf(bo, evict, 1496 ctx->interruptible, 1497 ctx->no_wait_gpu, new_reg); 1498 if (!ret) 1499 goto out; 1500 } 1501 1502 /* Fallback to software copy. */ 1503 ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); 1504 if (ret == 0) 1505 ret = ttm_bo_move_memcpy(bo, ctx, new_reg); 1506 1507 out: 1508 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { 1509 if (ret) 1510 nouveau_bo_vm_cleanup(bo, NULL, &new_tile); 1511 else 1512 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile); 1513 } 1514 1515 return ret; 1516 } 1517 1518 static int 1519 nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) 1520 { 1521 #ifdef __NetBSD__ 1522 struct drm_file *file = filp->f_data; 1523 #else 1524 struct drm_file *file = filp->private_data; 1525 #endif 1526 struct nouveau_bo *nvbo = nouveau_bo(bo); 1527 1528 return drm_vma_node_verify_access(&nvbo->bo.base.vma_node, file); 1529 } 1530 1531 static int 1532 nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) 1533 { 1534 struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type]; 1535 struct nouveau_drm *drm = nouveau_bdev(bdev); 1536 struct nvkm_device *device = nvxx_device(&drm->client.device); 1537 struct nouveau_mem *mem = nouveau_mem(reg); 1538 1539 reg->bus.addr = NULL; 1540 reg->bus.offset = 0; 1541 reg->bus.size = reg->num_pages << PAGE_SHIFT; 1542 reg->bus.base = 0; 1543 reg->bus.is_iomem = false; 1544 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) 1545 return -EINVAL; 1546 switch (reg->mem_type) { 1547 case TTM_PL_SYSTEM: 1548 /* System memory */ 1549 return 0; 1550 case TTM_PL_TT: 1551 #if IS_ENABLED(CONFIG_AGP) 1552 if (drm->agp.bridge) { 1553 reg->bus.offset = reg->start << PAGE_SHIFT; 1554 reg->bus.base = drm->agp.base; 1555 reg->bus.is_iomem = !drm->agp.cma; 1556 } 1557 #endif 1558 if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind) 1559 /* untiled */ 1560 break; 1561 /* fall through - tiled memory */ 1562 case TTM_PL_VRAM: 1563 reg->bus.offset = reg->start << PAGE_SHIFT; 1564 reg->bus.base = device->func->resource_addr(device, 1); 1565 reg->bus.is_iomem = true; 1566 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { 1567 union { 1568 struct nv50_mem_map_v0 nv50; 1569 struct gf100_mem_map_v0 gf100; 1570 } args; 1571 u64 handle, length; 1572 u32 argc = 0; 1573 int ret; 1574 1575 switch (mem->mem.object.oclass) { 1576 case NVIF_CLASS_MEM_NV50: 1577 args.nv50.version = 0; 1578 args.nv50.ro = 0; 1579 args.nv50.kind = mem->kind; 1580 args.nv50.comp = mem->comp; 1581 argc = sizeof(args.nv50); 1582 break; 1583 case NVIF_CLASS_MEM_GF100: 1584 args.gf100.version = 0; 1585 args.gf100.ro = 0; 1586 args.gf100.kind = mem->kind; 1587 argc = sizeof(args.gf100); 1588 break; 1589 default: 1590 WARN_ON(1); 1591 break; 1592 } 1593 1594 ret = nvif_object_map_handle(&mem->mem.object, 1595 &args, argc, 1596 #ifdef __NetBSD__ 1597 NULL, 1598 #endif 1599 &handle, &length); 1600 if (ret != 1) 1601 return ret ? 
ret : -EINVAL; 1602 1603 reg->bus.base = 0; 1604 reg->bus.offset = handle; 1605 } 1606 break; 1607 default: 1608 return -EINVAL; 1609 } 1610 return 0; 1611 } 1612 1613 static void 1614 nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) 1615 { 1616 struct nouveau_drm *drm = nouveau_bdev(bdev); 1617 struct nouveau_mem *mem = nouveau_mem(reg); 1618 1619 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { 1620 switch (reg->mem_type) { 1621 case TTM_PL_TT: 1622 if (mem->kind) 1623 nvif_object_unmap_handle(&mem->mem.object); 1624 break; 1625 case TTM_PL_VRAM: 1626 nvif_object_unmap_handle(&mem->mem.object); 1627 break; 1628 default: 1629 break; 1630 } 1631 } 1632 } 1633 1634 static int 1635 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) 1636 { 1637 struct nouveau_drm *drm = nouveau_bdev(bo->bdev); 1638 struct nouveau_bo *nvbo = nouveau_bo(bo); 1639 struct nvkm_device *device = nvxx_device(&drm->client.device); 1640 u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT; 1641 int i, ret; 1642 1643 /* as long as the bo isn't in vram, and isn't tiled, we've got 1644 * nothing to do here. 1645 */ 1646 if (bo->mem.mem_type != TTM_PL_VRAM) { 1647 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || 1648 !nvbo->kind) 1649 return 0; 1650 1651 if (bo->mem.mem_type == TTM_PL_SYSTEM) { 1652 nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0); 1653 1654 ret = nouveau_bo_validate(nvbo, false, false); 1655 if (ret) 1656 return ret; 1657 } 1658 return 0; 1659 } 1660 1661 /* make sure bo is in mappable vram */ 1662 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA || 1663 bo->mem.start + bo->mem.num_pages < mappable) 1664 return 0; 1665 1666 for (i = 0; i < nvbo->placement.num_placement; ++i) { 1667 nvbo->placements[i].fpfn = 0; 1668 nvbo->placements[i].lpfn = mappable; 1669 } 1670 1671 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) { 1672 nvbo->busy_placements[i].fpfn = 0; 1673 nvbo->busy_placements[i].lpfn = mappable; 1674 } 1675 1676 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); 1677 return nouveau_bo_validate(nvbo, false, false); 1678 } 1679 1680 static int 1681 nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) 1682 { 1683 struct ttm_dma_tt *ttm_dma = (void *)ttm; 1684 struct nouveau_drm *drm; 1685 struct device *dev; 1686 unsigned i; 1687 int r; 1688 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); 1689 1690 if (ttm->state != tt_unpopulated) 1691 return 0; 1692 1693 if (slave && ttm->sg) { 1694 /* make userspace faulting work */ 1695 #ifdef __NetBSD__ 1696 r = drm_prime_bus_dmamap_load_sgt(ttm->bdev->dmat, 1697 ttm_dma->dma_address, ttm->sg); 1698 if (r) 1699 return r; 1700 #else 1701 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, 1702 ttm_dma->dma_address, ttm->num_pages); 1703 #endif 1704 ttm->state = tt_unbound; 1705 return 0; 1706 } 1707 1708 drm = nouveau_bdev(ttm->bdev); 1709 dev = drm->dev->dev; 1710 1711 #if IS_ENABLED(CONFIG_AGP) 1712 if (drm->agp.bridge) { 1713 return ttm_agp_tt_populate(ttm, ctx); 1714 } 1715 #endif 1716 1717 #ifdef __NetBSD__ 1718 __USE(i); 1719 __USE(dev); 1720 return ttm_bus_dma_populate(ttm_dma); 1721 #else 1722 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86) 1723 if (swiotlb_nr_tbl()) { 1724 return ttm_dma_populate((void *)ttm, dev, ctx); 1725 } 1726 #endif 1727 1728 r = ttm_pool_populate(ttm, ctx); 1729 if (r) { 1730 return r; 1731 } 1732 1733 for (i = 0; i < ttm->num_pages; i++) { 1734 dma_addr_t addr; 1735 1736 addr = dma_map_page(dev, 
ttm->pages[i], 0, PAGE_SIZE, 1737 DMA_BIDIRECTIONAL); 1738 1739 if (dma_mapping_error(dev, addr)) { 1740 while (i--) { 1741 dma_unmap_page(dev, ttm_dma->dma_address[i], 1742 PAGE_SIZE, DMA_BIDIRECTIONAL); 1743 ttm_dma->dma_address[i] = 0; 1744 } 1745 ttm_pool_unpopulate(ttm); 1746 return -EFAULT; 1747 } 1748 1749 ttm_dma->dma_address[i] = addr; 1750 } 1751 return 0; 1752 #endif 1753 } 1754 1755 static void 1756 nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) 1757 { 1758 struct ttm_dma_tt *ttm_dma = (void *)ttm; 1759 struct nouveau_drm *drm; 1760 struct device *dev; 1761 unsigned i; 1762 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); 1763 1764 if (slave) 1765 return; 1766 1767 drm = nouveau_bdev(ttm->bdev); 1768 dev = drm->dev->dev; 1769 1770 #if IS_ENABLED(CONFIG_AGP) 1771 if (drm->agp.bridge) { 1772 ttm_agp_tt_unpopulate(ttm); 1773 return; 1774 } 1775 #endif 1776 1777 #ifdef __NetBSD__ 1778 __USE(i); 1779 __USE(dev); 1780 ttm_bus_dma_unpopulate(ttm_dma); 1781 #else 1782 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86) 1783 if (swiotlb_nr_tbl()) { 1784 ttm_dma_unpopulate((void *)ttm, dev); 1785 return; 1786 } 1787 #endif 1788 1789 for (i = 0; i < ttm->num_pages; i++) { 1790 if (ttm_dma->dma_address[i]) { 1791 dma_unmap_page(dev, ttm_dma->dma_address[i], PAGE_SIZE, 1792 DMA_BIDIRECTIONAL); 1793 } 1794 } 1795 1796 ttm_pool_unpopulate(ttm); 1797 #endif 1798 } 1799 1800 #ifdef __NetBSD__ 1801 static void 1802 nouveau_ttm_tt_swapout(struct ttm_tt *ttm) 1803 { 1804 struct ttm_dma_tt *ttm_dma = container_of(ttm, struct ttm_dma_tt, ttm); 1805 1806 ttm_bus_dma_swapout(ttm_dma); 1807 } 1808 #endif 1809 1810 void 1811 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive) 1812 { 1813 struct dma_resv *resv = nvbo->bo.base.resv; 1814 1815 if (exclusive) 1816 dma_resv_add_excl_fence(resv, &fence->base); 1817 else if (fence) 1818 dma_resv_add_shared_fence(resv, &fence->base); 1819 } 1820 1821 #ifdef __NetBSD__ 1822 static const struct uvm_pagerops nouveau_uvm_ops = { 1823 .pgo_reference = &ttm_bo_uvm_reference, 1824 .pgo_detach = &ttm_bo_uvm_detach, 1825 .pgo_fault = &ttm_bo_uvm_fault, 1826 }; 1827 #endif 1828 1829 struct ttm_bo_driver nouveau_bo_driver = { 1830 .ttm_tt_create = &nouveau_ttm_tt_create, 1831 .ttm_tt_populate = &nouveau_ttm_tt_populate, 1832 .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate, 1833 #ifdef __NetBSD__ 1834 .ttm_tt_swapout = &nouveau_ttm_tt_swapout, 1835 .ttm_uvm_ops = &nouveau_uvm_ops, 1836 #endif 1837 .invalidate_caches = nouveau_bo_invalidate_caches, 1838 .init_mem_type = nouveau_bo_init_mem_type, 1839 .eviction_valuable = ttm_bo_eviction_valuable, 1840 .evict_flags = nouveau_bo_evict_flags, 1841 .move_notify = nouveau_bo_move_ntfy, 1842 .move = nouveau_bo_move, 1843 .verify_access = nouveau_bo_verify_access, 1844 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, 1845 .io_mem_reserve = &nouveau_ttm_io_mem_reserve, 1846 .io_mem_free = &nouveau_ttm_io_mem_free, 1847 }; 1848
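/* nouveau_bo_driver above is the dispatch table TTM calls back into; it is registered via ttm_bo_device_init() when nouveau_ttm_init() sets up drm->ttm.bdev (see nouveau_ttm.c). */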