/*	$NetBSD: amdgpu_csa.c,v 1.3 2021/12/19 10:59:01 riastradh Exp $	*/

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Monk.liu@amd.com
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_csa.c,v 1.3 2021/12/19 10:59:01 riastradh Exp $");

#include "amdgpu.h"

uint64_t amdgpu_csa_vaddr(struct amdgpu_device *adev)
{
	uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;

	addr -= AMDGPU_VA_RESERVED_SIZE;
	addr = amdgpu_gmc_sign_extend(addr);

	return addr;
}

int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
				u32 domain, uint32_t size)
{
	int r __unused;
	void *ptr;

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				domain, bo,
				NULL, &ptr);
	if (!*bo)
		return -ENOMEM;

	memset(ptr, 0, size);
	adev->virt.csa_cpu_addr = ptr;
	return 0;
}

void amdgpu_free_static_csa(struct amdgpu_bo **bo)
{
	amdgpu_bo_free_kernel(bo, NULL, NULL);
}
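/*
 * Illustrative sketch, not part of the driver proper: one plausible way the
 * allocate/free pair above could be driven from SR-IOV init/teardown code.
 * AMDGPU_CSA_SIZE and adev->virt.csa_obj are assumed to come from the
 * surrounding amdgpu headers; the *_example wrapper names are hypothetical.
 *
 *	static int amdgpu_csa_init_example(struct amdgpu_device *adev)
 *	{
 *		// Allocate and CPU-map the static CSA buffer in VRAM.
 *		return amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
 *		    AMDGPU_GEM_DOMAIN_VRAM, AMDGPU_CSA_SIZE);
 *	}
 *
 *	static void amdgpu_csa_fini_example(struct amdgpu_device *adev)
 *	{
 *		// Release the kernel BO allocated above.
 *		amdgpu_free_static_csa(&adev->virt.csa_obj);
 *	}
 */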
/*
 * amdgpu_map_static_csa should be called during amdgpu_vm_init; it maps the
 * virtual address returned by amdgpu_csa_vaddr() into this VM, and each GFX
 * command submission should use this virtual address within the META_DATA
 * init package to support SR-IOV GFX preemption.
 */
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			  struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
			  uint64_t csa_addr, uint32_t size)
{
	struct ww_acquire_ctx ticket;
	struct list_head list;
	struct amdgpu_bo_list_entry pd;
	struct ttm_validate_buffer csa_tv;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&csa_tv.head);
	csa_tv.bo = &bo->tbo;
	csa_tv.num_shared = 1;

	list_add(&csa_tv.head, &list);
	amdgpu_vm_get_pd_bo(vm, &list, &pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r) {
		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
		return r;
	}

	*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!*bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		DRM_ERROR("failed to create bo_va for static CSA\n");
		return -ENOMEM;
	}

	r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
			     AMDGPU_PTE_EXECUTABLE);

	if (r) {
		DRM_ERROR("failed to do bo_map on static CSA, err=%d\n", r);
		amdgpu_vm_bo_rmv(adev, *bo_va);
		ttm_eu_backoff_reservation(&ticket, &list);
		return r;
	}

	ttm_eu_backoff_reservation(&ticket, &list);
	return 0;
}
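/*
 * Illustrative sketch, not part of the driver proper: per the comment above
 * amdgpu_map_static_csa(), the mapping is expected to happen while a VM is
 * being set up, at the fixed per-VM address returned by amdgpu_csa_vaddr().
 * The fpriv/csa_va names, AMDGPU_CSA_SIZE, and AMDGPU_GMC_HOLE_MASK are
 * assumptions for the example, not guaranteed by this file.
 *
 *	// Strip the sign extension before handing the address to the VM code.
 *	uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
 *	int r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
 *	    &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
 *	if (r)
 *		DRM_ERROR("failed to map static CSA: err=%d\n", r);
 */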