/* $NetBSD: amdgpu_amdkfd.c,v 1.1.1.1 2018/08/27 01:34:43 riastradh Exp $ */

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_amdkfd.c,v 1.1.1.1 2018/08/27 01:34:43 riastradh Exp $");

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include <linux/module.h>

const struct kfd2kgd_calls *kfd2kgd;
const struct kgd2kfd_calls *kgd2kfd;
bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);

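/*
 * When amdkfd is built as a module, take a reference on its kgd2kfd_init
 * entry point so we know the KFD driver is available; when it is built in
 * (or disabled) there is nothing to set up at init time.
 */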
bool amdgpu_amdkfd_init(void)
{
#if defined(CONFIG_HSA_AMD_MODULE)
        bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);

        kgd2kfd_init_p = symbol_request(kgd2kfd_init);

        if (kgd2kfd_init_p == NULL)
                return false;
#endif
        return true;
}

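/*
 * Pick the kfd2kgd function table that matches the ASIC (GFX7 for Kaveri,
 * GFX8 for Carrizo) and hand it to the KFD driver through kgd2kfd_init(),
 * which in turn fills in the kgd2kfd call table.
 */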
bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
{
#if defined(CONFIG_HSA_AMD_MODULE)
        bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
#endif

        switch (rdev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
        case CHIP_KAVERI:
                kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
                break;
#endif
        case CHIP_CARRIZO:
                kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
                break;
        default:
                return false;
        }

#if defined(CONFIG_HSA_AMD_MODULE)
        kgd2kfd_init_p = symbol_request(kgd2kfd_init);

        if (kgd2kfd_init_p == NULL) {
                kfd2kgd = NULL;
                return false;
        }

        if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
                symbol_put(kgd2kfd_init);
                kfd2kgd = NULL;
                kgd2kfd = NULL;

                return false;
        }

        return true;
#elif defined(CONFIG_HSA_AMD)
        if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
                kfd2kgd = NULL;
                kgd2kfd = NULL;
                return false;
        }

        return true;
#else
        kfd2kgd = NULL;
        return false;
#endif
}

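/*
 * Tear down the KFD interface: let KFD run its exit handler and drop the
 * module reference taken on kgd2kfd_init when the interface was loaded.
 */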
void amdgpu_amdkfd_fini(void)
{
        if (kgd2kfd) {
                kgd2kfd->exit();
                symbol_put(kgd2kfd_init);
        }
}

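/*
 * Ask KFD to probe this GPU; rdev->kfd becomes the per-device KFD handle,
 * or NULL if KFD declines the device.
 */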
void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev)
{
        if (kgd2kfd)
                rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
                                           rdev->pdev, kfd2kgd);
}

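/*
 * Tell KFD which resources it may use on this device: VMIDs 8-15
 * (compute_vmid_bitmap 0xFF00), compute pipes 1-3, and the location and
 * size of the doorbell aperture reserved for it.
 */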
void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev)
{
        if (rdev->kfd) {
                struct kgd2kfd_shared_resources gpu_resources = {
                        .compute_vmid_bitmap = 0xFF00,

                        .first_compute_pipe = 1,
                        .compute_pipe_count = 4 - 1,
                };

                amdgpu_doorbell_get_kfd_info(rdev,
                                &gpu_resources.doorbell_physical_address,
                                &gpu_resources.doorbell_aperture_size,
                                &gpu_resources.doorbell_start_offset);

                kgd2kfd->device_init(rdev->kfd, &gpu_resources);
        }
}

void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev)
{
        if (rdev->kfd) {
                kgd2kfd->device_exit(rdev->kfd);
                rdev->kfd = NULL;
        }
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
                const void *ih_ring_entry)
{
        if (rdev->kfd)
                kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev)
{
        if (rdev->kfd)
                kgd2kfd->suspend(rdev->kfd);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
{
        int r = 0;

        if (rdev->kfd)
                r = kgd2kfd->resume(rdev->kfd);

        return r;
}

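/* Map a KFD memory pool to the amdgpu GEM domain used to back it. */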
u32 pool_to_domain(enum kgd_memory_pool p)
{
        switch (p) {
        case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
        default: return AMDGPU_GEM_DOMAIN_GTT;
        }
}

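/*
 * Allocate a pinned, CPU-mapped buffer in GTT for KFD: create the BO,
 * reserve it, pin it in GTT and kmap it, returning both the GPU and CPU
 * addresses to the caller.
 */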
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
                        void **mem_obj, uint64_t *gpu_addr,
                        void **cpu_ptr)
{
        struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
        struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
        int r;

        BUG_ON(kgd == NULL);
        BUG_ON(gpu_addr == NULL);
        BUG_ON(cpu_ptr == NULL);

        *mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
        if ((*mem) == NULL)
                return -ENOMEM;

        r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
                        AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
        if (r) {
                dev_err(rdev->dev,
                        "failed to allocate BO for amdkfd (%d)\n", r);
                kfree(*mem);    /* don't leak the wrapper on BO creation failure */
                return r;
        }

        /* map the buffer */
        r = amdgpu_bo_reserve((*mem)->bo, true);
        if (r) {
                dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
                goto allocate_mem_reserve_bo_failed;
        }

        r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
                        &(*mem)->gpu_addr);
        if (r) {
                dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
                goto allocate_mem_pin_bo_failed;
        }
        *gpu_addr = (*mem)->gpu_addr;

        r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
        if (r) {
                dev_err(rdev->dev,
                        "(%d) failed to map bo to kernel for amdkfd\n", r);
                goto allocate_mem_kmap_bo_failed;
        }
        *cpu_ptr = (*mem)->cpu_ptr;

        amdgpu_bo_unreserve((*mem)->bo);

        return 0;

allocate_mem_kmap_bo_failed:
        amdgpu_bo_unpin((*mem)->bo);
allocate_mem_pin_bo_failed:
        amdgpu_bo_unreserve((*mem)->bo);
allocate_mem_reserve_bo_failed:
        amdgpu_bo_unref(&(*mem)->bo);
        kfree(*mem);    /* release the wrapper allocated above as well */

        return r;
}

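/* Undo alloc_gtt_mem(): unmap, unpin and release the BO, then free the wrapper. */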
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
        struct kgd_mem *mem = (struct kgd_mem *) mem_obj;

        BUG_ON(mem == NULL);

        amdgpu_bo_reserve(mem->bo, true);
        amdgpu_bo_kunmap(mem->bo);
        amdgpu_bo_unpin(mem->bo);
        amdgpu_bo_unreserve(mem->bo);
        amdgpu_bo_unref(&(mem->bo));
        kfree(mem);
}

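/* Report the amount of VRAM KFD may consider usable on this device. */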
uint64_t get_vmem_size(struct kgd_dev *kgd)
{
        struct amdgpu_device *rdev =
                (struct amdgpu_device *)kgd;

        BUG_ON(kgd == NULL);

        return rdev->mc.real_vram_size;
}

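/* Sample the free-running GPU clock counter, or return 0 if the ASIC has none. */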
uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
        struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;

        if (rdev->asic_funcs->get_gpu_clock_counter)
                return rdev->asic_funcs->get_gpu_clock_counter(rdev);
        return 0;
}

uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
        struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;

        /* The sclk is reported in units of 10 kHz; divide by 100 to get MHz. */
        return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
}