/*	$NetBSD: amdgpu_cgs.c,v 1.4.10.1 2020/02/29 20:20:13 ad Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_cgs.c,v 1.4.10.1 2020/02/29 20:20:13 ad Exp $");

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"

struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

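/*
 * Resolve the amdgpu_device behind an opaque CGS device handle.  Used at
 * the top of most callbacks below to declare a local `adev'.
 */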
#define CGS_FUNC_ADEV							\
	struct amdgpu_device *adev =					\
		((struct amdgpu_cgs_device *)cgs_device)->adev

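/*
 * Report the MC address range and currently available size of the given
 * memory pool (visible/invisible VRAM or GART), net of pinned memory.
 */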
static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
				   uint64_t *mc_start, uint64_t *mc_size,
				   uint64_t *mem_size)
{
	CGS_FUNC_ADEV;
	switch(type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		*mc_start = 0;
		*mc_size = adev->mc.visible_vram_size;
		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		*mc_start = adev->mc.visible_vram_size;
		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
		*mem_size = *mc_size;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		*mc_start = adev->mc.gtt_start;
		*mc_size = adev->mc.gtt_size;
		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

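/*
 * Wrap already-allocated kernel memory in a GTT buffer object and pin
 * it so the GPU can address it.  Unused, and therefore unimplemented,
 * in the NetBSD port.
 */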
static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
				uint64_t size,
				uint64_t min_offset, uint64_t max_offset,
				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
#ifdef __NetBSD__		/* XXX unused */
	return -ENOSYS;
#else
	CGS_FUNC_ADEV;
	int ret;
	struct amdgpu_bo *bo;
	struct page *kmem_page = vmalloc_to_page(kmem);
	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
	if (ret)
		return ret;
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
				       min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(bo);

	*kmem_handle = (cgs_handle_t)bo;
	return ret;
#endif
}

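/*
 * Undo amdgpu_cgs_gmap_kmem(): unpin and drop the wrapping buffer object.
 */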
static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
{
#ifdef __NetBSD__		/* XXX unused */
	panic("not implemented");
#else
	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
#endif
}

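/*
 * Allocate a buffer object for CGS in the requested pool, restricted to
 * the [min_offset, max_offset) MC range via an explicit TTM placement.
 * The handle returned to the caller is the amdgpu_bo pointer itself.
 */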
static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint16_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;
	struct ttm_placement placement;
	struct ttm_place place;

	if (min_offset > max_offset) {
		BUG_ON(1);
		return -EINVAL;
	}

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch(type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (max_offset > adev->mc.real_vram_size)
			return -EINVAL;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			place.fpfn =
				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
			place.lpfn =
				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM;
		}

		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
			TTM_PL_FLAG_UNCACHED;
		break;
	default:
		return -EINVAL;
	}

	*handle = 0;

	placement.placement = &place;
	placement.num_placement = 1;
	placement.busy_placement = &place;
	placement.num_busy_placement = 1;

	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
					  true, domain, flags,
					  NULL, &placement, NULL,
					  &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}

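/*
 * Release a CGS allocation: unmap any CPU mapping, unpin, and drop the
 * final reference on the buffer object.
 */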
static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

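/*
 * Pin a CGS buffer for GPU access, bounded by the fpfn/lpfn range of its
 * first placement, and return the resulting MC address.
 */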
static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	u64 min_offset, max_offset;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
				     min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}

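/* Unpin a CGS buffer previously mapped with amdgpu_cgs_gmap_gpu_mem(). */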
static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

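/*
 * CPU map/unmap of a CGS buffer through TTM's kernel mapping.
 */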
static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

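/* Plain MMIO register accessors, in dword units. */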
static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}

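/*
 * Indirect register accessors: dispatch to the index/data pair for the
 * requested space (indexed MMIO, PCIe, SMC, UVD context, DIDT).
 */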
static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(void *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}

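/* PCI configuration space accessors; read errors warn and return 0. */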
static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint8_t val;
	int ret = pci_read_config_byte(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_byte error"))
		return 0;
	return val;
}

static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint16_t val;
	int ret = pci_read_config_word(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_word error"))
		return 0;
	return val;
}

static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
						 unsigned addr)
{
	CGS_FUNC_ADEV;
	uint32_t val;
	int ret = pci_read_config_dword(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_dword error"))
		return 0;
	return val;
}

static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
					     uint8_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_byte(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_byte error");
}

static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
					     uint16_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_word(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_word error");
}

static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
					      uint32_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_dword(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_dword error");
}

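/*
 * ATOM BIOS helpers: look up a data table by index, query a command
 * table's revision, and execute a command table.
 */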
static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t*)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}

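/* The power-management request interface is not implemented yet; stubs. */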
static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
				     int active)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
				       enum cgs_clock clock, unsigned freq)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
					enum cgs_engine engine, int powered)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
					    enum cgs_clock clock,
					    struct cgs_clock_limits *limits)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
					  const uint32_t *voltages)
{
	DRM_ERROR("not implemented");
	return -EPERM;
}

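/*
 * IRQ glue: a cgs_irq_params hangs off each amdgpu_irq_src created for
 * CGS, letting the generic amdgpu interrupt code call back into the
 * client's enable/disable and handler functions.
 */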
struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};

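/*
 * Register a CGS interrupt source: allocate an amdgpu_irq_src backed by
 * the client's set/handler callbacks and add it under src_id.
 */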
static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!source)
		return -ENOMEM;
	irq_params =
		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, src_id, source);
	if (ret) {
		kfree(irq_params);
		kfree(source);
	}

	return ret;
}

static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}

static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}

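/*
 * Route clock- and power-gating requests to the first valid IP block of
 * the requested type; -1 is returned when no such block exists.
 */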
static int amdgpu_cgs_set_clockgating_state(void *cgs_device,
				  enum amd_ip_block_type block_type,
				  enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_clockgating_state(
								(void *)adev,
									state);
			break;
		}
	}
	return r;
}

static int amdgpu_cgs_set_powergating_state(void *cgs_device,
				  enum amd_ip_block_type block_type,
				  enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_powergating_state(
								(void *)adev,
									state);
			break;
		}
	}
	return r;
}

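/* Map a CGS firmware type onto the corresponding AMDGPU_UCODE_ID. */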
static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		if (adev->asic_type == CHIP_TONGA)
			result = AMDGPU_UCODE_ID_CP_MEC2;
		else if (adev->asic_type == CHIP_CARRIZO)
			result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}

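/*
 * Describe a firmware image to the CGS client.  Non-SMC ucode is served
 * from the already-loaded amdgpu firmware table (with the MEC jump
 * tables narrowed to their jt_offset/jt_size window); SMC firmware is
 * requested from the filesystem and validated on demand.
 */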
static int amdgpu_cgs_get_firmware_info(void *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if (CGS_UCODE_ID_SMU != type) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
			data_size = le32_to_cpu(header->jt_size) << 2;
		}
		info->mc_addr = gpu_addr;
		info->image_size = data_size;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address __unused;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;

		switch (adev->asic_type) {
		case CHIP_TONGA:
			strcpy(fw_name, "amdgpu/tonga_smc.bin");
			break;
		default:
			DRM_ERROR("SMC firmware not supported\n");
			return -EINVAL;
		}

		err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
		if (err) {
			DRM_ERROR("Failed to request firmware\n");
			return err;
		}

		err = amdgpu_ucode_validate(adev->pm.fw);
		if (err) {
			DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
			release_firmware(adev->pm.fw);
			adev->pm.fw = NULL;
			return err;
		}

		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
		       le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->kptr = (void *)__UNCONST(src); /* XXX used for? */
	}
	return 0;
}

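/*
 * Dispatch tables handed to the CGS core.  The initializers are
 * positional, so entries must stay in the member order of struct
 * cgs_ops and struct cgs_os_ops.
 */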
static const struct cgs_ops amdgpu_cgs_ops = {
	amdgpu_cgs_gpu_mem_info,
	amdgpu_cgs_gmap_kmem,
	amdgpu_cgs_gunmap_kmem,
	amdgpu_cgs_alloc_gpu_mem,
	amdgpu_cgs_free_gpu_mem,
	amdgpu_cgs_gmap_gpu_mem,
	amdgpu_cgs_gunmap_gpu_mem,
	amdgpu_cgs_kmap_gpu_mem,
	amdgpu_cgs_kunmap_gpu_mem,
	amdgpu_cgs_read_register,
	amdgpu_cgs_write_register,
	amdgpu_cgs_read_ind_register,
	amdgpu_cgs_write_ind_register,
	amdgpu_cgs_read_pci_config_byte,
	amdgpu_cgs_read_pci_config_word,
	amdgpu_cgs_read_pci_config_dword,
	amdgpu_cgs_write_pci_config_byte,
	amdgpu_cgs_write_pci_config_word,
	amdgpu_cgs_write_pci_config_dword,
	amdgpu_cgs_atom_get_data_table,
	amdgpu_cgs_atom_get_cmd_table_revs,
	amdgpu_cgs_atom_exec_cmd_table,
	amdgpu_cgs_create_pm_request,
	amdgpu_cgs_destroy_pm_request,
	amdgpu_cgs_set_pm_request,
	amdgpu_cgs_pm_request_clock,
	amdgpu_cgs_pm_request_engine,
	amdgpu_cgs_pm_query_clock_limits,
	amdgpu_cgs_set_camera_voltages,
	amdgpu_cgs_get_firmware_info,
	amdgpu_cgs_set_powergating_state,
	amdgpu_cgs_set_clockgating_state
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	amdgpu_cgs_add_irq_source,
	amdgpu_cgs_irq_get,
	amdgpu_cgs_irq_put
};

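/*
 * Create or destroy the small wrapper that binds the shared ops tables
 * above to one amdgpu_device; the wrapper is what CGS clients treat as
 * their opaque device handle.
 */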
void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return cgs_device;
}

void amdgpu_cgs_destroy_device(void *cgs_device)
{
	kfree(cgs_device);
}