/*	$NetBSD: amdgpu_cgs.c,v 1.4.6.2 2019/06/10 22:07:57 christos Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_cgs.c,v 1.4.6.2 2019/06/10 22:07:57 christos Exp $");

#include <asm/byteorder.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"

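/*
 * CGS ("common graphics services") glue: a thin wrapper that exposes
 * amdgpu device services (GPU memory, registers, PCI config space,
 * ATOM BIOS tables, interrupts, and firmware images) through the
 * driver-neutral cgs_ops table consumed by shared AMD components.
 */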
struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

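/*
 * Every CGS callback receives an opaque cgs_device pointer; this macro
 * recovers the owning amdgpu_device from the wrapper above.
 */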
#define CGS_FUNC_ADEV							\
	struct amdgpu_device *adev =					\
		((struct amdgpu_cgs_device *)cgs_device)->adev

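/*
 * Report the managed range and the currently unpinned size of one of
 * the CGS memory pools (visible/invisible VRAM, cacheable/WC GART).
 */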
static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
				   uint64_t *mc_start, uint64_t *mc_size,
				   uint64_t *mem_size)
{
	CGS_FUNC_ADEV;
	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		*mc_start = 0;
		*mc_size = adev->mc.visible_vram_size;
		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		*mc_start = adev->mc.visible_vram_size;
		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
		*mem_size = *mc_size;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		*mc_start = adev->mc.gtt_start;
		*mc_size = adev->mc.gtt_size;
		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
				uint64_t size,
				uint64_t min_offset, uint64_t max_offset,
				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
#ifdef __NetBSD__		/* XXX unused */
	return -ENOSYS;
#else
	CGS_FUNC_ADEV;
	int ret;
	struct amdgpu_bo *bo;
	struct page *kmem_page = vmalloc_to_page(kmem);
	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
	if (ret)
		return ret;
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
				       min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(bo);

	*kmem_handle = (cgs_handle_t)bo;
	return ret;
#endif
}

static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
{
#ifdef __NetBSD__		/* XXX unused */
	panic("not implemented");
#else
	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
#endif
}

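/*
 * Allocate a buffer object on behalf of a CGS client.  The pool type
 * selects the TTM domain and placement flags; min_offset/max_offset
 * restrict the placement to a byte window within that pool.
 */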
static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint16_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;
	struct ttm_placement placement;
	struct ttm_place place;

	if (min_offset > max_offset) {
		BUG_ON(1);
		return -EINVAL;
	}

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (max_offset > adev->mc.real_vram_size)
			return -EINVAL;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			place.fpfn =
				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
			place.lpfn =
				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
		} else {
			/* all of VRAM is CPU-visible; use the caller's
			 * window so that `place' is always initialized */
			place.fpfn = min_offset >> PAGE_SHIFT;
			place.lpfn = max_offset >> PAGE_SHIFT;
		}
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
			TTM_PL_FLAG_UNCACHED;
		break;
	default:
		return -EINVAL;
	}

	*handle = 0;

	placement.placement = &place;
	placement.num_placement = 1;
	placement.busy_placement = &place;
	placement.num_busy_placement = 1;

	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
					  true, domain, flags,
					  NULL, &placement, NULL,
					  &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}

static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	u64 min_offset, max_offset;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	/* fpfn/lpfn are 32-bit page frame numbers; widen before
	 * shifting so offsets beyond 4GB are not truncated */
	min_offset = (u64)obj->placements[0].fpfn << PAGE_SHIFT;
	max_offset = (u64)obj->placements[0].lpfn << PAGE_SHIFT;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
				     min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

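/*
 * Register access helpers.  Direct MMIO goes through RREG32/WREG32;
 * the indirect spaces (PCIE, SMC, UVD context, DIDT) use the
 * corresponding index/data accessors.
 */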
static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}

static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(void *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}

static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint8_t val;
	int ret = pci_read_config_byte(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_byte error"))
		return 0;
	return val;
}

static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint16_t val;
	int ret = pci_read_config_word(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_word error"))
		return 0;
	return val;
}

static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
						 unsigned addr)
{
	CGS_FUNC_ADEV;
	uint32_t val;
	int ret = pci_read_config_dword(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_dword error"))
		return 0;
	return val;
}

static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
					     uint8_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_byte(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_byte error");
}

static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
					     uint16_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_word(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_word error");
}

static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
					      uint32_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_dword(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_dword error");
}

static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}

static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
				     int active)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
				       enum cgs_clock clock, unsigned freq)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
					enum cgs_engine engine, int powered)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
					    enum cgs_clock clock,
					    struct cgs_clock_limits *limits)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
					  const uint32_t *voltages)
{
	DRM_ERROR("not implemented\n");
	return -EPERM;
}

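/*
 * Interrupt plumbing: a CGS client registers a (set, handler) callback
 * pair per interrupt source id.  The amdgpu IRQ core then calls the two
 * trampolines below, which forward into the client callbacks stashed in
 * the source's private data.
 */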
struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};

static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!source)
		return -ENOMEM;
	irq_params = kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, src_id, source);
	if (ret) {
		kfree(irq_params);
		kfree(source);
	}

	return ret;
}

static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}

static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}

static int amdgpu_cgs_set_clockgating_state(void *cgs_device,
					    enum amd_ip_block_type block_type,
					    enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_clockgating_state(
				(void *)adev, state);
			break;
		}
	}
	return r;
}

static int amdgpu_cgs_set_powergating_state(void *cgs_device,
					    enum amd_ip_block_type block_type,
					    enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_powergating_state(
				(void *)adev, state);
			break;
		}
	}
	return r;
}

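/*
 * Map a CGS ucode id onto the amdgpu ucode id used to index
 * adev->firmware.ucode[].
 */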
static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		if (adev->asic_type == CHIP_TONGA)
			result = AMDGPU_UCODE_ID_CP_MEC2;
		else if (adev->asic_type == CHIP_CARRIZO)
			result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}

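/*
 * For non-SMC firmware, hand back the GPU address and size of the image
 * the ucode loader has already placed (or of its jump table for the MEC
 * JT entries).  For SMC firmware, fetch and validate the image here and
 * return a kernel pointer to its data.
 */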
static int amdgpu_cgs_get_firmware_info(void *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if (CGS_UCODE_ID_SMU != type) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
			data_size = le32_to_cpu(header->jt_size) << 2;
		}
		info->mc_addr = gpu_addr;
		info->image_size = data_size;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address __unused;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;

		switch (adev->asic_type) {
		case CHIP_TONGA:
			strcpy(fw_name, "amdgpu/tonga_smc.bin");
			break;
		default:
			DRM_ERROR("SMC firmware not supported\n");
			return -EINVAL;
		}

		err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
		if (err) {
			DRM_ERROR("Failed to request firmware\n");
			return err;
		}

		err = amdgpu_ucode_validate(adev->pm.fw);
		if (err) {
			DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
			release_firmware(adev->pm.fw);
			adev->pm.fw = NULL;
			return err;
		}

		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
		       le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->kptr = (void *)__UNCONST(src); /* XXX used for? */
	}
	return 0;
}

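/*
 * These initializers are positional, so the entries below must stay in
 * the same order as the function pointers declared in struct cgs_ops
 * and struct cgs_os_ops.
 */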
static const struct cgs_ops amdgpu_cgs_ops = {
	amdgpu_cgs_gpu_mem_info,
	amdgpu_cgs_gmap_kmem,
	amdgpu_cgs_gunmap_kmem,
	amdgpu_cgs_alloc_gpu_mem,
	amdgpu_cgs_free_gpu_mem,
	amdgpu_cgs_gmap_gpu_mem,
	amdgpu_cgs_gunmap_gpu_mem,
	amdgpu_cgs_kmap_gpu_mem,
	amdgpu_cgs_kunmap_gpu_mem,
	amdgpu_cgs_read_register,
	amdgpu_cgs_write_register,
	amdgpu_cgs_read_ind_register,
	amdgpu_cgs_write_ind_register,
	amdgpu_cgs_read_pci_config_byte,
	amdgpu_cgs_read_pci_config_word,
	amdgpu_cgs_read_pci_config_dword,
	amdgpu_cgs_write_pci_config_byte,
	amdgpu_cgs_write_pci_config_word,
	amdgpu_cgs_write_pci_config_dword,
	amdgpu_cgs_atom_get_data_table,
	amdgpu_cgs_atom_get_cmd_table_revs,
	amdgpu_cgs_atom_exec_cmd_table,
	amdgpu_cgs_create_pm_request,
	amdgpu_cgs_destroy_pm_request,
	amdgpu_cgs_set_pm_request,
	amdgpu_cgs_pm_request_clock,
	amdgpu_cgs_pm_request_engine,
	amdgpu_cgs_pm_query_clock_limits,
	amdgpu_cgs_set_camera_voltages,
	amdgpu_cgs_get_firmware_info,
	amdgpu_cgs_set_powergating_state,
	amdgpu_cgs_set_clockgating_state
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	amdgpu_cgs_add_irq_source,
	amdgpu_cgs_irq_get,
	amdgpu_cgs_irq_put
};

void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return cgs_device;
}

void amdgpu_cgs_destroy_device(void *cgs_device)
{
	kfree(cgs_device);
}

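/*
 * Typical usage, as an illustrative sketch only (the real call sites
 * live elsewhere in the driver): device setup creates one CGS device
 * per amdgpu_device and hands it to a CGS client, e.g.
 *
 *	void *cgs = amdgpu_cgs_create_device(adev);
 *	if (cgs == NULL)
 *		return -ENOMEM;
 *	... pass cgs to a component that calls through cgs_ops ...
 *	amdgpu_cgs_destroy_device(cgs);
 */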