/*	$NetBSD: amdgpu_cgs.c,v 1.2 2018/08/27 04:58:19 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_cgs.c,v 1.2 2018/08/27 04:58:19 riastradh Exp $");

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"

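/*
 * A CGS device is the amdgpu-backed instance of the common graphics
 * services interface: the generic handle that CGS clients pass around,
 * plus the owning amdgpu device it resolves back to.
 */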
struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

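/*
 * Recover the amdgpu device behind an opaque cgs_device argument.
 * Expands to a declaration of a local "adev" variable, so it must
 * appear with the declarations at the top of a function body.
 */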
#define CGS_FUNC_ADEV							\
	struct amdgpu_device *adev =					\
		((struct amdgpu_cgs_device *)cgs_device)->adev

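/*
 * Report the MC address range and the currently unpinned size of the
 * requested memory pool: CPU-visible VRAM, CPU-invisible VRAM, or GTT.
 */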
static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
				   uint64_t *mc_start, uint64_t *mc_size,
				   uint64_t *mem_size)
{
	CGS_FUNC_ADEV;
	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		*mc_start = 0;
		*mc_size = adev->mc.visible_vram_size;
		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		*mc_start = adev->mc.visible_vram_size;
		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
		*mem_size = *mc_size;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		*mc_start = adev->mc.gtt_start;
		*mc_size = adev->mc.gtt_size;
		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

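/*
 * Map a vmalloc'ed kernel buffer into the GPU address space: wrap its
 * backing storage in a GTT buffer object and pin it between min_offset
 * and max_offset.  Only the first page of @kmem is looked up, so this
 * appears to assume the mapping fits in a single page.
 */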
static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
				uint64_t size,
				uint64_t min_offset, uint64_t max_offset,
				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
	CGS_FUNC_ADEV;
	int ret;
	struct amdgpu_bo *bo;
	struct page *kmem_page = vmalloc_to_page(kmem);
	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
	if (ret)
		return ret;
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
				       min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(bo);

	*kmem_handle = (cgs_handle_t)bo;
	return ret;
}

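/* Unpin and drop the buffer object created by amdgpu_cgs_gmap_kmem(). */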
static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

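/*
 * Allocate GPU memory for a CGS client.  The memory type picks the TTM
 * domain and caching flags; min_offset/max_offset constrain the
 * placement range within that domain.
 */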
static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint16_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;
	struct ttm_placement placement;
	struct ttm_place place;

	if (min_offset > max_offset) {
		BUG_ON(1);
		return -EINVAL;
	}

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (max_offset > adev->mc.real_vram_size)
			return -EINVAL;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			place.fpfn =
				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
			place.lpfn =
				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM;
		}
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
			TTM_PL_FLAG_UNCACHED;
		break;
	default:
		return -EINVAL;
	}

	*handle = 0;

	placement.placement = &place;
	placement.num_placement = 1;
	placement.busy_placement = &place;
	placement.num_busy_placement = 1;

	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
					  true, domain, flags,
					  NULL, &placement, NULL,
					  &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}

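/* Unmap, unpin, and release a buffer object allocated through CGS. */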
static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

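/*
 * Pin a CGS buffer object into the GPU address space and return its MC
 * address.  The pin range is recovered from the single placement set
 * up by amdgpu_cgs_alloc_gpu_mem().
 */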
static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	u64 min_offset, max_offset;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
				     min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

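/* Plain MMIO register access, forwarded to the RREG32/WREG32 macros. */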
static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}

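/*
 * Indirect register access.  Each CGS register space maps onto one of
 * the driver's indirect access macro families (IDX, PCIE, SMC, UVD
 * context, DIDT); the audio endpoint space is not wired up here.
 */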
static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(void *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}

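/*
 * PCI configuration space access.  Failed reads warn and return 0;
 * failed writes can only warn, since the CGS interface gives these
 * callbacks no way to return an error.
 */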
static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint8_t val;
	int ret = pci_read_config_byte(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_byte error"))
		return 0;
	return val;
}

static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint16_t val;
	int ret = pci_read_config_word(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_word error"))
		return 0;
	return val;
}

static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
						 unsigned addr)
{
	CGS_FUNC_ADEV;
	uint32_t val;
	int ret = pci_read_config_dword(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_dword error"))
		return 0;
	return val;
}

static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
					     uint8_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_byte(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_byte error");
}

static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
					     uint16_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_word(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_word error");
}

static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
					      uint32_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_dword(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_dword error");
}

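/*
 * ATOM BIOS services: locate a data table in the VBIOS image, query a
 * command table's frev/crev revisions, or execute a command table.
 */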
static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}

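/*
 * Power-management request handling is not implemented yet; the stubs
 * below accept the calls and report success.
 */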
static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
				     int active)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
				       enum cgs_clock clock, unsigned freq)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
					enum cgs_engine engine, int powered)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
					    enum cgs_clock clock,
					    struct cgs_clock_limits *limits)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
					  const uint32_t *voltages)
{
	DRM_ERROR("not implemented");
	return -EPERM;
}

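/*
 * Interrupt plumbing.  Each interrupt source registered on behalf of a
 * CGS client carries a cgs_irq_params block in its ->data field; the
 * two trampolines below forward enable/disable requests and incoming
 * IV-ring entries to the client's callbacks.
 */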
struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;
	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};

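/*
 * Register an interrupt source for a CGS client.  On failure of
 * amdgpu_irq_add_id() both allocations are freed; on success they live
 * for the lifetime of the source.
 */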
static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!source)
		return -ENOMEM;
	irq_params =
		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, src_id, source);
	if (ret) {
		kfree(irq_params);
		kfree(source);
	}

	return ret;
}

static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}

static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}

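/*
 * Apply a clock- or power-gating state to the first valid IP block of
 * the given type.  Returns -1 if no such block is found.
 */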
int amdgpu_cgs_set_clockgating_state(void *cgs_device,
				     enum amd_ip_block_type block_type,
				     enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_clockgating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}

int amdgpu_cgs_set_powergating_state(void *cgs_device,
				     enum amd_ip_block_type block_type,
				     enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_powergating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}

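/*
 * Translate a CGS firmware id into the driver's AMDGPU_UCODE_ID.  The
 * MEC JT2 mapping is ASIC-specific: Tonga has a second MEC, while
 * Carrizo reuses MEC1.  Unsupported types fall through to
 * AMDGPU_UCODE_ID_MAXIMUM.
 */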
static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		if (adev->asic_type == CHIP_TONGA)
			result = AMDGPU_UCODE_ID_CP_MEC2;
		else if (adev->asic_type == CHIP_CARRIZO)
			result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}

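/*
 * Describe a firmware image to a CGS client.  Most ucode types have
 * already been fetched by the driver and are looked up in
 * adev->firmware.ucode[]; SMC firmware is requested and validated on
 * demand and handed back as a kernel pointer.
 */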
static int amdgpu_cgs_get_firmware_info(void *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if (CGS_UCODE_ID_SMU != type) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
			data_size = le32_to_cpu(header->jt_size) << 2;
		}
		info->mc_addr = gpu_addr;
		info->image_size = data_size;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;

		switch (adev->asic_type) {
		case CHIP_TONGA:
			strcpy(fw_name, "amdgpu/tonga_smc.bin");
			break;
		default:
			DRM_ERROR("SMC firmware not supported\n");
			return -EINVAL;
		}

		err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
		if (err) {
			DRM_ERROR("Failed to request firmware\n");
			return err;
		}

		err = amdgpu_ucode_validate(adev->pm.fw);
		if (err) {
			DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
			release_firmware(adev->pm.fw);
			adev->pm.fw = NULL;
			return err;
		}

		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
		       le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->kptr = (void *)src;
	}
	return 0;
}

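/*
 * The CGS dispatch tables.  The initializers are positional, so the
 * order below must match the member order of struct cgs_ops and
 * struct cgs_os_ops in the CGS headers.
 */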
static const struct cgs_ops amdgpu_cgs_ops = {
	amdgpu_cgs_gpu_mem_info,
	amdgpu_cgs_gmap_kmem,
	amdgpu_cgs_gunmap_kmem,
	amdgpu_cgs_alloc_gpu_mem,
	amdgpu_cgs_free_gpu_mem,
	amdgpu_cgs_gmap_gpu_mem,
	amdgpu_cgs_gunmap_gpu_mem,
	amdgpu_cgs_kmap_gpu_mem,
	amdgpu_cgs_kunmap_gpu_mem,
	amdgpu_cgs_read_register,
	amdgpu_cgs_write_register,
	amdgpu_cgs_read_ind_register,
	amdgpu_cgs_write_ind_register,
	amdgpu_cgs_read_pci_config_byte,
	amdgpu_cgs_read_pci_config_word,
	amdgpu_cgs_read_pci_config_dword,
	amdgpu_cgs_write_pci_config_byte,
	amdgpu_cgs_write_pci_config_word,
	amdgpu_cgs_write_pci_config_dword,
	amdgpu_cgs_atom_get_data_table,
	amdgpu_cgs_atom_get_cmd_table_revs,
	amdgpu_cgs_atom_exec_cmd_table,
	amdgpu_cgs_create_pm_request,
	amdgpu_cgs_destroy_pm_request,
	amdgpu_cgs_set_pm_request,
	amdgpu_cgs_pm_request_clock,
	amdgpu_cgs_pm_request_engine,
	amdgpu_cgs_pm_query_clock_limits,
	amdgpu_cgs_set_camera_voltages,
	amdgpu_cgs_get_firmware_info,
	amdgpu_cgs_set_powergating_state,
	amdgpu_cgs_set_clockgating_state
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	amdgpu_cgs_add_irq_source,
	amdgpu_cgs_irq_get,
	amdgpu_cgs_irq_put
};

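/*
 * Create the CGS wrapper for an amdgpu device.  The returned pointer
 * doubles as the opaque cgs_device handle that all of the callbacks
 * above expect.  Typical usage (sketch):
 *
 *	void *cgs = amdgpu_cgs_create_device(adev);
 *	if (cgs) {
 *		... hand cgs to a CGS client such as the SMU code ...
 *		amdgpu_cgs_destroy_device(cgs);
 *	}
 */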
void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return cgs_device;
}

void amdgpu_cgs_destroy_device(void *cgs_device)
{
	kfree(cgs_device);
}
    806