amdgpu_internal.h revision d8807b2f
/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _AMDGPU_INTERNAL_H_
#define _AMDGPU_INTERNAL_H_

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>
#include <pthread.h>

#include "libdrm_macros.h"
#include "xf86atomic.h"
#include "amdgpu.h"
#include "util_double_list.h"

#define AMDGPU_CS_MAX_RINGS 8
/* do not use below macro if b is not power of 2 aligned value */
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define ROUND_UP(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define ROUND_DOWN(x, y) ((x) & ~__round_mask(x, y))

#define AMDGPU_INVALID_VA_ADDRESS	0xffffffffffffffff
#define AMDGPU_NULL_SUBMIT_SEQ		0

/** A free range inside a VA manager's address space. */
struct amdgpu_bo_va_hole {
	struct list_head list;
	uint64_t offset;
	uint64_t size;
};

/** Allocator for GPU virtual address ranges. */
struct amdgpu_bo_va_mgr {
	/* the start virtual address */
	uint64_t va_offset;
	uint64_t va_max;
	/* free-range list; protected by bo_va_mutex */
	struct list_head va_holes;
	pthread_mutex_t bo_va_mutex;
	uint32_t va_alignment;
};

/** One allocated VA range, handed out by a VA manager. */
struct amdgpu_va {
	amdgpu_device_handle dev;
	uint64_t address;
	uint64_t size;
	enum amdgpu_gpu_va_range range;
	/* the manager this range was allocated from (for freeing) */
	struct amdgpu_bo_va_mgr *vamgr;
};

/** One entry of the device-id -> marketing-name lookup table. */
struct amdgpu_asic_id {
	uint32_t did;
	uint32_t rid;
	char *marketing_name;
};

/** Per-device state shared by all handles that reference this device. */
struct amdgpu_device {
	atomic_t refcount;
	int fd;
	int flink_fd;
	unsigned major_version;
	unsigned minor_version;

	/** Lookup table of asic device id, revision id and marketing name */
	struct amdgpu_asic_id *asic_ids;
	/** List of buffer handles. Protected by bo_table_mutex. */
	struct util_hash_table *bo_handles;
	/** List of buffer GEM flink names. Protected by bo_table_mutex. */
	struct util_hash_table *bo_flink_names;
	/** This protects all hash tables. */
	pthread_mutex_t bo_table_mutex;
	struct drm_amdgpu_info_device dev_info;
	struct amdgpu_gpu_info info;
	/** The global VA manager for the whole virtual address space */
	struct amdgpu_bo_va_mgr vamgr;
	/** The VA manager for the 32bit address space */
	struct amdgpu_bo_va_mgr vamgr_32;
};

/** A single buffer object. */
struct amdgpu_bo {
	atomic_t refcount;
	struct amdgpu_device *dev;

	uint64_t alloc_size;

	uint32_t handle;
	uint32_t flink_name;

	/* serializes CPU mapping; cpu_map_count counts nested maps */
	pthread_mutex_t cpu_access_mutex;
	void *cpu_ptr;
	int cpu_map_count;
};

/** A kernel-side list of buffer objects used by a command submission. */
struct amdgpu_bo_list {
	struct amdgpu_device *dev;

	uint32_t handle;
};

/** A command-submission context; tracks last sequence per IP/instance/ring. */
struct amdgpu_context {
	struct amdgpu_device *dev;
	/** Mutex for accessing fences and to maintain command submissions
	    in good sequence.
	*/
	pthread_mutex_t sequence_mutex;
	/* context id*/
	uint32_t id;
	uint64_t last_seq[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
	struct list_head sem_list[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
};

/**
 * Structure describing sw semaphore based on scheduler
 *
 */
struct amdgpu_semaphore {
	atomic_t refcount;
	struct list_head list;
	struct amdgpu_cs_fence signal_fence;
};

/**
 * Functions.
 */

drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
				   uint64_t max, uint64_t alignment);

drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr);

drm_private uint64_t
amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
		     uint64_t alignment, uint64_t base_required);

drm_private void
amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size);

drm_private int amdgpu_parse_asic_ids(struct amdgpu_asic_id **asic_ids);

drm_private int amdgpu_query_gpu_info_init(amdgpu_device_handle dev);

drm_private uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout);

/**
 * Inline functions.
 */

/**
 * Increment src and decrement dst as if we were updating references
 * for an assignment between 2 pointers of some objects.
1713f012e29Smrg * 1723f012e29Smrg * \return true if dst is 0 1733f012e29Smrg */ 1743f012e29Smrgstatic inline bool update_references(atomic_t *dst, atomic_t *src) 1753f012e29Smrg{ 1763f012e29Smrg if (dst != src) { 1773f012e29Smrg /* bump src first */ 1783f012e29Smrg if (src) { 1793f012e29Smrg assert(atomic_read(src) > 0); 1803f012e29Smrg atomic_inc(src); 1813f012e29Smrg } 1823f012e29Smrg if (dst) { 1833f012e29Smrg assert(atomic_read(dst) > 0); 1843f012e29Smrg return atomic_dec_and_test(dst); 1853f012e29Smrg } 1863f012e29Smrg } 1873f012e29Smrg return false; 1883f012e29Smrg} 1893f012e29Smrg 1903f012e29Smrg#endif 191