/* amdgpu_bo.c — revision 4545e80c */
/*
 * Copyright © 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"

static void amdgpu_close_kms_handle(amdgpu_device_handle dev,
				    uint32_t handle)
{
	struct drm_gem_close args = {};

	args.handle = handle;
	drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, &args);
}

static int amdgpu_bo_create(amdgpu_device_handle dev,
			    uint64_t size,
			    uint32_t handle,
			    amdgpu_bo_handle *buf_handle)
{
	struct amdgpu_bo *bo;

	bo = calloc(1, sizeof(struct amdgpu_bo));
	if (!bo)
		return -ENOMEM;

	atomic_set(&bo->refcount, 1);
	bo->dev = dev;
	bo->alloc_size = size;
	bo->handle = handle;
	pthread_mutex_init(&bo->cpu_access_mutex, NULL);

	*buf_handle = bo;
	return 0;
}

drm_public int amdgpu_bo_alloc(amdgpu_device_handle dev,
			       struct amdgpu_bo_alloc_request *alloc_buffer,
			       amdgpu_bo_handle *buf_handle)
{
	union drm_amdgpu_gem_create args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = alloc_buffer->alloc_size;
	args.in.alignment = alloc_buffer->phys_alignment;
	/* Set the placement. */
	args.in.domains = alloc_buffer->preferred_heap;
	args.in.domain_flags = alloc_buffer->flags;

	/* Allocate the buffer with the preferred heap. */
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_CREATE,
				&args, sizeof(args));
	if (r)
		goto out;

	r = amdgpu_bo_create(dev, alloc_buffer->alloc_size, args.out.handle,
			     buf_handle);
	if (r) {
		amdgpu_close_kms_handle(dev, args.out.handle);
		goto out;
	}

	pthread_mutex_lock(&dev->bo_table_mutex);
	r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
				*buf_handle);
	pthread_mutex_unlock(&dev->bo_table_mutex);
	if (r)
		amdgpu_bo_free(*buf_handle);
out:
	return r;
}

drm_public int amdgpu_bo_set_metadata(amdgpu_bo_handle bo,
				      struct amdgpu_bo_metadata *info)
{
	struct drm_amdgpu_gem_metadata args = {};

	args.handle = bo->handle;
	args.op = AMDGPU_GEM_METADATA_OP_SET_METADATA;
	args.data.flags = info->flags;
	args.data.tiling_info = info->tiling_info;

	if (info->size_metadata > sizeof(args.data.data))
		return -EINVAL;

	if (info->size_metadata) {
		args.data.data_size_bytes = info->size_metadata;
		memcpy(args.data.data, info->umd_metadata, info->size_metadata);
	}

	return drmCommandWriteRead(bo->dev->fd,
				   DRM_AMDGPU_GEM_METADATA,
				   &args, sizeof(args));
}

drm_public int amdgpu_bo_query_info(amdgpu_bo_handle bo,
				    struct amdgpu_bo_info *info)
{
	struct drm_amdgpu_gem_metadata metadata = {};
	struct drm_amdgpu_gem_create_in bo_info = {};
	struct drm_amdgpu_gem_op gem_op = {};
	int r;

	/* Validate the BO passed in */
	if (!bo->handle)
		return -EINVAL;

	/* Query metadata. */
	metadata.handle = bo->handle;
	metadata.op = AMDGPU_GEM_METADATA_OP_GET_METADATA;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_METADATA,
				&metadata, sizeof(metadata));
	if (r)
		return r;

	if (metadata.data.data_size_bytes >
	    sizeof(info->metadata.umd_metadata))
		return -EINVAL;
	/* Query buffer info. */
	gem_op.handle = bo->handle;
	gem_op.op = AMDGPU_GEM_OP_GET_GEM_CREATE_INFO;
	gem_op.value = (uintptr_t)&bo_info;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_OP,
				&gem_op, sizeof(gem_op));
	if (r)
		return r;

	memset(info, 0, sizeof(*info));
	info->alloc_size = bo_info.bo_size;
	info->phys_alignment = bo_info.alignment;
	info->preferred_heap = bo_info.domains;
	info->alloc_flags = bo_info.domain_flags;
	info->metadata.flags = metadata.data.flags;
	info->metadata.tiling_info = metadata.data.tiling_info;

	info->metadata.size_metadata = metadata.data.data_size_bytes;
	if (metadata.data.data_size_bytes > 0)
		memcpy(info->metadata.umd_metadata, metadata.data.data,
		       metadata.data.data_size_bytes);

	return 0;
}

static int amdgpu_bo_export_flink(amdgpu_bo_handle bo)
{
	struct drm_gem_flink flink;
	int fd, dma_fd;
	uint32_t handle;
	int r;

	fd = bo->dev->fd;
	handle = bo->handle;
	if (bo->flink_name)
		return 0;

	if (bo->dev->flink_fd != bo->dev->fd) {
		r = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC,
				       &dma_fd);
		if (!r) {
			r = drmPrimeFDToHandle(bo->dev->flink_fd, dma_fd, &handle);
			close(dma_fd);
		}
		if (r)
			return r;
		fd = bo->dev->flink_fd;
	}
	memset(&flink, 0, sizeof(flink));
	flink.handle = handle;

	r = drmIoctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
	if (r)
		return r;

	bo->flink_name = flink.name;

	if (bo->dev->flink_fd != bo->dev->fd) {
		struct drm_gem_close args = {};
		args.handle = handle;
		drmIoctl(bo->dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &args);
	}

	pthread_mutex_lock(&bo->dev->bo_table_mutex);
	r = handle_table_insert(&bo->dev->bo_flink_names, bo->flink_name, bo);
	pthread_mutex_unlock(&bo->dev->bo_table_mutex);

	return r;
}

drm_public int amdgpu_bo_export(amdgpu_bo_handle bo,
				enum amdgpu_bo_handle_type type,
				uint32_t *shared_handle)
{
	int r;

	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		r = amdgpu_bo_export_flink(bo);
		if (r)
			return r;

		*shared_handle = bo->flink_name;
		return 0;

	case amdgpu_bo_handle_type_kms:
	case amdgpu_bo_handle_type_kms_noimport:
		*shared_handle = bo->handle;
		return 0;

	case amdgpu_bo_handle_type_dma_buf_fd:
		return drmPrimeHandleToFD(bo->dev->fd, bo->handle,
					  DRM_CLOEXEC | DRM_RDWR,
					  (int*)shared_handle);
	}
	return -EINVAL;
}

drm_public int amdgpu_bo_import(amdgpu_device_handle dev,
				enum amdgpu_bo_handle_type type,
				uint32_t shared_handle,
				struct amdgpu_bo_import_result *output)
{
	struct drm_gem_open open_arg = {};
	struct drm_gem_close close_arg = {};
	struct amdgpu_bo *bo = NULL;
	uint32_t handle = 0, flink_name = 0;
	uint64_t alloc_size = 0;
	int r = 0;
	int dma_fd;
	uint64_t dma_buf_size = 0;

	/* We must maintain a list of pairs <handle, bo>, so that we always
	 * return the same amdgpu_bo instance for the same handle. */
	pthread_mutex_lock(&dev->bo_table_mutex);

	/* Convert a DMA buf handle to a KMS handle now. */
	if (type == amdgpu_bo_handle_type_dma_buf_fd) {
		off_t size;

		/* Get a KMS handle. */
		r = drmPrimeFDToHandle(dev->fd, shared_handle, &handle);
		if (r)
			goto unlock;

		/* Query the buffer size. */
		size = lseek(shared_handle, 0, SEEK_END);
		if (size == (off_t)-1) {
			r = -errno;
			goto free_bo_handle;
		}
		lseek(shared_handle, 0, SEEK_SET);

		dma_buf_size = size;
		shared_handle = handle;
	}

	/* If we have already created a buffer with this handle, find it. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		bo = handle_table_lookup(&dev->bo_flink_names, shared_handle);
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		bo = handle_table_lookup(&dev->bo_handles, shared_handle);
		break;

	case amdgpu_bo_handle_type_kms:
	case amdgpu_bo_handle_type_kms_noimport:
		/* Importing a KMS handle is not allowed. */
		r = -EPERM;
		goto unlock;

	default:
		r = -EINVAL;
		goto unlock;
	}

	if (bo) {
		/* The buffer already exists, just bump the refcount. */
		atomic_inc(&bo->refcount);
		pthread_mutex_unlock(&dev->bo_table_mutex);

		output->buf_handle = bo;
		output->alloc_size = bo->alloc_size;
		return 0;
	}
	/* Open the handle. */
	switch (type) {
	case amdgpu_bo_handle_type_gem_flink_name:
		open_arg.name = shared_handle;
		r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_OPEN, &open_arg);
		if (r)
			goto unlock;

		flink_name = shared_handle;
		handle = open_arg.handle;
		alloc_size = open_arg.size;
		if (dev->flink_fd != dev->fd) {
			r = drmPrimeHandleToFD(dev->flink_fd, handle,
					       DRM_CLOEXEC, &dma_fd);
			if (r)
				goto free_bo_handle;
			r = drmPrimeFDToHandle(dev->fd, dma_fd, &handle);
			close(dma_fd);
			if (r)
				goto free_bo_handle;
			close_arg.handle = open_arg.handle;
			r = drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE,
				     &close_arg);
			if (r)
				goto free_bo_handle;
		}
		break;

	case amdgpu_bo_handle_type_dma_buf_fd:
		handle = shared_handle;
		alloc_size = dma_buf_size;
		break;

	case amdgpu_bo_handle_type_kms:
	case amdgpu_bo_handle_type_kms_noimport:
		assert(0); /* unreachable */
	}

	/* Initialize it. */
	r = amdgpu_bo_create(dev, alloc_size, handle, &bo);
	if (r)
		goto free_bo_handle;

	r = handle_table_insert(&dev->bo_handles, bo->handle, bo);
	if (r)
		goto free_bo_handle;
	if (flink_name) {
		bo->flink_name = flink_name;
		r = handle_table_insert(&dev->bo_flink_names, flink_name,
					bo);
		if (r)
			goto remove_handle;
	}

	output->buf_handle = bo;
	output->alloc_size = bo->alloc_size;
	pthread_mutex_unlock(&dev->bo_table_mutex);
	return 0;

remove_handle:
	handle_table_remove(&dev->bo_handles, bo->handle);
free_bo_handle:
	if (flink_name && !close_arg.handle && open_arg.handle) {
		close_arg.handle = open_arg.handle;
		drmIoctl(dev->flink_fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
	}
	if (bo)
		amdgpu_bo_free(bo);
	else
		amdgpu_close_kms_handle(dev, handle);
unlock:
	pthread_mutex_unlock(&dev->bo_table_mutex);
	return r;
}

drm_public int amdgpu_bo_free(amdgpu_bo_handle buf_handle)
{
	struct amdgpu_device *dev;
	struct amdgpu_bo *bo = buf_handle;

	assert(bo != NULL);
	dev = bo->dev;
	pthread_mutex_lock(&dev->bo_table_mutex);

	if (update_references(&bo->refcount, NULL)) {
		/* Remove the buffer from the hash tables. */
		handle_table_remove(&dev->bo_handles, bo->handle);

		if (bo->flink_name)
			handle_table_remove(&dev->bo_flink_names,
					    bo->flink_name);
		/* Release CPU access. */
		if (bo->cpu_map_count > 0) {
			bo->cpu_map_count = 1;
			amdgpu_bo_cpu_unmap(bo);
		}

		amdgpu_close_kms_handle(dev, bo->handle);
		pthread_mutex_destroy(&bo->cpu_access_mutex);
		free(bo);
	}

	pthread_mutex_unlock(&dev->bo_table_mutex);
	return 0;
}

drm_public void amdgpu_bo_inc_ref(amdgpu_bo_handle bo)
{
	atomic_inc(&bo->refcount);
}

drm_public int amdgpu_bo_cpu_map(amdgpu_bo_handle bo, void **cpu)
{
	union drm_amdgpu_gem_mmap args;
	void *ptr;
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);

	if (bo->cpu_ptr) {
		/* already mapped */
		assert(bo->cpu_map_count > 0);
		bo->cpu_map_count++;
		*cpu = bo->cpu_ptr;
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}

	assert(bo->cpu_map_count == 0);

	memset(&args, 0, sizeof(args));

	/* Query the buffer address (args.addr_ptr).
	 * The kernel driver ignores the offset and size parameters. */
	args.in.handle = bo->handle;

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_MMAP, &args,
				sizeof(args));
	if (r) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return r;
	}

	/* Map the buffer. */
	ptr = drm_mmap(NULL, bo->alloc_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       bo->dev->fd, args.out.addr_ptr);
	if (ptr == MAP_FAILED) {
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -errno;
	}

	bo->cpu_ptr = ptr;
	bo->cpu_map_count = 1;
	pthread_mutex_unlock(&bo->cpu_access_mutex);

	*cpu = ptr;
	return 0;
}

drm_public int amdgpu_bo_cpu_unmap(amdgpu_bo_handle bo)
{
	int r;

	pthread_mutex_lock(&bo->cpu_access_mutex);
	assert(bo->cpu_map_count >= 0);

	if (bo->cpu_map_count == 0) {
		/* not mapped */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return -EINVAL;
	}

	bo->cpu_map_count--;
	if (bo->cpu_map_count > 0) {
		/* mapped multiple times */
		pthread_mutex_unlock(&bo->cpu_access_mutex);
		return 0;
	}
	r = drm_munmap(bo->cpu_ptr, bo->alloc_size) == 0 ? 0 : -errno;
	bo->cpu_ptr = NULL;
	pthread_mutex_unlock(&bo->cpu_access_mutex);
	return r;
}

drm_public int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
				struct amdgpu_buffer_size_alignments *info)
{
	info->size_local = dev->dev_info.pte_fragment_size;
	info->size_remote = dev->dev_info.gart_page_size;
	return 0;
}

drm_public int amdgpu_bo_wait_for_idle(amdgpu_bo_handle bo,
				       uint64_t timeout_ns,
				       bool *busy)
{
	union drm_amdgpu_gem_wait_idle args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = bo->handle;
	args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmCommandWriteRead(bo->dev->fd, DRM_AMDGPU_GEM_WAIT_IDLE,
				&args, sizeof(args));

	if (r == 0) {
		*busy = args.out.status;
		return 0;
	} else {
		fprintf(stderr, "amdgpu: GEM_WAIT_IDLE failed with %i\n", r);
		return r;
	}
}

drm_public int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
					     void *cpu,
					     uint64_t size,
					     amdgpu_bo_handle *buf_handle,
					     uint64_t *offset_in_bo)
{
	struct amdgpu_bo *bo;
	uint32_t i;
	int r = 0;

	if (cpu == NULL || size == 0)
		return -EINVAL;

	/*
	 * Workaround for a buggy application which tries to import previously
	 * exposed CPU pointers. If we find a real world use case we should
	 * improve that by asking the kernel for the right handle.
	 */
	pthread_mutex_lock(&dev->bo_table_mutex);
	for (i = 0; i < dev->bo_handles.max_key; i++) {
		bo = handle_table_lookup(&dev->bo_handles, i);
		if (!bo || !bo->cpu_ptr || size > bo->alloc_size)
			continue;
		if (cpu >= bo->cpu_ptr &&
		    cpu < (void*)((char *)bo->cpu_ptr + bo->alloc_size))
			break;
	}

	if (i < dev->bo_handles.max_key) {
		atomic_inc(&bo->refcount);
		*buf_handle = bo;
		*offset_in_bo = (uintptr_t)cpu - (uintptr_t)bo->cpu_ptr;
	} else {
		*buf_handle = NULL;
		*offset_in_bo = 0;
		r = -ENXIO;
	}
	pthread_mutex_unlock(&dev->bo_table_mutex);

	return r;
}

drm_public int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
					      void *cpu,
					      uint64_t size,
					      amdgpu_bo_handle *buf_handle)
{
	int r;
	struct drm_amdgpu_gem_userptr args;

	args.addr = (uintptr_t)cpu;
	args.flags = AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_REGISTER |
		AMDGPU_GEM_USERPTR_VALIDATE;
	args.size = size;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_USERPTR,
				&args, sizeof(args));
	if (r)
		goto out;

	r = amdgpu_bo_create(dev, size, args.handle, buf_handle);
	if (r) {
		amdgpu_close_kms_handle(dev, args.handle);
		goto out;
	}

	pthread_mutex_lock(&dev->bo_table_mutex);
	r = handle_table_insert(&dev->bo_handles, (*buf_handle)->handle,
				*buf_handle);
	pthread_mutex_unlock(&dev->bo_table_mutex);
	if (r)
		amdgpu_bo_free(*buf_handle);
out:
	return r;
}

drm_public int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
					 uint32_t number_of_buffers,
					 struct drm_amdgpu_bo_list_entry *buffers,
					 uint32_t *result)
{
	union drm_amdgpu_bo_list args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = number_of_buffers;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uint64_t)(uintptr_t)buffers;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	if (!r)
		*result = args.out.list_handle;
	return r;
}

drm_public int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev,
					  uint32_t bo_list)
{
	union drm_amdgpu_bo_list args;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
	args.in.list_handle = bo_list;
	return drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				   &args, sizeof(args));
}

drm_public int amdgpu_bo_list_create(amdgpu_device_handle dev,
				     uint32_t number_of_resources,
				     amdgpu_bo_handle *resources,
				     uint8_t *resource_prios,
				     amdgpu_bo_list_handle *result)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;

	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	*result = malloc(sizeof(struct amdgpu_bo_list));
	if (!*result) {
		free(list);
		return -ENOMEM;
	}

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uint64_t)(uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	if (r) {
		free(*result);
		return r;
	}

	(*result)->dev = dev;
	(*result)->handle = args.out.list_handle;
	return 0;
}

drm_public int amdgpu_bo_list_destroy(amdgpu_bo_list_handle list)
{
	union drm_amdgpu_bo_list args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_DESTROY;
	args.in.list_handle = list->handle;

	r = drmCommandWriteRead(list->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));

	if (!r)
		free(list);

	return r;
}

drm_public int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
				     uint32_t number_of_resources,
				     amdgpu_bo_handle *resources,
				     uint8_t *resource_prios)
{
	struct drm_amdgpu_bo_list_entry *list;
	union drm_amdgpu_bo_list args;
	unsigned i;
	int r;

	if (!number_of_resources)
		return -EINVAL;

	/* overflow check for multiplication */
	if (number_of_resources > UINT32_MAX / sizeof(struct drm_amdgpu_bo_list_entry))
		return -EINVAL;
	list = malloc(number_of_resources * sizeof(struct drm_amdgpu_bo_list_entry));
	if (!list)
		return -ENOMEM;

	args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
	args.in.list_handle = handle->handle;
	args.in.bo_number = number_of_resources;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uintptr_t)list;

	for (i = 0; i < number_of_resources; i++) {
		list[i].bo_handle = resources[i]->handle;
		if (resource_prios)
			list[i].bo_priority = resource_prios[i];
		else
			list[i].bo_priority = 0;
	}

	r = drmCommandWriteRead(handle->dev->fd, DRM_AMDGPU_BO_LIST,
				&args, sizeof(args));
	free(list);
	return r;
}

drm_public int amdgpu_bo_va_op(amdgpu_bo_handle bo,
			       uint64_t offset,
			       uint64_t size,
			       uint64_t addr,
			       uint64_t flags,
			       uint32_t ops)
{
	amdgpu_device_handle dev = bo->dev;

	size = ALIGN(size, getpagesize());

	return amdgpu_bo_va_op_raw(dev, bo, offset, size, addr,
				   AMDGPU_VM_PAGE_READABLE |
				   AMDGPU_VM_PAGE_WRITEABLE |
				   AMDGPU_VM_PAGE_EXECUTABLE, ops);
}

drm_public int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
				   amdgpu_bo_handle bo,
				   uint64_t offset,
				   uint64_t size,
				   uint64_t addr,
				   uint64_t flags,
				   uint32_t ops)
{
	struct drm_amdgpu_gem_va va;
	int r;

	if (ops != AMDGPU_VA_OP_MAP && ops != AMDGPU_VA_OP_UNMAP &&
	    ops != AMDGPU_VA_OP_REPLACE && ops != AMDGPU_VA_OP_CLEAR)
		return -EINVAL;

	memset(&va, 0, sizeof(va));
	va.handle = bo ? bo->handle : 0;
	va.operation = ops;
	va.flags = flags;
	va.va_address = addr;
	va.offset_in_bo = offset;
	va.map_size = size;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_GEM_VA, &va, sizeof(va));

	return r;
}
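
/*
 * Illustrative usage sketch (not part of the original file): a minimal
 * allocate/map/unmap/free round trip driven through the public entry points
 * defined above. The device handle is assumed to come from
 * amdgpu_device_initialize(); the heap, size, and alignment are arbitrary
 * example values. Guarded with "#if 0" so it is never compiled.
 */
#if 0
static int example_bo_roundtrip(amdgpu_device_handle dev)
{
	struct amdgpu_bo_alloc_request req = {};
	amdgpu_bo_handle bo;
	void *cpu;
	int r;

	req.alloc_size = 4096;
	req.phys_alignment = 4096;
	req.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

	/* Create the buffer object. */
	r = amdgpu_bo_alloc(dev, &req, &bo);
	if (r)
		return r;

	/* Map it for CPU access, touch it, then drop the mapping. */
	r = amdgpu_bo_cpu_map(bo, &cpu);
	if (!r) {
		memset(cpu, 0, req.alloc_size);
		amdgpu_bo_cpu_unmap(bo);
	}

	/* Drop the last reference; this also closes the KMS handle. */
	amdgpu_bo_free(bo);
	return r;
}
#endif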