amdgpu.h revision adfa0b0c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * \file amdgpu.h
 *
 * Declare public libdrm_amdgpu API
 *
 * This file defines the API exposed by the libdrm_amdgpu library.
 * Users wanting to use libdrm_amdgpu functionality must include
 * this file.
 *
 */
#ifndef _AMDGPU_H_
#define _AMDGPU_H_

#include <stdint.h>
#include <stdbool.h>

#ifdef __cplusplus
extern "C" {
#endif

struct drm_amdgpu_info_hw_ip;
struct drm_amdgpu_bo_list_entry;

/*--------------------------------------------------------------------------*/
/* --------------------------- Defines ------------------------------------ */
/*--------------------------------------------------------------------------*/

/**
 * Define the maximum number of Command Buffers (IBs) which can be sent to a
 * single hardware IP, to accommodate CE/DE requirements
 *
 * \sa amdgpu_cs_ib_info
*/
#define AMDGPU_CS_MAX_IBS_PER_SUBMIT		4

/**
 * Special timeout value meaning that the timeout is infinite.
 */
#define AMDGPU_TIMEOUT_INFINITE			0xffffffffffffffffull

/**
 * Used in amdgpu_cs_query_fence_status(), meaning that the given timeout
 * is absolute.
 */
#define AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE	(1 << 0)

/*--------------------------------------------------------------------------*/
/* ----------------------------- Enums ------------------------------------ */
/*--------------------------------------------------------------------------*/

/**
 * Enum describing possible handle types
 *
 * \sa amdgpu_bo_import, amdgpu_bo_export
 *
*/
enum amdgpu_bo_handle_type {
	/** GEM flink name (needs DRM authentication, used by DRI2) */
	amdgpu_bo_handle_type_gem_flink_name = 0,

	/** KMS handle which is used by all driver ioctls */
	amdgpu_bo_handle_type_kms = 1,

	/** DMA-buf fd handle */
	amdgpu_bo_handle_type_dma_buf_fd = 2,

	/** Deprecated in favour of and same behaviour as
	 * amdgpu_bo_handle_type_kms, use that instead of this
	 */
	amdgpu_bo_handle_type_kms_noimport = 3,
};

/** Define known types of GPU VM VA ranges */
enum amdgpu_gpu_va_range
{
	/** Allocate from "normal"/general range */
	amdgpu_gpu_va_range_general = 0
};

enum amdgpu_sw_info {
	amdgpu_sw_info_address32_hi = 0,
};

/*--------------------------------------------------------------------------*/
/* -------------------------- Datatypes ----------------------------------- */
/*--------------------------------------------------------------------------*/

/**
 * Define opaque pointer to context associated with fd.
 * This context will be returned as the result of the
 * "initialize" function and should be passed as the first
 * parameter to any API call
 */
#ifndef AMDGPU_DEVICE_TYPEDEF
#define AMDGPU_DEVICE_TYPEDEF
typedef struct amdgpu_device *amdgpu_device_handle;
#endif

/**
 * Define GPU Context type as pointer to opaque structure
 * Example of GPU Context is the "rendering" context associated
 * with OpenGL context (glCreateContext)
 */
typedef struct amdgpu_context *amdgpu_context_handle;

/**
 * Define handle for amdgpu resources: buffer, GDS, etc.
 */
typedef struct amdgpu_bo *amdgpu_bo_handle;

/**
 * Define handle for list of BOs
 */
typedef struct amdgpu_bo_list *amdgpu_bo_list_handle;

/**
 * Define handle to be used to work with VA allocated ranges
 */
typedef struct amdgpu_va *amdgpu_va_handle;

/**
 * Define handle for semaphore
 */
typedef struct amdgpu_semaphore *amdgpu_semaphore_handle;

/*--------------------------------------------------------------------------*/
/* -------------------------- Structures ---------------------------------- */
/*--------------------------------------------------------------------------*/

/**
 * Structure describing memory allocation request
 *
 * \sa amdgpu_bo_alloc()
 *
*/
struct amdgpu_bo_alloc_request {
	/** Allocation request. It must be aligned correctly. */
	uint64_t alloc_size;

	/**
	 * It may be required to have some specific alignment requirements
	 * for physical back-up storage (e.g. for displayable surface).
	 * If 0 there is no special alignment requirement
	 */
	uint64_t phys_alignment;

	/**
	 * UMD should specify where to allocate memory and how it
	 * will be accessed by the CPU.
	 */
	uint32_t preferred_heap;

	/** Additional flags passed on allocation */
	uint64_t flags;
};

/**
 * Special UMD specific information associated with buffer.
 *
 * It may be needed to pass some buffer characteristics as part
 * of buffer sharing. Such information is defined by the UMD and is
 * opaque to libdrm_amdgpu as well as to the kernel driver.
 *
 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info,
 *     amdgpu_bo_import(), amdgpu_bo_export
 *
*/
struct amdgpu_bo_metadata {
	/** Special flag associated with surface */
	uint64_t flags;

	/**
	 * ASIC-specific tiling information (also used by DCE).
	 * The encoding is defined by the AMDGPU_TILING_* definitions.
	 */
	uint64_t tiling_info;

	/** Size of metadata associated with the buffer, in bytes. */
	uint32_t size_metadata;

	/** UMD specific metadata. Opaque for kernel */
	uint32_t umd_metadata[64];
};

/**
 * Structure describing allocated buffer. Client may need
 * to query such information as part of 'sharing' buffers mechanism
 *
 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_query_info(),
 *     amdgpu_bo_import(), amdgpu_bo_export()
*/
struct amdgpu_bo_info {
	/** Allocated memory size */
	uint64_t alloc_size;

	/**
	 * It may be required to have some specific alignment requirements
	 * for physical back-up storage.
	 */
	uint64_t phys_alignment;

	/** Heap where to allocate memory. */
	uint32_t preferred_heap;

	/** Additional allocation flags. */
	uint64_t alloc_flags;

	/** Metadata associated with buffer if any. */
	struct amdgpu_bo_metadata metadata;
};

/**
 * Structure with information about "imported" buffer
 *
 * \sa amdgpu_bo_import()
 *
 */
struct amdgpu_bo_import_result {
	/** Handle of memory/buffer to use */
	amdgpu_bo_handle buf_handle;

	/** Buffer size */
	uint64_t alloc_size;
};

/**
 *
 * Structure to describe GDS partitioning information.
 * \note OA and GWS resources are associated with GDS partition
 *
 * \sa amdgpu_gpu_resource_query_gds_info
 *
*/
struct amdgpu_gds_resource_info {
	uint32_t gds_gfx_partition_size;
	uint32_t compute_partition_size;
	uint32_t gds_total_size;
	uint32_t gws_per_gfx_partition;
	uint32_t gws_per_compute_partition;
	uint32_t oa_per_gfx_partition;
	uint32_t oa_per_compute_partition;
};

/**
 * Structure describing CS fence
 *
 * \sa amdgpu_cs_query_fence_status(), amdgpu_cs_request, amdgpu_cs_submit()
 *
*/
struct amdgpu_cs_fence {

	/** In which context IB was sent to execution */
	amdgpu_context_handle context;

	/** To which HW IP type the fence belongs */
	uint32_t ip_type;

	/** IP instance index if there are several IPs of the same type. */
	uint32_t ip_instance;

	/** Ring index of the HW IP */
	uint32_t ring;

	/** Specify fence for which we need to check submission status.*/
	uint64_t fence;
};

/**
 * Structure describing IB
 *
 * \sa amdgpu_cs_request, amdgpu_cs_submit()
 *
*/
struct amdgpu_cs_ib_info {
	/** Special flags */
	uint64_t flags;

	/** Virtual MC address of the command buffer */
	uint64_t ib_mc_address;

	/**
	 * Size of Command Buffer to be submitted.
	 *   - The size is in units of dwords (4 bytes).
	 *   - Could be 0
	 */
	uint32_t size;
};

/**
 * Structure describing fence information
 *
 * \sa amdgpu_cs_request, amdgpu_cs_query_fence,
 *     amdgpu_cs_submit(), amdgpu_cs_query_fence_status()
*/
struct amdgpu_cs_fence_info {
	/** buffer object for the fence */
	amdgpu_bo_handle handle;

	/** fence offset in the unit of sizeof(uint64_t) */
	uint64_t offset;
};

/**
 * Structure describing submission request
 *
 * \note We could have several IBs as a packet, e.g. the CE, CE, DE case for gfx
 *
 * \sa amdgpu_cs_submit()
*/
struct amdgpu_cs_request {
	/** Specify flags with additional information */
	uint64_t flags;

	/** Specify HW IP block type to which to send the IB. */
	unsigned ip_type;

	/** IP instance index if there are several IPs of the same type. */
	unsigned ip_instance;

	/**
	 * Specify ring index of the IP. We could have several rings
	 * in the same IP. E.g. 0 for SDMA0 and 1 for SDMA1.
	 */
	uint32_t ring;

	/**
	 * List handle with resources used by this request.
	 */
	amdgpu_bo_list_handle resources;

	/**
	 * Number of dependencies this Command submission needs to
	 * wait for before starting execution.
	 */
	uint32_t number_of_dependencies;

	/**
	 * Array of dependencies which need to be met before
	 * execution can start.
	 */
	struct amdgpu_cs_fence *dependencies;

	/** Number of IBs to submit in the field ibs. */
	uint32_t number_of_ibs;

	/**
	 * IBs to submit. Those IBs will be submitted together as a single entity
	 */
	struct amdgpu_cs_ib_info *ibs;

	/**
	 * The returned sequence number for the command submission
	 */
	uint64_t seq_no;

	/**
	 * The fence information
	 */
	struct amdgpu_cs_fence_info fence_info;
};

/**
 * Structure which provides information about GPU VM MC Address space
 * alignment requirements
 *
 * \sa amdgpu_query_buffer_size_alignment
 */
struct amdgpu_buffer_size_alignments {
	/** Size alignment requirement for allocation in
	 * local memory */
	uint64_t size_local;

	/**
	 * Size alignment requirement for allocation in remote memory
	 */
	uint64_t size_remote;
};

/**
 * Structure which provides information about heap
 *
 * \sa amdgpu_query_heap_info()
 *
 */
struct amdgpu_heap_info {
	/** Theoretical max. available memory in the given heap */
	uint64_t heap_size;

	/**
	 * Number of bytes allocated in the heap. This includes all processes
	 * and private allocations in the kernel. It changes when new buffers
	 * are allocated, freed, and moved. It cannot be larger than
	 * heap_size.
	 */
	uint64_t heap_usage;

	/**
	 * Theoretically possible max. size of buffer which
	 * could be allocated in the given heap
	 */
	uint64_t max_allocation;
};

/**
 * Describe GPU h/w info needed for UMD correct initialization
 *
 * \sa amdgpu_query_gpu_info()
*/
struct amdgpu_gpu_info {
	/** Asic id */
	uint32_t asic_id;
	/** Chip revision */
	uint32_t chip_rev;
	/** Chip external revision */
	uint32_t chip_external_rev;
	/** Family ID */
	uint32_t family_id;
	/** Special flags */
	uint64_t ids_flags;
	/** max engine clock*/
	uint64_t max_engine_clk;
	/** max memory clock */
	uint64_t max_memory_clk;
	/** number of shader engines */
	uint32_t num_shader_engines;
	/** number of shader arrays per engine */
	uint32_t num_shader_arrays_per_engine;
	/** Number of available good shader pipes */
	uint32_t avail_quad_shader_pipes;
	/** Max. number of shader pipes (including good and bad pipes) */
	uint32_t max_quad_shader_pipes;
	/** Number of parameter cache entries per shader quad pipe */
	uint32_t cache_entries_per_quad_pipe;
	/** Number of available graphics contexts */
	uint32_t num_hw_gfx_contexts;
	/** Number of render backend pipes */
	uint32_t rb_pipes;
	/** Enabled render backend pipe mask */
	uint32_t enabled_rb_pipes_mask;
	/** Frequency of GPU Counter */
	uint32_t gpu_counter_freq;
	/** CC_RB_BACKEND_DISABLE.BACKEND_DISABLE per SE */
	uint32_t backend_disable[4];
	/** Value of MC_ARB_RAMCFG register*/
	uint32_t mc_arb_ramcfg;
	/** Value of GB_ADDR_CONFIG */
	uint32_t gb_addr_cfg;
	/** Values of the GB_TILE_MODE0..31 registers */
	uint32_t gb_tile_mode[32];
	/** Values of GB_MACROTILE_MODE0..15 registers */
	uint32_t gb_macro_tile_mode[16];
	/** Value of PA_SC_RASTER_CONFIG register per SE */
	uint32_t pa_sc_raster_cfg[4];
	/** Value of PA_SC_RASTER_CONFIG_1 register per SE */
	uint32_t pa_sc_raster_cfg1[4];
	/* CU info */
	uint32_t cu_active_number;
	uint32_t cu_ao_mask;
	uint32_t cu_bitmap[4][4];
	/* video memory type info*/
	uint32_t vram_type;
	/* video memory bit width*/
	uint32_t vram_bit_width;
	/** constant engine ram size*/
	uint32_t ce_ram_size;
	/* vce harvesting instance */
	uint32_t vce_harvest_config;
	/* PCI revision ID */
	uint32_t pci_rev_id;
};


/*--------------------------------------------------------------------------*/
/*------------------------- Functions --------------------------------------*/
/*--------------------------------------------------------------------------*/

/*
 * Initialization / Cleanup
 *
*/

/**
 *
 * \param   fd - \c [in]  File descriptor for AMD GPU device
 *                        received previously as the result of
 *                        e.g. drmOpen() call.
 *                        For legacy fd type, the DRI2/DRI3
 *                        authentication should be done before
 *                        calling this function.
 * \param   major_version - \c [out] Major version of library. It is assumed
 *                                   that adding new functionality will cause
 *                                   increase in major version
 * \param   minor_version - \c [out] Minor version of library
 * \param   device_handle - \c [out] Pointer to opaque context which should
 *                                   be passed as the first parameter on each
 *                                   API call
 *
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 *
 * \sa amdgpu_device_deinitialize()
*/
int amdgpu_device_initialize(int fd,
			     uint32_t *major_version,
			     uint32_t *minor_version,
			     amdgpu_device_handle *device_handle);

/**
 *
 * When access to the library is no longer needed, this function must be
 * called, giving an opportunity to clean up any resources if needed.
 *
 * \param   device_handle - \c [in] Context associated with file
 *                                  descriptor for AMD GPU device
 *                                  received previously as the
 *                                  result e.g. of drmOpen() call.
 *
 * \return  0 on success\n
 *         <0 - Negative POSIX Error code
 *
 * \sa amdgpu_device_initialize()
 *
*/
int amdgpu_device_deinitialize(amdgpu_device_handle device_handle);

/**
 *
 * \param   device_handle - \c [in] Device handle.
 *                                  See #amdgpu_device_initialize()
 *
 * \return Returns the drm fd used for operations on this
 *         device. This is still owned by the library and hence
 *         should not be closed. Guaranteed to be valid until
 *         #amdgpu_device_deinitialize gets called.
 *
*/
int amdgpu_device_get_fd(amdgpu_device_handle device_handle);

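/*
 * Illustrative sketch (not part of the API): typical device initialization
 * and teardown. The render node path below is an assumption; real code
 * should discover the device node, e.g. with drmGetDevices2().
 *
 *	#include <fcntl.h>
 *	#include <amdgpu.h>
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR | O_CLOEXEC);
 *	uint32_t major, minor;
 *	amdgpu_device_handle dev;
 *
 *	if (fd >= 0 && amdgpu_device_initialize(fd, &major, &minor, &dev) == 0) {
 *		// ... use the device ...
 *		amdgpu_device_deinitialize(dev);
 *	}
 */
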
/*
 * Memory Management
 *
*/

/**
 * Allocate memory to be used by UMD for GPU related operations
 *
 * \param   dev - \c [in] Device handle.
 *                        See #amdgpu_device_initialize()
 * \param   alloc_buffer - \c [in] Pointer to the structure describing an
 *                                 allocation request
 * \param   buf_handle - \c [out] Allocated buffer handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_free()
*/
int amdgpu_bo_alloc(amdgpu_device_handle dev,
		    struct amdgpu_bo_alloc_request *alloc_buffer,
		    amdgpu_bo_handle *buf_handle);

/**
 * Associate opaque data with buffer to be queried by another UMD
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   buf_handle - \c [in] Buffer handle
 * \param   info - \c [in] Metadata to associate with the buffer
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
*/
int amdgpu_bo_set_metadata(amdgpu_bo_handle buf_handle,
			   struct amdgpu_bo_metadata *info);

/**
 * Query buffer information, including metadata previously associated with
 * the buffer.
 *
 * \param   dev - \c [in] Device handle.
 *                        See #amdgpu_device_initialize()
 * \param   buf_handle - \c [in] Buffer handle
 * \param   info - \c [out] Structure describing buffer
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc()
*/
int amdgpu_bo_query_info(amdgpu_bo_handle buf_handle,
			 struct amdgpu_bo_info *info);

/**
 * Allow others to get access to buffer
 *
 * \param   dev - \c [in] Device handle.
 *                        See #amdgpu_device_initialize()
 * \param   buf_handle - \c [in] Buffer handle
 * \param   type - \c [in] Type of handle requested
 * \param   shared_handle - \c [out] Special "shared" handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_import()
 *
*/
int amdgpu_bo_export(amdgpu_bo_handle buf_handle,
		     enum amdgpu_bo_handle_type type,
		     uint32_t *shared_handle);

/**
 * Request access to "shared" buffer
 *
 * \param   dev - \c [in] Device handle.
 *                        See #amdgpu_device_initialize()
 * \param   type - \c [in] Type of handle requested
 * \param   shared_handle - \c [in] Shared handle received as the result of
 *                                  an "export" operation
 * \param   output - \c [out] Pointer to structure with information
 *                            about imported buffer
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \note  Buffer must be "imported" only using new "fd" (different from
 *        one used by "exporter").
 *
 * \sa amdgpu_bo_export()
 *
*/
int amdgpu_bo_import(amdgpu_device_handle dev,
		     enum amdgpu_bo_handle_type type,
		     uint32_t shared_handle,
		     struct amdgpu_bo_import_result *output);

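/*
 * Illustrative sketch (assumptions: "bo" was created on one device and
 * "other_dev" is a second, independently initialized device handle):
 * sharing a buffer through a DMA-buf file descriptor. Error handling is
 * omitted for brevity.
 *
 *	uint32_t shared_fd;
 *	struct amdgpu_bo_import_result result;
 *
 *	amdgpu_bo_export(bo, amdgpu_bo_handle_type_dma_buf_fd, &shared_fd);
 *	amdgpu_bo_import(other_dev, amdgpu_bo_handle_type_dma_buf_fd,
 *			 shared_fd, &result);
 *	// result.buf_handle / result.alloc_size are now valid on other_dev
 */
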
/**
 * Request GPU access to user allocated memory e.g. via "malloc"
 *
 * \param dev - [in] Device handle. See #amdgpu_device_initialize()
 * \param cpu - [in] CPU address of user allocated memory which we
 *                   want to map to GPU address space (make GPU accessible)
 *                   (This address must be correctly aligned).
 * \param size - [in] Size of allocation (must be correctly aligned)
 * \param buf_handle - [out] Buffer handle for the userptr memory
 *                           resource on submission and be used in other operations.
 *
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \note
 * This call doesn't guarantee that such memory will be persistently
 * "locked" / made non-pageable. The purpose of this call is to provide an
 * opportunity for the GPU to get access to this resource during submission.
 *
 * The maximum amount of memory which could be mapped in this call depends
 * on whether overcommit is disabled or not. If overcommit is disabled, then
 * the max. amount of memory to be pinned will be limited by the remaining
 * "free" size in the total amount of memory which could be locked
 * simultaneously ("GART" size).
 *
 * Supported (theoretical) max. size of mapping is restricted only by
 * "GART" size.
 *
 * It is the responsibility of the caller to correctly specify access rights
 * on VA assignment.
*/
int amdgpu_create_bo_from_user_mem(amdgpu_device_handle dev,
				   void *cpu, uint64_t size,
				   amdgpu_bo_handle *buf_handle);

/**
 * Validate if the user memory comes from a BO
 *
 * \param dev - [in] Device handle. See #amdgpu_device_initialize()
 * \param cpu - [in] CPU address of user allocated memory which we
 *                   want to map to GPU address space (make GPU accessible)
 *                   (This address must be correctly aligned).
 * \param size - [in] Size of allocation (must be correctly aligned)
 * \param buf_handle - [out] Buffer handle for the userptr memory;
 *                           if the user memory is not from a BO, the
 *                           buf_handle will be NULL.
 * \param offset_in_bo - [out] offset in this BO for this user memory
 *
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_find_bo_by_cpu_mapping(amdgpu_device_handle dev,
				  void *cpu,
				  uint64_t size,
				  amdgpu_bo_handle *buf_handle,
				  uint64_t *offset_in_bo);

/**
 * Free previously allocated memory
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   buf_handle - \c [in] Buffer handle to free
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \note In the case of memory shared between different applications all
 *       resources will be "physically" freed only when all such applications
 *       have been terminated
 * \note It is the UMD's responsibility to 'free' the buffer only when there
 *       is no more GPU access
 *
 * \sa amdgpu_bo_set_metadata(), amdgpu_bo_alloc()
 *
*/
int amdgpu_bo_free(amdgpu_bo_handle buf_handle);

/**
 * Increase the reference count of a buffer object
 *
 * \param   bo - \c [in] Buffer object handle to increase the reference count
 *
 * \sa amdgpu_bo_alloc(), amdgpu_bo_free()
 *
*/
void amdgpu_bo_inc_ref(amdgpu_bo_handle bo);

/**
 * Request CPU access to GPU accessible memory
 *
 * \param   buf_handle - \c [in] Buffer handle
 * \param   cpu - \c [out] CPU address to be used for access
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_cpu_unmap()
 *
*/
int amdgpu_bo_cpu_map(amdgpu_bo_handle buf_handle, void **cpu);

/**
 * Release CPU access to GPU memory
 *
 * \param   buf_handle - \c [in] Buffer handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_cpu_map()
 *
*/
int amdgpu_bo_cpu_unmap(amdgpu_bo_handle buf_handle);

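/*
 * Illustrative sketch (the heap and alignment choices are plausible, not
 * required): allocating a 4 KiB GTT buffer, filling it from the CPU, and
 * freeing it. AMDGPU_GEM_DOMAIN_GTT comes from amdgpu_drm.h.
 *
 *	#include <string.h>
 *	#include <amdgpu.h>
 *	#include <amdgpu_drm.h>
 *
 *	struct amdgpu_bo_alloc_request req = {
 *		.alloc_size = 4096,
 *		.phys_alignment = 4096,
 *		.preferred_heap = AMDGPU_GEM_DOMAIN_GTT,
 *	};
 *	amdgpu_bo_handle bo;
 *	void *cpu;
 *
 *	if (amdgpu_bo_alloc(dev, &req, &bo) == 0) {
 *		if (amdgpu_bo_cpu_map(bo, &cpu) == 0) {
 *			memset(cpu, 0, 4096);
 *			amdgpu_bo_cpu_unmap(bo);
 *		}
 *		amdgpu_bo_free(bo);
 *	}
 */
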
/**
 * Wait until a buffer is not used by the device.
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   buf_handle - \c [in] Buffer handle.
 * \param   timeout_ns - Timeout in nanoseconds.
 * \param   buffer_busy - 0 if buffer is idle, all GPU access was completed
 *                        and no GPU access is scheduled.
 *                        1 if GPU access is in flight or scheduled
 *
 * \return   0 - on success
 *          <0 - Negative POSIX Error code
 */
int amdgpu_bo_wait_for_idle(amdgpu_bo_handle buf_handle,
			    uint64_t timeout_ns,
			    bool *buffer_busy);

/**
 * Creates a BO list handle for command submission.
 *
 * \param   dev - \c [in] Device handle.
 *                        See #amdgpu_device_initialize()
 * \param   number_of_buffers - \c [in] Number of BOs in the list
 * \param   buffers - \c [in] List of BO handles
 * \param   result - \c [out] Created BO list handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_list_destroy_raw(), amdgpu_cs_submit_raw2()
*/
int amdgpu_bo_list_create_raw(amdgpu_device_handle dev,
			      uint32_t number_of_buffers,
			      struct drm_amdgpu_bo_list_entry *buffers,
			      uint32_t *result);

/**
 * Destroys a BO list handle.
 *
 * \param   bo_list - \c [in] BO list handle.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_list_create_raw(), amdgpu_cs_submit_raw2()
*/
int amdgpu_bo_list_destroy_raw(amdgpu_device_handle dev, uint32_t bo_list);

/**
 * Creates a BO list handle for command submission.
 *
 * \param   dev - \c [in] Device handle.
 *                        See #amdgpu_device_initialize()
 * \param   number_of_resources - \c [in] Number of BOs in the list
 * \param   resources - \c [in] List of BO handles
 * \param   resource_prios - \c [in] Optional priority for each handle
 * \param   result - \c [out] Created BO list handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_list_destroy()
*/
int amdgpu_bo_list_create(amdgpu_device_handle dev,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios,
			  amdgpu_bo_list_handle *result);

/**
 * Destroys a BO list handle.
 *
 * \param   handle - \c [in] BO list handle.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_list_create()
*/
int amdgpu_bo_list_destroy(amdgpu_bo_list_handle handle);

/**
 * Update resources for existing BO list
 *
 * \param   handle - \c [in] BO list handle
 * \param   number_of_resources - \c [in] Number of BOs in the list
 * \param   resources - \c [in] List of BO handles
 * \param   resource_prios - \c [in] Optional priority for each handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_list_create()
*/
int amdgpu_bo_list_update(amdgpu_bo_list_handle handle,
			  uint32_t number_of_resources,
			  amdgpu_bo_handle *resources,
			  uint8_t *resource_prios);

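/*
 * Illustrative sketch: building a BO list that a subsequent submission can
 * reference ("ib_bo" and "data_bo" are assumed to be existing handles).
 * Passing NULL priorities keeps the default priority for every BO.
 *
 *	amdgpu_bo_handle bos[] = { ib_bo, data_bo };
 *	amdgpu_bo_list_handle bo_list;
 *
 *	if (amdgpu_bo_list_create(dev, 2, bos, NULL, &bo_list) == 0) {
 *		// ... reference bo_list in amdgpu_cs_request::resources ...
 *		amdgpu_bo_list_destroy(bo_list);
 *	}
 */
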
/*
 * GPU Execution context
 *
*/

/**
 * Create GPU execution Context
 *
 * For the purpose of GPU Scheduler and GPU Robustness extensions it is
 * necessary to have information/identify rendering/compute contexts.
 * It also may be needed to associate some specific requirements with such
 * contexts. The kernel driver will guarantee that submissions from the same
 * context will always be executed in order (first come, first served).
 *
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   priority - \c [in] Context creation flags. See AMDGPU_CTX_PRIORITY_*
 * \param   context - \c [out] GPU Context handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_cs_ctx_free()
 *
*/
int amdgpu_cs_ctx_create2(amdgpu_device_handle dev,
			  uint32_t priority,
			  amdgpu_context_handle *context);

/**
 * Create GPU execution Context
 *
 * Refer to amdgpu_cs_ctx_create2 for full documentation. This call
 * is missing the priority parameter.
 *
 * \sa amdgpu_cs_ctx_create2()
 *
*/
int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
			 amdgpu_context_handle *context);

/**
 *
 * Destroy GPU execution context when not needed any more
 *
 * \param   context - \c [in] GPU Context handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_cs_ctx_create()
 *
*/
int amdgpu_cs_ctx_free(amdgpu_context_handle context);

/**
 * Override the submission priority for the given context using a master fd.
 *
 * \param   dev - \c [in] device handle
 * \param   context - \c [in] context handle for context id
 * \param   master_fd - \c [in] The master fd to authorize the override.
 * \param   priority - \c [in] The priority to assign to the context.
 *
 * \return 0 on success or a negative POSIX error code on failure.
 */
int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
				    amdgpu_context_handle context,
				    int master_fd,
				    unsigned priority);

/**
 * Query reset state for the specific GPU Context
 *
 * \param   context - \c [in] GPU Context handle
 * \param   state - \c [out] One of AMDGPU_CTX_*_RESET
 * \param   hangs - \c [out] Number of hangs caused by the context.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_cs_ctx_create()
 *
*/
int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
				uint32_t *state, uint32_t *hangs);

/**
 * Query reset state for the specific GPU Context.
 *
 * \param   context - \c [in] GPU Context handle
 * \param   flags - \c [out] A combination of AMDGPU_CTX_QUERY2_FLAGS_*
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_cs_ctx_create()
 *
*/
int amdgpu_cs_query_reset_state2(amdgpu_context_handle context,
				 uint64_t *flags);

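/*
 * Illustrative sketch: creating a context, checking whether it was affected
 * by a GPU reset, and destroying it. AMDGPU_CTX_NO_RESET comes from
 * amdgpu_drm.h.
 *
 *	amdgpu_context_handle ctx;
 *	uint32_t state, hangs;
 *
 *	if (amdgpu_cs_ctx_create(dev, &ctx) == 0) {
 *		// ... submit work on ctx ...
 *		if (amdgpu_cs_query_reset_state(ctx, &state, &hangs) == 0 &&
 *		    state != AMDGPU_CTX_NO_RESET) {
 *			// the context was lost; recreate its resources
 *		}
 *		amdgpu_cs_ctx_free(ctx);
 *	}
 */
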
/*
 * Command Buffers Management
 *
*/

/**
 * Send request to submit command buffers to hardware.
 *
 * The kernel driver could use the GPU Scheduler to decide when to physically
 * send this request to the hardware. Accordingly, this request could be put
 * in a queue and sent for execution later. The only guarantee is that requests
 * from the same GPU context to the same ip:ip_instance:ring will be executed in
 * order.
 *
 * The caller can specify the user fence buffer/location with the fence_info in the
 * cs_request. The sequence number is returned via the 'seq_no' parameter
 * in the ibs_request structure.
 *
 *
 * \param   dev - \c [in] Device handle.
 *                        See #amdgpu_device_initialize()
 * \param   context - \c [in] GPU Context
 * \param   flags - \c [in] Global submission flags
 * \param   ibs_request - \c [in/out] Pointer to submission requests.
 *                                    We could submit to the several
 *                                    engines/rings simultaneously as
 *                                    'atomic' operation
 * \param   number_of_requests - \c [in] Number of submission requests
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \note It is required to pass a correct resource list with buffer handles
 *       which will be accessible by command buffers from the submission.
 *       This will allow the kernel driver to correctly implement "paging".
 *       Failure to do so will have unpredictable results.
 *
 * \sa amdgpu_command_buffer_alloc(), amdgpu_command_buffer_free(),
 *     amdgpu_cs_query_fence_status()
 *
*/
int amdgpu_cs_submit(amdgpu_context_handle context,
		     uint64_t flags,
		     struct amdgpu_cs_request *ibs_request,
		     uint32_t number_of_requests);

/**
 * Query status of Command Buffer Submission
 *
 * \param   fence - \c [in] Structure describing fence to query
 * \param   timeout_ns - \c [in] Timeout value to wait
 * \param   flags - \c [in] Flags for the query
 * \param   expired - \c [out] If fence expired or not.\n
 *                             0 - if fence is not expired\n
 *                            !0 - otherwise
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \note If the UMD only wants to check the operation status and return
 *       immediately, then a timeout value of 0 must be passed. In this case
 *       success will be returned if the submission was completed, or a
 *       timeout error code otherwise.
 *
 * \sa amdgpu_cs_submit()
*/
int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
				 uint64_t timeout_ns,
				 uint64_t flags,
				 uint32_t *expired);

/**
 * Wait for multiple fences
 *
 * \param   fences - \c [in] The fence array to wait
 * \param   fence_count - \c [in] The fence count
 * \param   wait_all - \c [in] If true, wait for all fences to be signaled,
 *                             otherwise, wait for at least one fence
 * \param   timeout_ns - \c [in] The timeout to wait, in nanoseconds
 * \param   status - \c [out] '1' for signaled, '0' for timeout
 * \param   first - \c [out] the index of the first signaled fence from @fences
 *
 * \return   0 on success
 *          <0 - Negative POSIX Error code
 *
 * \note Currently it supports only one amdgpu_device. All fences come from
 *       the same amdgpu_device with the same fd.
*/
int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
			  uint32_t fence_count,
			  bool wait_all,
			  uint64_t timeout_ns,
			  uint32_t *status, uint32_t *first);

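/*
 * Illustrative sketch: submitting a single IB on the GFX ring and waiting
 * for it. Assumptions: "ib_va" is the GPU VA of a mapped, CPU-filled command
 * buffer of "ib_dw" dwords, and "bo_list" contains every BO the IB touches.
 * AMDGPU_HW_IP_GFX comes from amdgpu_drm.h.
 *
 *	struct amdgpu_cs_ib_info ib = {
 *		.ib_mc_address = ib_va,
 *		.size = ib_dw,
 *	};
 *	struct amdgpu_cs_request req = {
 *		.ip_type = AMDGPU_HW_IP_GFX,
 *		.resources = bo_list,
 *		.number_of_ibs = 1,
 *		.ibs = &ib,
 *	};
 *	struct amdgpu_cs_fence fence = {0};
 *	uint32_t expired = 0;
 *
 *	if (amdgpu_cs_submit(ctx, 0, &req, 1) == 0) {
 *		fence.context = ctx;
 *		fence.ip_type = AMDGPU_HW_IP_GFX;
 *		fence.fence = req.seq_no;
 *		amdgpu_cs_query_fence_status(&fence, AMDGPU_TIMEOUT_INFINITE,
 *					     0, &expired);
 *	}
 */
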
/*
 * Query / Info API
 *
*/

/**
 * Query allocation size alignments
 *
 * UMD should query information about GPU VM MC size alignment requirements
 * to be able to correctly choose the required allocation size and implement
 * internal optimization if needed.
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   info - \c [out] Pointer to structure to get size alignment
 *                          requirements
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_query_buffer_size_alignment(amdgpu_device_handle dev,
				       struct amdgpu_buffer_size_alignments
						*info);

/**
 * Query firmware versions
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   fw_type - \c [in] AMDGPU_INFO_FW_*
 * \param   ip_instance - \c [in] Index of the IP block of the same type.
 * \param   index - \c [in] Index of the engine. (for SDMA and MEC)
 * \param   version - \c [out] Pointer to the "version" return value
 * \param   feature - \c [out] Pointer to the "feature" return value
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_query_firmware_version(amdgpu_device_handle dev, unsigned fw_type,
				  unsigned ip_instance, unsigned index,
				  uint32_t *version, uint32_t *feature);

/**
 * Query the number of HW IP instances of a certain type.
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
 * \param   count - \c [out] Pointer to structure to get information
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
*/
int amdgpu_query_hw_ip_count(amdgpu_device_handle dev, unsigned type,
			     uint32_t *count);

/**
 * Query engine information
 *
 * This query allows UMD to query information about the different engines and
 * their capabilities.
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
 * \param   ip_instance - \c [in] Index of the IP block of the same type.
 * \param   info - \c [out] Pointer to structure to get information
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
*/
int amdgpu_query_hw_ip_info(amdgpu_device_handle dev, unsigned type,
			    unsigned ip_instance,
			    struct drm_amdgpu_info_hw_ip *info);

/**
 * Query heap information
 *
 * This query allows UMD to query potentially available memory resources and
 * adjust their logic if necessary.
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   heap - \c [in] Heap type
 * \param   flags - \c [in] Flags with additional information
 * \param   info - \c [out] Pointer to structure to get needed information
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_query_heap_info(amdgpu_device_handle dev, uint32_t heap,
			   uint32_t flags, struct amdgpu_heap_info *info);

/**
 * Get the CRTC ID from the mode object ID
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   id - \c [in] Mode object ID
 * \param   result - \c [out] Pointer to the CRTC ID
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_query_crtc_from_id(amdgpu_device_handle dev, unsigned id,
			      int32_t *result);

/**
 * Query GPU H/w Info
 *
 * Query hardware specific information
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   info - \c [out] Pointer to structure to get needed information
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_query_gpu_info(amdgpu_device_handle dev,
			  struct amdgpu_gpu_info *info);

/**
 * Query hardware or driver information.
 *
 * The return size is query-specific and depends on the "info_id" parameter.
 * No more than "size" bytes is returned.
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   info_id - \c [in] AMDGPU_INFO_*
 * \param   size - \c [in] Size of the returned value.
 * \param   value - \c [out] Pointer to the return value.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX error code
 *
*/
int amdgpu_query_info(amdgpu_device_handle dev, unsigned info_id,
		      unsigned size, void *value);

/**
 * Query software information.
 *
 * The returned value is query-specific and depends on the "info" parameter.
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   info - \c [in] amdgpu_sw_info_*
 * \param   value - \c [out] Pointer to the return value.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX error code
 *
*/
int amdgpu_query_sw_info(amdgpu_device_handle dev, enum amdgpu_sw_info info,
			 void *value);

/**
 * Query information about GDS
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   gds_info - \c [out] Pointer to structure to get GDS information
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_query_gds_info(amdgpu_device_handle dev,
			  struct amdgpu_gds_resource_info *gds_info);

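/*
 * Illustrative sketch: querying chip identification and VRAM heap usage.
 * AMDGPU_GEM_DOMAIN_VRAM comes from amdgpu_drm.h.
 *
 *	#include <stdio.h>
 *
 *	struct amdgpu_gpu_info gpu_info;
 *	struct amdgpu_heap_info vram;
 *
 *	if (amdgpu_query_gpu_info(dev, &gpu_info) == 0)
 *		printf("asic id 0x%x, family 0x%x\n",
 *		       gpu_info.asic_id, gpu_info.family_id);
 *
 *	if (amdgpu_query_heap_info(dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &vram) == 0)
 *		printf("VRAM %llu/%llu bytes used\n",
 *		       (unsigned long long)vram.heap_usage,
 *		       (unsigned long long)vram.heap_size);
 */
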
/**
 * Query information about sensor.
 *
 * The return size is query-specific and depends on the "sensor_type"
 * parameter. No more than "size" bytes is returned.
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   sensor_type - \c [in] AMDGPU_INFO_SENSOR_*
 * \param   size - \c [in] Size of the returned value.
 * \param   value - \c [out] Pointer to the return value.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_query_sensor_info(amdgpu_device_handle dev, unsigned sensor_type,
			     unsigned size, void *value);

/**
 * Query information about video capabilities
 *
 * The return size is sizeof(struct drm_amdgpu_info_video_caps).
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   cap_type - \c [in] AMDGPU_INFO_VIDEO_CAPS_DECODE(ENCODE)
 * \param   size - \c [in] Size of the returned value.
 * \param   value - \c [out] Pointer to the return value.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_query_video_caps_info(amdgpu_device_handle dev, unsigned cap_type,
				 unsigned size, void *value);

/**
 * Read a set of consecutive memory-mapped registers.
 * Not all registers are allowed to be read by userspace.
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   dword_offset - \c [in] Register offset in dwords
 * \param   count - \c [in] The number of registers to read starting
 *                          from the offset
 * \param   instance - \c [in] GRBM_GFX_INDEX selector. It may have other
 *                             uses. Set it to 0xffffffff if unsure.
 * \param   flags - \c [in] Flags with additional information.
 * \param   values - \c [out] The pointer to return values.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX error code
 *
*/
int amdgpu_read_mm_registers(amdgpu_device_handle dev, unsigned dword_offset,
			     unsigned count, uint32_t instance, uint32_t flags,
			     uint32_t *values);

/**
 * Flag to request VA address range in the 32bit address space
*/
#define AMDGPU_VA_RANGE_32_BIT		0x1
#define AMDGPU_VA_RANGE_HIGH		0x2
#define AMDGPU_VA_RANGE_REPLAYABLE	0x4

/**
 * Allocate virtual address range
 *
 * \param dev - [in] Device handle. See #amdgpu_device_initialize()
 * \param va_range_type - \c [in] Type of MC va range from which to allocate
 * \param size - \c [in] Size of range. Size must be correctly aligned.
 *               It is the client's responsibility to correctly align the size
 *               based on the future usage of the allocated range.
 * \param va_base_alignment - \c [in] Overwrite base address alignment
 *               requirement for GPU VM MC virtual
 *               address assignment. Must be a multiple of the size alignments
 *               received as 'amdgpu_buffer_size_alignments'.
 *               If 0 use the default one.
 * \param va_base_required - \c [in] Specified required va base address.
 *               If 0 then the library chooses an available one.
 *               If a non-zero value is passed and that value is already
 *               "in use", the corresponding error status will be returned.
 * \param va_base_allocated - \c [out] On return: Allocated VA base to be used
 *               by client.
 * \param va_range_handle - \c [out] On return: Handle assigned to allocation
 * \param flags - \c [in] flags for special VA range
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
 * \note It is the client's responsibility to correctly handle VA assignments
 *       and usage. Neither the kernel driver nor libdrm_amdgpu is able to
 *       prevent or detect wrong VA assignment.
 *
 *       It is the client's responsibility to correctly handle multi-GPU cases
 *       and to pass the corresponding arrays of all device handles where the
 *       corresponding VA will be used.
 *
*/
int amdgpu_va_range_alloc(amdgpu_device_handle dev,
			  enum amdgpu_gpu_va_range va_range_type,
			  uint64_t size,
			  uint64_t va_base_alignment,
			  uint64_t va_base_required,
			  uint64_t *va_base_allocated,
			  amdgpu_va_handle *va_range_handle,
			  uint64_t flags);

/**
 * Free previously allocated virtual address range
 *
 *
 * \param   va_range_handle - \c [in] Handle assigned to VA allocation
 *
 * \return   0 on success\n
 *          >0 - AMD specific error code\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_va_range_free(amdgpu_va_handle va_range_handle);

/**
 * Query virtual address range
 *
 * UMD can query the GPU VM range supported by each device
 * to initialize its own VAM accordingly.
 *
 * \param   dev - [in] Device handle. See #amdgpu_device_initialize()
 * \param   type - \c [in] Type of virtual address range
 * \param   start - \c [out] Start of the virtual address range
 * \param   end - \c [out] End of the virtual address range
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/

int amdgpu_va_range_query(amdgpu_device_handle dev,
			  enum amdgpu_gpu_va_range type,
			  uint64_t *start,
			  uint64_t *end);

/**
 * VA mapping/unmapping for the buffer object
 *
 * \param   bo - \c [in] BO handle
 * \param   offset - \c [in] Start offset to map
 * \param   size - \c [in] Size to map
 * \param   addr - \c [in] Start virtual address.
 * \param   flags - \c [in] Supported flags for mapping/unmapping
 * \param   ops - \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/

int amdgpu_bo_va_op(amdgpu_bo_handle bo,
		    uint64_t offset,
		    uint64_t size,
		    uint64_t addr,
		    uint64_t flags,
		    uint32_t ops);

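/*
 * Illustrative sketch: giving an existing buffer "bo" of size "bo_size" a
 * GPU virtual address, then tearing the mapping down. AMDGPU_VA_OP_* come
 * from amdgpu_drm.h; passing flags = 0 relies on amdgpu_bo_va_op's default
 * access flags.
 *
 *	uint64_t va;
 *	amdgpu_va_handle va_handle;
 *
 *	if (amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
 *				  bo_size, 4096, 0, &va, &va_handle, 0) == 0) {
 *		amdgpu_bo_va_op(bo, 0, bo_size, va, 0, AMDGPU_VA_OP_MAP);
 *		// ... the GPU can now access the BO at "va" ...
 *		amdgpu_bo_va_op(bo, 0, bo_size, va, 0, AMDGPU_VA_OP_UNMAP);
 *		amdgpu_va_range_free(va_handle);
 *	}
 */
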
/**
 * VA mapping/unmapping for a buffer object or PRT region.
 *
 * This is not a simple drop-in extension for amdgpu_bo_va_op; instead, all
 * parameters are treated "raw", i.e. size is not automatically aligned, and
 * all flags must be specified explicitly.
 *
 * \param   dev - \c [in] device handle
 * \param   bo - \c [in] BO handle (may be NULL)
 * \param   offset - \c [in] Start offset to map
 * \param   size - \c [in] Size to map
 * \param   addr - \c [in] Start virtual address.
 * \param   flags - \c [in] Supported flags for mapping/unmapping
 * \param   ops - \c [in] AMDGPU_VA_OP_MAP or AMDGPU_VA_OP_UNMAP
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/

int amdgpu_bo_va_op_raw(amdgpu_device_handle dev,
			amdgpu_bo_handle bo,
			uint64_t offset,
			uint64_t size,
			uint64_t addr,
			uint64_t flags,
			uint32_t ops);

/**
 * create semaphore
 *
 * \param   sem - \c [out] semaphore handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem);

/**
 * signal semaphore
 *
 * \param   context - \c [in] GPU Context
 * \param   ip_type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
 * \param   ip_instance - \c [in] Index of the IP block of the same type
 * \param   ring - \c [in] Specify ring index of the IP
 * \param   sem - \c [in] semaphore handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
			       uint32_t ip_type,
			       uint32_t ip_instance,
			       uint32_t ring,
			       amdgpu_semaphore_handle sem);

/**
 * wait semaphore
 *
 * \param   context - \c [in] GPU Context
 * \param   ip_type - \c [in] Hardware IP block type = AMDGPU_HW_IP_*
 * \param   ip_instance - \c [in] Index of the IP block of the same type
 * \param   ring - \c [in] Specify ring index of the IP
 * \param   sem - \c [in] semaphore handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
			     uint32_t ip_type,
			     uint32_t ip_instance,
			     uint32_t ring,
			     amdgpu_semaphore_handle sem);

/**
 * destroy semaphore
 *
 * \param   sem - \c [in] semaphore handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem);

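/*
 * Illustrative sketch: using a semaphore to order work between two rings of
 * the same device ("ctx" is an existing context). The signal attaches to the
 * most recent submission on the signaling ring; the wait gates the next
 * submission on the waiting ring. AMDGPU_HW_IP_DMA comes from amdgpu_drm.h.
 *
 *	amdgpu_semaphore_handle sem;
 *
 *	if (amdgpu_cs_create_semaphore(&sem) == 0) {
 *		// ... submit producer work on GFX ring 0, then:
 *		amdgpu_cs_signal_semaphore(ctx, AMDGPU_HW_IP_GFX, 0, 0, sem);
 *		amdgpu_cs_wait_semaphore(ctx, AMDGPU_HW_IP_DMA, 0, 0, sem);
 *		// ... submit consumer work on SDMA ring 0 ...
 *		amdgpu_cs_destroy_semaphore(sem);
 *	}
 */
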
/**
 * Get the ASIC marketing name
 *
 * \param   dev - \c [in] Device handle. See #amdgpu_device_initialize()
 *
 * \return the constant string of the marketing name
 *         NULL means the ASIC is not found
*/
const char *amdgpu_get_marketing_name(amdgpu_device_handle dev);

/**
 * Create kernel sync object
 *
 * \param   dev - \c [in] device handle
 * \param   flags - \c [in] flags that affect creation
 * \param   syncobj - \c [out] sync object handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_cs_create_syncobj2(amdgpu_device_handle dev,
			      uint32_t flags,
			      uint32_t *syncobj);

/**
 * Create kernel sync object
 *
 * \param   dev - \c [in] device handle
 * \param   syncobj - \c [out] sync object handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_cs_create_syncobj(amdgpu_device_handle dev,
			     uint32_t *syncobj);
/**
 * Destroy kernel sync object
 *
 * \param   dev - \c [in] device handle
 * \param   syncobj - \c [in] sync object handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev,
			      uint32_t syncobj);

/**
 * Reset kernel sync objects to unsignalled state.
 *
 * \param   dev - \c [in] device handle
 * \param   syncobjs - \c [in] array of sync object handles
 * \param   syncobj_count - \c [in] number of handles in syncobjs
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
			    const uint32_t *syncobjs, uint32_t syncobj_count);

/**
 * Signal kernel sync objects.
 *
 * \param   dev - \c [in] device handle
 * \param   syncobjs - \c [in] array of sync object handles
 * \param   syncobj_count - \c [in] number of handles in syncobjs
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
			     const uint32_t *syncobjs, uint32_t syncobj_count);

/**
 * Signal kernel timeline sync objects.
 *
 * \param   dev - \c [in] device handle
 * \param   syncobjs - \c [in] array of sync object handles
 * \param   points - \c [in] array of timeline points
 * \param   syncobj_count - \c [in] number of handles in syncobjs
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_cs_syncobj_timeline_signal(amdgpu_device_handle dev,
				      const uint32_t *syncobjs,
				      uint64_t *points,
				      uint32_t syncobj_count);

/**
 * Wait for one or all sync objects to signal.
 *
 * \param   dev - \c [in] self-explanatory
 * \param   handles - \c [in] array of sync object handles
 * \param   num_handles - \c [in] self-explanatory
 * \param   timeout_nsec - \c [in] self-explanatory
 * \param   flags - \c [in] a bitmask of DRM_SYNCOBJ_WAIT_FLAGS_*
 * \param   first_signaled - \c [out] self-explanatory
 *
 * \return   0 on success\n
 *          -ETIME - Timeout
 *          <0 - Negative POSIX Error code
 *
 */
int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
			   uint32_t *handles, unsigned num_handles,
			   int64_t timeout_nsec, unsigned flags,
			   uint32_t *first_signaled);

/**
 * Wait for one or all sync objects on their points to signal.
 *
 * \param   dev - \c [in] self-explanatory
 * \param   handles - \c [in] array of sync object handles
 * \param   points - \c [in] array of sync points to wait
 * \param   num_handles - \c [in] self-explanatory
 * \param   timeout_nsec - \c [in] self-explanatory
 * \param   flags - \c [in] a bitmask of DRM_SYNCOBJ_WAIT_FLAGS_*
 * \param   first_signaled - \c [out] self-explanatory
 *
 * \return   0 on success\n
 *          -ETIME - Timeout
 *          <0 - Negative POSIX Error code
 *
 */
int amdgpu_cs_syncobj_timeline_wait(amdgpu_device_handle dev,
				    uint32_t *handles, uint64_t *points,
				    unsigned num_handles,
				    int64_t timeout_nsec, unsigned flags,
				    uint32_t *first_signaled);

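/*
 * Illustrative sketch: waiting on a binary syncobj until its fence signals,
 * with an effectively infinite timeout. DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT
 * (from the DRM uapi headers) also makes the wait tolerate a fence that has
 * not been attached yet.
 *
 *	uint32_t syncobj, first;
 *
 *	if (amdgpu_cs_create_syncobj2(dev, 0, &syncobj) == 0) {
 *		// ... hand syncobj to a producer which attaches a fence ...
 *		amdgpu_cs_syncobj_wait(dev, &syncobj, 1, INT64_MAX,
 *				       DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
 *				       &first);
 *		amdgpu_cs_destroy_syncobj(dev, syncobj);
 *	}
 */
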
/**
 * Query sync objects payloads.
 *
 * \param   dev - \c [in] self-explanatory
 * \param   handles - \c [in] array of sync object handles
 * \param   points - \c [out] array of sync points returned, which represents
 *                            the syncobj payload.
 * \param   num_handles - \c [in] self-explanatory
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 */
int amdgpu_cs_syncobj_query(amdgpu_device_handle dev,
			    uint32_t *handles, uint64_t *points,
			    unsigned num_handles);
/**
 * Query sync objects last signaled or submitted point.
 *
 * \param   dev - \c [in] self-explanatory
 * \param   handles - \c [in] array of sync object handles
 * \param   points - \c [out] array of sync points returned, which represents
 *                            the syncobj payload.
 * \param   num_handles - \c [in] self-explanatory
 * \param   flags - \c [in] a bitmask of DRM_SYNCOBJ_QUERY_FLAGS_*
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 */
int amdgpu_cs_syncobj_query2(amdgpu_device_handle dev,
			     uint32_t *handles, uint64_t *points,
			     unsigned num_handles, uint32_t flags);

/**
 * Export kernel sync object to shareable fd.
 *
 * \param   dev - \c [in] device handle
 * \param   syncobj - \c [in] sync object handle
 * \param   shared_fd - \c [out] shared file descriptor.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
			     uint32_t syncobj,
			     int *shared_fd);
/**
 * Import kernel sync object from shareable fd.
 *
 * \param   dev - \c [in] device handle
 * \param   shared_fd - \c [in] shared file descriptor.
 * \param   syncobj - \c [out] sync object handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
*/
int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
			     int shared_fd,
			     uint32_t *syncobj);

/**
 * Export kernel sync object to a sync_file.
 *
 * \param   dev - \c [in] device handle
 * \param   syncobj - \c [in] sync object handle
 * \param   sync_file_fd - \c [out] sync_file file descriptor.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 */
int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
				       uint32_t syncobj,
				       int *sync_file_fd);

/**
 * Import kernel sync object from a sync_file.
 *
 * \param   dev - \c [in] device handle
 * \param   syncobj - \c [in] sync object handle
 * \param   sync_file_fd - \c [in] sync_file file descriptor.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 */
int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
				       uint32_t syncobj,
				       int sync_file_fd);
/**
 * Export kernel timeline sync object to a sync_file.
 *
 * \param   dev - \c [in] device handle
 * \param   syncobj - \c [in] sync object handle
 * \param   point - \c [in] timeline point
 * \param   flags - \c [in] flags
 * \param   sync_file_fd - \c [out] sync_file file descriptor.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 */
int amdgpu_cs_syncobj_export_sync_file2(amdgpu_device_handle dev,
					uint32_t syncobj,
					uint64_t point,
					uint32_t flags,
					int *sync_file_fd);

/**
 * Import kernel timeline sync object from a sync_file.
 *
 * \param   dev - \c [in] device handle
 * \param   syncobj - \c [in] sync object handle
 * \param   point - \c [in] timeline point
 * \param   sync_file_fd - \c [in] sync_file file descriptor.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 */
int amdgpu_cs_syncobj_import_sync_file2(amdgpu_device_handle dev,
					uint32_t syncobj,
					uint64_t point,
					int sync_file_fd);

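/*
 * Illustrative sketch ("syncobj" and "other_syncobj" are assumed to be
 * existing handles): converting a syncobj into a sync_file fd that can be
 * polled or handed to another process or API, then importing it elsewhere.
 *
 *	#include <unistd.h>
 *
 *	int sync_file_fd = -1;
 *
 *	if (amdgpu_cs_syncobj_export_sync_file(dev, syncobj,
 *					       &sync_file_fd) == 0) {
 *		// sync_file_fd can be poll()ed or passed to e.g. EGL/Vulkan
 *		amdgpu_cs_syncobj_import_sync_file(dev, other_syncobj,
 *						   sync_file_fd);
 *		close(sync_file_fd);
 *	}
 */
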
/**
 * Transfer a payload between syncobjs.
 *
 * \param   dev - \c [in] device handle
 * \param   dst_handle - \c [in] sync object handle
 * \param   dst_point - \c [in] timeline point, 0 means dst is a binary syncobj
 * \param   src_handle - \c [in] sync object handle
 * \param   src_point - \c [in] timeline point, 0 means src is a binary syncobj
 * \param   flags - \c [in] flags
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 */
int amdgpu_cs_syncobj_transfer(amdgpu_device_handle dev,
			       uint32_t dst_handle,
			       uint64_t dst_point,
			       uint32_t src_handle,
			       uint64_t src_point,
			       uint32_t flags);

/**
 * Export an amdgpu fence as a handle (syncobj or fd).
 *
 * \param   dev - \c [in] device handle
 * \param   fence - \c [in] amdgpu fence to export
 * \param   what - \c [in] AMDGPU_FENCE_TO_HANDLE_GET_{SYNCOBJ, FD}
 * \param   out_handle - \c [out] returned handle
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 */
int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev,
			      struct amdgpu_cs_fence *fence,
			      uint32_t what,
			      uint32_t *out_handle);

/**
 * Submit raw command submission to kernel
 *
 * \param   dev - \c [in] device handle
 * \param   context - \c [in] context handle for context id
 * \param   bo_list_handle - \c [in] request bo list handle (0 for none)
 * \param   num_chunks - \c [in] number of CS chunks to submit
 * \param   chunks - \c [in] array of CS chunks
 * \param   seq_no - \c [out] output sequence number for submission.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 */
struct drm_amdgpu_cs_chunk;
struct drm_amdgpu_cs_chunk_dep;
struct drm_amdgpu_cs_chunk_data;

int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
			 amdgpu_context_handle context,
			 amdgpu_bo_list_handle bo_list_handle,
			 int num_chunks,
			 struct drm_amdgpu_cs_chunk *chunks,
			 uint64_t *seq_no);

/**
 * Submit raw command submission to the kernel with a raw BO list handle.
 *
 * \param   dev - \c [in] device handle
 * \param   context - \c [in] context handle for context id
 * \param   bo_list_handle - \c [in] raw bo list handle (0 for none)
 * \param   num_chunks - \c [in] number of CS chunks to submit
 * \param   chunks - \c [in] array of CS chunks
 * \param   seq_no - \c [out] output sequence number for submission.
 *
 * \return   0 on success\n
 *          <0 - Negative POSIX Error code
 *
 * \sa amdgpu_bo_list_create_raw(), amdgpu_bo_list_destroy_raw()
 */
int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
			  amdgpu_context_handle context,
			  uint32_t bo_list_handle,
			  int num_chunks,
			  struct drm_amdgpu_cs_chunk *chunks,
			  uint64_t *seq_no);

void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
				  struct drm_amdgpu_cs_chunk_dep *dep);
void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
					struct drm_amdgpu_cs_chunk_data *data);

/**
 * Reserve VMID
 * \param   dev - \c [in] device handle
 * \param   flags - \c [in] TBD
 *
 * \return 0 on success otherwise POSIX Error code
*/
int amdgpu_vm_reserve_vmid(amdgpu_device_handle dev, uint32_t flags);

/**
 * Free reserved VMID
 * \param   dev - \c [in] device handle
 * \param   flags - \c [in] TBD
 *
 * \return 0 on success otherwise POSIX Error code
*/
int amdgpu_vm_unreserve_vmid(amdgpu_device_handle dev, uint32_t flags);

#ifdef __cplusplus
}
#endif
#endif /* #ifdef _AMDGPU_H_ */