security_tests.c revision 41687f09
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
2141687f09Smrg * 2241687f09Smrg */ 2341687f09Smrg 2441687f09Smrg#include "CUnit/Basic.h" 2541687f09Smrg 2641687f09Smrg#include "amdgpu_test.h" 2741687f09Smrg#include "amdgpu_drm.h" 2841687f09Smrg#include "amdgpu_internal.h" 2941687f09Smrg 3041687f09Smrg#include <string.h> 3141687f09Smrg#include <unistd.h> 3241687f09Smrg#ifdef __FreeBSD__ 3341687f09Smrg#include <sys/endian.h> 3441687f09Smrg#else 3541687f09Smrg#include <endian.h> 3641687f09Smrg#endif 3741687f09Smrg#include <strings.h> 3841687f09Smrg#include <xf86drm.h> 3941687f09Smrg 4041687f09Smrgstatic amdgpu_device_handle device_handle; 4141687f09Smrgstatic uint32_t major_version; 4241687f09Smrgstatic uint32_t minor_version; 4341687f09Smrg 4441687f09Smrgstatic struct drm_amdgpu_info_hw_ip sdma_info; 4541687f09Smrg 4641687f09Smrg#ifndef ARRAY_SIZE 4741687f09Smrg#define ARRAY_SIZE(_Arr) (sizeof(_Arr)/sizeof((_Arr)[0])) 4841687f09Smrg#endif 4941687f09Smrg 5041687f09Smrg 5141687f09Smrg/* --------------------- Secure bounce test ------------------------ * 5241687f09Smrg * 5341687f09Smrg * The secure bounce test tests that we can evict a TMZ buffer, 5441687f09Smrg * and page it back in, via a bounce buffer, as it encryption/decryption 5541687f09Smrg * depends on its physical address, and have the same data, i.e. data 5641687f09Smrg * integrity is preserved. 5741687f09Smrg * 5841687f09Smrg * The steps are as follows (from Christian K.): 5941687f09Smrg * 6041687f09Smrg * Buffer A which is TMZ protected and filled by the CPU with a 6141687f09Smrg * certain pattern. That the GPU is reading only random nonsense from 6241687f09Smrg * that pattern is irrelevant for the test. 6341687f09Smrg * 6441687f09Smrg * This buffer A is then secure copied into buffer B which is also 6541687f09Smrg * TMZ protected. 6641687f09Smrg * 6741687f09Smrg * Buffer B is moved around, from VRAM to GTT, GTT to SYSTEM, 6841687f09Smrg * etc. 6941687f09Smrg * 7041687f09Smrg * Then, we use another secure copy of buffer B back to buffer A. 
7141687f09Smrg * 7241687f09Smrg * And lastly we check with the CPU the pattern. 7341687f09Smrg * 7441687f09Smrg * Assuming that we don't have memory contention and buffer A stayed 7541687f09Smrg * at the same place, we should still see the same pattern when read 7641687f09Smrg * by the CPU. 7741687f09Smrg * 7841687f09Smrg * If we don't see the same pattern then something in the buffer 7941687f09Smrg * migration code is not working as expected. 8041687f09Smrg */ 8141687f09Smrg 8241687f09Smrg#define SECURE_BOUNCE_TEST_STR "secure bounce" 8341687f09Smrg#define SECURE_BOUNCE_FAILED_STR SECURE_BOUNCE_TEST_STR " failed" 8441687f09Smrg 8541687f09Smrg#define PRINT_ERROR(_Res) fprintf(stderr, "%s:%d: %s (%d)\n", \ 8641687f09Smrg __func__, __LINE__, strerror(-(_Res)), _Res) 8741687f09Smrg 8841687f09Smrg#define PACKET_LCOPY_SIZE 7 8941687f09Smrg#define PACKET_NOP_SIZE 12 9041687f09Smrg 9141687f09Smrgstruct sec_amdgpu_bo { 9241687f09Smrg struct amdgpu_bo *bo; 9341687f09Smrg struct amdgpu_va *va; 9441687f09Smrg}; 9541687f09Smrg 9641687f09Smrgstruct command_ctx { 9741687f09Smrg struct amdgpu_device *dev; 9841687f09Smrg struct amdgpu_cs_ib_info cs_ibinfo; 9941687f09Smrg struct amdgpu_cs_request cs_req; 10041687f09Smrg struct amdgpu_context *context; 10141687f09Smrg int ring_id; 10241687f09Smrg}; 10341687f09Smrg 10441687f09Smrg/** 10541687f09Smrg * amdgpu_bo_alloc_map -- Allocate and map a buffer object (BO) 10641687f09Smrg * @dev: The AMDGPU device this BO belongs to. 10741687f09Smrg * @size: The size of the BO. 10841687f09Smrg * @alignment: Alignment of the BO. 10941687f09Smrg * @gem_domain: One of AMDGPU_GEM_DOMAIN_xyz. 11041687f09Smrg * @alloc_flags: One of AMDGPU_GEM_CREATE_xyz. 11141687f09Smrg * @sbo: the result 11241687f09Smrg * 11341687f09Smrg * Allocate a buffer object (BO) with the desired attributes 11441687f09Smrg * as specified by the argument list and write out the result 11541687f09Smrg * into @sbo. 
11641687f09Smrg * 11741687f09Smrg * Return 0 on success and @sbo->bo and @sbo->va are set, 11841687f09Smrg * or -errno on error. 11941687f09Smrg */ 12041687f09Smrgstatic int amdgpu_bo_alloc_map(struct amdgpu_device *dev, 12141687f09Smrg unsigned size, 12241687f09Smrg unsigned alignment, 12341687f09Smrg unsigned gem_domain, 12441687f09Smrg uint64_t alloc_flags, 12541687f09Smrg struct sec_amdgpu_bo *sbo) 12641687f09Smrg{ 12741687f09Smrg void *cpu; 12841687f09Smrg uint64_t mc_addr; 12941687f09Smrg 13041687f09Smrg return amdgpu_bo_alloc_and_map_raw(dev, 13141687f09Smrg size, 13241687f09Smrg alignment, 13341687f09Smrg gem_domain, 13441687f09Smrg alloc_flags, 13541687f09Smrg 0, 13641687f09Smrg &sbo->bo, 13741687f09Smrg &cpu, &mc_addr, 13841687f09Smrg &sbo->va); 13941687f09Smrg} 14041687f09Smrg 14141687f09Smrgstatic void amdgpu_bo_unmap_free(struct sec_amdgpu_bo *sbo, 14241687f09Smrg const uint64_t size) 14341687f09Smrg{ 14441687f09Smrg (void) amdgpu_bo_unmap_and_free(sbo->bo, 14541687f09Smrg sbo->va, 14641687f09Smrg sbo->va->address, 14741687f09Smrg size); 14841687f09Smrg sbo->bo = NULL; 14941687f09Smrg sbo->va = NULL; 15041687f09Smrg} 15141687f09Smrg 15241687f09Smrgstatic void amdgpu_sdma_lcopy(uint32_t *packet, 15341687f09Smrg const uint64_t dst, 15441687f09Smrg const uint64_t src, 15541687f09Smrg const uint32_t size, 15641687f09Smrg const int secure) 15741687f09Smrg{ 15841687f09Smrg /* Set the packet to Linear copy with TMZ set. 
15941687f09Smrg */ 16041687f09Smrg packet[0] = htole32(secure << 18 | 1); 16141687f09Smrg packet[1] = htole32(size-1); 16241687f09Smrg packet[2] = htole32(0); 16341687f09Smrg packet[3] = htole32((uint32_t)(src & 0xFFFFFFFFU)); 16441687f09Smrg packet[4] = htole32((uint32_t)(src >> 32)); 16541687f09Smrg packet[5] = htole32((uint32_t)(dst & 0xFFFFFFFFU)); 16641687f09Smrg packet[6] = htole32((uint32_t)(dst >> 32)); 16741687f09Smrg} 16841687f09Smrg 16941687f09Smrgstatic void amdgpu_sdma_nop(uint32_t *packet, uint32_t nop_count) 17041687f09Smrg{ 17141687f09Smrg /* A packet of the desired number of NOPs. 17241687f09Smrg */ 17341687f09Smrg packet[0] = htole32(nop_count << 16); 17441687f09Smrg for ( ; nop_count > 0; nop_count--) 17541687f09Smrg packet[nop_count-1] = 0; 17641687f09Smrg} 17741687f09Smrg 17841687f09Smrg/** 17941687f09Smrg * amdgpu_bo_lcopy -- linear copy with TMZ set, using sDMA 18041687f09Smrg * @dev: AMDGPU device to which both buffer objects belong to 18141687f09Smrg * @dst: destination buffer object 18241687f09Smrg * @src: source buffer object 18341687f09Smrg * @size: size of memory to move, in bytes. 18441687f09Smrg * @secure: Set to 1 to perform secure copy, 0 for clear 18541687f09Smrg * 18641687f09Smrg * Issues and waits for completion of a Linear Copy with TMZ 18741687f09Smrg * set, to the sDMA engine. @size should be a multiple of 18841687f09Smrg * at least 16 bytes. 
18941687f09Smrg */ 19041687f09Smrgstatic void amdgpu_bo_lcopy(struct command_ctx *ctx, 19141687f09Smrg struct sec_amdgpu_bo *dst, 19241687f09Smrg struct sec_amdgpu_bo *src, 19341687f09Smrg const uint32_t size, 19441687f09Smrg int secure) 19541687f09Smrg{ 19641687f09Smrg struct amdgpu_bo *bos[] = { dst->bo, src->bo }; 19741687f09Smrg uint32_t packet[PACKET_LCOPY_SIZE]; 19841687f09Smrg 19941687f09Smrg amdgpu_sdma_lcopy(packet, 20041687f09Smrg dst->va->address, 20141687f09Smrg src->va->address, 20241687f09Smrg size, secure); 20341687f09Smrg amdgpu_test_exec_cs_helper_raw(ctx->dev, ctx->context, 20441687f09Smrg AMDGPU_HW_IP_DMA, ctx->ring_id, 20541687f09Smrg ARRAY_SIZE(packet), packet, 20641687f09Smrg ARRAY_SIZE(bos), bos, 20741687f09Smrg &ctx->cs_ibinfo, &ctx->cs_req, 20841687f09Smrg secure == 1); 20941687f09Smrg} 21041687f09Smrg 21141687f09Smrg/** 21241687f09Smrg * amdgpu_bo_move -- Evoke a move of the buffer object (BO) 21341687f09Smrg * @dev: device to which this buffer object belongs to 21441687f09Smrg * @bo: the buffer object to be moved 21541687f09Smrg * @whereto: one of AMDGPU_GEM_DOMAIN_xyz 21641687f09Smrg * @secure: set to 1 to submit secure IBs 21741687f09Smrg * 21841687f09Smrg * Evokes a move of the buffer object @bo to the GEM domain 21941687f09Smrg * descibed by @whereto. 22041687f09Smrg * 22141687f09Smrg * Returns 0 on sucess; -errno on error. 22241687f09Smrg */ 22341687f09Smrgstatic int amdgpu_bo_move(struct command_ctx *ctx, 22441687f09Smrg struct amdgpu_bo *bo, 22541687f09Smrg uint64_t whereto, 22641687f09Smrg int secure) 22741687f09Smrg{ 22841687f09Smrg struct amdgpu_bo *bos[] = { bo }; 22941687f09Smrg struct drm_amdgpu_gem_op gop = { 23041687f09Smrg .handle = bo->handle, 23141687f09Smrg .op = AMDGPU_GEM_OP_SET_PLACEMENT, 23241687f09Smrg .value = whereto, 23341687f09Smrg }; 23441687f09Smrg uint32_t packet[PACKET_NOP_SIZE]; 23541687f09Smrg int res; 23641687f09Smrg 23741687f09Smrg /* Change the buffer's placement. 
23841687f09Smrg */ 23941687f09Smrg res = drmIoctl(ctx->dev->fd, DRM_IOCTL_AMDGPU_GEM_OP, &gop); 24041687f09Smrg if (res) 24141687f09Smrg return -errno; 24241687f09Smrg 24341687f09Smrg /* Now issue a NOP to actually evoke the MM to move 24441687f09Smrg * it to the desired location. 24541687f09Smrg */ 24641687f09Smrg amdgpu_sdma_nop(packet, PACKET_NOP_SIZE); 24741687f09Smrg amdgpu_test_exec_cs_helper_raw(ctx->dev, ctx->context, 24841687f09Smrg AMDGPU_HW_IP_DMA, ctx->ring_id, 24941687f09Smrg ARRAY_SIZE(packet), packet, 25041687f09Smrg ARRAY_SIZE(bos), bos, 25141687f09Smrg &ctx->cs_ibinfo, &ctx->cs_req, 25241687f09Smrg secure == 1); 25341687f09Smrg return 0; 25441687f09Smrg} 25541687f09Smrg 25641687f09Smrg/* Safe, O Sec! 25741687f09Smrg */ 25841687f09Smrgstatic const uint8_t secure_pattern[] = { 0x5A, 0xFE, 0x05, 0xEC }; 25941687f09Smrg 26041687f09Smrg#define SECURE_BUFFER_SIZE (4 * 1024 * sizeof(secure_pattern)) 26141687f09Smrg 26241687f09Smrgstatic void amdgpu_secure_bounce(void) 26341687f09Smrg{ 26441687f09Smrg struct sec_amdgpu_bo alice, bob; 26541687f09Smrg struct command_ctx sb_ctx; 26641687f09Smrg long page_size; 26741687f09Smrg uint8_t *pp; 26841687f09Smrg int res; 26941687f09Smrg 27041687f09Smrg page_size = sysconf(_SC_PAGESIZE); 27141687f09Smrg 27241687f09Smrg memset(&sb_ctx, 0, sizeof(sb_ctx)); 27341687f09Smrg sb_ctx.dev = device_handle; 27441687f09Smrg res = amdgpu_cs_ctx_create(sb_ctx.dev, &sb_ctx.context); 27541687f09Smrg if (res) { 27641687f09Smrg PRINT_ERROR(res); 27741687f09Smrg CU_FAIL(SECURE_BOUNCE_FAILED_STR); 27841687f09Smrg return; 27941687f09Smrg } 28041687f09Smrg 28141687f09Smrg /* Use the first present ring. 
28241687f09Smrg */ 28341687f09Smrg res = ffs(sdma_info.available_rings) - 1; 28441687f09Smrg if (res == -1) { 28541687f09Smrg PRINT_ERROR(-ENOENT); 28641687f09Smrg CU_FAIL(SECURE_BOUNCE_FAILED_STR); 28741687f09Smrg goto Out_free_ctx; 28841687f09Smrg } 28941687f09Smrg sb_ctx.ring_id = res; 29041687f09Smrg 29141687f09Smrg /* Allocate a buffer named Alice in VRAM. 29241687f09Smrg */ 29341687f09Smrg res = amdgpu_bo_alloc_map(device_handle, 29441687f09Smrg SECURE_BUFFER_SIZE, 29541687f09Smrg page_size, 29641687f09Smrg AMDGPU_GEM_DOMAIN_VRAM, 29741687f09Smrg AMDGPU_GEM_CREATE_ENCRYPTED, 29841687f09Smrg &alice); 29941687f09Smrg if (res) { 30041687f09Smrg PRINT_ERROR(res); 30141687f09Smrg CU_FAIL(SECURE_BOUNCE_FAILED_STR); 30241687f09Smrg return; 30341687f09Smrg } 30441687f09Smrg 30541687f09Smrg /* Fill Alice with a pattern. 30641687f09Smrg */ 30741687f09Smrg for (pp = alice.bo->cpu_ptr; 30841687f09Smrg pp < (typeof(pp)) alice.bo->cpu_ptr + SECURE_BUFFER_SIZE; 30941687f09Smrg pp += sizeof(secure_pattern)) 31041687f09Smrg memcpy(pp, secure_pattern, sizeof(secure_pattern)); 31141687f09Smrg 31241687f09Smrg /* Allocate a buffer named Bob in VRAM. 31341687f09Smrg */ 31441687f09Smrg res = amdgpu_bo_alloc_map(device_handle, 31541687f09Smrg SECURE_BUFFER_SIZE, 31641687f09Smrg page_size, 31741687f09Smrg AMDGPU_GEM_DOMAIN_VRAM, 31841687f09Smrg 0 /* AMDGPU_GEM_CREATE_ENCRYPTED */, 31941687f09Smrg &bob); 32041687f09Smrg if (res) { 32141687f09Smrg PRINT_ERROR(res); 32241687f09Smrg CU_FAIL(SECURE_BOUNCE_FAILED_STR); 32341687f09Smrg goto Out_free_Alice; 32441687f09Smrg } 32541687f09Smrg 32641687f09Smrg /* sDMA clear copy from Alice to Bob. 32741687f09Smrg */ 32841687f09Smrg amdgpu_bo_lcopy(&sb_ctx, &bob, &alice, SECURE_BUFFER_SIZE, 0); 32941687f09Smrg 33041687f09Smrg /* Move Bob to the GTT domain. 
33141687f09Smrg */ 33241687f09Smrg res = amdgpu_bo_move(&sb_ctx, bob.bo, AMDGPU_GEM_DOMAIN_GTT, 0); 33341687f09Smrg if (res) { 33441687f09Smrg PRINT_ERROR(res); 33541687f09Smrg CU_FAIL(SECURE_BOUNCE_FAILED_STR); 33641687f09Smrg goto Out_free_all; 33741687f09Smrg } 33841687f09Smrg 33941687f09Smrg /* sDMA clear copy from Bob to Alice. 34041687f09Smrg */ 34141687f09Smrg amdgpu_bo_lcopy(&sb_ctx, &alice, &bob, SECURE_BUFFER_SIZE, 0); 34241687f09Smrg 34341687f09Smrg /* Verify the contents of Alice. 34441687f09Smrg */ 34541687f09Smrg for (pp = alice.bo->cpu_ptr; 34641687f09Smrg pp < (typeof(pp)) alice.bo->cpu_ptr + SECURE_BUFFER_SIZE; 34741687f09Smrg pp += sizeof(secure_pattern)) { 34841687f09Smrg res = memcmp(pp, secure_pattern, sizeof(secure_pattern)); 34941687f09Smrg if (res) { 35041687f09Smrg fprintf(stderr, SECURE_BOUNCE_FAILED_STR); 35141687f09Smrg CU_FAIL(SECURE_BOUNCE_FAILED_STR); 35241687f09Smrg break; 35341687f09Smrg } 35441687f09Smrg } 35541687f09Smrg 35641687f09SmrgOut_free_all: 35741687f09Smrg amdgpu_bo_unmap_free(&bob, SECURE_BUFFER_SIZE); 35841687f09SmrgOut_free_Alice: 35941687f09Smrg amdgpu_bo_unmap_free(&alice, SECURE_BUFFER_SIZE); 36041687f09SmrgOut_free_ctx: 36141687f09Smrg res = amdgpu_cs_ctx_free(sb_ctx.context); 36241687f09Smrg CU_ASSERT_EQUAL(res, 0); 36341687f09Smrg} 36441687f09Smrg 36541687f09Smrg/* ----------------------------------------------------------------- */ 36641687f09Smrg 36741687f09Smrgstatic void amdgpu_security_alloc_buf_test(void) 36841687f09Smrg{ 36941687f09Smrg amdgpu_bo_handle bo; 37041687f09Smrg amdgpu_va_handle va_handle; 37141687f09Smrg uint64_t bo_mc; 37241687f09Smrg int r; 37341687f09Smrg 37441687f09Smrg /* Test secure buffer allocation in VRAM */ 37541687f09Smrg bo = gpu_mem_alloc(device_handle, 4096, 4096, 37641687f09Smrg AMDGPU_GEM_DOMAIN_VRAM, 37741687f09Smrg AMDGPU_GEM_CREATE_ENCRYPTED, 37841687f09Smrg &bo_mc, &va_handle); 37941687f09Smrg 38041687f09Smrg r = gpu_mem_free(bo, va_handle, bo_mc, 4096); 38141687f09Smrg 
CU_ASSERT_EQUAL(r, 0); 38241687f09Smrg 38341687f09Smrg /* Test secure buffer allocation in system memory */ 38441687f09Smrg bo = gpu_mem_alloc(device_handle, 4096, 4096, 38541687f09Smrg AMDGPU_GEM_DOMAIN_GTT, 38641687f09Smrg AMDGPU_GEM_CREATE_ENCRYPTED, 38741687f09Smrg &bo_mc, &va_handle); 38841687f09Smrg 38941687f09Smrg r = gpu_mem_free(bo, va_handle, bo_mc, 4096); 39041687f09Smrg CU_ASSERT_EQUAL(r, 0); 39141687f09Smrg 39241687f09Smrg /* Test secure buffer allocation in invisible VRAM */ 39341687f09Smrg bo = gpu_mem_alloc(device_handle, 4096, 4096, 39441687f09Smrg AMDGPU_GEM_DOMAIN_GTT, 39541687f09Smrg AMDGPU_GEM_CREATE_ENCRYPTED | 39641687f09Smrg AMDGPU_GEM_CREATE_NO_CPU_ACCESS, 39741687f09Smrg &bo_mc, &va_handle); 39841687f09Smrg 39941687f09Smrg r = gpu_mem_free(bo, va_handle, bo_mc, 4096); 40041687f09Smrg CU_ASSERT_EQUAL(r, 0); 40141687f09Smrg} 40241687f09Smrg 40341687f09Smrgstatic void amdgpu_security_gfx_submission_test(void) 40441687f09Smrg{ 40541687f09Smrg amdgpu_command_submission_write_linear_helper_with_secure(device_handle, 40641687f09Smrg AMDGPU_HW_IP_GFX, 40741687f09Smrg true); 40841687f09Smrg} 40941687f09Smrg 41041687f09Smrgstatic void amdgpu_security_sdma_submission_test(void) 41141687f09Smrg{ 41241687f09Smrg amdgpu_command_submission_write_linear_helper_with_secure(device_handle, 41341687f09Smrg AMDGPU_HW_IP_DMA, 41441687f09Smrg true); 41541687f09Smrg} 41641687f09Smrg 41741687f09Smrg/* ----------------------------------------------------------------- */ 41841687f09Smrg 41941687f09SmrgCU_TestInfo security_tests[] = { 42041687f09Smrg { "allocate secure buffer test", amdgpu_security_alloc_buf_test }, 42141687f09Smrg { "graphics secure command submission", amdgpu_security_gfx_submission_test }, 42241687f09Smrg { "sDMA secure command submission", amdgpu_security_sdma_submission_test }, 42341687f09Smrg { SECURE_BOUNCE_TEST_STR, amdgpu_secure_bounce }, 42441687f09Smrg CU_TEST_INFO_NULL, 42541687f09Smrg}; 42641687f09Smrg 42741687f09SmrgCU_BOOL 
suite_security_tests_enable(void) 42841687f09Smrg{ 42941687f09Smrg CU_BOOL enable = CU_TRUE; 43041687f09Smrg 43141687f09Smrg if (amdgpu_device_initialize(drm_amdgpu[0], &major_version, 43241687f09Smrg &minor_version, &device_handle)) 43341687f09Smrg return CU_FALSE; 43441687f09Smrg 43541687f09Smrg if (device_handle->info.family_id != AMDGPU_FAMILY_RV) { 43641687f09Smrg printf("\n\nDon't support TMZ (trust memory zone), security suite disabled\n"); 43741687f09Smrg enable = CU_FALSE; 43841687f09Smrg } 43941687f09Smrg 44041687f09Smrg if ((major_version < 3) || 44141687f09Smrg ((major_version == 3) && (minor_version < 37))) { 44241687f09Smrg printf("\n\nDon't support TMZ (trust memory zone), kernel DRM version (%d.%d)\n", 44341687f09Smrg major_version, minor_version); 44441687f09Smrg printf("is older, security suite disabled\n"); 44541687f09Smrg enable = CU_FALSE; 44641687f09Smrg } 44741687f09Smrg 44841687f09Smrg if (amdgpu_device_deinitialize(device_handle)) 44941687f09Smrg return CU_FALSE; 45041687f09Smrg 45141687f09Smrg return enable; 45241687f09Smrg} 45341687f09Smrg 45441687f09Smrgint suite_security_tests_init(void) 45541687f09Smrg{ 45641687f09Smrg int res; 45741687f09Smrg 45841687f09Smrg res = amdgpu_device_initialize(drm_amdgpu[0], &major_version, 45941687f09Smrg &minor_version, &device_handle); 46041687f09Smrg if (res) { 46141687f09Smrg PRINT_ERROR(res); 46241687f09Smrg return CUE_SINIT_FAILED; 46341687f09Smrg } 46441687f09Smrg 46541687f09Smrg res = amdgpu_query_hw_ip_info(device_handle, 46641687f09Smrg AMDGPU_HW_IP_DMA, 46741687f09Smrg 0, &sdma_info); 46841687f09Smrg if (res) { 46941687f09Smrg PRINT_ERROR(res); 47041687f09Smrg return CUE_SINIT_FAILED; 47141687f09Smrg } 47241687f09Smrg 47341687f09Smrg return CUE_SUCCESS; 47441687f09Smrg} 47541687f09Smrg 47641687f09Smrgint suite_security_tests_clean(void) 47741687f09Smrg{ 47841687f09Smrg int res; 47941687f09Smrg 48041687f09Smrg res = amdgpu_device_deinitialize(device_handle); 48141687f09Smrg if (res) 
48241687f09Smrg return CUE_SCLEAN_FAILED; 48341687f09Smrg 48441687f09Smrg return CUE_SUCCESS; 48541687f09Smrg} 486