1/* 2 * Copyright 2011 VMWare, Inc. 3 * All Rights Reserved. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the 7 * "Software"), to deal in the Software without restriction, including 8 * without limitation the rights to use, copy, modify, merge, publish, 9 * distribute, sub license, and/or sell copies of the Software, and to 10 * permit persons to whom the Software is furnished to do so, subject to 11 * the following conditions: 12 * 13 * The above copyright notice and this permission notice (including the 14 * next paragraph) shall be included in all copies or substantial portions 15 * of the Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 20 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 24 * 25 * Author: Jakob Bornecrantz <wallbraker@gmail.com> 26 * Author: Thomas Hellstrom <thellstrom@vmware.com> 27 */ 28 29#ifdef HAVE_CONFIG_H 30#include "config.h" 31#endif 32 33#include <stdint.h> 34#include <errno.h> 35#include <sys/mman.h> 36#include "vmwgfx_drm.h" 37#include <xf86drm.h> 38#include "vmwgfx_drmi.h" 39 40#define uint32 uint32_t 41#define int32 int32_t 42#define uint16 uint16_t 43#define uint8 uint8_t 44 45#include "svga3d_reg.h" 46#include "vmwgfx_driver.h" 47#include "common_compat.h" 48 49static int 50vmwgfx_fence_wait(int drm_fd, uint32_t handle, Bool unref) 51{ 52 struct drm_vmw_fence_wait_arg farg; 53 memset(&farg, 0, sizeof(farg)); 54 55 farg.handle = handle; 56 farg.flags = DRM_VMW_FENCE_FLAG_EXEC; 57 farg.timeout_us = 10*1000000; 58 farg.cookie_valid = 0; 59 60 if (unref) 61 farg.wait_options |= DRM_VMW_WAIT_OPTION_UNREF; 62 63 return drmCommandWriteRead(drm_fd, DRM_VMW_FENCE_WAIT, &farg, 64 sizeof(farg)); 65} 66 67static void 68vmwgfx_fence_unref(int drm_fd, uint32_t handle) 69{ 70 struct drm_vmw_fence_arg farg; 71 memset(&farg, 0, sizeof(farg)); 72 73 farg.handle = handle; 74 75 (void) drmCommandWrite(drm_fd, DRM_VMW_FENCE_UNREF, &farg, 76 sizeof(farg)); 77} 78 79 80int 81vmwgfx_present_readback(int drm_fd, uint32_t fb_id, RegionPtr region) 82{ 83 BoxPtr clips = REGION_RECTS(region); 84 unsigned int num_clips = REGION_NUM_RECTS(region); 85 unsigned int alloc_clips = min(num_clips, DRM_MODE_FB_DIRTY_MAX_CLIPS); 86 struct drm_vmw_fence_rep rep; 87 struct drm_vmw_present_readback_arg arg; 88 int ret; 89 unsigned i; 90 struct drm_vmw_rect *rects, *r; 91 92 if (num_clips == 0) 93 return 0; 94 95 rects = malloc(alloc_clips * sizeof(*rects)); 96 if (!rects) { 97 LogMessage(X_ERROR, "Failed to alloc cliprects for " 98 "present readback.\n"); 99 return -1; 100 } 101 102 while (num_clips > 0) { 103 unsigned int cur_clips = min(num_clips, DRM_MODE_FB_DIRTY_MAX_CLIPS); 104 105 memset(&arg, 0, sizeof(arg)); 106 memset(&rep, 0, sizeof(rep)); 107 memset(rects, 0, alloc_clips * sizeof(*rects)); 108 109 arg.fb_id = fb_id; 110 arg.num_clips = cur_clips; 111 arg.clips_ptr = (unsigned long) rects; 112 113 /* Only request a fence on the last clip batch */ 114 arg.fence_rep = (cur_clips == num_clips) ? (unsigned long) &rep : 0UL; 115 rep.error = -EFAULT; 116 117 for (i = 0, r = rects; i < cur_clips; ++i, ++r, ++clips) { 118 r->x = clips->x1; 119 r->y = clips->y1; 120 r->w = clips->x2 - clips->x1; 121 r->h = clips->y2 - clips->y1; 122 } 123 124 ret = drmCommandWrite(drm_fd, DRM_VMW_PRESENT_READBACK, &arg, 125 sizeof(arg)); 126 if (ret) 127 LogMessage(X_ERROR, "Present readback error %s.\n", strerror(-ret)); 128 129 /* Sync if we have a fence to avoid racing with Xorg SW rendering. */ 130 if (rep.error == 0) { 131 ret = vmwgfx_fence_wait(drm_fd, rep.handle, TRUE); 132 if (ret) { 133 LogMessage(X_ERROR, "Present readback fence wait error %s.\n", 134 strerror(-ret)); 135 /* vmwgfx_fence_wait() takes care of this if ret == 0. */ 136 vmwgfx_fence_unref(drm_fd, rep.handle); 137 } 138 } 139 140 num_clips -= cur_clips; 141 } 142 143 free(rects); 144 145 return 0; 146} 147 148 149int 150vmwgfx_present(int drm_fd, uint32_t fb_id, unsigned int dst_x, 151 unsigned int dst_y, RegionPtr region, uint32_t handle) 152{ 153 BoxPtr clips = REGION_RECTS(region); 154 unsigned int num_clips = REGION_NUM_RECTS(region); 155 unsigned int alloc_clips = min(num_clips, DRM_MODE_FB_DIRTY_MAX_CLIPS); 156 struct drm_vmw_present_arg arg; 157 unsigned int i; 158 struct drm_vmw_rect *rects, *r; 159 int ret; 160 161 if (num_clips == 0) 162 return 0; 163 164 rects = malloc(alloc_clips * sizeof(*rects)); 165 if (!rects) { 166 LogMessage(X_ERROR, "Failed to alloc cliprects for " 167 "present.\n"); 168 return -1; 169 } 170 171 while (num_clips > 0) { 172 unsigned int cur_clips = min(num_clips, DRM_MODE_FB_DIRTY_MAX_CLIPS); 173 174 memset(&arg, 0, sizeof(arg)); 175 memset(rects, 0, alloc_clips * sizeof(*rects)); 176 arg.fb_id = fb_id; 177 arg.sid = handle; 178 arg.dest_x = dst_x; 179 arg.dest_y = dst_y; 180 arg.num_clips = cur_clips; 181 arg.clips_ptr = (unsigned long) rects; 182 183 for (i = 0, r = rects; i < cur_clips; ++i, ++r, ++clips) { 184 r->x = clips->x1; 185 r->y = clips->y1; 186 r->w = clips->x2 - clips->x1; 187 r->h = clips->y2 - clips->y1; 188 } 189 190 ret = drmCommandWrite(drm_fd, DRM_VMW_PRESENT, &arg, sizeof(arg)); 191 if (ret) 192 LogMessage(X_ERROR, "Present error %s.\n", strerror(-ret)); 193 194 num_clips -= cur_clips; 195 } 196 197 free(rects); 198 199 return 0; 200} 201 202 203struct vmwgfx_int_dmabuf { 204 struct vmwgfx_dmabuf buf; 205 uint64_t map_handle; 206 uint64_t sync_handle; 207 int sync_valid; 208 int drm_fd; 209 uint32_t map_count; 210 void *addr; 211}; 212 213static inline struct vmwgfx_int_dmabuf * 214vmwgfx_int_dmabuf(struct vmwgfx_dmabuf *buf) 215{ 216 return (struct vmwgfx_int_dmabuf *) buf; 217} 218 219struct vmwgfx_dmabuf* 220vmwgfx_dmabuf_alloc(int drm_fd, size_t size) 221{ 222 union drm_vmw_alloc_dmabuf_arg arg; 223 struct vmwgfx_dmabuf *buf; 224 struct vmwgfx_int_dmabuf *ibuf; 225 int ret; 226 227 ibuf = calloc(1, sizeof(*ibuf)); 228 if (!ibuf) 229 return NULL; 230 231 buf = &ibuf->buf; 232 memset(&arg, 0, sizeof(arg)); 233 arg.req.size = size; 234 235 ret = drmCommandWriteRead(drm_fd, DRM_VMW_ALLOC_DMABUF, &arg, 236 sizeof(arg)); 237 if (ret) 238 goto out_kernel_fail; 239 240 ibuf = vmwgfx_int_dmabuf(buf); 241 ibuf->map_handle = arg.rep.map_handle; 242 ibuf->drm_fd = drm_fd; 243 buf->handle = arg.rep.handle; 244 buf->gmr_id = arg.rep.cur_gmr_id; 245 buf->gmr_offset = arg.rep.cur_gmr_offset; 246 buf->size = size; 247 248 return buf; 249 out_kernel_fail: 250 free(buf); 251 return NULL; 252} 253 254void * 255vmwgfx_dmabuf_map(struct vmwgfx_dmabuf *buf) 256{ 257 struct vmwgfx_int_dmabuf *ibuf = vmwgfx_int_dmabuf(buf); 258 259 if (ibuf->addr) 260 return ibuf->addr; 261 262 ibuf->addr = mmap(NULL, buf->size, PROT_READ | PROT_WRITE, MAP_SHARED, 263 ibuf->drm_fd, ibuf->map_handle); 264 265 if (ibuf->addr == MAP_FAILED) { 266 ibuf->addr = NULL; 267 return NULL; 268 } 269 270 ibuf->map_count++; 271 return ibuf->addr; 272} 273 274void 275vmwgfx_dmabuf_unmap(struct vmwgfx_dmabuf *buf) 276{ 277 struct vmwgfx_int_dmabuf *ibuf = vmwgfx_int_dmabuf(buf); 278 279 if (--ibuf->map_count) 280 return; 281 282 /* 283 * It's a pretty important performance optimzation not to call 284 * munmap here, although we should watch out for cases where we might fill 285 * the virtual memory space of the process. 286 */ 287} 288 289void 290vmwgfx_dmabuf_destroy(struct vmwgfx_dmabuf *buf) 291{ 292 struct vmwgfx_int_dmabuf *ibuf = vmwgfx_int_dmabuf(buf); 293 struct drm_vmw_unref_dmabuf_arg arg; 294 295 if (ibuf->addr) { 296 munmap(ibuf->addr, buf->size); 297 ibuf->addr = NULL; 298 } 299 300 memset(&arg, 0, sizeof(arg)); 301 arg.handle = buf->handle; 302 303 (void) drmCommandWrite(ibuf->drm_fd, DRM_VMW_UNREF_DMABUF, &arg, 304 sizeof(arg)); 305 free(buf); 306} 307 308int 309vmwgfx_dma(int host_x, int host_y, 310 RegionPtr region, struct vmwgfx_dmabuf *buf, 311 uint32_t buf_pitch, uint32_t surface_handle, int to_surface) 312{ 313 BoxPtr clips = REGION_RECTS(region); 314 unsigned int num_clips = REGION_NUM_RECTS(region); 315 struct drm_vmw_execbuf_arg arg; 316 struct drm_vmw_fence_rep rep; 317 int ret; 318 unsigned i; 319 SVGA3dCopyBox *cb; 320 SVGA3dCmdSurfaceDMASuffix *suffix; 321 SVGA3dCmdSurfaceDMA *body; 322 struct vmwgfx_int_dmabuf *ibuf = vmwgfx_int_dmabuf(buf); 323 struct { 324 SVGA3dCmdHeader header; 325 SVGA3dCmdSurfaceDMA body; 326 SVGA3dCopyBox cb; 327 } *cmd; 328 static unsigned int max_clips = 329 (SVGA_CB_MAX_COMMAND_SIZE - sizeof(*cmd) - sizeof(*suffix)) / 330 sizeof(cmd->cb) + 1; 331 332 while (num_clips > 0) { 333 unsigned int size; 334 unsigned int cur_clips; 335 336 cur_clips = min(num_clips, max_clips); 337 size = sizeof(*cmd) + (cur_clips - 1) * sizeof(cmd->cb) + 338 sizeof(*suffix); 339 340 cmd = calloc(1, size); 341 if (!cmd) 342 return -1; 343 344 cmd->header.id = SVGA_3D_CMD_SURFACE_DMA; 345 cmd->header.size = sizeof(cmd->body) + cur_clips * sizeof(cmd->cb) + 346 sizeof(*suffix); 347 cb = &cmd->cb; 348 349 suffix = (SVGA3dCmdSurfaceDMASuffix *) &cb[cur_clips]; 350 suffix->suffixSize = sizeof(*suffix); 351 suffix->maximumOffset = (uint32_t) -1; 352 suffix->flags.discard = 0; 353 suffix->flags.unsynchronized = 0; 354 suffix->flags.reserved = 0; 355 356 body = &cmd->body; 357 body->guest.ptr.gmrId = buf->gmr_id; 358 body->guest.ptr.offset = buf->gmr_offset; 359 body->guest.pitch = buf_pitch; 360 body->host.sid = surface_handle; 361 body->host.face = 0; 362 body->host.mipmap = 0; 363 364 body->transfer = (to_surface ? SVGA3D_WRITE_HOST_VRAM : 365 SVGA3D_READ_HOST_VRAM); 366 367 for (i = 0; i < cur_clips; i++, cb++, clips++) { 368 cb->x = (uint16_t) clips->x1 + host_x; 369 cb->y = (uint16_t) clips->y1 + host_y; 370 cb->z = 0; 371 cb->srcx = (uint16_t) clips->x1; 372 cb->srcy = (uint16_t) clips->y1; 373 cb->srcz = 0; 374 cb->w = (uint16_t) (clips->x2 - clips->x1); 375 cb->h = (uint16_t) (clips->y2 - clips->y1); 376 cb->d = 1; 377#if 0 378 LogMessage(X_INFO, "DMA! x: %u y: %u srcx: %u srcy: %u w: %u h: %u %s\n", 379 cb->x, cb->y, cb->srcx, cb->srcy, cb->w, cb->h, 380 to_surface ? "to" : "from"); 381#endif 382 383 } 384 385 memset(&arg, 0, sizeof(arg)); 386 memset(&rep, 0, sizeof(rep)); 387 388 rep.error = -EFAULT; 389 390 /* Only require a fence if readback and last batch of cliprects. */ 391 arg.fence_rep = ((to_surface && (cur_clips == num_clips)) ? 392 0UL : (unsigned long) &rep); 393 arg.commands = (unsigned long)cmd; 394 arg.command_size = size; 395 arg.throttle_us = 0; 396 arg.version = DRM_VMW_EXECBUF_VERSION; 397 398 ret = drmCommandWrite(ibuf->drm_fd, DRM_VMW_EXECBUF, &arg, sizeof(arg)); 399 if (ret) { 400 LogMessage(X_ERROR, "DMA error %s.\n", strerror(-ret)); 401 } 402 403 free(cmd); 404 num_clips -= cur_clips; 405 406 if (rep.error == 0) { 407 ret = vmwgfx_fence_wait(ibuf->drm_fd, rep.handle, TRUE); 408 if (ret) { 409 LogMessage(X_ERROR, "DMA from host fence wait error %s.\n", 410 strerror(-ret)); 411 /* vmwgfx_fence_wait() takes care of this if ret == 0. */ 412 vmwgfx_fence_unref(ibuf->drm_fd, rep.handle); 413 } 414 } 415 } 416 417 return 0; 418} 419 420int 421vmwgfx_get_param(int drm_fd, uint32_t param, uint64_t *out) 422{ 423 struct drm_vmw_getparam_arg gp_arg; 424 int ret; 425 426 memset(&gp_arg, 0, sizeof(gp_arg)); 427 gp_arg.param = param; 428 ret = drmCommandWriteRead(drm_fd, DRM_VMW_GET_PARAM, 429 &gp_arg, sizeof(gp_arg)); 430 431 if (ret == 0) { 432 *out = gp_arg.value; 433 } 434 435 return ret; 436} 437 438int 439vmwgfx_num_streams(int drm_fd, uint32_t *ntot, uint32_t *nfree) 440{ 441 uint64_t v1, v2; 442 int ret; 443 444 ret = vmwgfx_get_param(drm_fd, DRM_VMW_PARAM_NUM_STREAMS, &v1); 445 if (ret) 446 return ret; 447 448 ret = vmwgfx_get_param(drm_fd, DRM_VMW_PARAM_NUM_FREE_STREAMS, &v2); 449 if (ret) 450 return ret; 451 452 *ntot = (uint32_t)v1; 453 *nfree = (uint32_t)v2; 454 455 return 0; 456} 457 458int 459vmwgfx_claim_stream(int drm_fd, uint32_t *out) 460{ 461 struct drm_vmw_stream_arg s_arg; 462 int ret; 463 464 ret = drmCommandRead(drm_fd, DRM_VMW_CLAIM_STREAM, 465 &s_arg, sizeof(s_arg)); 466 467 if (ret) 468 return -1; 469 470 *out = s_arg.stream_id; 471 return 0; 472} 473 474int 475vmwgfx_unref_stream(int drm_fd, uint32_t stream_id) 476{ 477 struct drm_vmw_stream_arg s_arg; 478 int ret; 479 480 memset(&s_arg, 0, sizeof(s_arg)); 481 s_arg.stream_id = stream_id; 482 483 ret = drmCommandWrite(drm_fd, DRM_VMW_UNREF_STREAM, 484 &s_arg, sizeof(s_arg)); 485 486 return (ret != 0) ? -1 : 0; 487} 488 489int 490vmwgfx_cursor_bypass(int drm_fd, int xhot, int yhot) 491{ 492 struct drm_vmw_cursor_bypass_arg arg; 493 int ret; 494 495 memset(&arg, 0, sizeof(arg)); 496 arg.flags = DRM_VMW_CURSOR_BYPASS_ALL; 497 arg.xhot = xhot; 498 arg.yhot = yhot; 499 500 ret = drmCommandWrite(drm_fd, DRM_VMW_CURSOR_BYPASS, 501 &arg, sizeof(arg)); 502 503 return ret; 504} 505 506int 507vmwgfx_update_gui_layout(int drm_fd, unsigned int num_rects, 508 struct drm_vmw_rect *rects) 509{ 510 struct drm_vmw_update_layout_arg arg; 511 512 memset(&arg, 0, sizeof(arg)); 513 514 arg.num_outputs = num_rects; 515 arg.rects = (unsigned long) rects; 516 517 return drmCommandWrite(drm_fd, DRM_VMW_UPDATE_LAYOUT, &arg, 518 sizeof(arg)); 519} 520 521 522int 523vmwgfx_max_fb_size(int drm_fd, size_t *size) 524{ 525 uint64_t tmp_size; 526 527 if (vmwgfx_get_param(drm_fd, DRM_VMW_PARAM_MAX_FB_SIZE, &tmp_size) != 0) 528 return -1; 529 530 *size = tmp_size; 531 532 return 0; 533} 534 535#ifdef HAVE_LIBDRM_2_4_38 536/** 537 * vmwgfx_prime_fd_to_handle - Return a TTM handle to a prime object 538 * 539 * @drm_fd: File descriptor for the drm connection. 540 * @prime_fd: File descriptor identifying the prime object. 541 * @handle: Pointer to returned TTM handle. 542 * 543 * Takes a reference on the underlying object and returns a TTM handle to it. 544 */ 545int 546vmwgfx_prime_fd_to_handle(int drm_fd, int prime_fd, uint32_t *handle) 547{ 548 *handle = 0; 549 550 return drmPrimeFDToHandle(drm_fd, prime_fd, handle); 551} 552 553/** 554 * vmwgfx_prime_release_handle - Release a reference on a TTM object 555 * 556 * @drm_fd: File descriptor for the drm connection. 557 * @handle: TTM handle as returned by vmwgfx_prime_fd_to_handle. 558 * 559 * Releases the reference obtained by vmwgfx_prime_fd_to_handle(). 560 */ 561void 562vmwgfx_prime_release_handle(int drm_fd, uint32_t handle) 563{ 564 struct drm_vmw_surface_arg s_arg; 565 566 memset(&s_arg, 0, sizeof(s_arg)); 567 s_arg.sid = handle; 568 569 (void) drmCommandWrite(drm_fd, DRM_VMW_UNREF_SURFACE, &s_arg, 570 sizeof(s_arg)); 571} 572#endif /* HAVE_LIBDRM_2_4_38 */ 573