sna_dri3.c revision fe8aea9e
1/* 2 * Copyright (c) 2014 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 * SOFTWARE. 
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <xf86drm.h>

#include "sna.h"

#include <xf86.h>
#include <dri3.h>
#include <misyncshm.h>
#include <misyncstr.h>

/* Per-SyncFence private storage: remembers the server's original
 * SetTriggered hook so we can chain to it (see
 * sna_sync_fence_set_triggered below).
 */
static DevPrivateKeyRec sna_sync_fence_private_key;
struct sna_sync_fence {
	SyncFenceSetTriggeredFunc set_triggered;
};

/* Look up our private record attached to a SyncFence. */
static inline struct sna_sync_fence *sna_sync_fence(SyncFence *fence)
{
	return dixLookupPrivate(&fence->devPrivates, &sna_sync_fence_private_key);
}

/* Mark a buffer that has been shared with a DRI3 client: force it to be
 * flushed on submit, and record on the pixmap whether the shared buffer
 * is the GPU bo (needs read/write flushing) or a snooped CPU bo (shm).
 * Finally submit any batch referencing it and drop any stale CPU caches
 * so the foreign process observes coherent contents.
 */
static inline void mark_dri3_pixmap(struct sna *sna, struct sna_pixmap *priv, struct kgem_bo *bo)
{
	bo->flush = true;
	if (bo->exec)
		sna->kgem.flush = 1;
	if (bo == priv->gpu_bo)
		priv->flush |= FLUSH_READ | FLUSH_WRITE;
	else
		priv->shm = true;

	sna_watch_flush(sna, 1);

	kgem_bo_submit(&sna->kgem, bo);
	kgem_bo_unclean(&sna->kgem, bo);
}

/* Push all pending damage for a DRI3-shared pixmap into the buffer the
 * client sees: the pinned GPU bo for GPU-shared pixmaps, else the
 * snooped CPU bo. Used when an imported fd turns out to match an
 * already-known pixmap, so the duplicate hands back current contents.
 */
static void sna_sync_flush(struct sna *sna, struct sna_pixmap *priv)
{
	struct kgem_bo *bo = NULL;

	DBG(("%s(pixmap=%ld)\n", __FUNCTION__, priv->pixmap->drawable.serialNumber));
	assert(priv);

	if (priv->pinned & PIN_DRI3) {
		assert(priv->gpu_bo);
		assert(priv->pinned & PIN_DRI3);
		DBG(("%s: flushing prime GPU bo, handle=%ld\n", __FUNCTION__, priv->gpu_bo->handle));
		if (sna_pixmap_move_to_gpu(priv->pixmap, MOVE_READ | MOVE_WRITE | MOVE_ASYNC_HINT | __MOVE_FORCE)) {
			sna_damage_all(&priv->gpu_damage, priv->pixmap);
			bo = priv->gpu_bo;
		}
	} else {
		assert(priv->cpu_bo);
		assert(IS_STATIC_PTR(priv->ptr));
		DBG(("%s: flushing prime CPU bo, handle=%ld\n", __FUNCTION__, priv->cpu_bo->handle));
		if (sna_pixmap_move_to_cpu(priv->pixmap, MOVE_READ | MOVE_WRITE | MOVE_ASYNC_HINT))
			bo = priv->cpu_bo;
	}

	if (bo != NULL) {
		kgem_bo_submit(&sna->kgem, bo);
		kgem_bo_unclean(&sna->kgem, bo);
	}
}

/* Wrapper installed over SyncFence::SetTriggered: flush our pending
 * rendering before the fence fires so clients waiting on the fence see
 * completed work. Uses the classic X server wrap/call/unwrap dance:
 * temporarily restore the saved hook, call it, re-save whatever it now
 * is, and re-install ourselves.
 */
static void
sna_sync_fence_set_triggered(SyncFence *fence)
{
	struct sna *sna = to_sna_from_screen(fence->pScreen);
	struct sna_sync_fence *sna_fence = sna_sync_fence(fence);

	DBG(("%s()\n", __FUNCTION__));
	sna_accel_flush(sna);

	fence->funcs.SetTriggered = sna_fence->set_triggered;
	sna_fence->set_triggered(fence);
	sna_fence->set_triggered = fence->funcs.SetTriggered;
	fence->funcs.SetTriggered = sna_sync_fence_set_triggered;
}

/* Wrapper over SyncScreenFuncs::CreateFence: chain to the saved
 * CreateFence (again wrap/call/unwrap), then hook the new fence's
 * SetTriggered with our flushing wrapper, saving the original in the
 * fence private.
 */
static void
sna_sync_create_fence(ScreenPtr screen, SyncFence *fence, Bool initially_triggered)
{
	struct sna *sna = to_sna_from_screen(screen);
	SyncScreenFuncsPtr funcs = miSyncGetScreenFuncs(screen);

	DBG(("%s()\n", __FUNCTION__));

	funcs->CreateFence = sna->dri3.create_fence;
	sna->dri3.create_fence(screen, fence, initially_triggered);
	sna->dri3.create_fence = funcs->CreateFence;
	funcs->CreateFence = sna_sync_create_fence;

	sna_sync_fence(fence)->set_triggered = fence->funcs.SetTriggered;
	fence->funcs.SetTriggered = sna_sync_fence_set_triggered;
}

/* Initialise shm sync fences for this screen, register our fence
 * private key (once, globally), and interpose sna_sync_create_fence
 * on the screen's CreateFence. Returns false on any init failure.
 */
static bool
sna_sync_open(struct sna *sna, ScreenPtr screen)
{
	SyncScreenFuncsPtr funcs;

	DBG(("%s()\n", __FUNCTION__));

	if (!miSyncShmScreenInit(screen))
		return false;

	if (!dixPrivateKeyRegistered(&sna_sync_fence_private_key)) {
		if (!dixRegisterPrivateKey(&sna_sync_fence_private_key,
					   PRIVATE_SYNC_FENCE,
					   sizeof(struct sna_sync_fence)))
			return false;
	}

	funcs = miSyncGetScreenFuncs(screen);
	sna->dri3.create_fence = funcs->CreateFence;
	funcs->CreateFence = sna_sync_create_fence;

	return true;
}

/* DRI3Open: hand the client its own fd for the DRM device.
 * NOTE(review): on failure this returns -fd, i.e. a positive errno
 * value, as the X error code — presumably relying on the caller to
 * treat any non-Success value as failure; confirm against the DRI3
 * dispatch code.
 */
static int sna_dri3_open_device(ScreenPtr screen,
				RRProviderPtr provider,
				int *out)
{
	int fd;

	DBG(("%s()\n", __FUNCTION__));
	fd = intel_get_client_fd(to_sna_from_screen(screen)->dev);
	if (fd < 0)
		return -fd;

	*out = fd;
	return Success;
}

/* DRI3PixmapFromBuffer: wrap a client-supplied prime fd in a Pixmap.
 *
 * Validates the client-controlled geometry (16-bit dimensions, stride
 * wide enough for width*bpp, supported depth/bpp), imports the fd into
 * a kgem bo, and then:
 *  - if the underlying GEM handle matches a pixmap already on the
 *    dri3.pixmaps list, drops the duplicate bo (zeroing bo->handle so
 *    the shared GEM handle is not closed) and returns the existing
 *    pixmap with a bumped refcnt — but only if all geometry matches;
 *  - otherwise builds a fresh pixmap around the bo: snooped bos become
 *    CPU-mapped (shm-style) pixmaps, anything else becomes a pinned
 *    (PIN_DRI3) GPU pixmap.
 * Returns NULL on any validation or allocation failure, destroying the
 * bo/pixmap via the goto cleanup chain.
 */
static PixmapPtr sna_dri3_pixmap_from_fd(ScreenPtr screen,
					 int fd,
					 CARD16 width,
					 CARD16 height,
					 CARD16 stride,
					 CARD8 depth,
					 CARD8 bpp)
{
	struct sna *sna = to_sna_from_screen(screen);
	PixmapPtr pixmap;
	struct sna_pixmap *priv;
	struct kgem_bo *bo;

	DBG(("%s(fd=%d, width=%d, height=%d, stride=%d, depth=%d, bpp=%d)\n",
	     __FUNCTION__, fd, width, height, stride, depth, bpp));
	if (width > INT16_MAX || height > INT16_MAX)
		return NULL;

	/* stride must cover at least one row of pixels */
	if ((uint32_t)width * bpp > (uint32_t)stride * 8)
		return NULL;

	if (depth < 8)
		return NULL;

	switch (bpp) {
	case 8:
	case 16:
	case 32:
		break;
	default:
		return NULL;
	}

	bo = kgem_create_for_prime(&sna->kgem, fd, (uint32_t)stride * height);
	if (bo == NULL)
		return NULL;

	/* Check for a duplicate */
	list_for_each_entry(priv, &sna->dri3.pixmaps, cow_list) {
		int other_stride = 0;
		if (bo->snoop) {
			assert(priv->cpu_bo);
			assert(IS_STATIC_PTR(priv->ptr));
			if (bo->handle == priv->cpu_bo->handle)
				other_stride = priv->cpu_bo->pitch;
		} else {
			assert(priv->gpu_bo);
			assert(priv->pinned & PIN_DRI3);
			if (bo->handle == priv->gpu_bo->handle)
				other_stride = priv->gpu_bo->pitch;
		}
		if (other_stride) {
			pixmap = priv->pixmap;
			DBG(("%s: imported fd matches existing DRI3 pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
			bo->handle = 0; /* fudge to prevent gem_close */
			kgem_bo_destroy(&sna->kgem, bo);
			/* Reject if the client's idea of the geometry
			 * disagrees with the pixmap we already have.
			 */
			if (width != pixmap->drawable.width ||
			    height != pixmap->drawable.height ||
			    depth != pixmap->drawable.depth ||
			    bpp != pixmap->drawable.bitsPerPixel ||
			    stride != other_stride) {
				DBG(("%s: imported fd mismatches existing DRI3 pixmap (width=%d, height=%d, depth=%d, bpp=%d, stride=%d)\n", __FUNCTION__,
				     pixmap->drawable.width,
				     pixmap->drawable.height,
				     pixmap->drawable.depth,
				     pixmap->drawable.bitsPerPixel,
				     other_stride));
				return NULL;
			}
			sna_sync_flush(sna, priv);
			pixmap->refcnt++;
			return pixmap;
		}
	}

	/* The bo the client gave us must be large enough for the
	 * claimed surface, accounting for its tiling mode.
	 */
	if (!kgem_check_surface_size(&sna->kgem,
				     width, height, bpp,
				     bo->tiling, stride, kgem_bo_size(bo))) {
		DBG(("%s: client supplied pitch=%d, size=%d too small for %dx%d surface\n",
		     __FUNCTION__, stride, kgem_bo_size(bo), width, height));
		goto free_bo;
	}

	pixmap = sna_pixmap_create_unattached(screen, 0, 0, depth);
	if (pixmap == NullPixmap)
		goto free_bo;

	if (!screen->ModifyPixmapHeader(pixmap, width, height,
					depth, bpp, stride, NULL))
		goto free_pixmap;

	priv = sna_pixmap_attach_to_bo(pixmap, bo);
	if (priv == NULL)
		goto free_pixmap;

	/* Trust the client's stride over whatever attach computed. */
	bo->pitch = stride;
	priv->stride = stride;

	if (bo->snoop) {
		/* Snooped (CPU-coherent) buffer: expose it as a
		 * CPU-mapped, shm-style pixmap.
		 */
		assert(priv->cpu_bo == bo);
		pixmap->devPrivate.ptr = kgem_bo_map__cpu(&sna->kgem, priv->cpu_bo);
		if (pixmap->devPrivate.ptr == NULL)
			goto free_pixmap;

		pixmap->devKind = stride;
		priv->ptr = MAKE_STATIC_PTR(pixmap->devPrivate.ptr);
	} else {
		/* GPU buffer: pin it so we never swap it out from
		 * under the sharing client.
		 */
		assert(priv->gpu_bo == bo);
		priv->create = kgem_can_create_2d(&sna->kgem,
						  width, height, depth);
		priv->pinned |= PIN_DRI3;
	}
	list_add(&priv->cow_list, &sna->dri3.pixmaps);

	mark_dri3_pixmap(sna, priv, bo);

	return pixmap;

free_pixmap:
	screen->DestroyPixmap(pixmap);
free_bo:
	kgem_bo_destroy(&sna->kgem, bo);
	return NULL;
}

/* DRI3BufferFromPixmap: export a pixmap's backing buffer as a prime fd.
 *
 * Refuses TearFree frontbuffers (no single stable buffer to export).
 * Prefers the snooped CPU bo for shm-style pixmaps, otherwise forces
 * the pixmap onto the GPU and exports the (then pinned) GPU bo. Tiled
 * buffers are untiled first when fencing is unavailable, since the
 * DRI3 protocol carries no tiling information. On success fills in
 * *stride and *size and returns the fd; returns -1 on failure.
 */
static int sna_dri3_fd_from_pixmap(ScreenPtr screen,
				   PixmapPtr pixmap,
				   CARD16 *stride,
				   CARD32 *size)
{
	struct sna *sna = to_sna_from_screen(screen);
	struct sna_pixmap *priv;
	struct kgem_bo *bo = NULL;
	int fd;

	DBG(("%s(pixmap=%ld, width=%d, height=%d)\n", __FUNCTION__,
	     pixmap->drawable.serialNumber, pixmap->drawable.width, pixmap->drawable.height));
	if (pixmap == sna->front && sna->flags & SNA_TEAR_FREE) {
		DBG(("%s: DRI3 protocol cannot support TearFree frontbuffers\n", __FUNCTION__));
		return -1;
	}

	priv = sna_pixmap(pixmap);
	if (priv && IS_STATIC_PTR(priv->ptr) && priv->cpu_bo) {
		if (sna_pixmap_move_to_cpu(pixmap, MOVE_READ | MOVE_WRITE | MOVE_ASYNC_HINT))
			bo = priv->cpu_bo;
	} else {
		priv = sna_pixmap_move_to_gpu(pixmap, MOVE_READ | MOVE_WRITE | MOVE_ASYNC_HINT | __MOVE_FORCE | __MOVE_DRI);
		if (priv != NULL) {
			/* The client may write through the exported fd
			 * behind our back; treat the whole GPU bo as
			 * damaged from now on.
			 */
			sna_damage_all(&priv->gpu_damage, pixmap);
			bo = priv->gpu_bo;
		}
	}
	if (bo == NULL) {
		DBG(("%s: pixmap not supported by GPU\n", __FUNCTION__));
		return -1;
	}
	assert(priv != NULL);

	/* The DRI3 reply's stride field is only 16 bits wide. */
	if (bo->pitch > UINT16_MAX) {
		DBG(("%s: pixmap pitch (%d) too large for DRI3 protocol\n",
		     __FUNCTION__, bo->pitch));
		return -1;
	}

	if (bo->tiling && !sna->kgem.can_fence) {
		if (!sna_pixmap_change_tiling(pixmap, I915_TILING_NONE)) {
			DBG(("%s: unable to discard GPU tiling (%d) for DRI3 protocol\n",
			     __FUNCTION__, bo->tiling));
			return -1;
		}
		bo = priv->gpu_bo; /* changing tiling replaced the bo */
	}

	fd = kgem_bo_export_to_prime(&sna->kgem, bo);
	if (fd == -1) {
		DBG(("%s: exporting handle=%d to fd failed\n", __FUNCTION__, bo->handle));
		return -1;
	}

	if (bo == priv->gpu_bo)
		priv->pinned |= PIN_DRI3;
	list_move(&priv->cow_list, &sna->dri3.pixmaps);

	mark_dri3_pixmap(sna, priv, bo);

	*stride = (priv->pinned & PIN_DRI3) ? priv->gpu_bo->pitch : priv->cpu_bo->pitch;
	*size = kgem_bo_size((priv->pinned & PIN_DRI3) ? priv->gpu_bo : priv->cpu_bo);
	DBG(("%s: exporting %s pixmap=%ld, handle=%d, stride=%d, size=%d\n",
	     __FUNCTION__,
	     (priv->pinned & PIN_DRI3) ? "GPU" : "CPU", pixmap->drawable.serialNumber,
	     (priv->pinned & PIN_DRI3) ? priv->gpu_bo->handle : priv->cpu_bo->handle,
	     *stride, *size));
	return fd;
}

/* Entry points handed to the server's DRI3 extension. */
static dri3_screen_info_rec sna_dri3_info = {
	.version = DRI3_SCREEN_INFO_VERSION,

	.open = sna_dri3_open_device,
	.pixmap_from_fd = sna_dri3_pixmap_from_fd,
	.fd_from_pixmap = sna_dri3_fd_from_pixmap,
};

/* Screen init: hook the sync machinery, reset the list of exported/
 * imported pixmaps, and register our DRI3 implementation.
 */
bool sna_dri3_open(struct sna *sna, ScreenPtr screen)
{
	DBG(("%s()\n", __FUNCTION__));

	if (!sna_sync_open(sna, screen))
		return false;

	list_init(&sna->dri3.pixmaps);
	return dri3_screen_init(screen, &sna_dri3_info);
}

/* Screen close: restore the CreateFence hook we interposed in
 * sna_sync_open.
 */
void sna_dri3_close(struct sna *sna, ScreenPtr screen)
{
	SyncScreenFuncsPtr funcs;

	DBG(("%s()\n", __FUNCTION__));

	funcs = miSyncGetScreenFuncs(screen);
	if (funcs)
		funcs->CreateFence = sna->dri3.create_fence;
}