/*
 * Copyright (c) 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <xf86drm.h>

#include "sna.h"

#include <xf86.h>
#include <dri3.h>
#include <misyncshm.h>
#include <misyncstr.h>

static DevPrivateKeyRec sna_sync_fence_private_key;

struct sna_sync_fence {
        SyncFenceSetTriggeredFunc set_triggered;
};

static inline struct sna_sync_fence *sna_sync_fence(SyncFence *fence)
{
        return dixLookupPrivate(&fence->devPrivates, &sna_sync_fence_private_key);
}

static inline void mark_dri3_pixmap(struct sna *sna, struct sna_pixmap *priv, struct kgem_bo *bo)
{
        bo->flush = true;
        if (bo->exec)
                sna->kgem.flush = 1;
        if (bo == priv->gpu_bo)
                priv->flush |= 3;
        else
                priv->shm = true;

        sna_accel_watch_flush(sna, 1);
}

static void sna_sync_flush(struct sna *sna, struct sna_pixmap *priv)
{
        struct kgem_bo *bo = NULL;

        assert(priv);
        DBG(("%s(pixmap=%ld)\n", __FUNCTION__, priv->pixmap->drawable.serialNumber));

        if (priv->pinned & PIN_DRI3) {
                assert(priv->gpu_bo);
                DBG(("%s: flushing prime GPU bo, handle=%d\n", __FUNCTION__, priv->gpu_bo->handle));
                if (sna_pixmap_move_to_gpu(priv->pixmap, MOVE_READ | MOVE_WRITE | MOVE_ASYNC_HINT | __MOVE_FORCE)) {
                        sna_damage_all(&priv->gpu_damage, priv->pixmap);
                        bo = priv->gpu_bo;
                }
        } else {
                assert(priv->cpu_bo);
                assert(IS_STATIC_PTR(priv->ptr));
                DBG(("%s: flushing prime CPU bo, handle=%d\n", __FUNCTION__, priv->cpu_bo->handle));
                if (sna_pixmap_move_to_cpu(priv->pixmap, MOVE_READ | MOVE_WRITE | MOVE_ASYNC_HINT))
                        bo = priv->cpu_bo;
        }

        if (bo != NULL) {
                kgem_bo_submit(&sna->kgem, bo);
                kgem_bo_unclean(&sna->kgem, bo);
        }
}

static void
sna_sync_fence_set_triggered(SyncFence *fence)
{
        struct sna *sna = to_sna_from_screen(fence->pScreen);
        struct sna_sync_fence *sna_fence = sna_sync_fence(fence);

        DBG(("%s()\n", __FUNCTION__));
        sna_accel_flush(sna);

        fence->funcs.SetTriggered = sna_fence->set_triggered;
        sna_fence->set_triggered(fence);
        sna_fence->set_triggered = fence->funcs.SetTriggered;
        fence->funcs.SetTriggered = sna_sync_fence_set_triggered;
}
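
/*
 * Both sync hooks (SetTriggered above, CreateFence below) chain to the
 * saved implementation the same way: temporarily restore the original
 * function pointer, call it, save back whatever the callee left installed,
 * then re-insert the SNA wrapper. This keeps the wrapper at the head of
 * the chain even if the wrapped code swaps the hook while it runs.
 */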
static void
sna_sync_create_fence(ScreenPtr screen, SyncFence *fence, Bool initially_triggered)
{
        struct sna *sna = to_sna_from_screen(screen);
        SyncScreenFuncsPtr funcs = miSyncGetScreenFuncs(screen);

        DBG(("%s()\n", __FUNCTION__));

        funcs->CreateFence = sna->dri3.create_fence;
        sna->dri3.create_fence(screen, fence, initially_triggered);
        sna->dri3.create_fence = funcs->CreateFence;
        funcs->CreateFence = sna_sync_create_fence;

        sna_sync_fence(fence)->set_triggered = fence->funcs.SetTriggered;
        fence->funcs.SetTriggered = sna_sync_fence_set_triggered;
}

static bool
sna_sync_open(struct sna *sna, ScreenPtr screen)
{
        SyncScreenFuncsPtr funcs;

        DBG(("%s()\n", __FUNCTION__));

        if (!miSyncShmScreenInit(screen))
                return false;

        if (!dixPrivateKeyRegistered(&sna_sync_fence_private_key)) {
                if (!dixRegisterPrivateKey(&sna_sync_fence_private_key,
                                           PRIVATE_SYNC_FENCE,
                                           sizeof(struct sna_sync_fence)))
                        return false;
        }

        funcs = miSyncGetScreenFuncs(screen);
        sna->dri3.create_fence = funcs->CreateFence;
        funcs->CreateFence = sna_sync_create_fence;

        return true;
}

static int sna_dri3_open_device(ScreenPtr screen,
                                RRProviderPtr provider,
                                int *out)
{
        int fd;

        DBG(("%s()\n", __FUNCTION__));
        fd = intel_get_client_fd(to_sna_from_screen(screen)->dev);
        if (fd < 0)
                return -fd;

        *out = fd;
        return Success;
}

static PixmapPtr sna_dri3_pixmap_from_fd(ScreenPtr screen,
                                         int fd,
                                         CARD16 width,
                                         CARD16 height,
                                         CARD16 stride,
                                         CARD8 depth,
                                         CARD8 bpp)
{
        struct sna *sna = to_sna_from_screen(screen);
        PixmapPtr pixmap;
        struct sna_pixmap *priv;
        struct kgem_bo *bo;

        DBG(("%s(fd=%d, width=%d, height=%d, stride=%d, depth=%d, bpp=%d)\n",
             __FUNCTION__, fd, width, height, stride, depth, bpp));
        if (width > INT16_MAX || height > INT16_MAX)
                return NULL;

        if ((uint32_t)width * bpp > (uint32_t)stride * 8)
                return NULL;

        if (depth < 8)
                return NULL;

        switch (bpp) {
        case 8:
        case 16:
        case 32:
                break;
        default:
                return NULL;
        }

        bo = kgem_create_for_prime(&sna->kgem, fd, (uint32_t)stride * height);
        if (bo == NULL)
                return NULL;

        /* Check for a duplicate */
        list_for_each_entry(priv, &sna->dri3.pixmaps, cow_list) {
                int other_stride = 0;

                if (bo->snoop) {
                        assert(priv->cpu_bo);
                        assert(IS_STATIC_PTR(priv->ptr));
                        if (bo->handle == priv->cpu_bo->handle)
                                other_stride = priv->cpu_bo->pitch;
                } else {
                        assert(priv->gpu_bo);
                        assert(priv->pinned & PIN_DRI3);
                        if (bo->handle == priv->gpu_bo->handle)
                                other_stride = priv->gpu_bo->pitch;
                }

                if (other_stride) {
                        pixmap = priv->pixmap;
                        DBG(("%s: imported fd matches existing DRI3 pixmap=%ld\n", __FUNCTION__, pixmap->drawable.serialNumber));
                        bo->handle = 0; /* fudge to prevent gem_close */
                        kgem_bo_destroy(&sna->kgem, bo);

                        if (width != pixmap->drawable.width ||
                            height != pixmap->drawable.height ||
                            depth != pixmap->drawable.depth ||
                            bpp != pixmap->drawable.bitsPerPixel ||
                            stride != other_stride) {
                                DBG(("%s: imported fd mismatches existing DRI3 pixmap (width=%d, height=%d, depth=%d, bpp=%d, stride=%d)\n", __FUNCTION__,
                                     pixmap->drawable.width,
                                     pixmap->drawable.height,
                                     pixmap->drawable.depth,
                                     pixmap->drawable.bitsPerPixel,
                                     other_stride));
                                return NULL;
                        }

                        sna_sync_flush(sna, priv);
                        pixmap->refcnt++;
                        return pixmap;
                }
        }
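
        /*
         * No existing DRI3 pixmap wraps this buffer: sanity check that the
         * client-supplied stride and the bo's size can actually hold a
         * width x height surface, then wrap the bo in a fresh pixmap.
         */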
        if (!kgem_check_surface_size(&sna->kgem,
                                     width, height, bpp,
                                     bo->tiling, stride, kgem_bo_size(bo))) {
                DBG(("%s: client supplied pitch=%d, size=%d too small for %dx%d surface\n",
                     __FUNCTION__, stride, kgem_bo_size(bo), width, height));
                goto free_bo;
        }

        pixmap = sna_pixmap_create_unattached(screen, 0, 0, depth);
        if (pixmap == NullPixmap)
                goto free_bo;

        if (!screen->ModifyPixmapHeader(pixmap, width, height,
                                        depth, bpp, stride, NULL))
                goto free_pixmap;

        priv = sna_pixmap_attach_to_bo(pixmap, bo);
        if (priv == NULL)
                goto free_pixmap;

        bo->pitch = stride;
        priv->stride = stride;

        if (bo->snoop) {
                assert(priv->cpu_bo == bo);
                pixmap->devPrivate.ptr = kgem_bo_map__cpu(&sna->kgem, priv->cpu_bo);
                if (pixmap->devPrivate.ptr == NULL)
                        goto free_pixmap;

                pixmap->devKind = stride;
                priv->ptr = MAKE_STATIC_PTR(pixmap->devPrivate.ptr);
        } else {
                assert(priv->gpu_bo == bo);
                priv->pinned |= PIN_DRI3;
        }
        list_add(&priv->cow_list, &sna->dri3.pixmaps);

        mark_dri3_pixmap(sna, priv, bo);

        return pixmap;

free_pixmap:
        screen->DestroyPixmap(pixmap);
free_bo:
        kgem_bo_destroy(&sna->kgem, bo);
        return NULL;
}
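
/*
 * Export path: hand a prime fd for an existing pixmap back to the DRI3
 * client. The bo is taken from the CPU side when the pixmap is backed by a
 * static (shm/snooped) pointer, otherwise from the GPU side, which is then
 * marked PIN_DRI3 so that SNA does not replace the backing bo while the
 * client still holds the fd.
 */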
static int sna_dri3_fd_from_pixmap(ScreenPtr screen,
                                   PixmapPtr pixmap,
                                   CARD16 *stride,
                                   CARD32 *size)
{
        struct sna *sna = to_sna_from_screen(screen);
        struct sna_pixmap *priv;
        struct kgem_bo *bo = NULL;
        int fd;

        DBG(("%s(pixmap=%ld, width=%d, height=%d)\n", __FUNCTION__,
             pixmap->drawable.serialNumber, pixmap->drawable.width, pixmap->drawable.height));
        if (pixmap == sna->front && sna->flags & SNA_TEAR_FREE) {
                DBG(("%s: DRI3 protocol cannot support TearFree frontbuffers\n", __FUNCTION__));
                return -1;
        }

        priv = sna_pixmap(pixmap);
        if (priv && IS_STATIC_PTR(priv->ptr) && priv->cpu_bo) {
                if (sna_pixmap_move_to_cpu(pixmap, MOVE_READ | MOVE_WRITE | MOVE_ASYNC_HINT))
                        bo = priv->cpu_bo;
        } else {
                priv = sna_pixmap_move_to_gpu(pixmap, MOVE_READ | MOVE_WRITE | MOVE_ASYNC_HINT | __MOVE_FORCE | __MOVE_DRI);
                if (priv != NULL) {
                        sna_damage_all(&priv->gpu_damage, pixmap);
                        bo = priv->gpu_bo;
                }
        }
        if (bo == NULL) {
                DBG(("%s: pixmap not supported by GPU\n", __FUNCTION__));
                return -1;
        }
        assert(priv != NULL);

        if (bo->pitch > UINT16_MAX) {
                DBG(("%s: pixmap pitch (%d) too large for DRI3 protocol\n",
                     __FUNCTION__, bo->pitch));
                return -1;
        }

        fd = kgem_bo_export_to_prime(&sna->kgem, bo);
        if (fd == -1) {
                DBG(("%s: exporting handle=%d to fd failed\n", __FUNCTION__, bo->handle));
                return -1;
        }

        if (bo == priv->gpu_bo)
                priv->pinned |= PIN_DRI3;
        list_move(&priv->cow_list, &sna->dri3.pixmaps);

        mark_dri3_pixmap(sna, priv, bo);

        *stride = (priv->pinned & PIN_DRI3) ? priv->gpu_bo->pitch : priv->cpu_bo->pitch;
        *size = kgem_bo_size((priv->pinned & PIN_DRI3) ? priv->gpu_bo : priv->cpu_bo);
        DBG(("%s: exporting %s pixmap=%ld, handle=%d, stride=%d, size=%d\n",
             __FUNCTION__,
             (priv->pinned & PIN_DRI3) ? "GPU" : "CPU", pixmap->drawable.serialNumber,
             (priv->pinned & PIN_DRI3) ? priv->gpu_bo->handle : priv->cpu_bo->handle,
             *stride, *size));
        return fd;
}

static dri3_screen_info_rec sna_dri3_info = {
        .version = DRI3_SCREEN_INFO_VERSION,

        .open = sna_dri3_open_device,
        .pixmap_from_fd = sna_dri3_pixmap_from_fd,
        .fd_from_pixmap = sna_dri3_fd_from_pixmap,
};

bool sna_dri3_open(struct sna *sna, ScreenPtr screen)
{
        DBG(("%s()\n", __FUNCTION__));

        if (!sna_sync_open(sna, screen))
                return false;

        list_init(&sna->dri3.pixmaps);
        return dri3_screen_init(screen, &sna_dri3_info);
}

void sna_dri3_close(struct sna *sna, ScreenPtr screen)
{
        SyncScreenFuncsPtr funcs;

        DBG(("%s()\n", __FUNCTION__));

        funcs = miSyncGetScreenFuncs(screen);
        if (funcs)
                funcs->CreateFence = sna->dri3.create_fence;
}
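
/*
 * Setup/teardown note: sna_dri3_open() is expected to be called once per
 * screen during driver screen initialisation -- it hooks the miSync
 * CreateFence function and registers the dri3_screen_info above via
 * dri3_screen_init(). sna_dri3_close() only unhooks CreateFence here;
 * entries on sna->dri3.pixmaps are presumably released by the normal
 * pixmap destruction paths elsewhere in the driver.
 */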