/*	$NetBSD: qxl_release.c,v 1.3 2021/12/18 23:45:42 riastradh Exp $	*/

/*
 * Copyright 2011 Red Hat, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qxl_release.c,v 1.3 2021/12/18 23:45:42 riastradh Exp $");

#include <linux/delay.h>

#include <trace/events/dma_fence.h>

#include "qxl_drv.h"
#include "qxl_object.h"

/*
 * drawable cmd cache - allocate a bunch of VRAM pages, suballocate
 * into 256-byte chunks for now - gives 16 cmds per page.
 *
 * use an ida to index into the chunks?
 */
/* manage releasables */
/* stack them 16 high for now - the drawable object is 191 bytes */
#define RELEASE_SIZE 256
#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)

static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };

static const char *qxl_get_driver_name(struct dma_fence *fence)
{
	return "qxl";
}

static const char *qxl_get_timeline_name(struct dma_fence *fence)
{
	return "release";
}

static long qxl_fence_wait(struct dma_fence *fence, bool intr,
			   signed long timeout)
{
	struct qxl_device *qdev;
	struct qxl_release *release;
	int count = 0, sc = 0;
	bool have_drawable_releases;
	unsigned long cur, end = jiffies + timeout;

	qdev = container_of(fence->lock, struct qxl_device, release_lock);
	release = container_of(fence, struct qxl_release, base);
	have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;

retry:
	sc++;

	if (dma_fence_is_signaled(fence))
		goto signaled;

	qxl_io_notify_oom(qdev);

	for (count = 0; count < 11; count++) {
		if (!qxl_queue_garbage_collect(qdev, true))
			break;

		if (dma_fence_is_signaled(fence))
			goto signaled;
	}

	if (dma_fence_is_signaled(fence))
		goto signaled;

	if (have_drawable_releases || sc < 4) {
		if (sc > 2)
			/* back off */
			usleep_range(500, 1000);

		if (time_after(jiffies, end))
			return 0;

		if (have_drawable_releases && sc > 300) {
			DMA_FENCE_WARN(fence, "failed to wait on release %llu "
				       "after spincount %d\n",
				       fence->context & ~0xf0000000, sc);
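			/*
			 * Give up rather than spin forever: fall through
			 * to the signaled path so the caller gets the
			 * remaining timeout back instead of hanging on a
			 * release the host has stopped making progress on.
			 */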
			goto signaled;
		}
		goto retry;
	}
	/*
	 * Note: the original sync_obj_wait gave up after 3 spins when
	 * have_drawable_releases is not set.
	 */

signaled:
	cur = jiffies;
	if (time_after(cur, end))
		return 0;
	return end - cur;
}

static const struct dma_fence_ops qxl_fence_ops = {
	.get_driver_name = qxl_get_driver_name,
	.get_timeline_name = qxl_get_timeline_name,
	.wait = qxl_fence_wait,
};

static int
qxl_release_alloc(struct qxl_device *qdev, int type,
		  struct qxl_release **ret)
{
	struct qxl_release *release;
	int handle;
	size_t size = sizeof(*release);

	release = kmalloc(size, GFP_KERNEL);
	if (!release) {
		DRM_ERROR("Out of memory\n");
		return -ENOMEM;
	}
	release->base.ops = NULL;
	release->type = type;
	release->release_offset = 0;
	release->surface_release_id = 0;
	INIT_LIST_HEAD(&release->bos);

	idr_preload(GFP_KERNEL);
	spin_lock(&qdev->release_idr_lock);
	handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
	release->base.seqno = ++qdev->release_seqno;
	spin_unlock(&qdev->release_idr_lock);
	idr_preload_end();
	if (handle < 0) {
		kfree(release);
		*ret = NULL;
		return handle;
	}
	*ret = release;
	DRM_DEBUG_DRIVER("allocated release %d\n", handle);
	release->id = handle;
	return handle;
}

static void
qxl_release_free_list(struct qxl_release *release)
{
	while (!list_empty(&release->bos)) {
		struct qxl_bo_list *entry;
		struct qxl_bo *bo;

		entry = container_of(release->bos.next,
				     struct qxl_bo_list, tv.head);
		bo = to_qxl_bo(entry->tv.bo);
		qxl_bo_unref(&bo);
		list_del(&entry->tv.head);
		kfree(entry);
	}
	release->release_bo = NULL;
}

void
qxl_release_free(struct qxl_device *qdev,
		 struct qxl_release *release)
{
	DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type);

	if (release->surface_release_id)
		qxl_surface_id_dealloc(qdev, release->surface_release_id);

	spin_lock(&qdev->release_idr_lock);
	idr_remove(&qdev->release_idr, release->id);
	spin_unlock(&qdev->release_idr_lock);

	if (release->base.ops) {
		WARN_ON(list_empty(&release->bos));
		qxl_release_free_list(release);

		dma_fence_signal(&release->base);
		dma_fence_put(&release->base);
	} else {
		qxl_release_free_list(release);
		kfree(release);
	}
}

static int qxl_release_bo_alloc(struct qxl_device *qdev,
				struct qxl_bo **bo)
{
	/* pin release BOs; they are too messy to evict */
	return qxl_bo_create(qdev, PAGE_SIZE, false, true,
			     QXL_GEM_DOMAIN_VRAM, NULL, bo);
}

int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
{
	struct qxl_bo_list *entry;

	list_for_each_entry(entry, &release->bos, tv.head) {
		if (entry->tv.bo == &bo->tbo)
			return 0;
	}

	entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	qxl_bo_ref(bo);
	entry->tv.bo = &bo->tbo;
	entry->tv.num_shared = 0;
	list_add_tail(&entry->tv.head, &release->bos);
	return 0;
}

static int qxl_release_validate_bo(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { true, false };
	int ret;

	if (!bo->pin_count) {
		qxl_ttm_placement_from_domain(bo, bo->type, false);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (ret)
			return ret;
	}

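	/*
	 * Pre-reserve a shared-fence slot on the reservation object so
	 * that qxl_release_fence_buffer_objects() can attach the release
	 * fence later without having to allocate.
	 */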
	ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
	if (ret)
		return ret;

	/* allocate a surface for reserved + validated buffers */
	ret = qxl_bo_check_id(bo->tbo.base.dev->dev_private, bo);
	if (ret)
		return ret;
	return 0;
}

int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
{
	int ret;
	struct qxl_bo_list *entry;

	/* if there is only one object on the release it is the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return 0;

	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
				     !no_intr, NULL);
	if (ret)
		return ret;

	list_for_each_entry(entry, &release->bos, tv.head) {
		struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);

		ret = qxl_release_validate_bo(bo);
		if (ret) {
			ttm_eu_backoff_reservation(&release->ticket, &release->bos);
			return ret;
		}
	}
	return 0;
}

void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
	/* if there is only one object on the release it is the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos))
		return;

	ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}

int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
				       enum qxl_surface_cmd_type surface_cmd_type,
				       struct qxl_release *create_rel,
				       struct qxl_release **release)
{
	if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
		int idr_ret;
		struct qxl_bo *bo;
		union qxl_release_info *info;

		/* stash the release after the create command */
		idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
		if (idr_ret < 0)
			return idr_ret;
		bo = create_rel->release_bo;

		(*release)->release_bo = bo;
		(*release)->release_offset = create_rel->release_offset + 64;

		qxl_release_list_add(*release, bo);

		info = qxl_release_map(qdev, *release);
		info->id = idr_ret;
		qxl_release_unmap(qdev, *release, info);
		return 0;
	}

	return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
					  QXL_RELEASE_SURFACE_CMD, release, NULL);
}

int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
			       int type, struct qxl_release **release,
			       struct qxl_bo **rbo)
{
	struct qxl_bo *bo;
	int idr_ret;
	int ret = 0;
	union qxl_release_info *info;
	int cur_idx;

	if (type == QXL_RELEASE_DRAWABLE)
		cur_idx = 0;
	else if (type == QXL_RELEASE_SURFACE_CMD)
		cur_idx = 1;
	else if (type == QXL_RELEASE_CURSOR_CMD)
		cur_idx = 2;
	else {
		DRM_ERROR("got illegal type: %d\n", type);
		return -EINVAL;
	}

	idr_ret = qxl_release_alloc(qdev, type, release);
	if (idr_ret < 0) {
		if (rbo)
			*rbo = NULL;
		return idr_ret;
	}

	mutex_lock(&qdev->release_mutex);
	if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
		qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
		qdev->current_release_bo_offset[cur_idx] = 0;
		qdev->current_release_bo[cur_idx] = NULL;
	}
	if (!qdev->current_release_bo[cur_idx]) {
		ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
		if (ret) {
			mutex_unlock(&qdev->release_mutex);
			qxl_release_free(qdev, *release);
			return ret;
		}
	}

	bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);

	(*release)->release_bo = bo;
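	/*
	 * Suballocate the next slot out of the current page-sized release
	 * BO: slot N lives at byte offset N * release_size_per_bo[cur_idx],
	 * so e.g. the fourth drawable release in a BO sits at 3 * 256 = 768.
	 */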
	(*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx];
	qdev->current_release_bo_offset[cur_idx]++;

	if (rbo)
		*rbo = bo;

	mutex_unlock(&qdev->release_mutex);

	ret = qxl_release_list_add(*release, bo);
	qxl_bo_unref(&bo);
	if (ret) {
		qxl_release_free(qdev, *release);
		return ret;
	}

	info = qxl_release_map(qdev, *release);
	info->id = idr_ret;
	qxl_release_unmap(qdev, *release, info);

	return ret;
}

struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
					       uint64_t id)
{
	struct qxl_release *release;

	spin_lock(&qdev->release_idr_lock);
	release = idr_find(&qdev->release_idr, id);
	spin_unlock(&qdev->release_idr_lock);
	if (!release) {
		DRM_ERROR("failed to find id in release_idr\n");
		return NULL;
	}

	return release;
}

union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
					struct qxl_release *release)
{
	void *ptr;
	union qxl_release_info *info;
	struct qxl_bo *bo = release->release_bo;

	ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
	if (!ptr)
		return NULL;
	info = ptr + (release->release_offset & ~PAGE_MASK);
	return info;
}

void qxl_release_unmap(struct qxl_device *qdev,
		       struct qxl_release *release,
		       union qxl_release_info *info)
{
	struct qxl_bo *bo = release->release_bo;
	void *ptr;

	ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
	qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}

void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_device *bdev;
	struct ttm_validate_buffer *entry;
	struct qxl_device *qdev;

	/* if there is only one object on the release it is the release itself;
	   since these objects are pinned there is no need to reserve */
	if (list_is_singular(&release->bos) || list_empty(&release->bos))
		return;

	bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	qdev = container_of(bdev, struct qxl_device, mman.bdev);

	/*
	 * Since we never really allocated a context and we don't want to conflict,
	 * set the highest bits. This will break if we really allow exporting of dma-bufs.
	 */
	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
		       release->id | 0xf0000000, release->base.seqno);
	trace_dma_fence_emit(&release->base);

	spin_lock(&ttm_bo_glob.lru_lock);

	list_for_each_entry(entry, &release->bos, head) {
		bo = entry->bo;

		dma_resv_add_shared_fence(bo->base.resv, &release->base);
		ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&ttm_bo_glob.lru_lock);
	ww_acquire_fini(&release->ticket);
}
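
/*
 * Illustrative only, not part of the original file: a rough sketch of
 * how callers elsewhere in the driver (e.g. the draw and cursor paths)
 * are expected to use this API.  Error handling is omitted, "some_bo"
 * is a placeholder for any extra BO the command references, and the
 * exact fill-in step depends on the command type.
 *
 *	struct qxl_release *release;
 *	struct qxl_bo *cmd_bo;
 *	union qxl_release_info *info;
 *
 *	qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
 *				   QXL_RELEASE_DRAWABLE, &release, &cmd_bo);
 *	qxl_release_list_add(release, some_bo);
 *	qxl_release_reserve_list(release, true);
 *
 *	info = qxl_release_map(qdev, release);
 *	... fill in the command containing info ...
 *	qxl_release_unmap(qdev, release, info);
 *
 *	qxl_push_command_ring(qdev, release, QXL_CMD_DRAW);
 *	qxl_release_fence_buffer_objects(release);
 */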