/*	$NetBSD: nouveau_gem.c,v 1.14 2022/05/31 00:17:10 riastradh Exp $	*/

/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_gem.c,v 1.14 2022/05/31 00:17:10 riastradh Exp $");

#include <asm/uaccess.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"
#include "nouveau_mem.h"
#include "nouveau_vmm.h"

#include <nvif/class.h>

#include <linux/nbsd-namespace.h>

void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (WARN_ON(ret < 0 && ret != -EACCES))
		return;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_put(&nvbo->bo);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}
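/*
 * Open-hook for the GEM object: runs when a client acquires a handle
 * to the buffer.  On NV50-and-later VMMs this maps the buffer into the
 * client's GPU address space under the BO reservation, holding a
 * runtime-PM reference across the map.
 */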
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0 && ret != -EACCES)
		goto out;

	ret = nouveau_vma_new(nvbo, vmm, &vma);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

struct nouveau_gem_object_unmap {
	struct nouveau_cli_work work;
	struct nouveau_vma *vma;
};

static void
nouveau_gem_object_delete(struct nouveau_vma *vma)
{
	nouveau_fence_unref(&vma->fence);
	nouveau_vma_del(&vma);
}

static void
nouveau_gem_object_delete_work(struct nouveau_cli_work *w)
{
	struct nouveau_gem_object_unmap *work =
		container_of(w, typeof(*work), work);
	nouveau_gem_object_delete(work->vma);
	kfree(work);
}

static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	struct dma_fence *fence = vma->fence ? &vma->fence->base : NULL;
	struct nouveau_gem_object_unmap *work;

	list_del_init(&vma->head);

	if (!fence) {
		nouveau_gem_object_delete(vma);
		return;
	}

	if (!(work = kmalloc(sizeof(*work), GFP_KERNEL))) {
		WARN_ON(dma_fence_wait_timeout(fence, false, 2 * HZ) <= 0);
		nouveau_gem_object_delete(vma);
		return;
	}

	work->work.func = nouveau_gem_object_delete_work;
	work->vma = vma;
	nouveau_cli_work_queue(vma->vmm->cli, fence, &work->work);
}
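/*
 * Close-hook for the GEM object: runs when a client's handle goes
 * away.  Drops the client's reference on its mapping; when the last
 * reference is gone the buffer is unmapped, with teardown deferred to
 * client work if a fence is still outstanding (see
 * nouveau_gem_object_unmap() above).
 */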
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct device *dev = drm->dev->dev;
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;
	int ret;

	if (vmm->vmm.object.oclass < NVIF_CLASS_VMM_NV50)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
	if (ret)
		return;

	vma = nouveau_vma_find(nvbo, vmm);
	if (vma) {
		if (--vma->refs == 0) {
			ret = pm_runtime_get_sync(dev);
			if (!WARN_ON(ret < 0 && ret != -EACCES)) {
				nouveau_gem_object_unmap(nvbo, vma);
				pm_runtime_mark_last_busy(dev);
				pm_runtime_put_autosuspend(dev);
			}
		}
	}
	ttm_bo_unreserve(&nvbo->bo);
}

int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = cli->drm;
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
		flags |= TTM_PL_FLAG_UNCACHED;

	nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
				tile_flags);
	if (IS_ERR(nvbo))
		return PTR_ERR(nvbo);

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
	if (ret) {
		kfree(nvbo);
		return ret;
	}

	ret = nouveau_bo_init(nvbo, size, align, flags, NULL, NULL);
	if (ret) {
		/* XXX note: if this fails it kfrees nvbo */
		return ret;
	}

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		nvbo->valid_domains &= domain;
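/*
 * Fill in a drm_nouveau_gem_info reply for userspace: the buffer's
 * valid domains, its GPU offset (the per-client VMA address on NV50+),
 * the mmap handle, and tiling state in the per-generation encoding
 * that the ABI expects.
 */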
#ifndef __NetBSD__		/* XXX Let TTM swap; skip GEM like radeon.  */
	nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
#endif
	*pnvbo = nvbo;
	return 0;
}

static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vmm *vmm = cli->svm.cli ? &cli->svm : &cli->vmm;
	struct nouveau_vma *vma;

	if (is_power_of_2(nvbo->valid_domains))
		rep->domain = nvbo->valid_domains;
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
	rep->offset = nvbo->bo.offset;
	if (vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
		vma = nouveau_vma_find(nvbo, vmm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->addr;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
	rep->tile_mode = nvbo->mode;
	rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
		rep->tile_flags |= nvbo->kind << 8;
	else
	if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
		rep->tile_flags |= nvbo->kind << 8 | nvbo->comp << 16;
	else
		rep->tile_flags |= nvbo->zeta;
	return 0;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	ret = nouveau_gem_new(cli, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &nvbo->bo.base,
				    &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, &nvbo->bo.base, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&nvbo->bo.base);
	return ret;
}
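/*
 * Translate the domains userspace requested into a TTM placement,
 * constrained to the domains the buffer was created with.  The
 * placement the buffer currently occupies is preferred, when valid,
 * to avoid a needless migration.
 */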
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

struct validate_op {
	struct list_head list;
	struct ww_acquire_ctx ticket;
};

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
			struct nouveau_fence *fence,
			struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_bo *nvbo;
	struct drm_nouveau_gem_pushbuf_bo *b;

	while (!list_empty(&op->list)) {
		nvbo = list_entry(op->list.next, struct nouveau_bo, entry);
		b = &pbbo[nvbo->pbbo_index];

		if (likely(fence)) {
			nouveau_bo_fence(nvbo, fence, !!b->write_domains);

			if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
				struct nouveau_vma *vma =
					(void *)(unsigned long)b->user_priv;
				nouveau_fence_unref(&vma->fence);
				dma_fence_get(&fence->base);
				vma->fence = fence;
			}
		}

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve(&nvbo->bo);
		drm_gem_object_put_unlocked(&nvbo->bo.base);
	}
}

static void
validate_fini(struct validate_op *op, struct nouveau_channel *chan,
	      struct nouveau_fence *fence,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	validate_fini_no_ticket(op, chan, fence, pbbo);
	ww_acquire_fini(&op->ticket);
}
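/*
 * Look up and reserve every buffer on a pushbuf's validation list
 * under a single ww_acquire ticket.  On -EDEADLK the already-reserved
 * buffers are released and the contended buffer is re-reserved via the
 * slowpath before the whole list is retried.
 */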
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int trycnt = 0;
	int ret = -EINVAL, i;
	struct nouveau_bo *res_bo = NULL;
	LIST_HEAD(gart_list);
	LIST_HEAD(vram_list);
	LIST_HEAD(both_list);

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_PRINTK(err, cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(file_priv, b->handle);
		if (!gem) {
			NV_PRINTK(err, cli, "Unknown handle 0x%08x\n", b->handle);
			ret = -ENOENT;
			break;
		}
		nvbo = nouveau_gem_object(gem);
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_put_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_PRINTK(err, cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_put_unlocked(gem);
			ret = -EINVAL;
			break;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, &op->ticket);
		if (ret) {
			list_splice_tail_init(&vram_list, &op->list);
			list_splice_tail_init(&gart_list, &op->list);
			list_splice_tail_init(&both_list, &op->list);
			validate_fini_no_ticket(op, chan, NULL, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				if (ret != -ERESTARTSYS)
					NV_PRINTK(err, cli, "fail reserve\n");
				break;
			}
		}

		if (chan->vmm->vmm.object.oclass >= NVIF_CLASS_VMM_NV50) {
			struct nouveau_vmm *vmm = chan->vmm;
			struct nouveau_vma *vma = nouveau_vma_find(nvbo, vmm);
			if (!vma) {
				NV_PRINTK(err, cli, "vma not found!\n");
				ret = -EINVAL;
				break;
			}

			b->user_priv = (uint64_t)(unsigned long)vma;
		} else {
			b->user_priv = (uint64_t)(unsigned long)nvbo;
		}

		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &gart_list);
		else {
			NV_PRINTK(err, cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &both_list);
			ret = -EINVAL;
			break;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	list_splice_tail(&vram_list, &op->list);
	list_splice_tail(&gart_list, &op->list);
	list_splice_tail(&both_list, &op->list);
	if (ret)
		validate_fini(op, chan, NULL, NULL);
	return ret;
}

#ifdef __NetBSD__		/* XXX yargleblargh */
#  define	__force
#endif
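/*
 * Set the requested placement on each reserved buffer, validate it,
 * and synchronize it with the channel.  On pre-Tesla chips, also
 * refresh stale presumed offsets; the return value is the number of
 * buffers whose relocations must be (re)applied.
 */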
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo)
{
	struct nouveau_drm *drm = chan->drm;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = nouveau_gem_set_domain(&nvbo->bo.base, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_PRINTK(err, cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail ttm_validate\n");
			return ret;
		}

		ret = nouveau_fence_sync(nvbo, chan, !!b->write_domains, true);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_PRINTK(err, cli, "fail post-validate sync\n");
			return ret;
		}

		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;
		}
	}

	return relocs;
}
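/*
 * Reserve and validate a pushbuf's buffer list.  On success,
 * *apply_relocs reports whether any presumed offsets were stale, i.e.
 * whether relocations still need to be applied.
 */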
static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     int nr_buffers,
			     struct validate_op *op, bool *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret;

	INIT_LIST_HEAD(&op->list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->list, pbbo);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validating bo list\n");
		validate_fini(op, chan, NULL, NULL);
		return ret;
	}
	*apply_relocs = ret;
	return 0;
}

static inline void
u_free(void *addr)
{
	kvfree(addr);
}

static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kvmalloc(size, GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}
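/*
 * Patch relocations into the buffers through a kernel mapping: each
 * reloc writes the low or high half of the presumed offset (optionally
 * OR'd with a placement-dependent value) into the containing buffer
 * once that buffer is idle.
 */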
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_reloc *reloc,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	int ret = 0;
	unsigned i;

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_PRINTK(err, cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_PRINTK(err, cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		ret = ttm_bo_wait(&nvbo->bo, false, false);
		if (ret) {
			NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	u_free(reloc);
	return ret;
}
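/*
 * DRM_NOUVEAU_GEM_PUSHBUF: the main submission ioctl.  Copies in the
 * push and buffer arrays (and the relocs, only once they turn out to
 * be needed), validates the buffer list, applies relocations, submits
 * through the channel's IB ring (or via call/jump on older chips),
 * then fences the submission and copies updated presumed offsets back
 * out to userspace.
 */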
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0;
	bool do_reloc = false, sync = false;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->chid == req->channel) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	if (unlikely(atomic_read(&chan->killed)))
		return nouveau_abi16_put(abi16, -ENODEV);

	sync = req->vram_available & NOUVEAU_GEM_PUSHBUF_SYNC;

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_PRINTK(err, cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_PRINTK(err, cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_PRINTK(err, cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_PRINTK(err, cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
revalidate:
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_PRINTK(err, cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		if (!reloc) {
			validate_fini(&op, chan, NULL, bo);
			reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
			if (IS_ERR(reloc)) {
				ret = PTR_ERR(reloc);
				goto out_prevalid;
			}

			goto revalidate;
		}

		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, reloc, bo);
		if (ret) {
			NV_PRINTK(err, cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_PRINTK(err, cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_vma *vma = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, vma->addr + push[i].offset,
				      push[i].length);
		}
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_PRINTK(err, cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.addr + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.
							  num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_PRINTK(err, cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

	if (sync) {
		if (!(ret = nouveau_fence_wait(fence, false, false))) {
			if ((ret = dma_fence_get_status(&fence->base)) == 1)
				ret = 0;
		}
	}

out:
	validate_fini(&op, chan, fence, bo);
	nouveau_fence_unref(&fence);

	if (do_reloc) {
		struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
			u64_to_user_ptr(req->buffers);

		for (i = 0; i < req->nr_buffers; i++) {
			if (bo[i].presumed.valid)
				continue;

			if (copy_to_user(&upbbo[i].presumed, &bo[i].presumed,
					 sizeof(bo[i].presumed))) {
				ret = -EFAULT;
				break;
			}
		}
		u_free(reloc);
	}
out_prevalid:
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (drm->client.device.info.chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.addr + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}
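/*
 * DRM_NOUVEAU_GEM_CPU_PREP: wait for pending GPU access to the buffer
 * (up to 30 seconds, or not at all with NOWAIT), then synchronize the
 * buffer for CPU access.
 */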
int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
	long lret;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
					 no_wait ? 0 : 30 * HZ);
	if (!lret)
		ret = -EBUSY;
	else if (lret > 0)
		ret = 0;
	else
		ret = lret;

	nouveau_bo_sync_for_cpu(nvbo);
	drm_gem_object_put_unlocked(gem);

	return ret;
}
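/*
 * DRM_NOUVEAU_GEM_CPU_FINI: end a CPU access window and synchronize
 * the buffer back for device access.
 */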
int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_fini *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	nouveau_bo_sync_for_device(nvbo);
	drm_gem_object_put_unlocked(gem);
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_put_unlocked(gem);
	return ret;
}