/* $NetBSD: nouveau_prime.c,v 1.3 2021/12/18 23:45:32 riastradh Exp $ */

/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
23 * 24 * Authors: Dave Airlie 25 */ 26 27 #include <sys/cdefs.h> 28 __KERNEL_RCSID(0, "$NetBSD: nouveau_prime.c,v 1.3 2021/12/18 23:45:32 riastradh Exp $"); 29 30 #include <linux/dma-buf.h> 31 32 #include "nouveau_drv.h" 33 #include "nouveau_gem.h" 34 35 struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj) 36 { 37 struct nouveau_bo *nvbo = nouveau_gem_object(obj); 38 int npages = nvbo->bo.num_pages; 39 40 return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages); 41 } 42 43 void *nouveau_gem_prime_vmap(struct drm_gem_object *obj) 44 { 45 struct nouveau_bo *nvbo = nouveau_gem_object(obj); 46 int ret; 47 48 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages, 49 &nvbo->dma_buf_vmap); 50 if (ret) 51 return ERR_PTR(ret); 52 53 return nvbo->dma_buf_vmap.virtual; 54 } 55 56 void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) 57 { 58 struct nouveau_bo *nvbo = nouveau_gem_object(obj); 59 60 ttm_bo_kunmap(&nvbo->dma_buf_vmap); 61 } 62 63 struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, 64 struct dma_buf_attachment *attach, 65 struct sg_table *sg) 66 { 67 struct nouveau_drm *drm = nouveau_drm(dev); 68 struct drm_gem_object *obj; 69 struct nouveau_bo *nvbo; 70 struct dma_resv *robj = attach->dmabuf->resv; 71 u64 size = attach->dmabuf->size; 72 u32 flags = 0; 73 int align = 0; 74 int ret; 75 76 flags = TTM_PL_FLAG_TT; 77 78 dma_resv_lock(robj, NULL); 79 nvbo = nouveau_bo_alloc(&drm->client, &size, &align, flags, 0, 0); 80 if (IS_ERR(nvbo)) { 81 obj = ERR_CAST(nvbo); 82 goto unlock; 83 } 84 85 nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART; 86 87 /* Initialize the embedded gem-object. We return a single gem-reference 88 * to the caller, instead of a normal nouveau_bo ttm reference. 
*/ 89 ret = drm_gem_object_init(dev, &nvbo->bo.base, size); 90 if (ret) { 91 nouveau_bo_ref(NULL, &nvbo); 92 obj = ERR_PTR(-ENOMEM); 93 goto unlock; 94 } 95 96 ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj); 97 if (ret) { 98 nouveau_bo_ref(NULL, &nvbo); 99 obj = ERR_PTR(ret); 100 goto unlock; 101 } 102 103 obj = &nvbo->bo.base; 104 105 unlock: 106 dma_resv_unlock(robj); 107 return obj; 108 } 109 110 int nouveau_gem_prime_pin(struct drm_gem_object *obj) 111 { 112 struct nouveau_bo *nvbo = nouveau_gem_object(obj); 113 int ret; 114 115 /* pin buffer into GTT */ 116 ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT, false); 117 if (ret) 118 return -EINVAL; 119 120 return 0; 121 } 122 123 void nouveau_gem_prime_unpin(struct drm_gem_object *obj) 124 { 125 struct nouveau_bo *nvbo = nouveau_gem_object(obj); 126 127 nouveau_bo_unpin(nvbo); 128 } 129