/*	$NetBSD: qxl_object.c,v 1.3 2021/12/18 23:45:42 riastradh Exp $	*/

/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
23 1.1 riastrad * 24 1.1 riastrad * Authors: Dave Airlie 25 1.1 riastrad * Alon Levy 26 1.1 riastrad */ 27 1.1 riastrad 28 1.2 riastrad #include <sys/cdefs.h> 29 1.2 riastrad __KERNEL_RCSID(0, "$NetBSD: qxl_object.c,v 1.3 2021/12/18 23:45:42 riastradh Exp $"); 30 1.2 riastrad 31 1.1 riastrad #include "qxl_drv.h" 32 1.1 riastrad #include "qxl_object.h" 33 1.1 riastrad 34 1.1 riastrad #include <linux/io-mapping.h> 35 1.1 riastrad static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo) 36 1.1 riastrad { 37 1.1 riastrad struct qxl_bo *bo; 38 1.1 riastrad struct qxl_device *qdev; 39 1.1 riastrad 40 1.3 riastrad bo = to_qxl_bo(tbo); 41 1.3 riastrad qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private; 42 1.1 riastrad 43 1.1 riastrad qxl_surface_evict(qdev, bo, false); 44 1.3 riastrad WARN_ON_ONCE(bo->map_count > 0); 45 1.1 riastrad mutex_lock(&qdev->gem.mutex); 46 1.1 riastrad list_del_init(&bo->list); 47 1.1 riastrad mutex_unlock(&qdev->gem.mutex); 48 1.3 riastrad drm_gem_object_release(&bo->tbo.base); 49 1.1 riastrad kfree(bo); 50 1.1 riastrad } 51 1.1 riastrad 52 1.1 riastrad bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo) 53 1.1 riastrad { 54 1.1 riastrad if (bo->destroy == &qxl_ttm_bo_destroy) 55 1.1 riastrad return true; 56 1.1 riastrad return false; 57 1.1 riastrad } 58 1.1 riastrad 59 1.1 riastrad void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) 60 1.1 riastrad { 61 1.1 riastrad u32 c = 0; 62 1.3 riastrad u32 pflag = 0; 63 1.3 riastrad unsigned int i; 64 1.3 riastrad 65 1.3 riastrad if (pinned) 66 1.3 riastrad pflag |= TTM_PL_FLAG_NO_EVICT; 67 1.3 riastrad if (qbo->tbo.base.size <= PAGE_SIZE) 68 1.3 riastrad pflag |= TTM_PL_FLAG_TOPDOWN; 69 1.1 riastrad 70 1.1 riastrad qbo->placement.placement = qbo->placements; 71 1.1 riastrad qbo->placement.busy_placement = qbo->placements; 72 1.1 riastrad if (domain == QXL_GEM_DOMAIN_VRAM) 73 1.2 riastrad qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | 
pflag; 74 1.3 riastrad if (domain == QXL_GEM_DOMAIN_SURFACE) { 75 1.3 riastrad qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV | pflag; 76 1.3 riastrad qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; 77 1.3 riastrad } 78 1.1 riastrad if (domain == QXL_GEM_DOMAIN_CPU) 79 1.2 riastrad qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag; 80 1.1 riastrad if (!c) 81 1.2 riastrad qbo->placements[c++].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; 82 1.1 riastrad qbo->placement.num_placement = c; 83 1.1 riastrad qbo->placement.num_busy_placement = c; 84 1.2 riastrad for (i = 0; i < c; ++i) { 85 1.2 riastrad qbo->placements[i].fpfn = 0; 86 1.2 riastrad qbo->placements[i].lpfn = 0; 87 1.2 riastrad } 88 1.1 riastrad } 89 1.1 riastrad 90 1.3 riastrad static const struct drm_gem_object_funcs qxl_object_funcs = { 91 1.3 riastrad .free = qxl_gem_object_free, 92 1.3 riastrad .open = qxl_gem_object_open, 93 1.3 riastrad .close = qxl_gem_object_close, 94 1.3 riastrad .pin = qxl_gem_prime_pin, 95 1.3 riastrad .unpin = qxl_gem_prime_unpin, 96 1.3 riastrad .get_sg_table = qxl_gem_prime_get_sg_table, 97 1.3 riastrad .vmap = qxl_gem_prime_vmap, 98 1.3 riastrad .vunmap = qxl_gem_prime_vunmap, 99 1.3 riastrad .mmap = drm_gem_ttm_mmap, 100 1.3 riastrad .print_info = drm_gem_ttm_print_info, 101 1.3 riastrad }; 102 1.1 riastrad 103 1.1 riastrad int qxl_bo_create(struct qxl_device *qdev, 104 1.1 riastrad unsigned long size, bool kernel, bool pinned, u32 domain, 105 1.1 riastrad struct qxl_surface *surf, 106 1.1 riastrad struct qxl_bo **bo_ptr) 107 1.1 riastrad { 108 1.1 riastrad struct qxl_bo *bo; 109 1.1 riastrad enum ttm_bo_type type; 110 1.1 riastrad int r; 111 1.1 riastrad 112 1.1 riastrad if (kernel) 113 1.1 riastrad type = ttm_bo_type_kernel; 114 1.1 riastrad else 115 1.1 riastrad type = ttm_bo_type_device; 116 1.1 riastrad *bo_ptr = NULL; 117 1.1 riastrad bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL); 118 1.1 
riastrad if (bo == NULL) 119 1.1 riastrad return -ENOMEM; 120 1.1 riastrad size = roundup(size, PAGE_SIZE); 121 1.3 riastrad r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size); 122 1.1 riastrad if (unlikely(r)) { 123 1.1 riastrad kfree(bo); 124 1.1 riastrad return r; 125 1.1 riastrad } 126 1.3 riastrad bo->tbo.base.funcs = &qxl_object_funcs; 127 1.1 riastrad bo->type = domain; 128 1.1 riastrad bo->pin_count = pinned ? 1 : 0; 129 1.1 riastrad bo->surface_id = 0; 130 1.1 riastrad INIT_LIST_HEAD(&bo->list); 131 1.1 riastrad 132 1.1 riastrad if (surf) 133 1.1 riastrad bo->surf = *surf; 134 1.1 riastrad 135 1.1 riastrad qxl_ttm_placement_from_domain(bo, domain, pinned); 136 1.1 riastrad 137 1.1 riastrad r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, 138 1.3 riastrad &bo->placement, 0, !kernel, size, 139 1.2 riastrad NULL, NULL, &qxl_ttm_bo_destroy); 140 1.1 riastrad if (unlikely(r != 0)) { 141 1.1 riastrad if (r != -ERESTARTSYS) 142 1.3 riastrad dev_err(qdev->ddev.dev, 143 1.1 riastrad "object_init failed for (%lu, 0x%08X)\n", 144 1.1 riastrad size, domain); 145 1.1 riastrad return r; 146 1.1 riastrad } 147 1.1 riastrad *bo_ptr = bo; 148 1.1 riastrad return 0; 149 1.1 riastrad } 150 1.1 riastrad 151 1.1 riastrad int qxl_bo_kmap(struct qxl_bo *bo, void **ptr) 152 1.1 riastrad { 153 1.1 riastrad bool is_iomem; 154 1.1 riastrad int r; 155 1.1 riastrad 156 1.1 riastrad if (bo->kptr) { 157 1.1 riastrad if (ptr) 158 1.1 riastrad *ptr = bo->kptr; 159 1.3 riastrad bo->map_count++; 160 1.1 riastrad return 0; 161 1.1 riastrad } 162 1.1 riastrad r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); 163 1.1 riastrad if (r) 164 1.1 riastrad return r; 165 1.1 riastrad bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); 166 1.1 riastrad if (ptr) 167 1.1 riastrad *ptr = bo->kptr; 168 1.3 riastrad bo->map_count = 1; 169 1.1 riastrad return 0; 170 1.1 riastrad } 171 1.1 riastrad 172 1.1 riastrad void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, 173 1.1 
riastrad struct qxl_bo *bo, int page_offset) 174 1.1 riastrad { 175 1.1 riastrad void *rptr; 176 1.1 riastrad int ret; 177 1.1 riastrad struct io_mapping *map; 178 1.1 riastrad 179 1.1 riastrad if (bo->tbo.mem.mem_type == TTM_PL_VRAM) 180 1.1 riastrad map = qdev->vram_mapping; 181 1.3 riastrad else if (bo->tbo.mem.mem_type == TTM_PL_PRIV) 182 1.1 riastrad map = qdev->surface_mapping; 183 1.1 riastrad else 184 1.1 riastrad goto fallback; 185 1.1 riastrad 186 1.3 riastrad ret = qxl_ttm_io_mem_reserve(bo->tbo.bdev, &bo->tbo.mem); 187 1.1 riastrad 188 1.1 riastrad return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset); 189 1.1 riastrad fallback: 190 1.1 riastrad if (bo->kptr) { 191 1.1 riastrad rptr = bo->kptr + (page_offset * PAGE_SIZE); 192 1.1 riastrad return rptr; 193 1.1 riastrad } 194 1.1 riastrad 195 1.1 riastrad ret = qxl_bo_kmap(bo, &rptr); 196 1.1 riastrad if (ret) 197 1.1 riastrad return NULL; 198 1.1 riastrad 199 1.1 riastrad rptr += page_offset * PAGE_SIZE; 200 1.1 riastrad return rptr; 201 1.1 riastrad } 202 1.1 riastrad 203 1.1 riastrad void qxl_bo_kunmap(struct qxl_bo *bo) 204 1.1 riastrad { 205 1.1 riastrad if (bo->kptr == NULL) 206 1.1 riastrad return; 207 1.3 riastrad bo->map_count--; 208 1.3 riastrad if (bo->map_count > 0) 209 1.3 riastrad return; 210 1.1 riastrad bo->kptr = NULL; 211 1.1 riastrad ttm_bo_kunmap(&bo->kmap); 212 1.1 riastrad } 213 1.1 riastrad 214 1.1 riastrad void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, 215 1.1 riastrad struct qxl_bo *bo, void *pmap) 216 1.1 riastrad { 217 1.3 riastrad if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) && 218 1.3 riastrad (bo->tbo.mem.mem_type != TTM_PL_PRIV)) 219 1.1 riastrad goto fallback; 220 1.1 riastrad 221 1.1 riastrad io_mapping_unmap_atomic(pmap); 222 1.3 riastrad return; 223 1.1 riastrad fallback: 224 1.1 riastrad qxl_bo_kunmap(bo); 225 1.1 riastrad } 226 1.1 riastrad 227 1.1 riastrad void qxl_bo_unref(struct qxl_bo **bo) 228 1.1 riastrad { 229 1.1 riastrad if ((*bo) == 
NULL) 230 1.1 riastrad return; 231 1.2 riastrad 232 1.3 riastrad drm_gem_object_put_unlocked(&(*bo)->tbo.base); 233 1.2 riastrad *bo = NULL; 234 1.1 riastrad } 235 1.1 riastrad 236 1.1 riastrad struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo) 237 1.1 riastrad { 238 1.3 riastrad drm_gem_object_get(&bo->tbo.base); 239 1.1 riastrad return bo; 240 1.1 riastrad } 241 1.1 riastrad 242 1.3 riastrad static int __qxl_bo_pin(struct qxl_bo *bo) 243 1.1 riastrad { 244 1.3 riastrad struct ttm_operation_ctx ctx = { false, false }; 245 1.3 riastrad struct drm_device *ddev = bo->tbo.base.dev; 246 1.1 riastrad int r; 247 1.1 riastrad 248 1.1 riastrad if (bo->pin_count) { 249 1.1 riastrad bo->pin_count++; 250 1.1 riastrad return 0; 251 1.1 riastrad } 252 1.3 riastrad qxl_ttm_placement_from_domain(bo, bo->type, true); 253 1.3 riastrad r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 254 1.1 riastrad if (likely(r == 0)) { 255 1.1 riastrad bo->pin_count = 1; 256 1.1 riastrad } 257 1.1 riastrad if (unlikely(r != 0)) 258 1.3 riastrad dev_err(ddev->dev, "%p pin failed\n", bo); 259 1.1 riastrad return r; 260 1.1 riastrad } 261 1.1 riastrad 262 1.3 riastrad static int __qxl_bo_unpin(struct qxl_bo *bo) 263 1.1 riastrad { 264 1.3 riastrad struct ttm_operation_ctx ctx = { false, false }; 265 1.3 riastrad struct drm_device *ddev = bo->tbo.base.dev; 266 1.1 riastrad int r, i; 267 1.1 riastrad 268 1.1 riastrad if (!bo->pin_count) { 269 1.3 riastrad dev_warn(ddev->dev, "%p unpin not necessary\n", bo); 270 1.1 riastrad return 0; 271 1.1 riastrad } 272 1.1 riastrad bo->pin_count--; 273 1.1 riastrad if (bo->pin_count) 274 1.1 riastrad return 0; 275 1.1 riastrad for (i = 0; i < bo->placement.num_placement; i++) 276 1.2 riastrad bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; 277 1.3 riastrad r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 278 1.1 riastrad if (unlikely(r != 0)) 279 1.3 riastrad dev_err(ddev->dev, "%p validate failed for unpin\n", bo); 280 1.3 riastrad return r; 281 1.3 riastrad 
} 282 1.3 riastrad 283 1.3 riastrad /* 284 1.3 riastrad * Reserve the BO before pinning the object. If the BO was reserved 285 1.3 riastrad * beforehand, use the internal version directly __qxl_bo_pin. 286 1.3 riastrad * 287 1.3 riastrad */ 288 1.3 riastrad int qxl_bo_pin(struct qxl_bo *bo) 289 1.3 riastrad { 290 1.3 riastrad int r; 291 1.3 riastrad 292 1.3 riastrad r = qxl_bo_reserve(bo, false); 293 1.3 riastrad if (r) 294 1.3 riastrad return r; 295 1.3 riastrad 296 1.3 riastrad r = __qxl_bo_pin(bo); 297 1.3 riastrad qxl_bo_unreserve(bo); 298 1.3 riastrad return r; 299 1.3 riastrad } 300 1.3 riastrad 301 1.3 riastrad /* 302 1.3 riastrad * Reserve the BO before pinning the object. If the BO was reserved 303 1.3 riastrad * beforehand, use the internal version directly __qxl_bo_unpin. 304 1.3 riastrad * 305 1.3 riastrad */ 306 1.3 riastrad int qxl_bo_unpin(struct qxl_bo *bo) 307 1.3 riastrad { 308 1.3 riastrad int r; 309 1.3 riastrad 310 1.3 riastrad r = qxl_bo_reserve(bo, false); 311 1.3 riastrad if (r) 312 1.3 riastrad return r; 313 1.3 riastrad 314 1.3 riastrad r = __qxl_bo_unpin(bo); 315 1.3 riastrad qxl_bo_unreserve(bo); 316 1.1 riastrad return r; 317 1.1 riastrad } 318 1.1 riastrad 319 1.1 riastrad void qxl_bo_force_delete(struct qxl_device *qdev) 320 1.1 riastrad { 321 1.1 riastrad struct qxl_bo *bo, *n; 322 1.1 riastrad 323 1.1 riastrad if (list_empty(&qdev->gem.objects)) 324 1.1 riastrad return; 325 1.3 riastrad dev_err(qdev->ddev.dev, "Userspace still has active objects !\n"); 326 1.1 riastrad list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) { 327 1.3 riastrad dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n", 328 1.3 riastrad &bo->tbo.base, bo, (unsigned long)bo->tbo.base.size, 329 1.3 riastrad *((unsigned long *)&bo->tbo.base.refcount)); 330 1.1 riastrad mutex_lock(&qdev->gem.mutex); 331 1.1 riastrad list_del_init(&bo->list); 332 1.1 riastrad mutex_unlock(&qdev->gem.mutex); 333 1.1 riastrad /* this should unref the ttm bo */ 334 1.3 riastrad 
drm_gem_object_put_unlocked(&bo->tbo.base); 335 1.1 riastrad } 336 1.1 riastrad } 337 1.1 riastrad 338 1.1 riastrad int qxl_bo_init(struct qxl_device *qdev) 339 1.1 riastrad { 340 1.1 riastrad return qxl_ttm_init(qdev); 341 1.1 riastrad } 342 1.1 riastrad 343 1.1 riastrad void qxl_bo_fini(struct qxl_device *qdev) 344 1.1 riastrad { 345 1.1 riastrad qxl_ttm_fini(qdev); 346 1.1 riastrad } 347 1.1 riastrad 348 1.1 riastrad int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo) 349 1.1 riastrad { 350 1.1 riastrad int ret; 351 1.3 riastrad 352 1.1 riastrad if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) { 353 1.1 riastrad /* allocate a surface id for this surface now */ 354 1.1 riastrad ret = qxl_surface_id_alloc(qdev, bo); 355 1.1 riastrad if (ret) 356 1.1 riastrad return ret; 357 1.1 riastrad 358 1.3 riastrad ret = qxl_hw_surface_alloc(qdev, bo); 359 1.1 riastrad if (ret) 360 1.1 riastrad return ret; 361 1.1 riastrad } 362 1.1 riastrad return 0; 363 1.1 riastrad } 364 1.1 riastrad 365 1.1 riastrad int qxl_surf_evict(struct qxl_device *qdev) 366 1.1 riastrad { 367 1.3 riastrad return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV); 368 1.1 riastrad } 369 1.1 riastrad 370 1.1 riastrad int qxl_vram_evict(struct qxl_device *qdev) 371 1.1 riastrad { 372 1.1 riastrad return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM); 373 1.1 riastrad } 374