1 1.4 riastrad /* $NetBSD: vmwgfx_resource.c,v 1.4 2022/02/17 01:21:02 riastradh Exp $ */ 2 1.2 riastrad 3 1.3 riastrad // SPDX-License-Identifier: GPL-2.0 OR MIT 4 1.1 riastrad /************************************************************************** 5 1.1 riastrad * 6 1.3 riastrad * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA 7 1.1 riastrad * 8 1.1 riastrad * Permission is hereby granted, free of charge, to any person obtaining a 9 1.1 riastrad * copy of this software and associated documentation files (the 10 1.1 riastrad * "Software"), to deal in the Software without restriction, including 11 1.1 riastrad * without limitation the rights to use, copy, modify, merge, publish, 12 1.1 riastrad * distribute, sub license, and/or sell copies of the Software, and to 13 1.1 riastrad * permit persons to whom the Software is furnished to do so, subject to 14 1.1 riastrad * the following conditions: 15 1.1 riastrad * 16 1.1 riastrad * The above copyright notice and this permission notice (including the 17 1.1 riastrad * next paragraph) shall be included in all copies or substantial portions 18 1.1 riastrad * of the Software. 19 1.1 riastrad * 20 1.1 riastrad * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 1.1 riastrad * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 1.1 riastrad * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 23 1.1 riastrad * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 24 1.1 riastrad * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 25 1.1 riastrad * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 26 1.1 riastrad * USE OR OTHER DEALINGS IN THE SOFTWARE. 27 1.1 riastrad * 28 1.1 riastrad **************************************************************************/ 29 1.1 riastrad 30 1.2 riastrad #include <sys/cdefs.h> 31 1.4 riastrad __KERNEL_RCSID(0, "$NetBSD: vmwgfx_resource.c,v 1.4 2022/02/17 01:21:02 riastradh Exp $"); 32 1.2 riastrad 33 1.1 riastrad #include <drm/ttm/ttm_placement.h> 34 1.3 riastrad 35 1.1 riastrad #include "vmwgfx_resource_priv.h" 36 1.2 riastrad #include "vmwgfx_binding.h" 37 1.3 riastrad #include "vmwgfx_drv.h" 38 1.2 riastrad 39 1.2 riastrad #define VMW_RES_EVICT_ERR_COUNT 10 40 1.1 riastrad 41 1.3 riastrad /** 42 1.3 riastrad * vmw_resource_mob_attach - Mark a resource as attached to its backing mob 43 1.3 riastrad * @res: The resource 44 1.3 riastrad */ 45 1.3 riastrad void vmw_resource_mob_attach(struct vmw_resource *res) 46 1.1 riastrad { 47 1.3 riastrad struct vmw_buffer_object *backup = res->backup; 48 1.3 riastrad struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL; 49 1.3 riastrad 50 1.3 riastrad dma_resv_assert_held(res->backup->base.base.resv); 51 1.3 riastrad res->used_prio = (res->res_dirty) ? res->func->dirty_prio : 52 1.3 riastrad res->func->prio; 53 1.3 riastrad 54 1.4 riastrad #ifdef __NetBSD__ 55 1.4 riastrad rb_tree_insert_node etc etc etc 56 1.4 riastrad #else 57 1.3 riastrad while (*new) { 58 1.3 riastrad struct vmw_resource *this = 59 1.3 riastrad container_of(*new, struct vmw_resource, mob_node); 60 1.3 riastrad 61 1.3 riastrad parent = *new; 62 1.3 riastrad new = (res->backup_offset < this->backup_offset) ? 63 1.3 riastrad &((*new)->rb_left) : &((*new)->rb_right); 64 1.3 riastrad } 65 1.3 riastrad 66 1.3 riastrad rb_link_node(&res->mob_node, parent, new); 67 1.3 riastrad rb_insert_color(&res->mob_node, &backup->res_tree); 68 1.4 riastrad #endif 69 1.4 riastrad res->mob_attached = true; 70 1.3 riastrad 71 1.3 riastrad vmw_bo_prio_add(backup, res->used_prio); 72 1.1 riastrad } 73 1.1 riastrad 74 1.3 riastrad /** 75 1.3 riastrad * vmw_resource_mob_detach - Mark a resource as detached from its backing mob 76 1.3 riastrad * @res: The resource 77 1.3 riastrad */ 78 1.3 riastrad void vmw_resource_mob_detach(struct vmw_resource *res) 79 1.1 riastrad { 80 1.3 riastrad struct vmw_buffer_object *backup = res->backup; 81 1.3 riastrad 82 1.3 riastrad dma_resv_assert_held(backup->base.base.resv); 83 1.3 riastrad if (vmw_resource_mob_attached(res)) { 84 1.4 riastrad res->mob_attached = false; 85 1.3 riastrad rb_erase(&res->mob_node, &backup->res_tree); 86 1.3 riastrad RB_CLEAR_NODE(&res->mob_node); 87 1.3 riastrad vmw_bo_prio_del(backup, res->used_prio); 88 1.3 riastrad } 89 1.1 riastrad } 90 1.1 riastrad 91 1.1 riastrad struct vmw_resource *vmw_resource_reference(struct vmw_resource *res) 92 1.1 riastrad { 93 1.1 riastrad kref_get(&res->kref); 94 1.1 riastrad return res; 95 1.1 riastrad } 96 1.1 riastrad 97 1.2 riastrad struct vmw_resource * 98 1.2 riastrad vmw_resource_reference_unless_doomed(struct vmw_resource *res) 99 1.2 riastrad { 100 1.2 riastrad return kref_get_unless_zero(&res->kref) ? res : NULL; 101 1.2 riastrad } 102 1.1 riastrad 103 1.1 riastrad /** 104 1.1 riastrad * vmw_resource_release_id - release a resource id to the id manager. 105 1.1 riastrad * 106 1.1 riastrad * @res: Pointer to the resource. 107 1.1 riastrad * 108 1.1 riastrad * Release the resource id to the resource id manager and set it to -1 109 1.1 riastrad */ 110 1.1 riastrad void vmw_resource_release_id(struct vmw_resource *res) 111 1.1 riastrad { 112 1.1 riastrad struct vmw_private *dev_priv = res->dev_priv; 113 1.1 riastrad struct idr *idr = &dev_priv->res_idr[res->func->res_type]; 114 1.1 riastrad 115 1.3 riastrad spin_lock(&dev_priv->resource_lock); 116 1.1 riastrad if (res->id != -1) 117 1.1 riastrad idr_remove(idr, res->id); 118 1.1 riastrad res->id = -1; 119 1.3 riastrad spin_unlock(&dev_priv->resource_lock); 120 1.1 riastrad } 121 1.1 riastrad 122 1.1 riastrad static void vmw_resource_release(struct kref *kref) 123 1.1 riastrad { 124 1.1 riastrad struct vmw_resource *res = 125 1.1 riastrad container_of(kref, struct vmw_resource, kref); 126 1.1 riastrad struct vmw_private *dev_priv = res->dev_priv; 127 1.1 riastrad int id; 128 1.1 riastrad struct idr *idr = &dev_priv->res_idr[res->func->res_type]; 129 1.1 riastrad 130 1.3 riastrad spin_lock(&dev_priv->resource_lock); 131 1.1 riastrad list_del_init(&res->lru_head); 132 1.3 riastrad spin_unlock(&dev_priv->resource_lock); 133 1.1 riastrad if (res->backup) { 134 1.1 riastrad struct ttm_buffer_object *bo = &res->backup->base; 135 1.1 riastrad 136 1.3 riastrad ttm_bo_reserve(bo, false, false, NULL); 137 1.3 riastrad if (vmw_resource_mob_attached(res) && 138 1.1 riastrad res->func->unbind != NULL) { 139 1.1 riastrad struct ttm_validate_buffer val_buf; 140 1.1 riastrad 141 1.1 riastrad val_buf.bo = bo; 142 1.3 riastrad val_buf.num_shared = 0; 143 1.1 riastrad res->func->unbind(res, false, &val_buf); 144 1.1 riastrad } 145 1.1 riastrad res->backup_dirty = false; 146 1.3 riastrad vmw_resource_mob_detach(res); 147 1.3 riastrad if (res->dirty) 148 1.3 riastrad res->func->dirty_free(res); 149 1.3 riastrad if (res->coherent) 150 1.3 riastrad vmw_bo_dirty_release(res->backup); 151 1.1 riastrad ttm_bo_unreserve(bo); 152 1.3 riastrad vmw_bo_unreference(&res->backup); 153 1.1 riastrad } 154 1.1 riastrad 155 1.2 riastrad if (likely(res->hw_destroy != NULL)) { 156 1.2 riastrad mutex_lock(&dev_priv->binding_mutex); 157 1.2 riastrad vmw_binding_res_list_kill(&res->binding_head); 158 1.2 riastrad mutex_unlock(&dev_priv->binding_mutex); 159 1.1 riastrad res->hw_destroy(res); 160 1.2 riastrad } 161 1.1 riastrad 162 1.1 riastrad id = res->id; 163 1.1 riastrad if (res->res_free != NULL) 164 1.1 riastrad res->res_free(res); 165 1.1 riastrad else 166 1.1 riastrad kfree(res); 167 1.1 riastrad 168 1.3 riastrad spin_lock(&dev_priv->resource_lock); 169 1.1 riastrad if (id != -1) 170 1.1 riastrad idr_remove(idr, id); 171 1.3 riastrad spin_unlock(&dev_priv->resource_lock); 172 1.1 riastrad } 173 1.1 riastrad 174 1.1 riastrad void vmw_resource_unreference(struct vmw_resource **p_res) 175 1.1 riastrad { 176 1.1 riastrad struct vmw_resource *res = *p_res; 177 1.1 riastrad 178 1.1 riastrad *p_res = NULL; 179 1.1 riastrad kref_put(&res->kref, vmw_resource_release); 180 1.1 riastrad } 181 1.1 riastrad 182 1.1 riastrad 183 1.1 riastrad /** 184 1.1 riastrad * vmw_resource_alloc_id - release a resource id to the id manager. 185 1.1 riastrad * 186 1.1 riastrad * @res: Pointer to the resource. 187 1.1 riastrad * 188 1.1 riastrad * Allocate the lowest free resource from the resource manager, and set 189 1.1 riastrad * @res->id to that id. Returns 0 on success and -ENOMEM on failure. 190 1.1 riastrad */ 191 1.1 riastrad int vmw_resource_alloc_id(struct vmw_resource *res) 192 1.1 riastrad { 193 1.1 riastrad struct vmw_private *dev_priv = res->dev_priv; 194 1.1 riastrad int ret; 195 1.1 riastrad struct idr *idr = &dev_priv->res_idr[res->func->res_type]; 196 1.1 riastrad 197 1.1 riastrad BUG_ON(res->id != -1); 198 1.1 riastrad 199 1.2 riastrad idr_preload(GFP_KERNEL); 200 1.3 riastrad spin_lock(&dev_priv->resource_lock); 201 1.1 riastrad 202 1.2 riastrad ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT); 203 1.2 riastrad if (ret >= 0) 204 1.2 riastrad res->id = ret; 205 1.1 riastrad 206 1.3 riastrad spin_unlock(&dev_priv->resource_lock); 207 1.2 riastrad idr_preload_end(); 208 1.2 riastrad return ret < 0 ? ret : 0; 209 1.1 riastrad } 210 1.1 riastrad 211 1.1 riastrad /** 212 1.1 riastrad * vmw_resource_init - initialize a struct vmw_resource 213 1.1 riastrad * 214 1.1 riastrad * @dev_priv: Pointer to a device private struct. 215 1.1 riastrad * @res: The struct vmw_resource to initialize. 216 1.1 riastrad * @obj_type: Resource object type. 217 1.1 riastrad * @delay_id: Boolean whether to defer device id allocation until 218 1.1 riastrad * the first validation. 219 1.1 riastrad * @res_free: Resource destructor. 220 1.1 riastrad * @func: Resource function table. 221 1.1 riastrad */ 222 1.1 riastrad int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res, 223 1.1 riastrad bool delay_id, 224 1.1 riastrad void (*res_free) (struct vmw_resource *res), 225 1.1 riastrad const struct vmw_res_func *func) 226 1.1 riastrad { 227 1.1 riastrad kref_init(&res->kref); 228 1.1 riastrad res->hw_destroy = NULL; 229 1.1 riastrad res->res_free = res_free; 230 1.1 riastrad res->dev_priv = dev_priv; 231 1.1 riastrad res->func = func; 232 1.3 riastrad RB_CLEAR_NODE(&res->mob_node); 233 1.1 riastrad INIT_LIST_HEAD(&res->lru_head); 234 1.2 riastrad INIT_LIST_HEAD(&res->binding_head); 235 1.1 riastrad res->id = -1; 236 1.1 riastrad res->backup = NULL; 237 1.1 riastrad res->backup_offset = 0; 238 1.1 riastrad res->backup_dirty = false; 239 1.1 riastrad res->res_dirty = false; 240 1.3 riastrad res->coherent = false; 241 1.3 riastrad res->used_prio = 3; 242 1.3 riastrad res->dirty = NULL; 243 1.1 riastrad if (delay_id) 244 1.1 riastrad return 0; 245 1.1 riastrad else 246 1.1 riastrad return vmw_resource_alloc_id(res); 247 1.1 riastrad } 248 1.1 riastrad 249 1.1 riastrad 250 1.1 riastrad /** 251 1.1 riastrad * vmw_user_resource_lookup_handle - lookup a struct resource from a 252 1.1 riastrad * TTM user-space handle and perform basic type checks 253 1.1 riastrad * 254 1.1 riastrad * @dev_priv: Pointer to a device private struct 255 1.1 riastrad * @tfile: Pointer to a struct ttm_object_file identifying the caller 256 1.1 riastrad * @handle: The TTM user-space handle 257 1.1 riastrad * @converter: Pointer to an object describing the resource type 258 1.1 riastrad * @p_res: On successful return the location pointed to will contain 259 1.1 riastrad * a pointer to a refcounted struct vmw_resource. 260 1.1 riastrad * 261 1.1 riastrad * If the handle can't be found or is associated with an incorrect resource 262 1.1 riastrad * type, -EINVAL will be returned. 263 1.1 riastrad */ 264 1.1 riastrad int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv, 265 1.1 riastrad struct ttm_object_file *tfile, 266 1.1 riastrad uint32_t handle, 267 1.1 riastrad const struct vmw_user_resource_conv 268 1.1 riastrad *converter, 269 1.1 riastrad struct vmw_resource **p_res) 270 1.1 riastrad { 271 1.1 riastrad struct ttm_base_object *base; 272 1.1 riastrad struct vmw_resource *res; 273 1.1 riastrad int ret = -EINVAL; 274 1.1 riastrad 275 1.1 riastrad base = ttm_base_object_lookup(tfile, handle); 276 1.1 riastrad if (unlikely(base == NULL)) 277 1.1 riastrad return -EINVAL; 278 1.1 riastrad 279 1.2 riastrad if (unlikely(ttm_base_object_type(base) != converter->object_type)) 280 1.1 riastrad goto out_bad_resource; 281 1.1 riastrad 282 1.1 riastrad res = converter->base_obj_to_res(base); 283 1.1 riastrad kref_get(&res->kref); 284 1.1 riastrad 285 1.1 riastrad *p_res = res; 286 1.1 riastrad ret = 0; 287 1.1 riastrad 288 1.1 riastrad out_bad_resource: 289 1.1 riastrad ttm_base_object_unref(&base); 290 1.1 riastrad 291 1.1 riastrad return ret; 292 1.1 riastrad } 293 1.1 riastrad 294 1.1 riastrad /** 295 1.3 riastrad * vmw_user_resource_lookup_handle - lookup a struct resource from a 296 1.3 riastrad * TTM user-space handle and perform basic type checks 297 1.3 riastrad * 298 1.3 riastrad * @dev_priv: Pointer to a device private struct 299 1.3 riastrad * @tfile: Pointer to a struct ttm_object_file identifying the caller 300 1.3 riastrad * @handle: The TTM user-space handle 301 1.3 riastrad * @converter: Pointer to an object describing the resource type 302 1.3 riastrad * @p_res: On successful return the location pointed to will contain 303 1.3 riastrad * a pointer to a refcounted struct vmw_resource. 304 1.3 riastrad * 305 1.3 riastrad * If the handle can't be found or is associated with an incorrect resource 306 1.3 riastrad * type, -EINVAL will be returned. 307 1.3 riastrad */ 308 1.3 riastrad struct vmw_resource * 309 1.3 riastrad vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv, 310 1.3 riastrad struct ttm_object_file *tfile, 311 1.3 riastrad uint32_t handle, 312 1.3 riastrad const struct vmw_user_resource_conv 313 1.3 riastrad *converter) 314 1.3 riastrad { 315 1.3 riastrad struct ttm_base_object *base; 316 1.3 riastrad 317 1.3 riastrad base = ttm_base_object_noref_lookup(tfile, handle); 318 1.3 riastrad if (!base) 319 1.3 riastrad return ERR_PTR(-ESRCH); 320 1.3 riastrad 321 1.3 riastrad if (unlikely(ttm_base_object_type(base) != converter->object_type)) { 322 1.3 riastrad ttm_base_object_noref_release(); 323 1.3 riastrad return ERR_PTR(-EINVAL); 324 1.3 riastrad } 325 1.3 riastrad 326 1.3 riastrad return converter->base_obj_to_res(base); 327 1.3 riastrad } 328 1.3 riastrad 329 1.3 riastrad /** 330 1.3 riastrad * Helper function that looks either a surface or bo. 331 1.1 riastrad * 332 1.1 riastrad * The pointer this pointed at by out_surf and out_buf needs to be null. 333 1.1 riastrad */ 334 1.1 riastrad int vmw_user_lookup_handle(struct vmw_private *dev_priv, 335 1.1 riastrad struct ttm_object_file *tfile, 336 1.1 riastrad uint32_t handle, 337 1.1 riastrad struct vmw_surface **out_surf, 338 1.3 riastrad struct vmw_buffer_object **out_buf) 339 1.1 riastrad { 340 1.1 riastrad struct vmw_resource *res; 341 1.1 riastrad int ret; 342 1.1 riastrad 343 1.1 riastrad BUG_ON(*out_surf || *out_buf); 344 1.1 riastrad 345 1.1 riastrad ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle, 346 1.1 riastrad user_surface_converter, 347 1.1 riastrad &res); 348 1.1 riastrad if (!ret) { 349 1.1 riastrad *out_surf = vmw_res_to_srf(res); 350 1.1 riastrad return 0; 351 1.1 riastrad } 352 1.1 riastrad 353 1.1 riastrad *out_surf = NULL; 354 1.3 riastrad ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL); 355 1.1 riastrad return ret; 356 1.1 riastrad } 357 1.1 riastrad 358 1.2 riastrad /** 359 1.1 riastrad * vmw_resource_buf_alloc - Allocate a backup buffer for a resource. 360 1.1 riastrad * 361 1.1 riastrad * @res: The resource for which to allocate a backup buffer. 362 1.1 riastrad * @interruptible: Whether any sleeps during allocation should be 363 1.1 riastrad * performed while interruptible. 364 1.1 riastrad */ 365 1.1 riastrad static int vmw_resource_buf_alloc(struct vmw_resource *res, 366 1.1 riastrad bool interruptible) 367 1.1 riastrad { 368 1.1 riastrad unsigned long size = 369 1.1 riastrad (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK; 370 1.3 riastrad struct vmw_buffer_object *backup; 371 1.1 riastrad int ret; 372 1.1 riastrad 373 1.1 riastrad if (likely(res->backup)) { 374 1.1 riastrad BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size); 375 1.1 riastrad return 0; 376 1.1 riastrad } 377 1.1 riastrad 378 1.1 riastrad backup = kzalloc(sizeof(*backup), GFP_KERNEL); 379 1.3 riastrad if (unlikely(!backup)) 380 1.1 riastrad return -ENOMEM; 381 1.1 riastrad 382 1.3 riastrad ret = vmw_bo_init(res->dev_priv, backup, res->backup_size, 383 1.1 riastrad res->func->backup_placement, 384 1.1 riastrad interruptible, 385 1.3 riastrad &vmw_bo_bo_free); 386 1.1 riastrad if (unlikely(ret != 0)) 387 1.3 riastrad goto out_no_bo; 388 1.1 riastrad 389 1.1 riastrad res->backup = backup; 390 1.1 riastrad 391 1.3 riastrad out_no_bo: 392 1.1 riastrad return ret; 393 1.1 riastrad } 394 1.1 riastrad 395 1.1 riastrad /** 396 1.1 riastrad * vmw_resource_do_validate - Make a resource up-to-date and visible 397 1.1 riastrad * to the device. 398 1.1 riastrad * 399 1.1 riastrad * @res: The resource to make visible to the device. 400 1.1 riastrad * @val_buf: Information about a buffer possibly 401 1.1 riastrad * containing backup data if a bind operation is needed. 402 1.1 riastrad * 403 1.1 riastrad * On hardware resource shortage, this function returns -EBUSY and 404 1.1 riastrad * should be retried once resources have been freed up. 405 1.1 riastrad */ 406 1.1 riastrad static int vmw_resource_do_validate(struct vmw_resource *res, 407 1.3 riastrad struct ttm_validate_buffer *val_buf, 408 1.3 riastrad bool dirtying) 409 1.1 riastrad { 410 1.1 riastrad int ret = 0; 411 1.1 riastrad const struct vmw_res_func *func = res->func; 412 1.1 riastrad 413 1.1 riastrad if (unlikely(res->id == -1)) { 414 1.1 riastrad ret = func->create(res); 415 1.1 riastrad if (unlikely(ret != 0)) 416 1.1 riastrad return ret; 417 1.1 riastrad } 418 1.1 riastrad 419 1.1 riastrad if (func->bind && 420 1.3 riastrad ((func->needs_backup && !vmw_resource_mob_attached(res) && 421 1.1 riastrad val_buf->bo != NULL) || 422 1.1 riastrad (!func->needs_backup && val_buf->bo != NULL))) { 423 1.1 riastrad ret = func->bind(res, val_buf); 424 1.1 riastrad if (unlikely(ret != 0)) 425 1.1 riastrad goto out_bind_failed; 426 1.1 riastrad if (func->needs_backup) 427 1.3 riastrad vmw_resource_mob_attach(res); 428 1.1 riastrad } 429 1.1 riastrad 430 1.1 riastrad /* 431 1.3 riastrad * Handle the case where the backup mob is marked coherent but 432 1.3 riastrad * the resource isn't. 433 1.1 riastrad */ 434 1.3 riastrad if (func->dirty_alloc && vmw_resource_mob_attached(res) && 435 1.3 riastrad !res->coherent) { 436 1.3 riastrad if (res->backup->dirty && !res->dirty) { 437 1.3 riastrad ret = func->dirty_alloc(res); 438 1.3 riastrad if (ret) 439 1.3 riastrad return ret; 440 1.3 riastrad } else if (!res->backup->dirty && res->dirty) { 441 1.3 riastrad func->dirty_free(res); 442 1.3 riastrad } 443 1.3 riastrad } 444 1.3 riastrad 445 1.3 riastrad /* 446 1.3 riastrad * Transfer the dirty regions to the resource and update 447 1.3 riastrad * the resource. 448 1.3 riastrad */ 449 1.3 riastrad if (res->dirty) { 450 1.3 riastrad if (dirtying && !res->res_dirty) { 451 1.3 riastrad pgoff_t start = res->backup_offset >> PAGE_SHIFT; 452 1.3 riastrad pgoff_t end = __KERNEL_DIV_ROUND_UP 453 1.3 riastrad (res->backup_offset + res->backup_size, 454 1.3 riastrad PAGE_SIZE); 455 1.3 riastrad 456 1.3 riastrad vmw_bo_dirty_unmap(res->backup, start, end); 457 1.3 riastrad } 458 1.3 riastrad 459 1.3 riastrad vmw_bo_dirty_transfer_to_res(res); 460 1.3 riastrad return func->dirty_sync(res); 461 1.3 riastrad } 462 1.1 riastrad 463 1.1 riastrad return 0; 464 1.1 riastrad 465 1.1 riastrad out_bind_failed: 466 1.1 riastrad func->destroy(res); 467 1.1 riastrad 468 1.1 riastrad return ret; 469 1.1 riastrad } 470 1.1 riastrad 471 1.1 riastrad /** 472 1.1 riastrad * vmw_resource_unreserve - Unreserve a resource previously reserved for 473 1.1 riastrad * command submission. 474 1.1 riastrad * 475 1.1 riastrad * @res: Pointer to the struct vmw_resource to unreserve. 476 1.3 riastrad * @dirty_set: Change dirty status of the resource. 477 1.3 riastrad * @dirty: When changing dirty status indicates the new status. 478 1.2 riastrad * @switch_backup: Backup buffer has been switched. 479 1.1 riastrad * @new_backup: Pointer to new backup buffer if command submission 480 1.2 riastrad * switched. May be NULL. 481 1.2 riastrad * @new_backup_offset: New backup offset if @switch_backup is true. 482 1.1 riastrad * 483 1.1 riastrad * Currently unreserving a resource means putting it back on the device's 484 1.1 riastrad * resource lru list, so that it can be evicted if necessary. 485 1.1 riastrad */ 486 1.1 riastrad void vmw_resource_unreserve(struct vmw_resource *res, 487 1.3 riastrad bool dirty_set, 488 1.3 riastrad bool dirty, 489 1.2 riastrad bool switch_backup, 490 1.3 riastrad struct vmw_buffer_object *new_backup, 491 1.1 riastrad unsigned long new_backup_offset) 492 1.1 riastrad { 493 1.1 riastrad struct vmw_private *dev_priv = res->dev_priv; 494 1.1 riastrad 495 1.1 riastrad if (!list_empty(&res->lru_head)) 496 1.1 riastrad return; 497 1.1 riastrad 498 1.2 riastrad if (switch_backup && new_backup != res->backup) { 499 1.1 riastrad if (res->backup) { 500 1.3 riastrad vmw_resource_mob_detach(res); 501 1.3 riastrad if (res->coherent) 502 1.3 riastrad vmw_bo_dirty_release(res->backup); 503 1.3 riastrad vmw_bo_unreference(&res->backup); 504 1.1 riastrad } 505 1.1 riastrad 506 1.2 riastrad if (new_backup) { 507 1.3 riastrad res->backup = vmw_bo_reference(new_backup); 508 1.3 riastrad 509 1.3 riastrad /* 510 1.3 riastrad * The validation code should already have added a 511 1.3 riastrad * dirty tracker here. 512 1.3 riastrad */ 513 1.3 riastrad WARN_ON(res->coherent && !new_backup->dirty); 514 1.3 riastrad 515 1.3 riastrad vmw_resource_mob_attach(res); 516 1.2 riastrad } else { 517 1.2 riastrad res->backup = NULL; 518 1.2 riastrad } 519 1.3 riastrad } else if (switch_backup && res->coherent) { 520 1.3 riastrad vmw_bo_dirty_release(res->backup); 521 1.1 riastrad } 522 1.3 riastrad 523 1.2 riastrad if (switch_backup) 524 1.1 riastrad res->backup_offset = new_backup_offset; 525 1.1 riastrad 526 1.3 riastrad if (dirty_set) 527 1.3 riastrad res->res_dirty = dirty; 528 1.3 riastrad 529 1.2 riastrad if (!res->func->may_evict || res->id == -1 || res->pin_count) 530 1.1 riastrad return; 531 1.1 riastrad 532 1.3 riastrad spin_lock(&dev_priv->resource_lock); 533 1.1 riastrad list_add_tail(&res->lru_head, 534 1.1 riastrad &res->dev_priv->res_lru[res->func->res_type]); 535 1.3 riastrad spin_unlock(&dev_priv->resource_lock); 536 1.1 riastrad } 537 1.1 riastrad 538 1.1 riastrad /** 539 1.1 riastrad * vmw_resource_check_buffer - Check whether a backup buffer is needed 540 1.1 riastrad * for a resource and in that case, allocate 541 1.1 riastrad * one, reserve and validate it. 542 1.1 riastrad * 543 1.3 riastrad * @ticket: The ww aqcquire context to use, or NULL if trylocking. 544 1.1 riastrad * @res: The resource for which to allocate a backup buffer. 545 1.1 riastrad * @interruptible: Whether any sleeps during allocation should be 546 1.1 riastrad * performed while interruptible. 547 1.1 riastrad * @val_buf: On successful return contains data about the 548 1.1 riastrad * reserved and validated backup buffer. 549 1.1 riastrad */ 550 1.2 riastrad static int 551 1.3 riastrad vmw_resource_check_buffer(struct ww_acquire_ctx *ticket, 552 1.3 riastrad struct vmw_resource *res, 553 1.2 riastrad bool interruptible, 554 1.2 riastrad struct ttm_validate_buffer *val_buf) 555 1.1 riastrad { 556 1.3 riastrad struct ttm_operation_ctx ctx = { true, false }; 557 1.1 riastrad struct list_head val_list; 558 1.1 riastrad bool backup_dirty = false; 559 1.1 riastrad int ret; 560 1.1 riastrad 561 1.1 riastrad if (unlikely(res->backup == NULL)) { 562 1.1 riastrad ret = vmw_resource_buf_alloc(res, interruptible); 563 1.1 riastrad if (unlikely(ret != 0)) 564 1.1 riastrad return ret; 565 1.1 riastrad } 566 1.1 riastrad 567 1.1 riastrad INIT_LIST_HEAD(&val_list); 568 1.3 riastrad ttm_bo_get(&res->backup->base); 569 1.3 riastrad val_buf->bo = &res->backup->base; 570 1.3 riastrad val_buf->num_shared = 0; 571 1.1 riastrad list_add_tail(&val_buf->head, &val_list); 572 1.3 riastrad ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL); 573 1.1 riastrad if (unlikely(ret != 0)) 574 1.1 riastrad goto out_no_reserve; 575 1.1 riastrad 576 1.3 riastrad if (res->func->needs_backup && !vmw_resource_mob_attached(res)) 577 1.1 riastrad return 0; 578 1.1 riastrad 579 1.1 riastrad backup_dirty = res->backup_dirty; 580 1.1 riastrad ret = ttm_bo_validate(&res->backup->base, 581 1.1 riastrad res->func->backup_placement, 582 1.3 riastrad &ctx); 583 1.1 riastrad 584 1.1 riastrad if (unlikely(ret != 0)) 585 1.1 riastrad goto out_no_validate; 586 1.1 riastrad 587 1.1 riastrad return 0; 588 1.1 riastrad 589 1.1 riastrad out_no_validate: 590 1.3 riastrad ttm_eu_backoff_reservation(ticket, &val_list); 591 1.1 riastrad out_no_reserve: 592 1.3 riastrad ttm_bo_put(val_buf->bo); 593 1.3 riastrad val_buf->bo = NULL; 594 1.1 riastrad if (backup_dirty) 595 1.3 riastrad vmw_bo_unreference(&res->backup); 596 1.1 riastrad 597 1.1 riastrad return ret; 598 1.1 riastrad } 599 1.1 riastrad 600 1.1 riastrad /** 601 1.1 riastrad * vmw_resource_reserve - Reserve a resource for command submission 602 1.1 riastrad * 603 1.1 riastrad * @res: The resource to reserve. 604 1.1 riastrad * 605 1.1 riastrad * This function takes the resource off the LRU list and make sure 606 1.1 riastrad * a backup buffer is present for guest-backed resources. However, 607 1.1 riastrad * the buffer may not be bound to the resource at this point. 608 1.1 riastrad * 609 1.1 riastrad */ 610 1.2 riastrad int vmw_resource_reserve(struct vmw_resource *res, bool interruptible, 611 1.2 riastrad bool no_backup) 612 1.1 riastrad { 613 1.1 riastrad struct vmw_private *dev_priv = res->dev_priv; 614 1.1 riastrad int ret; 615 1.1 riastrad 616 1.3 riastrad spin_lock(&dev_priv->resource_lock); 617 1.1 riastrad list_del_init(&res->lru_head); 618 1.3 riastrad spin_unlock(&dev_priv->resource_lock); 619 1.1 riastrad 620 1.1 riastrad if (res->func->needs_backup && res->backup == NULL && 621 1.1 riastrad !no_backup) { 622 1.2 riastrad ret = vmw_resource_buf_alloc(res, interruptible); 623 1.2 riastrad if (unlikely(ret != 0)) { 624 1.2 riastrad DRM_ERROR("Failed to allocate a backup buffer " 625 1.2 riastrad "of size %lu. bytes\n", 626 1.2 riastrad (unsigned long) res->backup_size); 627 1.1 riastrad return ret; 628 1.2 riastrad } 629 1.1 riastrad } 630 1.1 riastrad 631 1.1 riastrad return 0; 632 1.1 riastrad } 633 1.1 riastrad 634 1.1 riastrad /** 635 1.1 riastrad * vmw_resource_backoff_reservation - Unreserve and unreference a 636 1.1 riastrad * backup buffer 637 1.1 riastrad *. 638 1.3 riastrad * @ticket: The ww acquire ctx used for reservation. 639 1.1 riastrad * @val_buf: Backup buffer information. 640 1.1 riastrad */ 641 1.2 riastrad static void 642 1.3 riastrad vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket, 643 1.3 riastrad struct ttm_validate_buffer *val_buf) 644 1.1 riastrad { 645 1.1 riastrad struct list_head val_list; 646 1.1 riastrad 647 1.1 riastrad if (likely(val_buf->bo == NULL)) 648 1.1 riastrad return; 649 1.1 riastrad 650 1.1 riastrad INIT_LIST_HEAD(&val_list); 651 1.1 riastrad list_add_tail(&val_buf->head, &val_list); 652 1.3 riastrad ttm_eu_backoff_reservation(ticket, &val_list); 653 1.3 riastrad ttm_bo_put(val_buf->bo); 654 1.3 riastrad val_buf->bo = NULL; 655 1.1 riastrad } 656 1.1 riastrad 657 1.1 riastrad /** 658 1.1 riastrad * vmw_resource_do_evict - Evict a resource, and transfer its data 659 1.1 riastrad * to a backup buffer. 660 1.1 riastrad * 661 1.3 riastrad * @ticket: The ww acquire ticket to use, or NULL if trylocking. 662 1.1 riastrad * @res: The resource to evict. 663 1.2 riastrad * @interruptible: Whether to wait interruptible. 664 1.1 riastrad */ 665 1.3 riastrad static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket, 666 1.3 riastrad struct vmw_resource *res, bool interruptible) 667 1.1 riastrad { 668 1.1 riastrad struct ttm_validate_buffer val_buf; 669 1.1 riastrad const struct vmw_res_func *func = res->func; 670 1.1 riastrad int ret; 671 1.1 riastrad 672 1.1 riastrad BUG_ON(!func->may_evict); 673 1.1 riastrad 674 1.1 riastrad val_buf.bo = NULL; 675 1.3 riastrad val_buf.num_shared = 0; 676 1.3 riastrad ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf); 677 1.1 riastrad if (unlikely(ret != 0)) 678 1.1 riastrad return ret; 679 1.1 riastrad 680 1.1 riastrad if (unlikely(func->unbind != NULL && 681 1.3 riastrad (!func->needs_backup || vmw_resource_mob_attached(res)))) { 682 1.1 riastrad ret = func->unbind(res, res->res_dirty, &val_buf); 683 1.1 riastrad if (unlikely(ret != 0)) 684 1.1 riastrad goto out_no_unbind; 685 1.3 riastrad vmw_resource_mob_detach(res); 686 1.1 riastrad } 687 1.1 riastrad ret = func->destroy(res); 688 1.1 riastrad res->backup_dirty = true; 689 1.1 riastrad res->res_dirty = false; 690 1.1 riastrad out_no_unbind: 691 1.3 riastrad vmw_resource_backoff_reservation(ticket, &val_buf); 692 1.1 riastrad 693 1.1 riastrad return ret; 694 1.1 riastrad } 695 1.1 riastrad 696 1.1 riastrad 697 1.1 riastrad /** 698 1.1 riastrad * vmw_resource_validate - Make a resource up-to-date and visible 699 1.1 riastrad * to the device. 700 1.3 riastrad * @res: The resource to make visible to the device. 701 1.3 riastrad * @intr: Perform waits interruptible if possible. 702 1.3 riastrad * @dirtying: Pending GPU operation will dirty the resource 703 1.1 riastrad * 704 1.1 riastrad * On succesful return, any backup DMA buffer pointed to by @res->backup will 705 1.1 riastrad * be reserved and validated. 706 1.1 riastrad * On hardware resource shortage, this function will repeatedly evict 707 1.1 riastrad * resources of the same type until the validation succeeds. 708 1.3 riastrad * 709 1.3 riastrad * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code 710 1.3 riastrad * on failure. 711 1.1 riastrad */ 712 1.3 riastrad int vmw_resource_validate(struct vmw_resource *res, bool intr, 713 1.3 riastrad bool dirtying) 714 1.1 riastrad { 715 1.1 riastrad int ret; 716 1.1 riastrad struct vmw_resource *evict_res; 717 1.1 riastrad struct vmw_private *dev_priv = res->dev_priv; 718 1.1 riastrad struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type]; 719 1.1 riastrad struct ttm_validate_buffer val_buf; 720 1.2 riastrad unsigned err_count = 0; 721 1.1 riastrad 722 1.2 riastrad if (!res->func->create) 723 1.1 riastrad return 0; 724 1.1 riastrad 725 1.1 riastrad val_buf.bo = NULL; 726 1.3 riastrad val_buf.num_shared = 0; 727 1.1 riastrad if (res->backup) 728 1.1 riastrad val_buf.bo = &res->backup->base; 729 1.1 riastrad do { 730 1.3 riastrad ret = vmw_resource_do_validate(res, &val_buf, dirtying); 731 1.1 riastrad if (likely(ret != -EBUSY)) 732 1.1 riastrad break; 733 1.1 riastrad 734 1.3 riastrad spin_lock(&dev_priv->resource_lock); 735 1.1 riastrad if (list_empty(lru_list) || !res->func->may_evict) { 736 1.2 riastrad DRM_ERROR("Out of device device resources " 737 1.1 riastrad "for %s.\n", res->func->type_name); 738 1.1 riastrad ret = -EBUSY; 739 1.3 riastrad spin_unlock(&dev_priv->resource_lock); 740 1.1 riastrad break; 741 1.1 riastrad } 742 1.1 riastrad 743 1.1 riastrad evict_res = vmw_resource_reference 744 1.1 riastrad (list_first_entry(lru_list, struct vmw_resource, 745 1.1 riastrad lru_head)); 746 1.1 riastrad list_del_init(&evict_res->lru_head); 747 1.1 riastrad 748 1.3 riastrad spin_unlock(&dev_priv->resource_lock); 749 1.2 riastrad 750 1.3 riastrad /* Trylock backup buffers with a NULL ticket. */ 751 1.3 riastrad ret = vmw_resource_do_evict(NULL, evict_res, intr); 752 1.2 riastrad if (unlikely(ret != 0)) { 753 1.3 riastrad spin_lock(&dev_priv->resource_lock); 754 1.2 riastrad list_add_tail(&evict_res->lru_head, lru_list); 755 1.3 riastrad spin_unlock(&dev_priv->resource_lock); 756 1.2 riastrad if (ret == -ERESTARTSYS || 757 1.2 riastrad ++err_count > VMW_RES_EVICT_ERR_COUNT) { 758 1.2 riastrad vmw_resource_unreference(&evict_res); 759 1.2 riastrad goto out_no_validate; 760 1.2 riastrad } 761 1.2 riastrad } 762 1.2 riastrad 763 1.1 riastrad vmw_resource_unreference(&evict_res); 764 1.1 riastrad } while (1); 765 1.1 riastrad 766 1.1 riastrad if (unlikely(ret != 0)) 767 1.1 riastrad goto out_no_validate; 768 1.1 riastrad else if (!res->func->needs_backup && res->backup) { 769 1.3 riastrad WARN_ON_ONCE(vmw_resource_mob_attached(res)); 770 1.3 riastrad vmw_bo_unreference(&res->backup); 771 1.1 riastrad } 772 1.1 riastrad 773 1.1 riastrad return 0; 774 1.1 riastrad 775 1.1 riastrad out_no_validate: 776 1.1 riastrad return ret; 777 1.1 riastrad } 778 1.1 riastrad 779 1.2 riastrad 780 1.2 riastrad /** 781 1.3 riastrad * vmw_resource_unbind_list 782 1.2 riastrad * 783 1.3 riastrad * @vbo: Pointer to the current backing MOB. 784 1.2 riastrad * 785 1.2 riastrad * Evicts the Guest Backed hardware resource if the backup 786 1.2 riastrad * buffer is being moved out of MOB memory. 787 1.3 riastrad * Note that this function will not race with the resource 788 1.3 riastrad * validation code, since resource validation and eviction 789 1.3 riastrad * both require the backup buffer to be reserved. 790 1.2 riastrad */ 791 1.3 riastrad void vmw_resource_unbind_list(struct vmw_buffer_object *vbo) 792 1.2 riastrad { 793 1.3 riastrad struct ttm_validate_buffer val_buf = { 794 1.3 riastrad .bo = &vbo->base, 795 1.3 riastrad .num_shared = 0 796 1.3 riastrad }; 797 1.2 riastrad 798 1.3 riastrad dma_resv_assert_held(vbo->base.base.resv); 799 1.3 riastrad while (!RB_EMPTY_ROOT(&vbo->res_tree)) { 800 1.3 riastrad struct rb_node *node = vbo->res_tree.rb_node; 801 1.3 riastrad struct vmw_resource *res = 802 1.3 riastrad container_of(node, struct vmw_resource, mob_node); 803 1.2 riastrad 804 1.3 riastrad if (!WARN_ON_ONCE(!res->func->unbind)) 805 1.3 riastrad (void) res->func->unbind(res, res->res_dirty, &val_buf); 806 1.2 riastrad 807 1.3 riastrad res->backup_dirty = true; 808 1.3 riastrad res->res_dirty = false; 809 1.3 riastrad vmw_resource_mob_detach(res); 810 1.3 riastrad } 811 1.2 riastrad 812 1.3 riastrad (void) ttm_bo_wait(&vbo->base, false, false); 813 1.2 riastrad } 814 1.2 riastrad 815 1.2 riastrad 816 1.2 riastrad /** 817 1.2 riastrad * vmw_query_readback_all - Read back cached query states 818 1.2 riastrad * 819 1.2 riastrad * @dx_query_mob: Buffer containing the DX query MOB 820 1.2 riastrad * 821 1.2 riastrad * Read back cached states from the device if they exist. This function 822 1.2 riastrad * assumings binding_mutex is held. 823 1.2 riastrad */ 824 1.3 riastrad int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob) 825 1.2 riastrad { 826 1.2 riastrad struct vmw_resource *dx_query_ctx; 827 1.2 riastrad struct vmw_private *dev_priv; 828 1.2 riastrad struct { 829 1.2 riastrad SVGA3dCmdHeader header; 830 1.2 riastrad SVGA3dCmdDXReadbackAllQuery body; 831 1.2 riastrad } *cmd; 832 1.2 riastrad 833 1.2 riastrad 834 1.2 riastrad /* No query bound, so do nothing */ 835 1.2 riastrad if (!dx_query_mob || !dx_query_mob->dx_query_ctx) 836 1.2 riastrad return 0; 837 1.2 riastrad 838 1.2 riastrad dx_query_ctx = dx_query_mob->dx_query_ctx; 839 1.2 riastrad dev_priv = dx_query_ctx->dev_priv; 840 1.2 riastrad 841 1.3 riastrad cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id); 842 1.3 riastrad if (unlikely(cmd == NULL)) 843 1.2 riastrad return -ENOMEM; 844 1.1 riastrad 845 1.2 riastrad cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY; 846 1.2 riastrad cmd->header.size = sizeof(cmd->body); 847 1.2 riastrad cmd->body.cid = dx_query_ctx->id; 848 1.1 riastrad 849 1.2 riastrad vmw_fifo_commit(dev_priv, sizeof(*cmd)); 850 1.1 riastrad 851 1.2 riastrad /* Triggers a rebind the next time affected context is bound */ 852 1.2 riastrad dx_query_mob->dx_query_ctx = NULL; 853 1.1 riastrad 854 1.2 riastrad return 0; 855 1.1 riastrad } 856 1.1 riastrad 857 1.2 riastrad 858 1.2 riastrad 859 1.1 riastrad /** 860 1.2 riastrad * vmw_query_move_notify - Read back cached query states 861 1.1 riastrad * 862 1.2 riastrad * @bo: The TTM buffer object about to move. 863 1.2 riastrad * @mem: The memory region @bo is moving to. 864 1.1 riastrad * 865 1.2 riastrad * Called before the query MOB is swapped out to read back cached query 866 1.2 riastrad * states from the device. 867 1.1 riastrad */ 868 1.2 riastrad void vmw_query_move_notify(struct ttm_buffer_object *bo, 869 1.2 riastrad struct ttm_mem_reg *mem) 870 1.1 riastrad { 871 1.3 riastrad struct vmw_buffer_object *dx_query_mob; 872 1.2 riastrad struct ttm_bo_device *bdev = bo->bdev; 873 1.2 riastrad struct vmw_private *dev_priv; 874 1.2 riastrad 875 1.2 riastrad 876 1.2 riastrad dev_priv = container_of(bdev, struct vmw_private, bdev); 877 1.2 riastrad 878 1.2 riastrad mutex_lock(&dev_priv->binding_mutex); 879 1.2 riastrad 880 1.3 riastrad dx_query_mob = container_of(bo, struct vmw_buffer_object, base); 881 1.2 riastrad if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) { 882 1.2 riastrad mutex_unlock(&dev_priv->binding_mutex); 883 1.2 riastrad return; 884 1.2 riastrad } 885 1.2 riastrad 886 1.2 riastrad /* If BO is being moved from MOB to system memory */ 887 1.2 riastrad if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) { 888 1.2 riastrad struct vmw_fence_obj *fence; 889 1.2 riastrad 890 1.2 riastrad (void) vmw_query_readback_all(dx_query_mob); 891 1.2 riastrad mutex_unlock(&dev_priv->binding_mutex); 892 1.2 riastrad 893 1.2 riastrad /* Create a fence and attach the BO to it */ 894 1.2 riastrad (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); 895 1.3 riastrad vmw_bo_fence_single(bo, fence); 896 1.2 riastrad 897 1.2 riastrad if (fence != NULL) 898 1.2 riastrad vmw_fence_obj_unreference(&fence); 899 1.2 riastrad 900 1.3 riastrad (void) ttm_bo_wait(bo, false, false); 901 1.2 riastrad } else 902 1.2 riastrad mutex_unlock(&dev_priv->binding_mutex); 903 1.2 riastrad 904 1.1 riastrad } 905 1.1 riastrad 906 1.1 riastrad /** 907 1.1 riastrad * vmw_resource_needs_backup - Return whether a resource needs a backup buffer. 908 1.1 riastrad * 909 1.1 riastrad * @res: The resource being queried. 910 1.1 riastrad */ 911 1.1 riastrad bool vmw_resource_needs_backup(const struct vmw_resource *res) 912 1.1 riastrad { 913 1.1 riastrad return res->func->needs_backup; 914 1.1 riastrad } 915 1.1 riastrad 916 1.1 riastrad /** 917 1.1 riastrad * vmw_resource_evict_type - Evict all resources of a specific type 918 1.1 riastrad * 919 1.1 riastrad * @dev_priv: Pointer to a device private struct 920 1.1 riastrad * @type: The resource type to evict 921 1.1 riastrad * 922 1.1 riastrad * To avoid thrashing starvation or as part of the hibernation sequence, 923 1.2 riastrad * try to evict all evictable resources of a specific type. 924 1.1 riastrad */ 925 1.1 riastrad static void vmw_resource_evict_type(struct vmw_private *dev_priv, 926 1.1 riastrad enum vmw_res_type type) 927 1.1 riastrad { 928 1.1 riastrad struct list_head *lru_list = &dev_priv->res_lru[type]; 929 1.1 riastrad struct vmw_resource *evict_res; 930 1.2 riastrad unsigned err_count = 0; 931 1.2 riastrad int ret; 932 1.3 riastrad struct ww_acquire_ctx ticket; 933 1.1 riastrad 934 1.1 riastrad do { 935 1.3 riastrad spin_lock(&dev_priv->resource_lock); 936 1.1 riastrad 937 1.1 riastrad if (list_empty(lru_list)) 938 1.1 riastrad goto out_unlock; 939 1.1 riastrad 940 1.1 riastrad evict_res = vmw_resource_reference( 941 1.1 riastrad list_first_entry(lru_list, struct vmw_resource, 942 1.1 riastrad lru_head)); 943 1.1 riastrad list_del_init(&evict_res->lru_head); 944 1.3 riastrad spin_unlock(&dev_priv->resource_lock); 945 1.2 riastrad 946 1.3 riastrad /* Wait lock backup buffers with a ticket. */ 947 1.3 riastrad ret = vmw_resource_do_evict(&ticket, evict_res, false); 948 1.2 riastrad if (unlikely(ret != 0)) { 949 1.3 riastrad spin_lock(&dev_priv->resource_lock); 950 1.2 riastrad list_add_tail(&evict_res->lru_head, lru_list); 951 1.3 riastrad spin_unlock(&dev_priv->resource_lock); 952 1.2 riastrad if (++err_count > VMW_RES_EVICT_ERR_COUNT) { 953 1.2 riastrad vmw_resource_unreference(&evict_res); 954 1.2 riastrad return; 955 1.2 riastrad } 956 1.2 riastrad } 957 1.2 riastrad 958 1.1 riastrad vmw_resource_unreference(&evict_res); 959 1.1 riastrad } while (1); 960 1.1 riastrad 961 1.1 riastrad out_unlock: 962 1.3 riastrad spin_unlock(&dev_priv->resource_lock); 963 1.1 riastrad } 964 1.1 riastrad 965 1.1 riastrad /** 966 1.1 riastrad * vmw_resource_evict_all - Evict all evictable resources 967 1.1 riastrad * 968 1.1 riastrad * @dev_priv: Pointer to a device private struct 969 1.1 riastrad * 970 1.1 riastrad * To avoid thrashing starvation or as part of the hibernation sequence, 971 1.1 riastrad * evict all evictable resources. In particular this means that all 972 1.1 riastrad * guest-backed resources that are registered with the device are 973 1.1 riastrad * evicted and the OTable becomes clean. 974 1.1 riastrad */ 975 1.1 riastrad void vmw_resource_evict_all(struct vmw_private *dev_priv) 976 1.1 riastrad { 977 1.1 riastrad enum vmw_res_type type; 978 1.1 riastrad 979 1.1 riastrad mutex_lock(&dev_priv->cmdbuf_mutex); 980 1.1 riastrad 981 1.1 riastrad for (type = 0; type < vmw_res_max; ++type) 982 1.1 riastrad vmw_resource_evict_type(dev_priv, type); 983 1.1 riastrad 984 1.1 riastrad mutex_unlock(&dev_priv->cmdbuf_mutex); 985 1.1 riastrad } 986 1.2 riastrad 987 1.2 riastrad /** 988 1.2 riastrad * vmw_resource_pin - Add a pin reference on a resource 989 1.2 riastrad * 990 1.2 riastrad * @res: The resource to add a pin reference on 991 1.2 riastrad * 992 1.2 riastrad * This function adds a pin reference, and if needed validates the resource. 993 1.2 riastrad * Having a pin reference means that the resource can never be evicted, and 994 1.2 riastrad * its id will never change as long as there is a pin reference. 995 1.2 riastrad * This function returns 0 on success and a negative error code on failure. 996 1.2 riastrad */ 997 1.2 riastrad int vmw_resource_pin(struct vmw_resource *res, bool interruptible) 998 1.2 riastrad { 999 1.3 riastrad struct ttm_operation_ctx ctx = { interruptible, false }; 1000 1.2 riastrad struct vmw_private *dev_priv = res->dev_priv; 1001 1.2 riastrad int ret; 1002 1.2 riastrad 1003 1.2 riastrad ttm_write_lock(&dev_priv->reservation_sem, interruptible); 1004 1.2 riastrad mutex_lock(&dev_priv->cmdbuf_mutex); 1005 1.2 riastrad ret = vmw_resource_reserve(res, interruptible, false); 1006 1.2 riastrad if (ret) 1007 1.2 riastrad goto out_no_reserve; 1008 1.2 riastrad 1009 1.2 riastrad if (res->pin_count == 0) { 1010 1.3 riastrad struct vmw_buffer_object *vbo = NULL; 1011 1.2 riastrad 1012 1.2 riastrad if (res->backup) { 1013 1.2 riastrad vbo = res->backup; 1014 1.2 riastrad 1015 1.3 riastrad ttm_bo_reserve(&vbo->base, interruptible, false, NULL); 1016 1.2 riastrad if (!vbo->pin_count) { 1017 1.2 riastrad ret = ttm_bo_validate 1018 1.2 riastrad (&vbo->base, 1019 1.2 riastrad res->func->backup_placement, 1020 1.3 riastrad &ctx); 1021 1.2 riastrad if (ret) { 1022 1.2 riastrad ttm_bo_unreserve(&vbo->base); 1023 1.2 riastrad goto out_no_validate; 1024 1.2 riastrad } 1025 1.2 riastrad } 1026 1.2 riastrad 1027 1.2 riastrad /* Do we really need to pin the MOB as well? */ 1028 1.2 riastrad vmw_bo_pin_reserved(vbo, true); 1029 1.2 riastrad } 1030 1.3 riastrad ret = vmw_resource_validate(res, interruptible, true); 1031 1.2 riastrad if (vbo) 1032 1.2 riastrad ttm_bo_unreserve(&vbo->base); 1033 1.2 riastrad if (ret) 1034 1.2 riastrad goto out_no_validate; 1035 1.2 riastrad } 1036 1.2 riastrad res->pin_count++; 1037 1.2 riastrad 1038 1.2 riastrad out_no_validate: 1039 1.3 riastrad vmw_resource_unreserve(res, false, false, false, NULL, 0UL); 1040 1.2 riastrad out_no_reserve: 1041 1.2 riastrad mutex_unlock(&dev_priv->cmdbuf_mutex); 1042 1.2 riastrad ttm_write_unlock(&dev_priv->reservation_sem); 1043 1.2 riastrad 1044 1.2 riastrad return ret; 1045 1.2 riastrad } 1046 1.2 riastrad 1047 1.2 riastrad /** 1048 1.2 riastrad * vmw_resource_unpin - Remove a pin reference from a resource 1049 1.2 riastrad * 1050 1.2 riastrad * @res: The resource to remove a pin reference from 1051 1.2 riastrad * 1052 1.2 riastrad * Having a pin reference means that the resource can never be evicted, and 1053 1.2 riastrad * its id will never change as long as there is a pin reference. 1054 1.2 riastrad */ 1055 1.2 riastrad void vmw_resource_unpin(struct vmw_resource *res) 1056 1.2 riastrad { 1057 1.2 riastrad struct vmw_private *dev_priv = res->dev_priv; 1058 1.2 riastrad int ret; 1059 1.2 riastrad 1060 1.3 riastrad (void) ttm_read_lock(&dev_priv->reservation_sem, false); 1061 1.2 riastrad mutex_lock(&dev_priv->cmdbuf_mutex); 1062 1.2 riastrad 1063 1.2 riastrad ret = vmw_resource_reserve(res, false, true); 1064 1.2 riastrad WARN_ON(ret); 1065 1.2 riastrad 1066 1.2 riastrad WARN_ON(res->pin_count == 0); 1067 1.2 riastrad if (--res->pin_count == 0 && res->backup) { 1068 1.3 riastrad struct vmw_buffer_object *vbo = res->backup; 1069 1.2 riastrad 1070 1.3 riastrad (void) ttm_bo_reserve(&vbo->base, false, false, NULL); 1071 1.2 riastrad vmw_bo_pin_reserved(vbo, false); 1072 1.2 riastrad ttm_bo_unreserve(&vbo->base); 1073 1.2 riastrad } 1074 1.2 riastrad 1075 1.3 riastrad vmw_resource_unreserve(res, false, false, false, NULL, 0UL); 1076 1.2 riastrad 1077 1.2 riastrad mutex_unlock(&dev_priv->cmdbuf_mutex); 1078 1.2 riastrad ttm_read_unlock(&dev_priv->reservation_sem); 1079 1.2 riastrad } 1080 1.2 riastrad 1081 1.2 riastrad /** 1082 1.2 riastrad * vmw_res_type - Return the resource type 1083 1.2 riastrad * 1084 1.2 riastrad * @res: Pointer to the resource 1085 1.2 riastrad */ 1086 1.2 riastrad enum vmw_res_type vmw_res_type(const struct vmw_resource *res) 1087 1.2 riastrad { 1088 1.2 riastrad return res->func->res_type; 1089 1.2 riastrad } 1090 1.3 riastrad 1091 1.3 riastrad /** 1092 1.3 riastrad * vmw_resource_update_dirty - Update a resource's dirty tracker with a 1093 1.3 riastrad * sequential range of touched backing store memory. 1094 1.3 riastrad * @res: The resource. 1095 1.3 riastrad * @start: The first page touched. 1096 1.3 riastrad * @end: The last page touched + 1. 1097 1.3 riastrad */ 1098 1.3 riastrad void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start, 1099 1.3 riastrad pgoff_t end) 1100 1.3 riastrad { 1101 1.3 riastrad if (res->dirty) 1102 1.3 riastrad res->func->dirty_range_add(res, start << PAGE_SHIFT, 1103 1.3 riastrad end << PAGE_SHIFT); 1104 1.3 riastrad } 1105 1.3 riastrad 1106 1.3 riastrad /** 1107 1.3 riastrad * vmw_resources_clean - Clean resources intersecting a mob range 1108 1.3 riastrad * @vbo: The mob buffer object 1109 1.3 riastrad * @start: The mob page offset starting the range 1110 1.3 riastrad * @end: The mob page offset ending the range 1111 1.3 riastrad * @num_prefault: Returns how many pages including the first have been 1112 1.3 riastrad * cleaned and are ok to prefault 1113 1.3 riastrad */ 1114 1.3 riastrad int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start, 1115 1.3 riastrad pgoff_t end, pgoff_t *num_prefault) 1116 1.3 riastrad { 1117 1.3 riastrad struct rb_node *cur = vbo->res_tree.rb_node; 1118 1.3 riastrad struct vmw_resource *found = NULL; 1119 1.3 riastrad unsigned long res_start = start << PAGE_SHIFT; 1120 1.3 riastrad unsigned long res_end = end << PAGE_SHIFT; 1121 1.3 riastrad unsigned long last_cleaned = 0; 1122 1.3 riastrad 1123 1.3 riastrad /* 1124 1.3 riastrad * Find the resource with lowest backup_offset that intersects the 1125 1.3 riastrad * range. 1126 1.3 riastrad */ 1127 1.3 riastrad while (cur) { 1128 1.3 riastrad struct vmw_resource *cur_res = 1129 1.3 riastrad container_of(cur, struct vmw_resource, mob_node); 1130 1.3 riastrad 1131 1.3 riastrad if (cur_res->backup_offset >= res_end) { 1132 1.3 riastrad cur = cur->rb_left; 1133 1.3 riastrad } else if (cur_res->backup_offset + cur_res->backup_size <= 1134 1.3 riastrad res_start) { 1135 1.3 riastrad cur = cur->rb_right; 1136 1.3 riastrad } else { 1137 1.3 riastrad found = cur_res; 1138 1.3 riastrad cur = cur->rb_left; 1139 1.3 riastrad /* Continue to look for resources with lower offsets */ 1140 1.3 riastrad } 1141 1.3 riastrad } 1142 1.3 riastrad 1143 1.3 riastrad /* 1144 1.3 riastrad * In order of increasing backup_offset, clean dirty resorces 1145 1.3 riastrad * intersecting the range. 1146 1.3 riastrad */ 1147 1.3 riastrad while (found) { 1148 1.3 riastrad if (found->res_dirty) { 1149 1.3 riastrad int ret; 1150 1.3 riastrad 1151 1.3 riastrad if (!found->func->clean) 1152 1.3 riastrad return -EINVAL; 1153 1.3 riastrad 1154 1.3 riastrad ret = found->func->clean(found); 1155 1.3 riastrad if (ret) 1156 1.3 riastrad return ret; 1157 1.3 riastrad 1158 1.3 riastrad found->res_dirty = false; 1159 1.3 riastrad } 1160 1.3 riastrad last_cleaned = found->backup_offset + found->backup_size; 1161 1.3 riastrad cur = rb_next(&found->mob_node); 1162 1.3 riastrad if (!cur) 1163 1.3 riastrad break; 1164 1.3 riastrad 1165 1.3 riastrad found = container_of(cur, struct vmw_resource, mob_node); 1166 1.3 riastrad if (found->backup_offset >= res_end) 1167 1.3 riastrad break; 1168 1.3 riastrad } 1169 1.3 riastrad 1170 1.3 riastrad /* 1171 1.3 riastrad * Set number of pages allowed prefaulting and fence the buffer object 1172 1.3 riastrad */ 1173 1.3 riastrad *num_prefault = 1; 1174 1.3 riastrad if (last_cleaned > res_start) { 1175 1.3 riastrad struct ttm_buffer_object *bo = &vbo->base; 1176 1.3 riastrad 1177 1.3 riastrad *num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start, 1178 1.3 riastrad PAGE_SIZE); 1179 1.3 riastrad vmw_bo_fence_single(bo, NULL); 1180 1.3 riastrad if (bo->moving) 1181 1.3 riastrad dma_fence_put(bo->moving); 1182 1.3 riastrad bo->moving = dma_fence_get 1183 1.3 riastrad (dma_resv_get_excl(bo->base.resv)); 1184 1.3 riastrad } 1185 1.3 riastrad 1186 1.3 riastrad return 0; 1187 1.3 riastrad } 1188