1 1.2 riastrad /* $NetBSD: vmwgfx_mob.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $ */ 2 1.2 riastrad 3 1.3 riastrad // SPDX-License-Identifier: GPL-2.0 OR MIT 4 1.1 riastrad /************************************************************************** 5 1.1 riastrad * 6 1.3 riastrad * Copyright 2012-2015 VMware, Inc., Palo Alto, CA., USA 7 1.1 riastrad * 8 1.1 riastrad * Permission is hereby granted, free of charge, to any person obtaining a 9 1.1 riastrad * copy of this software and associated documentation files (the 10 1.1 riastrad * "Software"), to deal in the Software without restriction, including 11 1.1 riastrad * without limitation the rights to use, copy, modify, merge, publish, 12 1.1 riastrad * distribute, sub license, and/or sell copies of the Software, and to 13 1.1 riastrad * permit persons to whom the Software is furnished to do so, subject to 14 1.1 riastrad * the following conditions: 15 1.1 riastrad * 16 1.1 riastrad * The above copyright notice and this permission notice (including the 17 1.1 riastrad * next paragraph) shall be included in all copies or substantial portions 18 1.1 riastrad * of the Software. 19 1.1 riastrad * 20 1.1 riastrad * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 21 1.1 riastrad * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 22 1.1 riastrad * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 23 1.1 riastrad * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 24 1.1 riastrad * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 25 1.1 riastrad * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 26 1.1 riastrad * USE OR OTHER DEALINGS IN THE SOFTWARE. 
27 1.1 riastrad * 28 1.1 riastrad **************************************************************************/ 29 1.1 riastrad 30 1.2 riastrad #include <sys/cdefs.h> 31 1.2 riastrad __KERNEL_RCSID(0, "$NetBSD: vmwgfx_mob.c,v 1.3 2021/12/18 23:45:45 riastradh Exp $"); 32 1.2 riastrad 33 1.3 riastrad #include <linux/highmem.h> 34 1.3 riastrad 35 1.1 riastrad #include "vmwgfx_drv.h" 36 1.1 riastrad 37 1.1 riastrad /* 38 1.1 riastrad * If we set up the screen target otable, screen objects stop working. 39 1.1 riastrad */ 40 1.1 riastrad 41 1.2 riastrad #define VMW_OTABLE_SETUP_SUB ((VMWGFX_ENABLE_SCREEN_TARGET_OTABLE ? 0 : 1)) 42 1.1 riastrad 43 1.1 riastrad #ifdef CONFIG_64BIT 44 1.1 riastrad #define VMW_PPN_SIZE 8 45 1.1 riastrad #define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH64_0 46 1.1 riastrad #define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH64_1 47 1.1 riastrad #define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH64_2 48 1.1 riastrad #else 49 1.1 riastrad #define VMW_PPN_SIZE 4 50 1.1 riastrad #define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PTDEPTH_0 51 1.1 riastrad #define VMW_MOBFMT_PTDEPTH_1 SVGA3D_MOBFMT_PTDEPTH_1 52 1.1 riastrad #define VMW_MOBFMT_PTDEPTH_2 SVGA3D_MOBFMT_PTDEPTH_2 53 1.1 riastrad #endif 54 1.1 riastrad 55 1.1 riastrad /* 56 1.1 riastrad * struct vmw_mob - Structure containing page table and metadata for a 57 1.1 riastrad * Guest Memory OBject. 58 1.1 riastrad * 59 1.1 riastrad * @num_pages Number of pages that make up the page table. 60 1.1 riastrad * @pt_level The indirection level of the page table. 0-2. 61 1.1 riastrad * @pt_root_page DMA address of the level 0 page of the page table. 
 */
struct vmw_mob {
	struct ttm_buffer_object *pt_bo; /* page-table backing BO; NULL until populated */
	unsigned long num_pages;	 /* number of page-table pages */
	unsigned pt_level;		 /* page-table depth / SVGA3D_MOBFMT_* value */
	dma_addr_t pt_root_page;	 /* DMA address of the root page-table page */
	uint32_t id;			 /* device-visible mob id */
};

/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
static const struct vmw_otable pre_dx_tables[] = {
	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}
};

static const struct vmw_otable dx_tables[] = {
	{VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true},
	{VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE,
	 NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE},
	{VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true},
};

static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob);
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages);

/*
 * vmw_setup_otable_base - Issue an object table base setup command to
 * the device
 *
 * @dev_priv: Pointer to a device private structure
 * @type: Type of object table base
 * @otable_bo: Buffer object providing the otable backing storage
 * @offset: Start of table offset into @otable_bo
 * @otable: Pointer to otable metadata; @otable->page_table is set on success
 *
 * This function returns -ENOMEM if it fails to reserve fifo space,
 * and may block waiting for fifo space.
 */
static int vmw_setup_otable_base(struct vmw_private *dev_priv,
				 SVGAOTableType type,
				 struct ttm_buffer_object *otable_bo,
				 unsigned long offset,
				 struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase64 body;
	} *cmd;
	struct vmw_mob *mob;
	const struct vmw_sg_table *vsgt;
	struct vmw_piter iter;
	int ret;

	BUG_ON(otable->page_table != NULL);

	vsgt = vmw_bo_sg_table(otable_bo);
	/* Position the page iterator on the first page of this table. */
	vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT);
	WARN_ON(!vmw_piter_next(&iter));

	mob = vmw_mob_create(otable->size >> PAGE_SHIFT);
	if (unlikely(mob == NULL)) {
		DRM_ERROR("Failed creating OTable page table.\n");
		return -ENOMEM;
	}

	/*
	 * Pick the cheapest page-table format the layout allows:
	 * a single page needs no page table, one contiguous DMA region
	 * can use the RANGE format, anything else gets a real page table.
	 */
	if (otable->size <= PAGE_SIZE) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&iter);
	} else {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			goto out_no_populate;

		vmw_mob_pt_setup(mob, iter, otable->size >> PAGE_SHIFT);
		/* Translate the generic depth into the 64-bit PPN variant. */
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = otable->size;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = mob->pt_level;

	/*
	 * The device doesn't support this, But the otable size is
	 * determined at compile-time, so this BUG shouldn't trigger
	 * randomly.
	 */
	BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	otable->page_table = mob;

	return 0;

out_no_fifo:
out_no_populate:
	vmw_mob_destroy(mob);
	return ret;
}

/*
 * vmw_takedown_otable_base - Issue an object table base takedown command
 * to the device
 *
 * @dev_priv: Pointer to a device private structure
 * @type: Type of object table base
 * @otable: Pointer to otable metadata; page_table is destroyed and cleared
 *
 * NOTE(review): if fifo reservation fails, this returns without destroying
 * @otable->page_table — presumably deliberate since the device still
 * references the table; confirm against callers.
 */
static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
				     SVGAOTableType type,
				     struct vmw_otable *otable)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetOTableBase body;
	} *cmd;
	struct ttm_buffer_object *bo;

	if (otable->page_table == NULL)
		return;

	bo = otable->page_table->pt_bo;
	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return;

	/* Point the device at an invalid base to disable the table. */
	memset(cmd, 0, sizeof(*cmd));
	cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.type = type;
	cmd->body.baseAddress = 0;
	cmd->body.sizeInBytes = 0;
	cmd->body.validSizeInBytes = 0;
	cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	if (bo) {
		int ret;

		ret = ttm_bo_reserve(bo, false, true, NULL);
		BUG_ON(ret != 0);

		/* Fence the BO so it isn't reused before the device is done. */
		vmw_bo_fence_single(bo, NULL);
		ttm_bo_unreserve(bo);
	}

	vmw_mob_destroy(otable->page_table);
	otable->page_table = NULL;
}


/*
 * vmw_otable_batch_setup - Allocate one backing BO for a whole batch of
 * otables and issue a base-setup command for each enabled table.
 *
 * @dev_priv: Pointer to a device private structure
 * @batch: Batch descriptor; sizes are page-aligned in place and
 * @batch->otable_bo is allocated on success.
 */
static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
				  struct vmw_otable_batch *batch)
{
	unsigned long offset;
	unsigned long bo_size;
	struct vmw_otable *otables = batch->otables;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	SVGAOTableType i;
	int ret;

	/* Page-align each enabled table and total up the BO size. */
	bo_size = 0;
	for (i = 0; i < batch->num_otables; ++i) {
		if (!otables[i].enabled)
			continue;

		otables[i].size =
			(otables[i].size + PAGE_SIZE - 1) & PAGE_MASK;
		bo_size += otables[i].size;
	}

	ret = ttm_bo_create(&dev_priv->bdev, bo_size,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, &batch->otable_bo);
	if (unlikely(ret != 0))
		goto out_no_bo;

	ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
	BUG_ON(ret != 0);
	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(batch->otable_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(batch->otable_bo);

	/* Carve the BO into consecutive per-table slices. */
	offset = 0;
	for (i = 0; i < batch->num_otables; ++i) {
		if (!batch->otables[i].enabled)
			continue;

		ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
					    offset,
					    &otables[i]);
		if (unlikely(ret != 0))
			goto out_no_setup;
		offset += otables[i].size;
	}

	return 0;

out_unreserve:
	ttm_bo_unreserve(batch->otable_bo);
out_no_setup:
	/* Take down any tables that were already set up. */
	for (i = 0; i < batch->num_otables; ++i) {
		if (batch->otables[i].enabled)
			vmw_takedown_otable_base(dev_priv, i,
						 &batch->otables[i]);
	}

	ttm_bo_put(batch->otable_bo);
	batch->otable_bo = NULL;
out_no_bo:
	return ret;
}

/*
 * vmw_otables_setup - Set up guest backed memory object tables
 *
 * @dev_priv: Pointer to a device private structure
 *
 * Takes care of the device guest backed surface
 * initialization, by setting up the guest backed memory object tables.
 * Returns 0 on success and various error codes on failure. A successful return
 * means the object tables can be taken down using the vmw_otables_takedown
 * function.
322 1.1 riastrad */ 323 1.2 riastrad int vmw_otables_setup(struct vmw_private *dev_priv) 324 1.2 riastrad { 325 1.2 riastrad struct vmw_otable **otables = &dev_priv->otable_batch.otables; 326 1.2 riastrad int ret; 327 1.2 riastrad 328 1.2 riastrad if (dev_priv->has_dx) { 329 1.3 riastrad *otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL); 330 1.3 riastrad if (!(*otables)) 331 1.2 riastrad return -ENOMEM; 332 1.2 riastrad 333 1.2 riastrad dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables); 334 1.2 riastrad } else { 335 1.3 riastrad *otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables), 336 1.3 riastrad GFP_KERNEL); 337 1.3 riastrad if (!(*otables)) 338 1.2 riastrad return -ENOMEM; 339 1.2 riastrad 340 1.2 riastrad dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables); 341 1.2 riastrad } 342 1.2 riastrad 343 1.2 riastrad ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch); 344 1.2 riastrad if (unlikely(ret != 0)) 345 1.2 riastrad goto out_setup; 346 1.2 riastrad 347 1.2 riastrad return 0; 348 1.2 riastrad 349 1.2 riastrad out_setup: 350 1.2 riastrad kfree(*otables); 351 1.2 riastrad return ret; 352 1.2 riastrad } 353 1.2 riastrad 354 1.2 riastrad static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, 355 1.2 riastrad struct vmw_otable_batch *batch) 356 1.1 riastrad { 357 1.1 riastrad SVGAOTableType i; 358 1.2 riastrad struct ttm_buffer_object *bo = batch->otable_bo; 359 1.1 riastrad int ret; 360 1.1 riastrad 361 1.2 riastrad for (i = 0; i < batch->num_otables; ++i) 362 1.2 riastrad if (batch->otables[i].enabled) 363 1.2 riastrad vmw_takedown_otable_base(dev_priv, i, 364 1.2 riastrad &batch->otables[i]); 365 1.1 riastrad 366 1.3 riastrad ret = ttm_bo_reserve(bo, false, true, NULL); 367 1.1 riastrad BUG_ON(ret != 0); 368 1.1 riastrad 369 1.3 riastrad vmw_bo_fence_single(bo, NULL); 370 1.1 riastrad ttm_bo_unreserve(bo); 371 1.1 riastrad 372 1.3 riastrad ttm_bo_put(batch->otable_bo); 373 1.3 riastrad batch->otable_bo 
= NULL; 374 1.1 riastrad } 375 1.1 riastrad 376 1.2 riastrad /* 377 1.2 riastrad * vmw_otables_takedown - Take down guest backed memory object tables 378 1.2 riastrad * 379 1.2 riastrad * @dev_priv: Pointer to a device private structure 380 1.2 riastrad * 381 1.2 riastrad * Take down the Guest Memory Object tables. 382 1.2 riastrad */ 383 1.2 riastrad void vmw_otables_takedown(struct vmw_private *dev_priv) 384 1.2 riastrad { 385 1.2 riastrad vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch); 386 1.2 riastrad kfree(dev_priv->otable_batch.otables); 387 1.2 riastrad } 388 1.1 riastrad 389 1.1 riastrad /* 390 1.1 riastrad * vmw_mob_calculate_pt_pages - Calculate the number of page table pages 391 1.1 riastrad * needed for a guest backed memory object. 392 1.1 riastrad * 393 1.1 riastrad * @data_pages: Number of data pages in the memory object buffer. 394 1.1 riastrad */ 395 1.1 riastrad static unsigned long vmw_mob_calculate_pt_pages(unsigned long data_pages) 396 1.1 riastrad { 397 1.1 riastrad unsigned long data_size = data_pages * PAGE_SIZE; 398 1.1 riastrad unsigned long tot_size = 0; 399 1.1 riastrad 400 1.1 riastrad while (likely(data_size > PAGE_SIZE)) { 401 1.1 riastrad data_size = DIV_ROUND_UP(data_size, PAGE_SIZE); 402 1.1 riastrad data_size *= VMW_PPN_SIZE; 403 1.1 riastrad tot_size += (data_size + PAGE_SIZE - 1) & PAGE_MASK; 404 1.1 riastrad } 405 1.1 riastrad 406 1.1 riastrad return tot_size >> PAGE_SHIFT; 407 1.1 riastrad } 408 1.1 riastrad 409 1.1 riastrad /* 410 1.1 riastrad * vmw_mob_create - Create a mob, but don't populate it. 411 1.1 riastrad * 412 1.1 riastrad * @data_pages: Number of data pages of the underlying buffer object. 
413 1.1 riastrad */ 414 1.1 riastrad struct vmw_mob *vmw_mob_create(unsigned long data_pages) 415 1.1 riastrad { 416 1.1 riastrad struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL); 417 1.1 riastrad 418 1.3 riastrad if (unlikely(!mob)) 419 1.1 riastrad return NULL; 420 1.1 riastrad 421 1.1 riastrad mob->num_pages = vmw_mob_calculate_pt_pages(data_pages); 422 1.1 riastrad 423 1.1 riastrad return mob; 424 1.1 riastrad } 425 1.1 riastrad 426 1.1 riastrad /* 427 1.1 riastrad * vmw_mob_pt_populate - Populate the mob pagetable 428 1.1 riastrad * 429 1.1 riastrad * @mob: Pointer to the mob the pagetable of which we want to 430 1.1 riastrad * populate. 431 1.1 riastrad * 432 1.1 riastrad * This function allocates memory to be used for the pagetable, and 433 1.1 riastrad * adjusts TTM memory accounting accordingly. Returns ENOMEM if 434 1.1 riastrad * memory resources aren't sufficient and may cause TTM buffer objects 435 1.1 riastrad * to be swapped out by using the TTM memory accounting function. 
 */
static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
			       struct vmw_mob *mob)
{
	int ret;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};

	BUG_ON(mob->pt_bo != NULL);

	/* Allocate the BO that will hold all page-table pages. */
	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
			    ttm_bo_type_device,
			    &vmw_sys_ne_placement,
			    0, false, &mob->pt_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);

	BUG_ON(ret != 0);
	/* Back the BO with pages, then set up its DMA mappings. */
	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm, &ctx);
	if (unlikely(ret != 0))
		goto out_unreserve;
	ret = vmw_bo_map_dma(mob->pt_bo);
	if (unlikely(ret != 0))
		goto out_unreserve;

	ttm_bo_unreserve(mob->pt_bo);

	return 0;

out_unreserve:
	/* On failure, release the BO and leave the mob unpopulated. */
	ttm_bo_unreserve(mob->pt_bo);
	ttm_bo_put(mob->pt_bo);
	mob->pt_bo = NULL;

	return ret;
}

/**
 * vmw_mob_assign_ppn - Assign a value to a page table entry
 *
 * @addr: Pointer to pointer to page table entry.
 * @val: The page table entry
 *
 * Assigns a value to a page table entry pointed to by *@addr and increments
 * *@addr according to the page table entry size.
 */
#if (VMW_PPN_SIZE == 8)
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	/*
	 * 64-bit PPN entry: store the page frame number through a u64
	 * view of the u32 cursor, then advance by two u32 slots.
	 * NOTE(review): relies on *addr being 8-byte aligned within the
	 * page-table page — holds as long as entries start page-aligned.
	 */
	*((u64 *) *addr) = val >> PAGE_SHIFT;
	*addr += 2;
}
#else
static void vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
	/* 32-bit PPN entry: store the page frame number and advance. */
	*(*addr)++ = val >> PAGE_SHIFT;
}
#endif

/*
 * vmw_mob_build_pt - Build a pagetable
 *
 * @data_addr: Array of DMA addresses to the underlying buffer
 * object's data pages.
 * @num_data_pages: Number of buffer object data pages.
 * @pt_pages: Array of page pointers to the page table pages.
 *
 * Returns the number of page table pages actually used.
 * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
 */
static unsigned long vmw_mob_build_pt(struct vmw_piter *data_iter,
				      unsigned long num_data_pages,
				      struct vmw_piter *pt_iter)
{
	/* One PPN entry per data page, packed into page-table pages. */
	unsigned long pt_size = num_data_pages * VMW_PPN_SIZE;
	unsigned long num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE);
	unsigned long pt_page;
	u32 *addr, *save_addr;
	unsigned long i;
	struct page *page;

	for (pt_page = 0; pt_page < num_pt_pages; ++pt_page) {
		page = vmw_piter_page(pt_iter);

		/* Keep the base for kunmap; addr is advanced per entry. */
		save_addr = addr = kmap_atomic(page);

		for (i = 0; i < PAGE_SIZE / VMW_PPN_SIZE; ++i) {
			vmw_mob_assign_ppn(&addr,
					   vmw_piter_dma_addr(data_iter));
			/*
			 * Decrement before advancing: the current entry was
			 * just written, so stop without stepping past the
			 * last data page.
			 */
			if (unlikely(--num_data_pages == 0))
				break;
			WARN_ON(!vmw_piter_next(data_iter));
		}
		kunmap_atomic(save_addr);
		vmw_piter_next(pt_iter);
	}

	return num_pt_pages;
}

/*
 * vmw_mob_pt_setup - Set up a multilevel mob pagetable
 *
 * @mob: Pointer to a mob whose page table needs setting up.
 * @data_addr Array of DMA addresses to the buffer object's data
 * pages.
 * @num_data_pages: Number of buffer object data pages.
 *
 * Uses tail recursion to set up a multilevel mob page table.
 */
static void vmw_mob_pt_setup(struct vmw_mob *mob,
			     struct vmw_piter data_iter,
			     unsigned long num_data_pages)
{
	unsigned long num_pt_pages = 0;
	struct ttm_buffer_object *bo = mob->pt_bo;
	struct vmw_piter save_pt_iter;
	struct vmw_piter pt_iter;
	const struct vmw_sg_table *vsgt;
	int ret;

	ret = ttm_bo_reserve(bo, false, true, NULL);
	BUG_ON(ret != 0);

	vsgt = vmw_bo_sg_table(bo);
	vmw_piter_start(&pt_iter, vsgt, 0);
	BUG_ON(!vmw_piter_next(&pt_iter));
	mob->pt_level = 0;
	/*
	 * Build levels bottom-up: each pass writes PPN entries for the
	 * previous level's pages, and the just-written pages become the
	 * "data" for the next, smaller level, until one page remains.
	 */
	while (likely(num_data_pages > 1)) {
		++mob->pt_level;
		BUG_ON(mob->pt_level > 2);
		save_pt_iter = pt_iter;
		num_pt_pages = vmw_mob_build_pt(&data_iter, num_data_pages,
						&pt_iter);
		data_iter = save_pt_iter;
		num_data_pages = num_pt_pages;
	}

	/* save_pt_iter points at the first (root) page of the top level. */
	mob->pt_root_page = vmw_piter_dma_addr(&save_pt_iter);
	ttm_bo_unreserve(bo);
}

/*
 * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary.
 *
 * @mob: Pointer to a mob to destroy.
 */
void vmw_mob_destroy(struct vmw_mob *mob)
{
	if (mob->pt_bo) {
		ttm_bo_put(mob->pt_bo);
		mob->pt_bo = NULL;
	}
	kfree(mob);
}

/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob to unbind; its device id is sent to the device.
 */
void vmw_mob_unbind(struct vmw_private *dev_priv,
		    struct vmw_mob *mob)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBMob body;
	} *cmd;
	int ret;
	struct ttm_buffer_object *bo = mob->pt_bo;

	if (bo) {
		ret = ttm_bo_reserve(bo, false, true, NULL);
		/*
		 * Noone else should be using this buffer.
		 */
		BUG_ON(ret != 0);
	}

	/* Best effort: if fifo space is unavailable, skip the command. */
	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (cmd) {
		cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.mobid = mob->id;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}

	if (bo) {
		/* Fence so the page table isn't reused before device is done. */
		vmw_bo_fence_single(bo, NULL);
		ttm_bo_unreserve(bo);
	}
	/* Balances the vmw_fifo_resource_inc() done in vmw_mob_bind(). */
	vmw_fifo_resource_dec(dev_priv);
}

/*
 * vmw_mob_bind - Make a mob visible to the device after first
 * populating it if necessary.
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob we're making visible.
 * @data_addr: Array of DMA addresses to the data pages of the underlying
 * buffer object.
 * @num_data_pages: Number of data pages of the underlying buffer
 * object.
 * @mob_id: Device id of the mob to bind
 *
 * This function is intended to be interfaced with the ttm_tt backend
 * code.
 */
int vmw_mob_bind(struct vmw_private *dev_priv,
		 struct vmw_mob *mob,
		 const struct vmw_sg_table *vsgt,
		 unsigned long num_data_pages,
		 int32_t mob_id)
{
	int ret;
	bool pt_set_up = false;
	struct vmw_piter data_iter;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBMob64 body;
	} *cmd;

	mob->id = mob_id;
	vmw_piter_start(&data_iter, vsgt, 0);
	/* An empty backing store needs no device-side definition. */
	if (unlikely(!vmw_piter_next(&data_iter)))
		return 0;

	/*
	 * Same format selection as vmw_setup_otable_base(): no page table
	 * for a single page, RANGE format for one contiguous region,
	 * otherwise build a real page table (reusing an existing pt_bo
	 * if the mob already has one).
	 */
	if (likely(num_data_pages == 1)) {
		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (vsgt->num_regions == 1) {
		mob->pt_level = SVGA3D_MOBFMT_RANGE;
		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
	} else if (unlikely(mob->pt_bo == NULL)) {
		ret = vmw_mob_pt_populate(dev_priv, mob);
		if (unlikely(ret != 0))
			return ret;

		vmw_mob_pt_setup(mob, data_iter, num_data_pages);
		pt_set_up = true;
		/* Translate the generic depth into the 64-bit PPN variant. */
		mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
	}

	/* Paired with vmw_fifo_resource_dec() in vmw_mob_unbind(). */
	vmw_fifo_resource_inc(dev_priv);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		goto out_no_cmd_space;

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.mobid = mob_id;
	cmd->body.ptDepth = mob->pt_level;
	cmd->body.base = mob->pt_root_page >> PAGE_SHIFT;
	cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;

out_no_cmd_space:
	vmw_fifo_resource_dec(dev_priv);
	/* Only tear down a page table this call created. */
	if (pt_set_up) {
		ttm_bo_put(mob->pt_bo);
		mob->pt_bo = NULL;
	}

	return -ENOMEM;
}