/*	$NetBSD: drm_vma_manager.c,v 1.3 2021/12/18 23:44:57 riastradh Exp $	*/

// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * Copyright (c) 2012 David Airlie <airlied (at) linux.ie>
 * Copyright (c) 2013 David Herrmann <dh.herrmann (at) gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_vma_manager.c,v 1.3 2021/12/18 23:44:57 riastradh Exp $");

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>

/**
 * DOC: vma offset manager
 *
 * The vma-manager is responsible for mapping arbitrary driver-dependent
 * memory regions into the linear user address-space. It provides offsets to
 * the caller, which can then be used on the address_space of the drm-device.
 * It takes care not to overlap regions, to size them appropriately, and not
 * to confuse mm-core with inconsistent fake vm_pgoff fields.
 * Drivers shouldn't use this for object placement in VMEM. This manager
 * should only be used to manage mappings into linear user-space VMs.
 *
 * We use drm_mm as the backend to manage object allocations. But it is
 * highly optimized for alloc/free calls, not lookups. Hence, we use an
 * rb-tree to speed up offset lookups.
 *
 * You must not use multiple offset managers on a single address_space.
 * Otherwise, mm-core will be unable to tear down memory mappings as the VM
 * will no longer be linear.
 *
 * This offset manager works on page-based addresses. That is, every argument
 * and return code (with the exception of drm_vma_node_offset_addr()) is
 * given in number of pages, not number of bytes. That means object sizes and
 * offsets must always be page-aligned (as usual).
 * If you want to get a valid byte-based user-space address for a given
 * offset, please see drm_vma_node_offset_addr().
 *
 * In addition to offset management, the vma offset manager also handles
 * access management. For every open-file context that is allowed to access a
 * given node, you must call drm_vma_node_allow(). Otherwise, an mmap() call
 * on this open-file with the offset of the node will fail with -EACCES. To
 * revoke access again, use drm_vma_node_revoke(). However, the caller is
 * responsible for destroying already existing mappings, if required.
 */
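
/*
 * Example: a minimal node life cycle, as a sketch only.  The object "obj",
 * its embedded "vma_node" member, its page count "num_pages", and the
 * drm_file pointer "file_priv" are hypothetical driver-side names; the
 * drm_vma_* calls are the ones defined in this file and in
 * <drm/drm_vma_manager.h>:
 *
 *	struct drm_vma_offset_manager mgr;
 *
 *	drm_vma_offset_manager_init(&mgr, page_offset, size);
 *
 *	ret = drm_vma_offset_add(&mgr, &obj->vma_node, obj->num_pages);
 *	if (ret)
 *		return ret;
 *	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
 *	if (ret)
 *		return ret;
 *
 *	// Byte-based offset that user-space passes to mmap().
 *	offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 *	drm_vma_node_revoke(&obj->vma_node, file_priv);
 *	drm_vma_offset_remove(&mgr, &obj->vma_node);
 *	drm_vma_offset_manager_destroy(&mgr);
 */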

/**
 * drm_vma_offset_manager_init - Initialize new offset-manager
 * @mgr: Manager object
 * @page_offset: Offset of available memory area (page-based)
 * @size: Size of available address space range (page-based)
 *
 * Initialize a new offset-manager. The offset and area size available for the
 * manager are given as @page_offset and @size. Both are interpreted as
 * page-numbers, not bytes.
 *
 * Adding/removing nodes from the manager is locked internally and protected
 * against concurrent access. However, node allocation and destruction are
 * left to the caller. While calling into the vma-manager, a given node must
 * always be guaranteed to be referenced.
 */
void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
				 unsigned long page_offset, unsigned long size)
{
	rwlock_init(&mgr->vm_lock);
	drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
}
EXPORT_SYMBOL(drm_vma_offset_manager_init);

/**
 * drm_vma_offset_manager_destroy() - Destroy offset manager
 * @mgr: Manager object
 *
 * Destroy an object manager which was previously created via
 * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
 * before destroying the manager. Otherwise, drm_mm will refuse to free the
 * requested resources.
 *
 * The manager must not be accessed after this function is called.
 */
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
	drm_mm_takedown(&mgr->vm_addr_space_mm);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);

/**
 * drm_vma_offset_lookup_locked() - Find node in offset space
 * @mgr: Manager object
 * @start: Start address for object (page-based)
 * @pages: Size of object (page-based)
 *
 * Find a node given a start address and object size. This returns the _best_
 * match for the given node. That is, @start may point somewhere into a valid
 * region and the given node will be returned, as long as the node spans the
 * whole requested area (given the size in number of pages as @pages).
 *
 * Note that before lookup the vma offset manager lookup lock must be acquired
 * with drm_vma_offset_lock_lookup(). See there for an example. This can then
 * be used to implement weakly referenced lookups using
 * kref_get_unless_zero().
 *
 * Example:
 *
 * ::
 *
 *     drm_vma_offset_lock_lookup(mgr);
 *     node = drm_vma_offset_lookup_locked(mgr, start, pages);
 *     if (node)
 *         kref_get_unless_zero(container_of(node, sth, entr));
 *     drm_vma_offset_unlock_lookup(mgr);
 *
 * RETURNS:
 * Returns NULL if no suitable node can be found. Otherwise, the best match
 * is returned. It's the caller's responsibility to make sure the node doesn't
 * get destroyed before the caller can access it.
 */
struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
							 unsigned long start,
							 unsigned long pages)
{
	struct drm_mm_node *node, *best;
	struct rb_node *iter;
	unsigned long offset;

	iter = mgr->vm_addr_space_mm.interval_tree.rb_root.rb_node;
	best = NULL;

	while (likely(iter)) {
		node = rb_entry(iter, struct drm_mm_node, rb);
		offset = node->start;
		if (start >= offset) {
			iter = iter->rb_right;
			best = node;
			if (start == offset)
				break;
		} else {
			iter = iter->rb_left;
		}
	}

	/* verify that the node spans the requested area */
	if (best) {
		offset = best->start + best->size;
		if (offset < start + pages)
			best = NULL;
	}

	if (!best)
		return NULL;

	return container_of(best, struct drm_vma_offset_node, vm_node);
}
EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
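
/*
 * Concrete illustration of the best-match semantics above (the numbers are
 * made up for the example): if a node occupies pages [100, 116), then
 * drm_vma_offset_lookup_locked(mgr, 104, 4) returns that node, because
 * pages [104, 108) lie entirely inside it, while
 * drm_vma_offset_lookup_locked(mgr, 112, 8) returns NULL, because pages
 * [112, 120) extend past the node's end.
 */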

/**
 * drm_vma_offset_add() - Add offset node to manager
 * @mgr: Manager object
 * @node: Node to be added
 * @pages: Allocation size visible to user-space (in number of pages)
 *
 * Add a node to the offset-manager. If the node was already added, this does
 * nothing and returns 0. @pages is the size of the object given in number of
 * pages.
 * After this call succeeds, you can access the offset of the node until it
 * is removed again.
 *
 * If this call fails, it is safe to retry the operation or to call
 * drm_vma_offset_remove() anyway; no cleanup is required in that case.
 *
 * @pages is not required to be the same size as the underlying memory object
 * that you want to map. It only limits the size that user-space can map into
 * their address space.
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
		       struct drm_vma_offset_node *node, unsigned long pages)
{
	int ret = 0;

	write_lock(&mgr->vm_lock);

	if (!drm_mm_node_allocated(&node->vm_node))
		ret = drm_mm_insert_node(&mgr->vm_addr_space_mm,
					 &node->vm_node, pages);

	write_unlock(&mgr->vm_lock);

	return ret;
}
EXPORT_SYMBOL(drm_vma_offset_add);

/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	write_lock(&mgr->vm_lock);

	if (drm_mm_node_allocated(&node->vm_node)) {
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_remove);
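
/*
 * Example: because drm_vma_offset_add() is a no-op on an already-added node,
 * a driver can create the fake offset lazily when user-space first asks for
 * it.  A sketch only; "obj", "vma_node", "num_pages" and "offset_p" are
 * hypothetical driver-side names:
 *
 *	ret = drm_vma_offset_add(mgr, &obj->vma_node, obj->num_pages);
 *	if (ret)
 *		return ret;
 *	*offset_p = drm_vma_node_offset_addr(&obj->vma_node);
 *	return 0;
 */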

/**
 * drm_vma_node_allow - Add open-file to list of allowed users
 * @node: Node to modify
 * @tag: Tag of allowed file
 *
 * Add @tag to the list of allowed open-files for this node. If @tag is
 * already on this list, the ref-count is incremented.
 *
 * The list of allowed-users is preserved across drm_vma_offset_add() and
 * drm_vma_offset_remove() calls. You may even call it if the node is currently
 * not added to any offset-manager.
 *
 * You must remove all open-files the same number of times as you added them
 * before destroying the node. Otherwise, you will leak memory.
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * 0 on success, negative error code on internal failure (out-of-mem)
 */
int drm_vma_node_allow(struct drm_vma_offset_node *node, struct drm_file *tag)
{
	struct rb_node **iter;
	struct rb_node *parent = NULL;
	struct drm_vma_offset_file *new, *entry;
	int ret = 0;

	/* Preallocate entry to avoid atomic allocations below. It is quite
	 * unlikely that an open-file is added twice to a single node so we
	 * don't optimize for this case. OOM is checked below only if the entry
	 * is actually used.
	 */
	new = kmalloc(sizeof(*entry), GFP_KERNEL);

	write_lock(&node->vm_lock);

	iter = &node->vm_files.rb_node;

	while (likely(*iter)) {
		parent = *iter;
		entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);

		if (tag == entry->vm_tag) {
			entry->vm_count++;
			goto unlock;
		} else if (tag > entry->vm_tag) {
			iter = &(*iter)->rb_right;
		} else {
			iter = &(*iter)->rb_left;
		}
	}

	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}

	new->vm_tag = tag;
	new->vm_count = 1;
	rb_link_node(&new->vm_rb, parent, iter);
	rb_insert_color(&new->vm_rb, &node->vm_files);
	new = NULL;

unlock:
	write_unlock(&node->vm_lock);
	kfree(new);
	return ret;
}
EXPORT_SYMBOL(drm_vma_node_allow);

/**
 * drm_vma_node_revoke - Remove open-file from list of allowed users
 * @node: Node to modify
 * @tag: Tag of file to remove
 *
 * Decrement the ref-count of @tag in the list of allowed open-files on @node.
 * If the ref-count drops to zero, remove @tag from the list. You must call
 * this once for every drm_vma_node_allow() on @tag.
 *
 * This is locked against concurrent access internally.
 *
 * If @tag is not on the list, nothing is done.
 */
void drm_vma_node_revoke(struct drm_vma_offset_node *node,
			 struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	write_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag) {
			if (!--entry->vm_count) {
				rb_erase(&entry->vm_rb, &node->vm_files);
				kfree(entry);
			}
			break;
		} else if (tag > entry->vm_tag) {
			iter = iter->rb_right;
		} else {
			iter = iter->rb_left;
		}
	}

	write_unlock(&node->vm_lock);
}
EXPORT_SYMBOL(drm_vma_node_revoke);
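
/*
 * Example: pairing allow/revoke with per-open-file handle creation and
 * destruction, as a sketch.  The hook names are hypothetical; the point is
 * that every successful drm_vma_node_allow() is matched by exactly one
 * drm_vma_node_revoke() for the same tag:
 *
 *	static int driver_handle_open(struct drm_vma_offset_node *node,
 *	    struct drm_file *file)
 *	{
 *		return drm_vma_node_allow(node, file);
 *	}
 *
 *	static void driver_handle_close(struct drm_vma_offset_node *node,
 *	    struct drm_file *file)
 *	{
 *		drm_vma_node_revoke(node, file);
 *	}
 */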

/**
 * drm_vma_node_is_allowed - Check whether an open-file is granted access
 * @node: Node to check
 * @tag: Tag of file to check
 *
 * Search the list in @node to see whether @tag is currently on the list of
 * allowed open-files (see drm_vma_node_allow()).
 *
 * This is locked against concurrent access internally.
 *
 * RETURNS:
 * true iff @tag is on the list
 */
bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
			     struct drm_file *tag)
{
	struct drm_vma_offset_file *entry;
	struct rb_node *iter;

	read_lock(&node->vm_lock);

	iter = node->vm_files.rb_node;
	while (likely(iter)) {
		entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
		if (tag == entry->vm_tag)
			break;
		else if (tag > entry->vm_tag)
			iter = iter->rb_right;
		else
			iter = iter->rb_left;
	}

	read_unlock(&node->vm_lock);

	return iter;
}
EXPORT_SYMBOL(drm_vma_node_is_allowed);
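
/*
 * Example: a typical access check in a driver's mmap path, as a sketch.
 * "file_priv" and "vma" are hypothetical names for the caller's drm_file and
 * vm_area_struct; the lookup runs under the manager's lookup lock, and the
 * node must be kept alive by the caller afterwards (e.g. via a reference
 * taken while the lock is held):
 *
 *	drm_vma_offset_lock_lookup(mgr);
 *	node = drm_vma_offset_lookup_locked(mgr, vma->vm_pgoff,
 *	    vma_pages(vma));
 *	drm_vma_offset_unlock_lookup(mgr);
 *	if (!node)
 *		return -EINVAL;
 *	if (!drm_vma_node_is_allowed(node, file_priv))
 *		return -EACCES;
 */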