/*	$NetBSD: ttm_memory.c,v 1.9 2021/12/19 11:07:20 riastradh Exp $	*/

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_memory.c,v 1.9 2021/12/19 11:07:20 riastradh Exp $");

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/swap.h>

/* How many times ttm_mem_global_alloc_zone() retries (via ttm_shrink) */
#define TTM_MEMORY_ALLOC_RETRIES 4

struct ttm_mem_global ttm_mem_glob;
EXPORT_SYMBOL(ttm_mem_glob);

/*
 * One accounting zone (kernel / highmem / dma32).  All fields except
 * the kobject are protected by glob->lock.
 */
struct ttm_mem_zone {
#ifndef __NetBSD__
	struct kobject kobj;		/* sysfs node; not built on NetBSD */
#endif
	struct ttm_mem_global *glob;	/* back pointer to owning global */
	const char *name;		/* "kernel", "highmem" or "dma32" */
	uint64_t zone_mem;		/* total memory assigned to zone */
	uint64_t emer_mem;	/* limit used for CAP_SYS_ADMIN callers */
	uint64_t max_mem;	/* limit used for ordinary callers */
	uint64_t swap_limit;	/* used_mem above this queues swapout work */
	uint64_t used_mem;	/* currently accounted memory */
};

#ifndef __NetBSD__
static struct attribute ttm_mem_sys = {
	.name = "zone_memory",
	.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
	.name = "emergency_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
	.name = "available_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
	.name = "swap_limit",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
	.name = "used_memory",
	.mode = S_IRUGO
};

/* kobject release: log residual usage and free the zone. */
static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);

	pr_info("Zone %7s: Used memory at exit: %llu KiB\n",
		zone->name, (unsigned long long)zone->used_mem >> 10);
	kfree(zone);
}

/* sysfs show: report the selected zone counter in KiB. */
static ssize_t ttm_mem_zone_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	uint64_t val = 0;

	spin_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	spin_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

/*
 * sysfs store: set a zone limit (input in KiB, clamped to zone_mem).
 * Keeps the invariant emer_mem >= max_mem, then rechecks swapping.
 */
static ssize_t ttm_mem_zone_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	val64 <<= 10;

	spin_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	spin_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}

static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
};

static struct attribute ttm_mem_global_lower_mem_limit = {
	.name = "lower_mem_limit",
	.mode = S_IRUGO | S_IWUSR
};

/* Global sysfs show: lower_mem_limit, converted from pages to KiB. */
static ssize_t ttm_mem_global_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);
	uint64_t val = 0;

	spin_lock(&glob->lock);
	val = glob->lower_mem_limit;
	spin_unlock(&glob->lock);
	/* convert from number of pages to KB */
	val <<= (PAGE_SHIFT - 10);
	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val);
}

/* Global sysfs store: lower_mem_limit, converted from KiB to pages. */
static ssize_t ttm_mem_global_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	int chars;
	uint64_t val64;
	unsigned long val;
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	/* convert from KB to number of pages */
	val64 >>= (PAGE_SHIFT - 10);

	spin_lock(&glob->lock);
	glob->lower_mem_limit = val64;
	spin_unlock(&glob->lock);

	return size;
}

static struct attribute *ttm_mem_global_attrs[] = {
	&ttm_mem_global_lower_mem_limit,
	NULL
};

static const struct sysfs_ops ttm_mem_global_ops = {
	.show = &ttm_mem_global_show,
	.store = &ttm_mem_global_store,
};

static struct kobj_type ttm_mem_glob_kobj_type = {
	.sysfs_ops = &ttm_mem_global_ops,
	.default_attrs = ttm_mem_global_attrs,
};
#endif

/*
 * True if any zone's used_mem exceeds its applicable target
 * (swap_limit from the workqueue; emer_mem/max_mem otherwise,
 * depending on CAP_SYS_ADMIN), taking a pending allocation of
 * `extra` bytes into account.  Caller holds glob->lock.
 */
static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (capable(CAP_SYS_ADMIN))
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		/* If the request alone exceeds the target, always shrink. */
		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}
	return false;
}

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
			uint64_t extra, struct ttm_operation_ctx *ctx)
{
	int ret;

	spin_lock(&glob->lock);

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		/* Drop the lock across the swapout call; state is
		 * re-evaluated under the lock on each iteration. */
		spin_unlock(&glob->lock);
		ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
		spin_lock(&glob->lock);
		if (unlikely(ret != 0))
			break;
	}

	spin_unlock(&glob->lock);
}

/* Workqueue entry point: shrink uninterruptibly until below swap_limit. */
static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_mem_global *glob =
	    container_of(work, struct ttm_mem_global, work);

	ttm_shrink(glob, true, 0ULL, &ctx);
}

/*
 * Set up the "kernel" zone from lowmem (totalram - totalhigh).
 * Limits: max = 1/2, emer = 3/4, swap = 3/8 of zone memory.
 */
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
#ifndef __NetBSD__
	int ret;
#endif

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram - si->totalhigh;
	mem *= si->mem_unit;

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
#ifndef __NetBSD__
	/* NOTE(review): zone->name is passed in the format-string slot
	 * (the highmem variant uses "%s"); harmless here since the name
	 * is a constant without '%', but worth confirming upstream. */
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
#endif
	glob->zones[glob->num_zones++] = zone;
	return 0;
}

#ifdef CONFIG_HIGHMEM
/* Set up the "highmem" zone over all RAM; skipped if totalhigh == 0. */
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
				     const struct sysinfo *si)
{
	struct ttm_mem_zone *zone;
	uint64_t mem;
#ifndef __NetBSD__
	int ret;
#endif

	if (si->totalhigh == 0)
		return 0;

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	zone->name = "highmem";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_highmem = zone;
#ifndef __NetBSD__
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
#endif
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#else
/*
 * Set up the "dma32" zone (only when !CONFIG_HIGHMEM); skipped when
 * total RAM fits in 32 bits, capped at 4 GiB otherwise.
 */
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
#ifndef __NetBSD__
	int ret;
#endif

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	/**
	 * No special dma32 zone needed.
	 */

	if (mem <= ((uint64_t) 1ULL << 32)) {
		kfree(zone);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */

	mem = ((uint64_t) 1ULL << 32);
	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
#ifndef __NetBSD__
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
#endif
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#endif

/*
 * Initialize the global accounting state: lock, swap workqueue,
 * sysfs node (non-NetBSD), zones, and the page allocators.
 * On zone-init failure everything is torn down via
 * ttm_mem_global_release().
 */
int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	struct sysinfo si;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);
#ifndef __NetBSD__
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
	if (unlikely(ret != 0)) {
		kobject_put(&glob->kobj);
		return ret;
	}
#endif

	si_meminfo(&si);

	/* set it as 0 by default to keep original behavior of OOM */
	glob->lower_mem_limit = 0;

	ret = ttm_mem_init_kernel_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#ifdef CONFIG_HIGHMEM
	ret = ttm_mem_init_highmem_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#else
	ret = ttm_mem_init_dma32_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#endif
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
			zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}

/*
 * Tear down in reverse order of init: page allocators first (they stop
 * using the shrink work), then the workqueue, zones, sysfs node.
 */
void
ttm_mem_global_release(struct ttm_mem_global *glob)
{
	struct ttm_mem_zone *zone;
	unsigned int i;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
#ifdef __NetBSD__
		kfree(zone);
#else
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
#endif
	}
#ifndef __NetBSD__
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
#endif
	memset(glob, 0, sizeof(*glob));
}

/* Queue the shrink work if any zone is over its swap_limit. */
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}

	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		(void)queue_work(glob->swap_queue, &glob->work);

}

/*
 * Subtract `amount` from used_mem of every zone, or of just
 * `single_zone` when it is non-NULL.
 */
static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->lock);
}

/* Release kernel-zone memory previously accounted by ttm_mem_global_alloc(). */
void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

/*
 * check if the available mem is under lower memory limit
 *
 * a. if no swap disk at all or free swap space is under swap_mem_limit
 * but available system mem is bigger than sys_mem_limit, allow TTM
 * allocation;
 *
 * b. if the available system mem is less than sys_mem_limit but free
 * swap disk is bigger than swap_mem_limit, allow TTM allocation.
 */
bool
ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
			uint64_t num_pages,
			struct ttm_operation_ctx *ctx)
{
	int64_t available;

	/* Forced allocations bypass the limit check entirely. */
	if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC)
		return false;

	available = get_nr_swap_pages() + si_mem_available();
	available -= num_pages;
	if (available < glob->lower_mem_limit)
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_check_under_lowerlimit);

/*
 * Check every affected zone against its limit (emer_mem for
 * CAP_SYS_ADMIN, max_mem otherwise) and, if `reserve` and all pass,
 * charge `amount` to each.  Returns 0 on success, -ENOMEM when some
 * zone is over its limit.  Always rechecks swapping on the way out.
 */
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (capable(CAP_SYS_ADMIN)) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}


/*
 * Try to reserve `memory` bytes; on failure, shrink (swap out) and
 * retry up to TTM_MEMORY_ALLOC_RETRIES times unless ctx->no_wait_gpu.
 */
static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     struct ttm_operation_ctx *ctx)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob,
					       single_zone,
					       memory, true)
			!= 0)) {
		if (ctx->no_wait_gpu)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		/* Ask for some headroom: 1.25 * memory + 16 bytes. */
		ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
	}

	return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 struct ttm_operation_ctx *ctx)
{
	/**
	 * Normal allocations of kernel memory are registered in
	 * the kernel zone.
	 */

	return ttm_mem_global_alloc_zone(glob, glob->zone_kernel, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);

/* Account a page allocation in the appropriate zone (see below). */
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page, uint64_t size,
			      struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_zone *zone = NULL;

	/**
	 * Page allocations may be registed in a single zone
	 * only if highmem or !dma32.
	 */

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	/* Pages above the 4 GiB pfn boundary are charged to the kernel
	 * zone only, not to dma32. */
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
}

/* Undo ttm_mem_global_alloc_page() accounting; zone choice must match. */
void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
			      uint64_t size)
{
	struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	ttm_mem_global_free_zone(glob, zone, size);
}

/*
 * Round `size` up to a power of two below PAGE_SIZE (minimum 4),
 * or to a whole number of pages above it.
 */
size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
	return 0;	/* unreachable: both branches above return */
}
EXPORT_SYMBOL(ttm_round_pot);

uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
{
	return glob->zone_kernel->max_mem;
}
EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);