/*	$NetBSD: ttm_memory.c,v 1.9 2021/12/19 11:07:20 riastradh Exp $	*/

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_memory.c,v 1.9 2021/12/19 11:07:20 riastradh Exp $");

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/swap.h>

#define TTM_MEMORY_ALLOC_RETRIES 4

struct ttm_mem_global ttm_mem_glob;
EXPORT_SYMBOL(ttm_mem_glob);

struct ttm_mem_zone {
#ifndef __NetBSD__
	struct kobject kobj;
#endif
	struct ttm_mem_global *glob;
	const char *name;
	uint64_t zone_mem;
	uint64_t emer_mem;
	uint64_t max_mem;
	uint64_t swap_limit;
	uint64_t used_mem;
};

#ifndef __NetBSD__
static struct attribute ttm_mem_sys = {
	.name = "zone_memory",
	.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
	.name = "emergency_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
	.name = "available_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
	.name = "swap_limit",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
	.name = "used_memory",
	.mode = S_IRUGO
};

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);

	pr_info("Zone %7s: Used memory at exit: %llu KiB\n",
		zone->name, (unsigned long long)zone->used_mem >> 10);
	kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	uint64_t val = 0;

	spin_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	spin_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

static ssize_t ttm_mem_zone_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	val64 <<= 10;

	spin_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	spin_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}

static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
};

static struct attribute ttm_mem_global_lower_mem_limit = {
	.name = "lower_mem_limit",
	.mode = S_IRUGO | S_IWUSR
};

static ssize_t ttm_mem_global_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);
	uint64_t val = 0;

	spin_lock(&glob->lock);
	val = glob->lower_mem_limit;
	spin_unlock(&glob->lock);
	/* convert from number of pages to KB */
	val <<= (PAGE_SHIFT - 10);
	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val);
}

static ssize_t ttm_mem_global_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	int chars;
	uint64_t val64;
	unsigned long val;
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	/* convert from KB to number of pages */
	val64 >>= (PAGE_SHIFT - 10);

	spin_lock(&glob->lock);
	glob->lower_mem_limit = val64;
	spin_unlock(&glob->lock);

	return size;
}

static struct attribute *ttm_mem_global_attrs[] = {
	&ttm_mem_global_lower_mem_limit,
	NULL
};

static const struct sysfs_ops ttm_mem_global_ops = {
	.show = &ttm_mem_global_show,
	.store = &ttm_mem_global_store,
};

static struct kobj_type ttm_mem_glob_kobj_type = {
	.sysfs_ops = &ttm_mem_global_ops,
	.default_attrs = ttm_mem_global_attrs,
};
#endif
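
/*
 * On Linux builds the kobjects above expose the accounting knobs through
 * sysfs.  A plausible resulting layout (the paths are illustrative; the
 * exact parent directory depends on what ttm_get_kobj() returns):
 *
 *	.../memory_accounting/lower_mem_limit
 *	.../memory_accounting/kernel/zone_memory
 *	.../memory_accounting/kernel/available_memory
 *	.../memory_accounting/kernel/emergency_memory
 *	.../memory_accounting/kernel/swap_limit
 *	.../memory_accounting/kernel/used_memory
 *
 * Per-zone values are read and written in KiB, per the <<10 / >>10
 * shifts in ttm_mem_zone_show() and ttm_mem_zone_store().
 */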

static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (capable(CAP_SYS_ADMIN))
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}
	return false;
}

/*
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks
 * (a sketch of such an extension follows ttm_shrink() below).
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */
static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
			uint64_t extra, struct ttm_operation_ctx *ctx)
{
	int ret;

	spin_lock(&glob->lock);

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		spin_unlock(&glob->lock);
		ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
		spin_lock(&glob->lock);
		if (unlikely(ret != 0))
			break;
	}

	spin_unlock(&glob->lock);
}
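
#if 0	/* illustrative only, not compiled */
/*
 * A minimal sketch of the extension suggested above: replacing the
 * single hard-wired ttm_bo_swapout() call with a linked list of shrink
 * callbacks walked under the same retry policy.  Everything here
 * (struct ttm_mem_shrinker, ttm_run_shrinkers(), a shrinkers list_head
 * in struct ttm_mem_global) is hypothetical and exists neither in this
 * file nor in the TTM headers.
 */
struct ttm_mem_shrinker {
	struct list_head link;
	int (*shrink)(struct ttm_operation_ctx *ctx);
};

static int ttm_run_shrinkers(struct ttm_mem_global *glob,
			     struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_shrinker *s;
	int ret = -EBUSY;

	/* glob->shrinkers would be a list_head initialized at init time. */
	list_for_each_entry(s, &glob->shrinkers, link) {
		ret = s->shrink(ctx);
		if (ret == 0)
			break;	/* freed something; re-check the zones */
	}
	return ret;
}
#endif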

static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_mem_global *glob =
	    container_of(work, struct ttm_mem_global, work);

	ttm_shrink(glob, true, 0ULL, &ctx);
}

static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
#ifndef __NetBSD__
	int ret;
#endif

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram - si->totalhigh;
	mem *= si->mem_unit;

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
#ifndef __NetBSD__
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
#endif
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
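
/*
 * Worked example of the limit derivation above (numbers assumed for
 * illustration): with 8 GiB of low memory, zone_mem = 8 GiB,
 * max_mem = mem >> 1 = 4 GiB, emer_mem = (mem >> 1) + (mem >> 2) = 6 GiB,
 * and swap_limit = max_mem - (mem >> 3) = 3 GiB.  So ordinary users may
 * account up to half of RAM, CAP_SYS_ADMIN may reach three quarters, and
 * background swapping kicks in at three eighths.
 */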

#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
				     const struct sysinfo *si)
{
	struct ttm_mem_zone *zone;
	uint64_t mem;
#ifndef __NetBSD__
	int ret;
#endif

	if (si->totalhigh == 0)
		return 0;

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	zone->name = "highmem";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_highmem = zone;
#ifndef __NetBSD__
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
#endif
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
#ifndef __NetBSD__
	int ret;
#endif

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	/* No special dma32 zone needed. */
	if (mem <= ((uint64_t) 1ULL << 32)) {
		kfree(zone);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */
	mem = ((uint64_t) 1ULL << 32);
	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
#ifndef __NetBSD__
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
#endif
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#endif

int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	struct sysinfo si;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	if (unlikely(glob->swap_queue == NULL))
		return -ENOMEM;
	INIT_WORK(&glob->work, ttm_shrink_work);
#ifndef __NetBSD__
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(),
		"memory_accounting");
	if (unlikely(ret != 0)) {
		kobject_put(&glob->kobj);
		return ret;
	}
#endif

	si_meminfo(&si);

	/* Default to 0 to keep the original OOM behavior (no lower limit). */
	glob->lower_mem_limit = 0;

	ret = ttm_mem_init_kernel_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#ifdef CONFIG_HIGHMEM
	ret = ttm_mem_init_highmem_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#else
	ret = ttm_mem_init_dma32_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#endif
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
			zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}
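
#if 0	/* illustrative only, not compiled */
/*
 * A minimal sketch of the expected init/teardown pairing.  The caller
 * shown here is hypothetical; in-tree users normally reach this code
 * through the TTM device/BO glue rather than calling it directly.
 */
static int example_driver_setup(void)
{
	int ret;

	ret = ttm_mem_global_init(&ttm_mem_glob);
	if (ret)
		return ret;
	/* ... create the ttm_bo_device, allocate buffers, run ... */
	ttm_mem_global_release(&ttm_mem_glob);
	return 0;
}
#endif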

void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	struct ttm_mem_zone *zone;
	unsigned int i;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
#ifdef __NetBSD__
		kfree(zone);
#else
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
#endif
	}
#ifndef __NetBSD__
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
#endif
	memset(glob, 0, sizeof(*glob));
}

static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}
	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		(void)queue_work(glob->swap_queue, &glob->work);
}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

/*
 * Check whether available memory would drop below the lower memory limit.
 *
 * a. If there is no swap space at all, or free swap space is below
 * swap_mem_limit but available system memory is above sys_mem_limit,
 * allow the TTM allocation.
 *
 * b. If available system memory is below sys_mem_limit but free swap
 * space is above swap_mem_limit, allow the TTM allocation.
 */
bool
ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
			uint64_t num_pages,
			struct ttm_operation_ctx *ctx)
{
	int64_t available;

	if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC)
		return false;

	available = get_nr_swap_pages() + si_mem_available();
	available -= num_pages;
	if (available < glob->lower_mem_limit)
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_check_under_lowerlimit);
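
/*
 * Worked example with assumed numbers (not from the source): with
 * lower_mem_limit set to 16384 pages (64 MiB at 4 KiB pages), 16384 free
 * swap pages, 8192 available system pages, and a request for 4096 pages:
 * available = 16384 + 8192 - 4096 = 20480 pages, which is above the
 * 16384-page limit, so the function returns false and the allocation
 * is allowed to proceed.
 */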

static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (capable(CAP_SYS_ADMIN)) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}

static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     struct ttm_operation_ctx *ctx)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob,
					       single_zone,
					       memory, true)
			!= 0)) {
		if (ctx->no_wait_gpu)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
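		/*
		 * Ask the shrinker for somewhat more than the shortfall:
		 * the requested amount plus 25% slack plus 16 bytes, so
		 * that a burst of concurrent allocators does not run
		 * straight back into the limit.  (The constants appear
		 * to be heuristic.)
		 */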
		ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
	}

	return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 struct ttm_operation_ctx *ctx)
{
	/*
	 * Normal allocations of kernel memory are registered in
	 * the kernel zone.
	 */
	return ttm_mem_global_alloc_zone(glob, glob->zone_kernel, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
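
#if 0	/* illustrative only, not compiled */
/*
 * A minimal usage sketch: a hypothetical driver charging a kzalloc'd
 * object against the global accounting and releasing the charge on the
 * error path.  ttm_mem_glob, ttm_round_pot() and the ctx fields are
 * real; the surrounding function and obj_size are made up.
 */
static int example_account_object(size_t obj_size)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
	};
	uint64_t acc_size = ttm_round_pot(obj_size);
	void *obj;
	int ret;

	ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
	if (ret)
		return ret;

	obj = kzalloc(obj_size, GFP_KERNEL);
	if (!obj) {
		/* Every successful alloc must be paired with a free. */
		ttm_mem_global_free(&ttm_mem_glob, acc_size);
		return -ENOMEM;
	}
	/* ... use obj; at teardown release both obj and the charge ... */
	kfree(obj);
	ttm_mem_global_free(&ttm_mem_glob, acc_size);
	return 0;
}
#endif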

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page, uint64_t size,
			      struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_zone *zone = NULL;

	/*
	 * Page allocations may be registered in a single zone only:
	 * in the highmem zone if the page is highmem, or in the kernel
	 * zone alone if the page lies above the dma32 range.
	 */
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
			      uint64_t size)
{
	struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	ttm_mem_global_free_zone(glob, zone, size);
}

size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
}
EXPORT_SYMBOL(ttm_round_pot);
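
/*
 * Worked examples (assuming PAGE_SIZE == 4096): ttm_round_pot(4096)
 * returns 4096 unchanged (already a power of two); ttm_round_pot(100)
 * rounds up through 4, 8, ... to 128; ttm_round_pot(5000) exceeds
 * PAGE_SIZE and is page-aligned to 8192.
 */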

uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
{
	return glob->zone_kernel->max_mem;
}
EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);