/*	$NetBSD: ttm_memory.c,v 1.7 2021/12/18 23:45:44 riastradh Exp $	*/

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_memory.c,v 1.7 2021/12/18 23:45:44 riastradh Exp $");

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/drmP.h>
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/swap.h>

#define TTM_MEMORY_ALLOC_RETRIES 4

struct ttm_mem_global ttm_mem_glob;
EXPORT_SYMBOL(ttm_mem_glob);

struct ttm_mem_zone {
#ifndef __NetBSD__
	struct kobject kobj;
#endif
	struct ttm_mem_global *glob;	/* back pointer to the global state */
	const char *name;		/* "kernel", "highmem" or "dma32" */
	uint64_t zone_mem;		/* total memory covered by this zone */
	uint64_t emer_mem;		/* hard limit, CAP_SYS_ADMIN callers only */
	uint64_t max_mem;		/* soft limit for ordinary callers */
	uint64_t swap_limit;		/* usage that triggers the swapout worker */
	uint64_t used_mem;		/* memory currently accounted to this zone */
};

#ifndef __NetBSD__
static struct attribute ttm_mem_sys = {
	.name = "zone_memory",
	.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
	.name = "emergency_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
	.name = "available_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
	.name = "swap_limit",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
	.name = "used_memory",
	.mode = S_IRUGO
};

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);

	pr_info("Zone %7s: Used memory at exit: %llu KiB\n",
		zone->name, (unsigned long long)zone->used_mem >> 10);
	kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	uint64_t val = 0;

	spin_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	spin_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

static ssize_t ttm_mem_zone_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	val64 <<= 10;

	spin_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	spin_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}

static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
};

static struct attribute ttm_mem_global_lower_mem_limit = {
	.name = "lower_mem_limit",
	.mode = S_IRUGO | S_IWUSR
};

static ssize_t ttm_mem_global_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);
	uint64_t val = 0;

	spin_lock(&glob->lock);
	val = glob->lower_mem_limit;
	spin_unlock(&glob->lock);
	/* convert from number of pages to KB */
	val <<= (PAGE_SHIFT - 10);
	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val);
}

static ssize_t ttm_mem_global_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	int chars;
	uint64_t val64;
	unsigned long val;
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	/* convert from KB to number of pages */
	val64 >>= (PAGE_SHIFT - 10);

	spin_lock(&glob->lock);
	glob->lower_mem_limit = val64;
	spin_unlock(&glob->lock);

	return size;
}

static struct attribute *ttm_mem_global_attrs[] = {
	&ttm_mem_global_lower_mem_limit,
	NULL
};

static const struct sysfs_ops ttm_mem_global_ops = {
	.show = &ttm_mem_global_show,
	.store = &ttm_mem_global_store,
};

static struct kobj_type ttm_mem_glob_kobj_type = {
	.sysfs_ops = &ttm_mem_global_ops,
	.default_attrs = ttm_mem_global_attrs,
};
#endif

static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (capable(CAP_SYS_ADMIN))
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}
	return false;
}

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
			uint64_t extra, struct ttm_operation_ctx *ctx)
{
	int ret;

	spin_lock(&glob->lock);

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		spin_unlock(&glob->lock);
		ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
		spin_lock(&glob->lock);
		if (unlikely(ret != 0))
			break;
	}

	spin_unlock(&glob->lock);
}

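/*
 * A sketch of the extension suggested above (illustrative only; the
 * structure and function names below are hypothetical, not part of
 * this file): shrink callbacks kept on a list and walked until one of
 * them makes progress.
 */
#if 0
struct ttm_shrink_cb {
	struct list_head link;
	int (*shrink)(struct ttm_operation_ctx *ctx);
};

static int ttm_run_shrink_cbs(struct list_head *cbs,
			      struct ttm_operation_ctx *ctx)
{
	struct ttm_shrink_cb *cb;
	int ret = -EBUSY;

	list_for_each_entry(cb, cbs, link) {
		ret = cb->shrink(ctx);
		if (ret == 0)		/* this callback released memory */
			break;
	}
	return ret;
}
#endif
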
static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_mem_global *glob =
	    container_of(work, struct ttm_mem_global, work);

	ttm_shrink(glob, true, 0ULL, &ctx);
}

static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
#ifndef __NetBSD__
	int ret;
#endif

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram - si->totalhigh;
	mem *= si->mem_unit;

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
#ifndef __NetBSD__
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
#endif
	glob->zones[glob->num_zones++] = zone;
	return 0;
}

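/*
 * Worked example of the defaults above, assuming 8 GiB of kernel
 * memory: max_mem = mem/2 = 4 GiB, emer_mem = 3*mem/4 = 6 GiB and
 * swap_limit = mem/2 - mem/8 = 3 GiB.  Swapout begins at 3 GiB of
 * accounted memory, ordinary callers are refused beyond 4 GiB, and
 * only CAP_SYS_ADMIN callers may reach 6 GiB.
 */
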
#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
				     const struct sysinfo *si)
{
	struct ttm_mem_zone *zone;
	uint64_t mem;
#ifndef __NetBSD__
	int ret;
#endif

	if (si->totalhigh == 0)
		return 0;

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	zone->name = "highmem";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_highmem = zone;
#ifndef __NetBSD__
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
#endif
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
#ifndef __NetBSD__
	int ret;
#endif

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	/**
	 * No special dma32 zone needed.
	 */

	if (mem <= ((uint64_t) 1ULL << 32)) {
		kfree(zone);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */

	mem = ((uint64_t) 1ULL << 32);
	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
#ifndef __NetBSD__
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
#endif
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#endif

int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	struct sysinfo si;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);
#ifndef __NetBSD__
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
	if (unlikely(ret != 0)) {
		kobject_put(&glob->kobj);
		return ret;
	}
#endif

	si_meminfo(&si);

	/* zero by default to preserve the original OOM behavior */
	glob->lower_mem_limit = 0;

	ret = ttm_mem_init_kernel_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#ifdef CONFIG_HIGHMEM
	ret = ttm_mem_init_highmem_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#else
	ret = ttm_mem_init_dma32_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#endif
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
			zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}

void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	struct ttm_mem_zone *zone;
	unsigned int i;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
#ifdef __NetBSD__
		kfree(zone);
#else
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
#endif
	}
#ifdef __NetBSD__
	kfree(glob);
#else
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
#endif
	memset(glob, 0, sizeof(*glob));
}

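/*
 * A minimal setup sketch (hypothetical caller, for illustration):
 * the global accounting object is initialized once and released on
 * teardown.  ttm_mem_glob is the global instance defined above.
 */
#if 0
static int example_accounting_setup(void)
{
	int ret;

	ret = ttm_mem_global_init(&ttm_mem_glob);
	if (unlikely(ret != 0))
		return ret;
	/* ... create and use buffer objects ... */
	ttm_mem_global_release(&ttm_mem_glob);
	return 0;
}
#endif
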
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}

	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		(void)queue_work(glob->swap_queue, &glob->work);
}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

/*
 * Check whether available memory would drop below the lower memory limit.
 *
 * a. if there is no swap space at all, or free swap space is below
 * swap_mem_limit but available system memory is above sys_mem_limit,
 * allow the TTM allocation;
 *
 * b. if available system memory is below sys_mem_limit but free swap
 * space is above swap_mem_limit, allow the TTM allocation.
 */
bool
ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
			uint64_t num_pages,
			struct ttm_operation_ctx *ctx)
{
	int64_t available;

	if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC)
		return false;

	available = get_nr_swap_pages() + si_mem_available();
	available -= num_pages;
	if (available < glob->lower_mem_limit)
		return true;

	return false;
}
EXPORT_SYMBOL(ttm_check_under_lowerlimit);

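/*
 * Worked example of the check above, with illustrative numbers: given
 * 1000 free swap pages, 5000 available system pages and a request for
 * 256 pages, available = 1000 + 5000 - 256 = 5744 pages, so the
 * request is refused only if lower_mem_limit exceeds 5744 pages.
 */
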
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (capable(CAP_SYS_ADMIN)) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}

static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     struct ttm_operation_ctx *ctx)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob,
					       single_zone,
					       memory, true)
			!= 0)) {
		if (ctx->no_wait_gpu)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		/* Shrink with 25% headroom plus a small slack over the request. */
		ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
	}

	return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 struct ttm_operation_ctx *ctx)
{
	/**
	 * Normal allocations of kernel memory are registered in
	 * the kernel zone.
	 */

	return ttm_mem_global_alloc_zone(glob, glob->zone_kernel, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);

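/*
 * A minimal usage sketch (hypothetical caller, for illustration):
 * account a kernel-memory allocation before performing it, and undo
 * the accounting with ttm_mem_global_free() if the allocation it
 * covers later fails or is released.
 */
#if 0
static int example_account_alloc(struct ttm_mem_global *glob, size_t size)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	uint64_t acc_size = ttm_round_pot(size);
	int ret;

	ret = ttm_mem_global_alloc(glob, acc_size, &ctx);
	if (unlikely(ret != 0))
		return ret;	/* over limit and shrinking did not help */
	/* ... perform the real allocation; on failure call: */
	/* ttm_mem_global_free(glob, acc_size); */
	return 0;
}
#endif
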
int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page, uint64_t size,
			      struct ttm_operation_ctx *ctx)
{
	struct ttm_mem_zone *zone = NULL;

	/**
	 * Page allocations may be registered in a single zone
	 * only if highmem or !dma32.
	 */

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	return ttm_mem_global_alloc_zone(glob, zone, size, ctx);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page,
			      uint64_t size)
{
	struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	ttm_mem_global_free_zone(glob, zone, size);
}

size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
}
EXPORT_SYMBOL(ttm_round_pot);

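/*
 * Examples of the rounding above: ttm_round_pot(3) == 4,
 * ttm_round_pot(4096) == 4096 (already a power of two), and sizes
 * above PAGE_SIZE are rounded up to a whole number of pages by
 * PAGE_ALIGN() rather than to the next power of two.
 */
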
uint64_t ttm_get_kernel_zone_memory_size(struct ttm_mem_global *glob)
{
	return glob->zone_kernel->max_mem;
}
EXPORT_SYMBOL(ttm_get_kernel_zone_memory_size);