/*	$NetBSD: ttm_memory.c,v 1.1.1.2 2018/08/27 01:34:59 riastradh Exp $	*/

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ttm_memory.c,v 1.1.1.2 2018/08/27 01:34:59 riastradh Exp $");

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#define TTM_MEMORY_ALLOC_RETRIES 4

struct ttm_mem_zone {
	struct kobject kobj;
	struct ttm_mem_global *glob;
	const char *name;
	uint64_t zone_mem;	/* total memory covered by this zone */
	uint64_t emer_mem;	/* emergency limit (CAP_SYS_ADMIN only) */
	uint64_t max_mem;	/* normal allocation limit */
	uint64_t swap_limit;	/* usage above this triggers swapping */
	uint64_t used_mem;	/* memory currently accounted to the zone */
};

static struct attribute ttm_mem_sys = {
	.name = "zone_memory",
	.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
	.name = "emergency_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
	.name = "available_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
	.name = "swap_limit",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
	.name = "used_memory",
	.mode = S_IRUGO
};

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);

	pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
		zone->name, (unsigned long long)zone->used_mem >> 10);
	kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	uint64_t val = 0;

	spin_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	spin_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

static ssize_t ttm_mem_zone_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars != 1)
		return size;

	val64 = val;
	val64 <<= 10;

	spin_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	spin_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}

static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
};
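
/*
 * Each zone's attributes appear as files in a per-zone sysfs directory
 * created under the global "memory_accounting" kobject (see
 * ttm_mem_global_init() below).  Values are reported and accepted in
 * kiB.  A sketch of typical usage, assuming the kobject ends up under
 * the ttm module directory (the exact path depends on where
 * ttm_get_kobj() parents it):
 *
 *	# cat .../memory_accounting/kernel/used_memory
 *	# echo 524288 > .../memory_accounting/kernel/swap_limit
 *
 * Lowering swap_limit below used_memory immediately queues the swapout
 * worker via ttm_check_swapping().
 */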

static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	kfree(glob);
}

static struct kobj_type ttm_mem_glob_kobj_type = {
	.release = &ttm_mem_global_kobj_release,
};

static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (capable(CAP_SYS_ADMIN))
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}
	return false;
}

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */
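
/*
 * A sketch of how the single callback slot is used, assuming the
 * ttm_mem_init_shrink() and ttm_mem_register_shrink() inlines from the
 * matching ttm_memory.h, with a hypothetical my_shrink() that frees or
 * swaps out one buffer per call and returns nonzero when nothing more
 * can be freed:
 *
 *	static struct ttm_mem_shrink my_shrink_obj;
 *
 *	static int my_shrink(struct ttm_mem_shrink *shrink)
 *	{
 *		return my_swap_out_one_buffer();
 *	}
 *
 *	ttm_mem_init_shrink(&my_shrink_obj, my_shrink);
 *	ret = ttm_mem_register_shrink(glob, &my_shrink_obj);
 *
 * ttm_shrink() drops glob->lock around do_shrink(), so the callback may
 * sleep; it must also tolerate concurrent callers.
 */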

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra)
{
	int ret;
	struct ttm_mem_shrink *shrink;

	spin_lock(&glob->lock);
	if (glob->shrink == NULL)
		goto out;

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		shrink = glob->shrink;
		spin_unlock(&glob->lock);
		ret = shrink->do_shrink(shrink);
		spin_lock(&glob->lock);
		if (unlikely(ret != 0))
			goto out;
	}
out:
	spin_unlock(&glob->lock);
}

static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_mem_global *glob =
	    container_of(work, struct ttm_mem_global, work);

	ttm_shrink(glob, true, 0ULL);
}

static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram - si->totalhigh;
	mem *= si->mem_unit;

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
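
/*
 * Worked example of the limits above: on a machine with 1 GiB of
 * lowmem, zone_mem = 1 GiB, max_mem = 512 MiB (mem >> 1), emer_mem =
 * 768 MiB ((mem >> 1) + (mem >> 2)), and swap_limit = 384 MiB
 * (max_mem - (mem >> 3)).  So ordinary tasks can account up to half
 * of lowmem, CAP_SYS_ADMIN tasks up to three quarters, and the
 * swapout worker kicks in once usage passes three eighths.
 */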

#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
				     const struct sysinfo *si)
{
	struct ttm_mem_zone *zone;
	uint64_t mem;
	int ret;

	if (si->totalhigh == 0)
		return 0;

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	zone->name = "highmem";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_highmem = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	/*
	 * No special dma32 zone needed.
	 */
	if (mem <= ((uint64_t) 1ULL << 32)) {
		kfree(zone);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */
	mem = ((uint64_t) 1ULL << 32);
	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s",
		zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#endif

int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	struct sysinfo si;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(),
		"memory_accounting");
	if (unlikely(ret != 0)) {
		kobject_put(&glob->kobj);
		return ret;
	}

	si_meminfo(&si);

	ret = ttm_mem_init_kernel_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#ifdef CONFIG_HIGHMEM
	ret = ttm_mem_init_highmem_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#else
	ret = ttm_mem_init_dma32_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#endif
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
			zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);

void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
	}
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);
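
/*
 * A sketch of the intended lifecycle, with a hypothetical driver-held
 * ttm_mem_global instance and error handling abbreviated:
 *
 *	struct ttm_mem_global *glob = kzalloc(sizeof(*glob), GFP_KERNEL);
 *
 *	ret = ttm_mem_global_init(glob);	(at driver load)
 *	...
 *	ttm_mem_global_release(glob);		(at driver unload)
 *
 * Note that glob itself is freed from ttm_mem_global_kobj_release()
 * when ttm_mem_global_release() drops the final kobject reference, so
 * the caller must not kfree() it.
 */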

static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}
	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		(void)queue_work(glob->swap_queue, &glob->work);
}

static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (capable(CAP_SYS_ADMIN)) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}

static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     bool no_wait, bool interruptible)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob,
					       single_zone,
					       memory, true)
			!= 0)) {
		if (no_wait)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		/*
		 * Ask the shrinker for a bit more than the request
		 * (an extra 25% plus a small constant) so the retry
		 * has some headroom.
		 */
		ttm_shrink(glob, false, memory + (memory >> 2) + 16);
	}

	return 0;
}

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 bool no_wait, bool interruptible)
{
	/*
	 * Normal allocations of kernel memory are registered in
	 * all zones.
	 */
	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
					 interruptible);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
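
/*
 * A sketch of typical driver usage of the accounting interface, with a
 * hypothetical object size; every successful ttm_mem_global_alloc()
 * must be balanced by a ttm_mem_global_free() of the same amount:
 *
 *	uint64_t acc_size = ttm_round_pot(sizeof(struct my_object));
 *
 *	ret = ttm_mem_global_alloc(glob, acc_size, false, true);
 *	if (ret)
 *		return ret;
 *	...
 *	ttm_mem_global_free(glob, acc_size);
 */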

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page,
			      bool no_wait, bool interruptible)
{
	struct ttm_mem_zone *zone = NULL;

	/*
	 * Page allocations may be registered in a single zone only:
	 * highmem pages in the highmem zone, and pages above the
	 * dma32 boundary in the kernel zone.
	 */
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
					 interruptible);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
	struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}

size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
}
EXPORT_SYMBOL(ttm_round_pot);
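
/*
 * Example behaviour of ttm_round_pot(), assuming PAGE_SIZE is 4096:
 * ttm_round_pot(96) returns 128 (next power of two),
 * ttm_round_pot(4096) returns 4096 unchanged (already a power of two),
 * and ttm_round_pot(5000) returns 8192 (rounded up to a whole page via
 * PAGE_ALIGN).
 */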
    608