ttm_memory.c revision 1.1.1.1
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>

#define TTM_MEMORY_ALLOC_RETRIES 4

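/**
 * struct ttm_mem_zone - accounting state for one class of system memory.
 *
 * @kobj:       Sysfs object, exported below "memory_accounting".
 * @glob:       Backpointer to the global accounting state.
 * @zone_mem:   Total memory in this zone, in bytes.
 * @emer_mem:   Emergency limit; only CAP_SYS_ADMIN callers may account
 *              memory beyond max_mem, up to this value.
 * @max_mem:    Allocation limit for ordinary callers.
 * @swap_limit: Used memory above this threshold queues the swap worker.
 * @used_mem:   Memory currently accounted against this zone, in bytes.
 *
 * The limits and counters are protected by the global spinlock.
 */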
struct ttm_mem_zone {
	struct kobject kobj;
	struct ttm_mem_global *glob;
	const char *name;
	uint64_t zone_mem;
	uint64_t emer_mem;
	uint64_t max_mem;
	uint64_t swap_limit;
	uint64_t used_mem;
};

static struct attribute ttm_mem_sys = {
	.name = "zone_memory",
	.mode = S_IRUGO
};
static struct attribute ttm_mem_emer = {
	.name = "emergency_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_max = {
	.name = "available_memory",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_swap = {
	.name = "swap_limit",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_mem_used = {
	.name = "used_memory",
	.mode = S_IRUGO
};

static void ttm_mem_zone_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);

	pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
		zone->name, (unsigned long long)zone->used_mem >> 10);
	kfree(zone);
}

static ssize_t ttm_mem_zone_show(struct kobject *kobj,
				 struct attribute *attr,
				 char *buffer)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	uint64_t val = 0;

	spin_lock(&zone->glob->lock);
	if (attr == &ttm_mem_sys)
		val = zone->zone_mem;
	else if (attr == &ttm_mem_emer)
		val = zone->emer_mem;
	else if (attr == &ttm_mem_max)
		val = zone->max_mem;
	else if (attr == &ttm_mem_swap)
		val = zone->swap_limit;
	else if (attr == &ttm_mem_used)
		val = zone->used_mem;
	spin_unlock(&zone->glob->lock);

	return snprintf(buffer, PAGE_SIZE, "%llu\n",
			(unsigned long long) val >> 10);
}

static void ttm_check_swapping(struct ttm_mem_global *glob);

static ssize_t ttm_mem_zone_store(struct kobject *kobj,
				  struct attribute *attr,
				  const char *buffer,
				  size_t size)
{
	struct ttm_mem_zone *zone =
		container_of(kobj, struct ttm_mem_zone, kobj);
	int chars;
	unsigned long val;
	uint64_t val64;

	chars = sscanf(buffer, "%lu", &val);
	if (chars == 0)
		return size;

	val64 = val;
	val64 <<= 10;

	spin_lock(&zone->glob->lock);
	if (val64 > zone->zone_mem)
		val64 = zone->zone_mem;
	if (attr == &ttm_mem_emer) {
		zone->emer_mem = val64;
		if (zone->max_mem > val64)
			zone->max_mem = val64;
	} else if (attr == &ttm_mem_max) {
		zone->max_mem = val64;
		if (zone->emer_mem < val64)
			zone->emer_mem = val64;
	} else if (attr == &ttm_mem_swap)
		zone->swap_limit = val64;
	spin_unlock(&zone->glob->lock);

	ttm_check_swapping(zone->glob);

	return size;
}
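
/*
 * The attributes above are exported in kiB below the "memory_accounting"
 * kobject set up in ttm_mem_global_init(). A usage sketch; the leading
 * part of the sysfs path depends on where ttm_get_kobj() parents the
 * kobject, so the paths here are placeholders:
 *
 *	$ cat /sys/.../memory_accounting/kernel/used_memory
 *	$ echo 1048576 > /sys/.../memory_accounting/kernel/swap_limit
 *
 * Written values are parsed as kiB and clamped to the zone size before
 * they take effect; lowering a limit may immediately queue the swap
 * worker via ttm_check_swapping().
 */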

static struct attribute *ttm_mem_zone_attrs[] = {
	&ttm_mem_sys,
	&ttm_mem_emer,
	&ttm_mem_max,
	&ttm_mem_swap,
	&ttm_mem_used,
	NULL
};

static const struct sysfs_ops ttm_mem_zone_ops = {
	.show = &ttm_mem_zone_show,
	.store = &ttm_mem_zone_store
};

static struct kobj_type ttm_mem_zone_kobj_type = {
	.release = &ttm_mem_zone_kobj_release,
	.sysfs_ops = &ttm_mem_zone_ops,
	.default_attrs = ttm_mem_zone_attrs,
};

static void ttm_mem_global_kobj_release(struct kobject *kobj)
{
	struct ttm_mem_global *glob =
		container_of(kobj, struct ttm_mem_global, kobj);

	kfree(glob);
}

static struct kobj_type ttm_mem_glob_kobj_type = {
	.release = &ttm_mem_global_kobj_release,
};

static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
					bool from_wq, uint64_t extra)
{
	unsigned int i;
	struct ttm_mem_zone *zone;
	uint64_t target;

	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];

		if (from_wq)
			target = zone->swap_limit;
		else if (capable(CAP_SYS_ADMIN))
			target = zone->emer_mem;
		else
			target = zone->max_mem;

		/* If the requested extra alone exceeds the target,
		 * treat the zone as over target.
		 */
		target = (extra > target) ? 0ULL : target;

		if (zone->used_mem > target)
			return true;
	}
	return false;
}

/**
 * At this point we only support a single shrink callback.
 * Extend this if needed, perhaps using a linked list of callbacks.
 * Note that this function is reentrant:
 * many threads may try to swap out at any given time.
 */

static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
		       uint64_t extra)
{
	int ret;
	struct ttm_mem_shrink *shrink;

	spin_lock(&glob->lock);
	if (glob->shrink == NULL)
		goto out;

	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
		shrink = glob->shrink;
		spin_unlock(&glob->lock);
		ret = shrink->do_shrink(shrink);
		spin_lock(&glob->lock);
		if (unlikely(ret != 0))
			goto out;
	}
out:
	spin_unlock(&glob->lock);
}
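
/*
 * Registration sketch for the single shrink callback, assuming the
 * ttm_mem_init_shrink()/ttm_mem_register_shrink() helpers declared in
 * ttm_memory.h (in-tree, ttm_bo.c hooks up its swapout handler this
 * way). "my_shrink" and "my_do_shrink" are made-up names. Note that
 * ttm_shrink() drops the global lock around do_shrink(), so the
 * callback is allowed to sleep:
 *
 *	static int my_do_shrink(struct ttm_mem_shrink *shrink)
 *	{
 *		// Swap out or evict one object; return 0 on progress,
 *		// nonzero to stop the loop in ttm_shrink().
 *		return 0;
 *	}
 *
 *	static struct ttm_mem_shrink my_shrink;
 *
 *	ttm_mem_init_shrink(&my_shrink, my_do_shrink);
 *	ttm_mem_register_shrink(glob, &my_shrink);
 */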

static void ttm_shrink_work(struct work_struct *work)
{
	struct ttm_mem_global *glob =
	    container_of(work, struct ttm_mem_global, work);

	ttm_shrink(glob, true, 0ULL);
}

static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
				    const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram - si->totalhigh;
	mem *= si->mem_unit;

	zone->name = "kernel";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_kernel = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
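
/*
 * Worked example of the default limits above: with 8 GiB of lowmem,
 * max_mem = 4 GiB (mem >> 1), emer_mem = 6 GiB ((mem >> 1) + (mem >> 2))
 * and swap_limit = 4 GiB - 1 GiB = 3 GiB (max_mem - (mem >> 3)).
 */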

#ifdef CONFIG_HIGHMEM
static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
				     const struct sysinfo *si)
{
	struct ttm_mem_zone *zone;
	uint64_t mem;
	int ret;

	if (si->totalhigh == 0)
		return 0;

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	zone->name = "highmem";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_highmem = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#else
static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
				   const struct sysinfo *si)
{
	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	uint64_t mem;
	int ret;

	if (unlikely(!zone))
		return -ENOMEM;

	mem = si->totalram;
	mem *= si->mem_unit;

	/* No special dma32 zone needed. */
	if (mem <= ((uint64_t) 1ULL << 32)) {
		kfree(zone);
		return 0;
	}

	/*
	 * Limit max dma32 memory to 4GB for now
	 * until we can figure out how big this
	 * zone really is.
	 */
	mem = ((uint64_t) 1ULL << 32);
	zone->name = "dma32";
	zone->zone_mem = mem;
	zone->max_mem = mem >> 1;
	zone->emer_mem = (mem >> 1) + (mem >> 2);
	zone->swap_limit = zone->max_mem - (mem >> 3);
	zone->used_mem = 0;
	zone->glob = glob;
	glob->zone_dma32 = zone;
	ret = kobject_init_and_add(
		&zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
	if (unlikely(ret != 0)) {
		kobject_put(&zone->kobj);
		return ret;
	}
	glob->zones[glob->num_zones++] = zone;
	return 0;
}
#endif

int ttm_mem_global_init(struct ttm_mem_global *glob)
{
	struct sysinfo si;
	int ret;
	int i;
	struct ttm_mem_zone *zone;

	spin_lock_init(&glob->lock);
	glob->swap_queue = create_singlethread_workqueue("ttm_swap");
	INIT_WORK(&glob->work, ttm_shrink_work);
	ret = kobject_init_and_add(
		&glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
	if (unlikely(ret != 0)) {
		kobject_put(&glob->kobj);
		return ret;
	}

	si_meminfo(&si);

	ret = ttm_mem_init_kernel_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#ifdef CONFIG_HIGHMEM
	ret = ttm_mem_init_highmem_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#else
	ret = ttm_mem_init_dma32_zone(glob, &si);
	if (unlikely(ret != 0))
		goto out_no_zone;
#endif
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
			zone->name, (unsigned long long)zone->max_mem >> 10);
	}
	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
	return 0;
out_no_zone:
	ttm_mem_global_release(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_mem_global_init);
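
/*
 * Init/teardown sketch. In-tree, drivers normally reach these through
 * the drm_global reference machinery rather than calling them directly,
 * so the direct calls below are a simplified assumption:
 *
 *	struct ttm_mem_global *glob = kzalloc(sizeof(*glob), GFP_KERNEL);
 *
 *	ret = ttm_mem_global_init(glob);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	...
 *	ttm_mem_global_release(glob);	// also frees glob via its kobject
 */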

void ttm_mem_global_release(struct ttm_mem_global *glob)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	/* let the page allocator first stop the shrink work. */
	ttm_page_alloc_fini();
	ttm_dma_page_alloc_fini();

	flush_workqueue(glob->swap_queue);
	destroy_workqueue(glob->swap_queue);
	glob->swap_queue = NULL;
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		kobject_del(&zone->kobj);
		kobject_put(&zone->kobj);
	}
	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_mem_global_release);

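/**
 * ttm_check_swapping - queue the swap worker if any zone has exceeded
 * its swap limit.
 *
 * Only samples the counters under the global spinlock; the actual
 * shrinking runs asynchronously from ttm_shrink_work().
 */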
static void ttm_check_swapping(struct ttm_mem_global *glob)
{
	bool needs_swapping = false;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (zone->used_mem > zone->swap_limit) {
			needs_swapping = true;
			break;
		}
	}

	spin_unlock(&glob->lock);

	if (unlikely(needs_swapping))
		(void)queue_work(glob->swap_queue, &glob->work);
}

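/**
 * ttm_mem_global_free_zone - credit back @amount bytes of accounted
 * memory.
 *
 * If @single_zone is non-NULL, only that zone is credited; otherwise
 * the amount is subtracted from every zone, mirroring how
 * ttm_mem_global_reserve() charged it.
 */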
static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t amount)
{
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;
		zone->used_mem -= amount;
	}
	spin_unlock(&glob->lock);
}

void ttm_mem_global_free(struct ttm_mem_global *glob,
			 uint64_t amount)
{
	return ttm_mem_global_free_zone(glob, NULL, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);

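/**
 * ttm_mem_global_reserve - try to account @amount bytes of memory.
 *
 * Fails with -ENOMEM if any relevant zone is already above its limit:
 * the emergency limit for CAP_SYS_ADMIN callers, the normal limit
 * otherwise. The check and the charge happen under a single hold of
 * the global spinlock, so the reservation is all-or-nothing across
 * zones.
 */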
static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
				  struct ttm_mem_zone *single_zone,
				  uint64_t amount, bool reserve)
{
	uint64_t limit;
	int ret = -ENOMEM;
	unsigned int i;
	struct ttm_mem_zone *zone;

	spin_lock(&glob->lock);
	for (i = 0; i < glob->num_zones; ++i) {
		zone = glob->zones[i];
		if (single_zone && zone != single_zone)
			continue;

		limit = (capable(CAP_SYS_ADMIN)) ?
			zone->emer_mem : zone->max_mem;

		if (zone->used_mem > limit)
			goto out_unlock;
	}

	if (reserve) {
		for (i = 0; i < glob->num_zones; ++i) {
			zone = glob->zones[i];
			if (single_zone && zone != single_zone)
				continue;
			zone->used_mem += amount;
		}
	}

	ret = 0;
out_unlock:
	spin_unlock(&glob->lock);
	ttm_check_swapping(glob);

	return ret;
}

static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
				     struct ttm_mem_zone *single_zone,
				     uint64_t memory,
				     bool no_wait, bool interruptible)
{
	int count = TTM_MEMORY_ALLOC_RETRIES;

	while (unlikely(ttm_mem_global_reserve(glob, single_zone,
					       memory, true) != 0)) {
		if (no_wait)
			return -ENOMEM;
		if (unlikely(count-- == 0))
			return -ENOMEM;
		ttm_shrink(glob, false, memory + (memory >> 2) + 16);
	}

	return 0;
}
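
/*
 * Worked example of the shrink target above: a 4096-byte request asks
 * the shrinker for 4096 + 1024 + 16 = 5136 bytes of headroom, i.e. the
 * request plus 25% plus a small constant, so a successful shrink pass
 * leaves room for the retry.
 */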

int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
			 bool no_wait, bool interruptible)
{
	/*
	 * Normal allocations of kernel memory are registered in
	 * all zones.
	 */
	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
					 interruptible);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
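
/*
 * Usage sketch: charge an object against the accounting before
 * allocating it, and credit it on the free path. "struct my_object" is
 * a made-up example; ttm_round_pot() approximates what a power-of-two
 * slab bucket would really consume:
 *
 *	uint64_t acc_size = ttm_round_pot(sizeof(struct my_object));
 *
 *	ret = ttm_mem_global_alloc(glob, acc_size, false, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	...
 *	ttm_mem_global_free(glob, acc_size);
 */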

int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
			      struct page *page,
			      bool no_wait, bool interruptible)
{
	struct ttm_mem_zone *zone = NULL;

	/*
	 * Page allocations may be registered in a single zone
	 * only if highmem or !dma32.
	 */
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	/* pfn 0x00100000 is the 4 GiB boundary with 4 KiB pages; pages
	 * above it cannot come from the dma32 zone, so account them in
	 * the kernel zone only.
	 */
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	return ttm_mem_global_alloc_zone(glob, zone, PAGE_SIZE, no_wait,
					 interruptible);
}

void ttm_mem_global_free_page(struct ttm_mem_global *glob, struct page *page)
{
	struct ttm_mem_zone *zone = NULL;

#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page) && glob->zone_highmem != NULL)
		zone = glob->zone_highmem;
#else
	/* Mirror the zone choice made in ttm_mem_global_alloc_page(). */
	if (glob->zone_dma32 && page_to_pfn(page) > 0x00100000UL)
		zone = glob->zone_kernel;
#endif
	ttm_mem_global_free_zone(glob, zone, PAGE_SIZE);
}

size_t ttm_round_pot(size_t size)
{
	if ((size & (size - 1)) == 0)
		return size;
	else if (size > PAGE_SIZE)
		return PAGE_ALIGN(size);
	else {
		size_t tmp_size = 4;

		while (tmp_size < size)
			tmp_size <<= 1;

		return tmp_size;
	}
}
EXPORT_SYMBOL(ttm_round_pot);
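
/*
 * Examples: ttm_round_pot(100) == 128 and ttm_round_pot(4096) == 4096,
 * while with 4 KiB pages ttm_round_pot(9000) == 12288, since sizes
 * above PAGE_SIZE are rounded up to whole pages rather than to the
 * next power of two.
 */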
    602