/*	ttm_page_alloc.c	revision 1.1.1.1.8.2	*/
/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
/* Simple list-based uncached page pool.
 * - Pool collects recently freed pages for reuse.
 * - Uses page->lru to keep a free list.
 * - Doesn't track pages that are currently in use.
 */
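
/*
 * A minimal sketch (illustration only, not driver code) of the free-list
 * technique described above: struct page already embeds a list_head named
 * lru, so free pages can be threaded into a pool without any extra
 * bookkeeping allocations.
 *
 *	LIST_HEAD(free_list);
 *	list_add_tail(&page->lru, &free_list);		 /- stash a freed page
 *	...
 *	page = list_first_entry(&free_list, struct page, lru);
 *	list_del(&page->lru);				 /- pull it back out
 */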

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <linux/atomic.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC		(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION		16
#define FREE_ALL_PAGES			(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL		1000
/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 * @name: Pool name, shown in the debugfs output.
 * @nfrees: Statistics counter of pages freed from this pool.
 * @nrefills: Statistics counter of pool refills.
 */
struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
	gfp_t			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. Changes won't have an immediate
 * effect anyway, so forcing serialized access to them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @kobj: sysfs kobject exposing the ttm_pool_opts limits.
 * @mm_shrink: Shrinker registered with the mm so that the pools can be
 * trimmed under memory pressure.
 * @options: Tunable pool limits, see struct ttm_pool_opts.
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
		};
	};
};

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}
static ssize_t ttm_pool_store(struct kobject *kobj,
		struct attribute *attr, const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kB to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}
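
/*
 * Worked example of the kB-to-pages conversion above, assuming a 4 KiB
 * PAGE_SIZE: PAGE_SIZE >> 10 == 4, so writing "2048" (kB) to one of the
 * sysfs files stores 2048 / 4 == 512 pages, and ttm_pool_show() below
 * multiplies by 4 again and reports 2048.
 */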

static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif

/* Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags,
		enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}
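
/*
 * For reference, the index computed above selects from _manager->pools[]
 * as laid out by the anonymous struct in ttm_pool_manager:
 *
 *	0x0 wc_pool		0x1 uc_pool
 *	0x2 wc_pool_dma32	0x3 uc_pool_dma32
 *
 * e.g. ttm_get_pool(TTM_PAGE_FLAG_DMA32, tt_uncached) yields
 * &_manager->pools[0x1 | 0x2], i.e. &_manager->uc_pool_dma32.
 */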

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;
	if (set_pages_array_wb(pages, npages))
		pr_err("Failed to set %d pages to wb!\n", npages);
	for (i = 0; i < npages; ++i)
		__free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: If set to FREE_ALL_PAGES, free all pages in the pool.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
			GFP_KERNEL);
	if (!pages_to_free) {
		pr_err("Failed to allocate memory for pool free operation\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}
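
/*
 * For reference, callers either cap the count or drain the pool entirely;
 * both patterns appear later in this file:
 *
 *	shrink_pages = ttm_page_pool_free(pool, nr_free);	bounded trim
 *	ttm_page_pool_free(pool, FREE_ALL_PAGES);		full drain
 *
 * The return value is the number of requested pages that were not freed.
 */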

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
	unsigned i;
	int total = 0;
	for (i = 0; i < NUM_POOLS; ++i)
		total += _manager->pools[i].npages;

	return total;
}

/**
 * Callback for mm to request pool to reduce number of pages held.
 */
static int ttm_pool_mm_shrink(struct shrinker *shrink,
			      struct shrink_control *sc)
{
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned i;
	unsigned pool_offset = atomic_add_return(1, &start_pool);
	struct ttm_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;

	pool_offset = pool_offset % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free);
	}
	/* return estimated number of unused pages in pool */
	return ttm_pool_get_num_unused_pages();
}
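
/*
 * Example of the round robin above: if the atomic counter yields
 * pool_offset == 2, the pools are visited in the order 2, 3, 0, 1, and the
 * next shrink call starts one pool later, so no single pool absorbs all of
 * the shrink pressure.
 */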

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change caching state. Pages that did change
 * their caching state successfully stay on the @pages list and can be put
 * back into the pool by the caller.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if caller updates count depending on number of
 * pages returned in pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err("Unable to allocate table for new pages\n");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_page(gfp_flags);

		if (!p) {
			pr_err("Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		list_add(&p->lru, pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
		unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If pool doesn't have enough pages for the allocation new pages are
	 * allocated from outside of pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
				cstate, alloc_size);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			pr_err("Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool.
			 * Count the pages that were successfully allocated
			 * before the failure; walking pool->list here would
			 * double-count pages already in the pool. */
			list_for_each_entry(p, &new_pages, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}

/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
					struct list_head *pages,
					int ttm_flags,
					enum ttm_caching_state cstate,
					unsigned count)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Find the last page to include for the requested number of pages.
	 * Walk the list from whichever end is nearer to halve the search
	 * space. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return count;
}
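
/*
 * Worked example of the split search above: with pool->npages == 100 and
 * count == 30, 30 <= 50 so the forward walk stops at the 30th entry after
 * 30 steps; with count == 70 the backward walk starts i at 101 and stops
 * when --i == 70, i.e. after 31 steps from the tail instead of 70 from
 * the head.
 */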

/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	unsigned long irq_flags;
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				if (page_count(pages[i]) != 1)
					pr_err("Erroneous page count. Leaking pages.\n");
				__free_page(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			list_add_tail(&pages[i]->lru, &pool->list);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages);
}

/*
 * On success the pages array will hold npages correctly cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct list_head plist;
	struct page *p = NULL;
	gfp_t gfp_flags = GFP_USER;
	unsigned count;
	int r;

	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		for (r = 0; r < npages; ++r) {
			p = alloc_page(gfp_flags);
			if (!p) {

				pr_err("Unable to allocate page\n");
				return -ENOMEM;
			}

			pages[r] = p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	INIT_LIST_HEAD(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	list_for_each_entry(p, &plist, lru) {
		pages[count++] = p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		list_for_each_entry(p, &plist, lru) {
			if (PageHighMem(p))
				clear_highpage(p);
			else
				clear_page(page_address(p));
		}
	}

	/* If the pool didn't have enough pages, allocate new ones. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		INIT_LIST_HEAD(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
		list_for_each_entry(p, &plist, lru) {
			pages[count++] = p;
		}
		if (r) {
			/* If there are any pages on the list, put them back
			 * into the pool. */
			pr_err("Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
		char *name)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma");

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		_manager = NULL;
		return ret;
	}

	ttm_pool_mm_shrink_init(_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	pr_info("Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}
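
/*
 * A sketch of the expected init/fini pairing (in mainline the caller is the
 * global memory accounting setup in ttm_memory.c; the max_pages expression
 * shown here is an assumption for illustration only):
 *
 *	if (ttm_page_alloc_init(glob, glob->zone_kernel->max_mem / (2 * PAGE_SIZE)))
 *		goto out_err;
 *	...
 *	ttm_page_alloc_fini();
 */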

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
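
/*
 * Hedged sketch of how a driver without special DMA requirements typically
 * wires the two exports above into its struct ttm_bo_driver (the "drv_"
 * names are hypothetical; radeon and nouveau follow this pattern):
 *
 *	static int drv_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		return ttm_pool_populate(ttm);
 *	}
 *
 *	static void drv_ttm_tt_unpopulate(struct ttm_tt *ttm)
 *	{
 *		ttm_pool_unpopulate(ttm);
 *	}
 *
 *	static struct ttm_bo_driver drv_bo_driver = {
 *		.ttm_tt_populate = drv_ttm_tt_populate,
 *		.ttm_tt_unpopulate = drv_ttm_tt_unpopulate,
 *		...
 *	};
 */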

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};
	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
			h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		/* nrefills/nfrees are unsigned long and npages is unsigned,
		 * so use the matching unsigned conversion specifiers. */
		seq_printf(m, "%6s %12lu %13lu %8u\n",
				p->name, p->nrefills,
				p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);
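
/*
 * The show function above matches the seq_file single-show signature, so it
 * can be exposed through debugfs with the standard single_open() helper (a
 * sketch; the file name and parent dentry are illustrative):
 *
 *	static int ttm_page_alloc_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, ttm_page_alloc_debugfs, NULL);
 *	}
 *
 *	static const struct file_operations ttm_page_alloc_fops = {
 *		.open = ttm_page_alloc_open,
 *		.read = seq_read,
 *		.llseek = seq_lseek,
 *		.release = single_release,
 *	};
 *
 *	debugfs_create_file("ttm_page_pool", S_IRUGO, parent, NULL,
 *			    &ttm_page_alloc_fops);
 */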