/*	$NetBSD: uvm_pgflcache.c,v 1.1 2019/12/27 12:51:57 ad Exp $	*/

/*-
 * Copyright (c) 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_pgflcache.c: page freelist cache.
 *
 * This implements a tiny per-CPU cache of pages that sits between the main
 * page allocator and the freelists.  By allocating and freeing pages in
 * batch, it reduces freelist contention by an order of magnitude.
 *
 * The cache can be paused & resumed at runtime so that UVM_HOTPLUG,
 * uvm_pglistalloc() and uvm_page_redim() can have a consistent view of the
 * world.  On systems with one CPU per physical package (e.g. a uniprocessor),
 * the cache is not enabled.
 */
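
/*
 * A rough sketch of how a caller is expected to drive the cache; the real
 * consumer is the page allocator, and the logic there may differ in its
 * details:
 *
 *	s = splvm();
 *	pg = uvm_pgflcache_alloc(ucpu, fl, c);
 *	if (pg == NULL) {
 *		b = ucpu->pgflbucket;
 *		mutex_spin_enter(&uvm_freelist_locks[b].lock);
 *		uvm_pgflcache_fill(ucpu, fl, b, c);
 *		mutex_spin_exit(&uvm_freelist_locks[b].lock);
 *		pg = uvm_pgflcache_alloc(ucpu, fl, c);
 *	}
 *	splx(s);
 *
 * Frees go through uvm_pgflcache_free(), which stashes the page per-CPU
 * and spills a full color back to its bucket under the freelist lock.
 */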

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pgflcache.c,v 1.1 2019/12/27 12:51:57 ad Exp $");

#include "opt_uvm.h"
#include "opt_multiprocessor.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/xcall.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pglist.h>
#include <uvm/uvm_pgflcache.h>

/* There is no point doing any of this on a uniprocessor. */
#ifdef MULTIPROCESSOR

/*
 * MAXPGS - maximum pages per color, per bucket.
 * FILLPGS - number of pages to allocate at once, per color, per bucket.
 *
 * Why the chosen values:
 *
 * (1) In 2019, an average Intel system has 4kB pages and 8x L2 cache
 * colors.  We make the assumption that most of the time allocation activity
 * will be centered around one UVM freelist, so most of the time there will
 * be no more than 224kB worth of cached pages per-CPU.  That's tiny, but
 * enough to hugely reduce contention on the freelist locks, and give us a
 * small pool of pages which if we're very lucky may have some L1/L2 cache
 * locality, and do so without subtracting too much from the L2/L3 cache
 * benefits of having per-package free lists in the page allocator.
 *
 * (2) With the chosen values on _LP64, the data structure for each color
 * takes up a single cache line (64 bytes), giving it very low overhead
 * even in the "miss" case.
 *
 * (3) We don't want to cause too much pressure by hiding away memory that
 * could otherwise be put to good use.
 */
#define	MAXPGS		7
#define	FILLPGS		6
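
/*
 * Worked numbers for (1) and (2) above, assuming 4kB pages and 8 colors:
 *
 *	cached pages, per CPU, per freelist: 8 colors x 7 pages x 4kB = 224kB
 *	sizeof(struct pccolor) on _LP64: 8 (count) + 7 x 8 (pages) = 64 bytes
 */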

/* Variable size, according to # colors. */
struct pgflcache {
	struct pccolor {
		intptr_t	count;
		struct vm_page	*pages[MAXPGS];
	} color[1];
};
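
/*
 * Pause/resume state: uvm_pgflcache_sem counts outstanding pause requests,
 * and uvm_pgflcache_draining is true while the first of those requests is
 * still spilling the per-CPU caches via xcall.  Both are protected by
 * uvm_pgflcache_lock; waiters block on uvm_pgflcache_cv until the drain
 * completes.
 */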
static kmutex_t		uvm_pgflcache_lock;
static kcondvar_t	uvm_pgflcache_cv;
static int		uvm_pgflcache_sem;
static bool		uvm_pgflcache_draining;

/*
 * uvm_pgflcache_fill: fill specified freelist/color from global list
 *
 * => must be called at IPL_VM
 * => must be called with given bucket lock held
 * => must only fill from the correct bucket for this CPU
 */

void
uvm_pgflcache_fill(struct uvm_cpu *ucpu, int fl, int b, int c)
{
	struct pgflbucket *pgb;
	struct pgflcache *pc;
	struct pccolor *pcc;
	struct pgflist *head;
	struct vm_page *pg;
	int count;

	KASSERT(mutex_owned(&uvm_freelist_locks[b].lock));
	KASSERT(ucpu->pgflbucket == b);

	/* If caching is off, then bail out. */
	if (__predict_false((pc = ucpu->pgflcache[fl]) == NULL)) {
		return;
	}

	/* Fill only to the limit. */
	pcc = &pc->color[c];
	pgb = uvm.page_free[fl].pgfl_buckets[b];
	head = &pgb->pgb_colors[c];
	if (pcc->count >= FILLPGS) {
		return;
	}

	/* Pull pages from the bucket until it's empty, or we are full. */
	count = pcc->count;
	pg = LIST_FIRST(head);
	while (__predict_true(pg != NULL && count < FILLPGS)) {
		KASSERT(pg->flags & PG_FREE);
		KASSERT(uvm_page_get_bucket(pg) == b);
		pcc->pages[count++] = pg;
		pg = LIST_NEXT(pg, pageq.list);
	}

	/* Violate LIST abstraction to remove all pages at once. */
	head->lh_first = pg;
	if (__predict_true(pg != NULL)) {
		pg->pageq.list.le_prev = &head->lh_first;
	}
	pgb->pgb_nfree -= (count - pcc->count);
	pcc->count = count;
}

/*
 * uvm_pgflcache_spill: spill specified freelist/color to global list
 *
 * => must be called at IPL_VM
 * => mark __noinline so we don't pull it into uvm_pgflcache_free()
 */

static void __noinline
uvm_pgflcache_spill(struct uvm_cpu *ucpu, int fl, int c)
{
	struct pgflbucket *pgb;
	struct pgfreelist *pgfl;
	struct pgflcache *pc;
	struct pccolor *pcc;
	struct pgflist *head;
	kmutex_t *lock;
	int b, adj;

	pc = ucpu->pgflcache[fl];
	pcc = &pc->color[c];
	pgfl = &uvm.page_free[fl];
	b = ucpu->pgflbucket;
	pgb = pgfl->pgfl_buckets[b];
	head = &pgb->pgb_colors[c];
	lock = &uvm_freelist_locks[b].lock;

	mutex_spin_enter(lock);
	for (adj = pcc->count; pcc->count != 0;) {
		pcc->count--;
		KASSERT(pcc->pages[pcc->count] != NULL);
		KASSERT(pcc->pages[pcc->count]->flags & PG_FREE);
		LIST_INSERT_HEAD(head, pcc->pages[pcc->count], pageq.list);
	}
	pgb->pgb_nfree += adj;
	mutex_spin_exit(lock);
}

/*
 * uvm_pgflcache_alloc: try to allocate a cached page.
 *
 * => must be called at IPL_VM
 * => allocate only from the given freelist and given page color
 */

struct vm_page *
uvm_pgflcache_alloc(struct uvm_cpu *ucpu, int fl, int c)
{
	struct pgflcache *pc;
	struct pccolor *pcc;
	struct vm_page *pg;

	/* If caching is off, then bail out. */
	if (__predict_false((pc = ucpu->pgflcache[fl]) == NULL)) {
		return NULL;
	}

	/* Very simple: if we have a page then return it. */
	pcc = &pc->color[c];
	if (__predict_false(pcc->count == 0)) {
		return NULL;
	}
	pg = pcc->pages[--(pcc->count)];
	KASSERT(pg != NULL);
	KASSERT(pg->flags & PG_FREE);
	KASSERT(uvm_page_get_freelist(pg) == fl);
	KASSERT(uvm_page_get_bucket(pg) == ucpu->pgflbucket);
	pg->flags &= PG_ZERO;
	return pg;
}

/*
 * uvm_pgflcache_free: cache a page, if possible.
 *
 * => must be called at IPL_VM
 * => must only send pages for the correct bucket for this CPU
 */

bool
uvm_pgflcache_free(struct uvm_cpu *ucpu, struct vm_page *pg)
{
	struct pgflcache *pc;
	struct pccolor *pcc;
	int fl, c;

	KASSERT(uvm_page_get_bucket(pg) == ucpu->pgflbucket);

	/* If caching is off, then bail out. */
	fl = uvm_page_get_freelist(pg);
	if (__predict_false((pc = ucpu->pgflcache[fl]) == NULL)) {
		return false;
	}

	/* If the array is full, spill it first, then add page to array. */
	c = VM_PGCOLOR(pg);
	pcc = &pc->color[c];
	KASSERT((pg->flags & PG_FREE) == 0);
	if (__predict_false(pcc->count == MAXPGS)) {
		uvm_pgflcache_spill(ucpu, fl, c);
	}
	pg->flags = (pg->flags & PG_ZERO) | PG_FREE;
	pcc->pages[pcc->count] = pg;
	pcc->count++;
	return true;
}

/*
 * uvm_pgflcache_init_cpu: allocate and initialize per-CPU data structures
 * for the free page cache.  Don't set anything in motion - that's taken
 * care of by uvm_pgflcache_resume().
 */

static void
uvm_pgflcache_init_cpu(struct cpu_info *ci)
{
	struct uvm_cpu *ucpu;
	size_t sz;

	ucpu = ci->ci_data.cpu_uvm;
	KASSERT(ucpu->pgflcachemem == NULL);
	KASSERT(ucpu->pgflcache[0] == NULL);
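
	/*
	 * Room for one pgflcache per freelist.  The total is rounded to the
	 * coherency unit, with coherency_unit - 1 bytes of slack so that
	 * uvm_pgflcache_resume() can cache line align the base address.
	 */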
	sz = offsetof(struct pgflcache, color[uvmexp.ncolors]);
	ucpu->pgflcachememsz =
	    (roundup2(sz * VM_NFREELIST, coherency_unit) + coherency_unit - 1);
	ucpu->pgflcachemem = kmem_zalloc(ucpu->pgflcachememsz, KM_SLEEP);
}

/*
 * uvm_pgflcache_fini_cpu: dump all cached pages back to global free list
 * and shut down caching on the CPU.  Called on each CPU in the system via
 * xcall.
 */

static void
uvm_pgflcache_fini_cpu(void *arg1 __unused, void *arg2 __unused)
{
	struct uvm_cpu *ucpu;
	int fl, color, s;

	ucpu = curcpu()->ci_data.cpu_uvm;
	for (fl = 0; fl < VM_NFREELIST; fl++) {
		s = splvm();
		for (color = 0; color < uvmexp.ncolors; color++) {
			uvm_pgflcache_spill(ucpu, fl, color);
		}
		ucpu->pgflcache[fl] = NULL;
		splx(s);
	}
}

/*
 * uvm_pgflcache_pause: pause operation of the caches
 */

void
uvm_pgflcache_pause(void)
{
	uint64_t where;

	/* First one in starts draining.  Everyone else waits. */
	mutex_enter(&uvm_pgflcache_lock);
	if (uvm_pgflcache_sem++ == 0) {
		uvm_pgflcache_draining = true;
		mutex_exit(&uvm_pgflcache_lock);
		where = xc_broadcast(0, uvm_pgflcache_fini_cpu, NULL, NULL);
		xc_wait(where);
		mutex_enter(&uvm_pgflcache_lock);
		uvm_pgflcache_draining = false;
		cv_broadcast(&uvm_pgflcache_cv);
	} else {
		while (uvm_pgflcache_draining) {
			cv_wait(&uvm_pgflcache_cv, &uvm_pgflcache_lock);
		}
	}
	mutex_exit(&uvm_pgflcache_lock);
}

/*
 * uvm_pgflcache_resume: resume operation of the caches
 */

void
uvm_pgflcache_resume(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct uvm_cpu *ucpu;
	uintptr_t addr;
	size_t sz;
	int fl;

	/* Last guy out takes care of business. */
	mutex_enter(&uvm_pgflcache_lock);
	KASSERT(!uvm_pgflcache_draining);
	KASSERT(uvm_pgflcache_sem > 0);
	if (uvm_pgflcache_sem-- > 1) {
		mutex_exit(&uvm_pgflcache_lock);
		return;
	}

	/*
	 * Make sure dependent data structure updates are remotely visible.
	 * Essentially this functions as a global memory barrier.
	 */
	xc_barrier(XC_HIGHPRI);

	/*
	 * Then set all of the pointers in place on each CPU.  As soon as
	 * each pointer is set, caching is operational in that dimension.
	 */
	sz = offsetof(struct pgflcache, color[uvmexp.ncolors]);
	for (CPU_INFO_FOREACH(cii, ci)) {
		ucpu = ci->ci_data.cpu_uvm;
		addr = roundup2((uintptr_t)ucpu->pgflcachemem, coherency_unit);
		for (fl = 0; fl < VM_NFREELIST; fl++) {
			ucpu->pgflcache[fl] = (struct pgflcache *)addr;
			addr += sz;
		}
	}
	mutex_exit(&uvm_pgflcache_lock);
}

/*
 * uvm_pgflcache_start: start operation of the cache.
 *
 * => called once only, when init(8) is about to be started
 */

void
uvm_pgflcache_start(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	KASSERT(uvm_pgflcache_sem > 0);

	/*
	 * There's not much point doing this if every CPU has its own
	 * bucket (and that includes the uniprocessor case).
	 */
	if (ncpu == uvm.bucketcount) {
		return;
	}

	/* Create each CPU's buckets. */
	for (CPU_INFO_FOREACH(cii, ci)) {
		uvm_pgflcache_init_cpu(ci);
	}

	/* Kick it into action. */
	uvm_pgflcache_resume();
}

/*
 * uvm_pgflcache_init: set up data structures for the free page cache.
 */

void
uvm_pgflcache_init(void)
{

	uvm_pgflcache_sem = 1;
	mutex_init(&uvm_pgflcache_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&uvm_pgflcache_cv, "flcache");
}

#else	/* MULTIPROCESSOR */

struct vm_page *
uvm_pgflcache_alloc(struct uvm_cpu *ucpu, int fl, int c)
{

	return NULL;
}

bool
uvm_pgflcache_free(struct uvm_cpu *ucpu, struct vm_page *pg)
{

	return false;
}

void
uvm_pgflcache_fill(struct uvm_cpu *ucpu, int fl, int b, int c)
{

}

void
uvm_pgflcache_pause(void)
{

}

void
uvm_pgflcache_resume(void)
{

}

void
uvm_pgflcache_start(void)
{

}

void
uvm_pgflcache_init(void)
{

}

#endif	/* MULTIPROCESSOR */