disk_cache.c revision 01e04c3f
101e04c3fSmrg/* 201e04c3fSmrg * Copyright © 2014 Intel Corporation 301e04c3fSmrg * 401e04c3fSmrg * Permission is hereby granted, free of charge, to any person obtaining a 501e04c3fSmrg * copy of this software and associated documentation files (the "Software"), 601e04c3fSmrg * to deal in the Software without restriction, including without limitation 701e04c3fSmrg * the rights to use, copy, modify, merge, publish, distribute, sublicense, 801e04c3fSmrg * and/or sell copies of the Software, and to permit persons to whom the 901e04c3fSmrg * Software is furnished to do so, subject to the following conditions: 1001e04c3fSmrg * 1101e04c3fSmrg * The above copyright notice and this permission notice (including the next 1201e04c3fSmrg * paragraph) shall be included in all copies or substantial portions of the 1301e04c3fSmrg * Software. 1401e04c3fSmrg * 1501e04c3fSmrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 1601e04c3fSmrg * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 1701e04c3fSmrg * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 1801e04c3fSmrg * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 1901e04c3fSmrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 2001e04c3fSmrg * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 2101e04c3fSmrg * IN THE SOFTWARE. 
2201e04c3fSmrg */ 2301e04c3fSmrg 2401e04c3fSmrg#ifdef ENABLE_SHADER_CACHE 2501e04c3fSmrg 2601e04c3fSmrg#include <ctype.h> 2701e04c3fSmrg#include <ftw.h> 2801e04c3fSmrg#include <string.h> 2901e04c3fSmrg#include <stdlib.h> 3001e04c3fSmrg#include <stdio.h> 3101e04c3fSmrg#include <sys/file.h> 3201e04c3fSmrg#include <sys/types.h> 3301e04c3fSmrg#include <sys/stat.h> 3401e04c3fSmrg#include <sys/mman.h> 3501e04c3fSmrg#include <unistd.h> 3601e04c3fSmrg#include <fcntl.h> 3701e04c3fSmrg#include <pwd.h> 3801e04c3fSmrg#include <errno.h> 3901e04c3fSmrg#include <dirent.h> 4001e04c3fSmrg#include "zlib.h" 4101e04c3fSmrg 4201e04c3fSmrg#include "util/crc32.h" 4301e04c3fSmrg#include "util/debug.h" 4401e04c3fSmrg#include "util/rand_xor.h" 4501e04c3fSmrg#include "util/u_atomic.h" 4601e04c3fSmrg#include "util/u_queue.h" 4701e04c3fSmrg#include "util/mesa-sha1.h" 4801e04c3fSmrg#include "util/ralloc.h" 4901e04c3fSmrg#include "main/compiler.h" 5001e04c3fSmrg#include "main/errors.h" 5101e04c3fSmrg 5201e04c3fSmrg#include "disk_cache.h" 5301e04c3fSmrg 5401e04c3fSmrg/* Number of bits to mask off from a cache key to get an index. */ 5501e04c3fSmrg#define CACHE_INDEX_KEY_BITS 16 5601e04c3fSmrg 5701e04c3fSmrg/* Mask for computing an index from a key. */ 5801e04c3fSmrg#define CACHE_INDEX_KEY_MASK ((1 << CACHE_INDEX_KEY_BITS) - 1) 5901e04c3fSmrg 6001e04c3fSmrg/* The number of keys that can be stored in the index. */ 6101e04c3fSmrg#define CACHE_INDEX_MAX_KEYS (1 << CACHE_INDEX_KEY_BITS) 6201e04c3fSmrg 6301e04c3fSmrg/* The cache version should be bumped whenever a change is made to the 6401e04c3fSmrg * structure of cache entries or the index. This will give any 3rd party 6501e04c3fSmrg * applications reading the cache entries a chance to adjust to the changes. 6601e04c3fSmrg * 6701e04c3fSmrg * - The cache version is checked internally when reading a cache entry. If we 6801e04c3fSmrg * ever have a mismatch we are in big trouble as this means we had a cache 6901e04c3fSmrg * collision. 
In case of such an event please check the skys for giant 7001e04c3fSmrg * asteroids and that the entire Mesa team hasn't been eaten by wolves. 7101e04c3fSmrg * 7201e04c3fSmrg * - There is no strict requirement that cache versions be backwards 7301e04c3fSmrg * compatible but effort should be taken to limit disruption where possible. 7401e04c3fSmrg */ 7501e04c3fSmrg#define CACHE_VERSION 1 7601e04c3fSmrg 7701e04c3fSmrgstruct disk_cache { 7801e04c3fSmrg /* The path to the cache directory. */ 7901e04c3fSmrg char *path; 8001e04c3fSmrg bool path_init_failed; 8101e04c3fSmrg 8201e04c3fSmrg /* Thread queue for compressing and writing cache entries to disk */ 8301e04c3fSmrg struct util_queue cache_queue; 8401e04c3fSmrg 8501e04c3fSmrg /* Seed for rand, which is used to pick a random directory */ 8601e04c3fSmrg uint64_t seed_xorshift128plus[2]; 8701e04c3fSmrg 8801e04c3fSmrg /* A pointer to the mmapped index file within the cache directory. */ 8901e04c3fSmrg uint8_t *index_mmap; 9001e04c3fSmrg size_t index_mmap_size; 9101e04c3fSmrg 9201e04c3fSmrg /* Pointer to total size of all objects in cache (within index_mmap) */ 9301e04c3fSmrg uint64_t *size; 9401e04c3fSmrg 9501e04c3fSmrg /* Pointer to stored keys, (within index_mmap). */ 9601e04c3fSmrg uint8_t *stored_keys; 9701e04c3fSmrg 9801e04c3fSmrg /* Maximum size of all cached objects (in bytes). */ 9901e04c3fSmrg uint64_t max_size; 10001e04c3fSmrg 10101e04c3fSmrg /* Driver cache keys. */ 10201e04c3fSmrg uint8_t *driver_keys_blob; 10301e04c3fSmrg size_t driver_keys_blob_size; 10401e04c3fSmrg 10501e04c3fSmrg disk_cache_put_cb blob_put_cb; 10601e04c3fSmrg disk_cache_get_cb blob_get_cb; 10701e04c3fSmrg}; 10801e04c3fSmrg 10901e04c3fSmrgstruct disk_cache_put_job { 11001e04c3fSmrg struct util_queue_fence fence; 11101e04c3fSmrg 11201e04c3fSmrg struct disk_cache *cache; 11301e04c3fSmrg 11401e04c3fSmrg cache_key key; 11501e04c3fSmrg 11601e04c3fSmrg /* Copy of cache data to be compressed and written. 
*/ 11701e04c3fSmrg void *data; 11801e04c3fSmrg 11901e04c3fSmrg /* Size of data to be compressed and written. */ 12001e04c3fSmrg size_t size; 12101e04c3fSmrg 12201e04c3fSmrg struct cache_item_metadata cache_item_metadata; 12301e04c3fSmrg}; 12401e04c3fSmrg 12501e04c3fSmrg/* Create a directory named 'path' if it does not already exist. 12601e04c3fSmrg * 12701e04c3fSmrg * Returns: 0 if path already exists as a directory or if created. 12801e04c3fSmrg * -1 in all other cases. 12901e04c3fSmrg */ 13001e04c3fSmrgstatic int 13101e04c3fSmrgmkdir_if_needed(const char *path) 13201e04c3fSmrg{ 13301e04c3fSmrg struct stat sb; 13401e04c3fSmrg 13501e04c3fSmrg /* If the path exists already, then our work is done if it's a 13601e04c3fSmrg * directory, but it's an error if it is not. 13701e04c3fSmrg */ 13801e04c3fSmrg if (stat(path, &sb) == 0) { 13901e04c3fSmrg if (S_ISDIR(sb.st_mode)) { 14001e04c3fSmrg return 0; 14101e04c3fSmrg } else { 14201e04c3fSmrg fprintf(stderr, "Cannot use %s for shader cache (not a directory)" 14301e04c3fSmrg "---disabling.\n", path); 14401e04c3fSmrg return -1; 14501e04c3fSmrg } 14601e04c3fSmrg } 14701e04c3fSmrg 14801e04c3fSmrg int ret = mkdir(path, 0755); 14901e04c3fSmrg if (ret == 0 || (ret == -1 && errno == EEXIST)) 15001e04c3fSmrg return 0; 15101e04c3fSmrg 15201e04c3fSmrg fprintf(stderr, "Failed to create %s for shader cache (%s)---disabling.\n", 15301e04c3fSmrg path, strerror(errno)); 15401e04c3fSmrg 15501e04c3fSmrg return -1; 15601e04c3fSmrg} 15701e04c3fSmrg 15801e04c3fSmrg/* Concatenate an existing path and a new name to form a new path. If the new 15901e04c3fSmrg * path does not exist as a directory, create it then return the resulting 16001e04c3fSmrg * name of the new path (ralloc'ed off of 'ctx'). 
16101e04c3fSmrg * 16201e04c3fSmrg * Returns NULL on any error, such as: 16301e04c3fSmrg * 16401e04c3fSmrg * <path> does not exist or is not a directory 16501e04c3fSmrg * <path>/<name> exists but is not a directory 16601e04c3fSmrg * <path>/<name> cannot be created as a directory 16701e04c3fSmrg */ 16801e04c3fSmrgstatic char * 16901e04c3fSmrgconcatenate_and_mkdir(void *ctx, const char *path, const char *name) 17001e04c3fSmrg{ 17101e04c3fSmrg char *new_path; 17201e04c3fSmrg struct stat sb; 17301e04c3fSmrg 17401e04c3fSmrg if (stat(path, &sb) != 0 || ! S_ISDIR(sb.st_mode)) 17501e04c3fSmrg return NULL; 17601e04c3fSmrg 17701e04c3fSmrg new_path = ralloc_asprintf(ctx, "%s/%s", path, name); 17801e04c3fSmrg 17901e04c3fSmrg if (mkdir_if_needed(new_path) == 0) 18001e04c3fSmrg return new_path; 18101e04c3fSmrg else 18201e04c3fSmrg return NULL; 18301e04c3fSmrg} 18401e04c3fSmrg 18501e04c3fSmrg#define DRV_KEY_CPY(_dst, _src, _src_size) \ 18601e04c3fSmrgdo { \ 18701e04c3fSmrg memcpy(_dst, _src, _src_size); \ 18801e04c3fSmrg _dst += _src_size; \ 18901e04c3fSmrg} while (0); 19001e04c3fSmrg 19101e04c3fSmrgstruct disk_cache * 19201e04c3fSmrgdisk_cache_create(const char *gpu_name, const char *driver_id, 19301e04c3fSmrg uint64_t driver_flags) 19401e04c3fSmrg{ 19501e04c3fSmrg void *local; 19601e04c3fSmrg struct disk_cache *cache = NULL; 19701e04c3fSmrg char *path, *max_size_str; 19801e04c3fSmrg uint64_t max_size; 19901e04c3fSmrg int fd = -1; 20001e04c3fSmrg struct stat sb; 20101e04c3fSmrg size_t size; 20201e04c3fSmrg 20301e04c3fSmrg uint8_t cache_version = CACHE_VERSION; 20401e04c3fSmrg size_t cv_size = sizeof(cache_version); 20501e04c3fSmrg 20601e04c3fSmrg /* If running as a users other than the real user disable cache */ 20701e04c3fSmrg if (geteuid() != getuid()) 20801e04c3fSmrg return NULL; 20901e04c3fSmrg 21001e04c3fSmrg /* A ralloc context for transient data during this invocation. 
*/ 21101e04c3fSmrg local = ralloc_context(NULL); 21201e04c3fSmrg if (local == NULL) 21301e04c3fSmrg goto fail; 21401e04c3fSmrg 21501e04c3fSmrg /* At user request, disable shader cache entirely. */ 21601e04c3fSmrg if (env_var_as_boolean("MESA_GLSL_CACHE_DISABLE", false)) 21701e04c3fSmrg goto fail; 21801e04c3fSmrg 21901e04c3fSmrg cache = rzalloc(NULL, struct disk_cache); 22001e04c3fSmrg if (cache == NULL) 22101e04c3fSmrg goto fail; 22201e04c3fSmrg 22301e04c3fSmrg /* Assume failure. */ 22401e04c3fSmrg cache->path_init_failed = true; 22501e04c3fSmrg 22601e04c3fSmrg /* Determine path for cache based on the first defined name as follows: 22701e04c3fSmrg * 22801e04c3fSmrg * $MESA_GLSL_CACHE_DIR 22901e04c3fSmrg * $XDG_CACHE_HOME/mesa_shader_cache 23001e04c3fSmrg * <pwd.pw_dir>/.cache/mesa_shader_cache 23101e04c3fSmrg */ 23201e04c3fSmrg path = getenv("MESA_GLSL_CACHE_DIR"); 23301e04c3fSmrg if (path) { 23401e04c3fSmrg if (mkdir_if_needed(path) == -1) 23501e04c3fSmrg goto path_fail; 23601e04c3fSmrg 23701e04c3fSmrg path = concatenate_and_mkdir(local, path, CACHE_DIR_NAME); 23801e04c3fSmrg if (path == NULL) 23901e04c3fSmrg goto path_fail; 24001e04c3fSmrg } 24101e04c3fSmrg 24201e04c3fSmrg if (path == NULL) { 24301e04c3fSmrg char *xdg_cache_home = getenv("XDG_CACHE_HOME"); 24401e04c3fSmrg 24501e04c3fSmrg if (xdg_cache_home) { 24601e04c3fSmrg if (mkdir_if_needed(xdg_cache_home) == -1) 24701e04c3fSmrg goto path_fail; 24801e04c3fSmrg 24901e04c3fSmrg path = concatenate_and_mkdir(local, xdg_cache_home, CACHE_DIR_NAME); 25001e04c3fSmrg if (path == NULL) 25101e04c3fSmrg goto path_fail; 25201e04c3fSmrg } 25301e04c3fSmrg } 25401e04c3fSmrg 25501e04c3fSmrg if (path == NULL) { 25601e04c3fSmrg char *buf; 25701e04c3fSmrg size_t buf_size; 25801e04c3fSmrg struct passwd pwd, *result; 25901e04c3fSmrg 26001e04c3fSmrg buf_size = sysconf(_SC_GETPW_R_SIZE_MAX); 26101e04c3fSmrg if (buf_size == -1) 26201e04c3fSmrg buf_size = 512; 26301e04c3fSmrg 26401e04c3fSmrg /* Loop until buf_size is large enough to 
query the directory */ 26501e04c3fSmrg while (1) { 26601e04c3fSmrg buf = ralloc_size(local, buf_size); 26701e04c3fSmrg 26801e04c3fSmrg getpwuid_r(getuid(), &pwd, buf, buf_size, &result); 26901e04c3fSmrg if (result) 27001e04c3fSmrg break; 27101e04c3fSmrg 27201e04c3fSmrg if (errno == ERANGE) { 27301e04c3fSmrg ralloc_free(buf); 27401e04c3fSmrg buf = NULL; 27501e04c3fSmrg buf_size *= 2; 27601e04c3fSmrg } else { 27701e04c3fSmrg goto path_fail; 27801e04c3fSmrg } 27901e04c3fSmrg } 28001e04c3fSmrg 28101e04c3fSmrg path = concatenate_and_mkdir(local, pwd.pw_dir, ".cache"); 28201e04c3fSmrg if (path == NULL) 28301e04c3fSmrg goto path_fail; 28401e04c3fSmrg 28501e04c3fSmrg path = concatenate_and_mkdir(local, path, CACHE_DIR_NAME); 28601e04c3fSmrg if (path == NULL) 28701e04c3fSmrg goto path_fail; 28801e04c3fSmrg } 28901e04c3fSmrg 29001e04c3fSmrg cache->path = ralloc_strdup(cache, path); 29101e04c3fSmrg if (cache->path == NULL) 29201e04c3fSmrg goto path_fail; 29301e04c3fSmrg 29401e04c3fSmrg path = ralloc_asprintf(local, "%s/index", cache->path); 29501e04c3fSmrg if (path == NULL) 29601e04c3fSmrg goto path_fail; 29701e04c3fSmrg 29801e04c3fSmrg fd = open(path, O_RDWR | O_CREAT | O_CLOEXEC, 0644); 29901e04c3fSmrg if (fd == -1) 30001e04c3fSmrg goto path_fail; 30101e04c3fSmrg 30201e04c3fSmrg if (fstat(fd, &sb) == -1) 30301e04c3fSmrg goto path_fail; 30401e04c3fSmrg 30501e04c3fSmrg /* Force the index file to be the expected size. */ 30601e04c3fSmrg size = sizeof(*cache->size) + CACHE_INDEX_MAX_KEYS * CACHE_KEY_SIZE; 30701e04c3fSmrg if (sb.st_size != size) { 30801e04c3fSmrg if (ftruncate(fd, size) == -1) 30901e04c3fSmrg goto path_fail; 31001e04c3fSmrg } 31101e04c3fSmrg 31201e04c3fSmrg /* We map this shared so that other processes see updates that we 31301e04c3fSmrg * make. 31401e04c3fSmrg * 31501e04c3fSmrg * Note: We do use atomic addition to ensure that multiple 31601e04c3fSmrg * processes don't scramble the cache size recorded in the 31701e04c3fSmrg * index. 
But we don't use any locking to prevent multiple 31801e04c3fSmrg * processes from updating the same entry simultaneously. The idea 31901e04c3fSmrg * is that if either result lands entirely in the index, then 32001e04c3fSmrg * that's equivalent to a well-ordered write followed by an 32101e04c3fSmrg * eviction and a write. On the other hand, if the simultaneous 32201e04c3fSmrg * writes result in a corrupt entry, that's not really any 32301e04c3fSmrg * different than both entries being evicted, (since within the 32401e04c3fSmrg * guarantees of the cryptographic hash, a corrupt entry is 32501e04c3fSmrg * unlikely to ever match a real cache key). 32601e04c3fSmrg */ 32701e04c3fSmrg cache->index_mmap = mmap(NULL, size, PROT_READ | PROT_WRITE, 32801e04c3fSmrg MAP_SHARED, fd, 0); 32901e04c3fSmrg if (cache->index_mmap == MAP_FAILED) 33001e04c3fSmrg goto path_fail; 33101e04c3fSmrg cache->index_mmap_size = size; 33201e04c3fSmrg 33301e04c3fSmrg close(fd); 33401e04c3fSmrg 33501e04c3fSmrg cache->size = (uint64_t *) cache->index_mmap; 33601e04c3fSmrg cache->stored_keys = cache->index_mmap + sizeof(uint64_t); 33701e04c3fSmrg 33801e04c3fSmrg max_size = 0; 33901e04c3fSmrg 34001e04c3fSmrg max_size_str = getenv("MESA_GLSL_CACHE_MAX_SIZE"); 34101e04c3fSmrg if (max_size_str) { 34201e04c3fSmrg char *end; 34301e04c3fSmrg max_size = strtoul(max_size_str, &end, 10); 34401e04c3fSmrg if (end == max_size_str) { 34501e04c3fSmrg max_size = 0; 34601e04c3fSmrg } else { 34701e04c3fSmrg switch (*end) { 34801e04c3fSmrg case 'K': 34901e04c3fSmrg case 'k': 35001e04c3fSmrg max_size *= 1024; 35101e04c3fSmrg break; 35201e04c3fSmrg case 'M': 35301e04c3fSmrg case 'm': 35401e04c3fSmrg max_size *= 1024*1024; 35501e04c3fSmrg break; 35601e04c3fSmrg case '\0': 35701e04c3fSmrg case 'G': 35801e04c3fSmrg case 'g': 35901e04c3fSmrg default: 36001e04c3fSmrg max_size *= 1024*1024*1024; 36101e04c3fSmrg break; 36201e04c3fSmrg } 36301e04c3fSmrg } 36401e04c3fSmrg } 36501e04c3fSmrg 36601e04c3fSmrg /* Default to 1GB for 
maximum cache size. */ 36701e04c3fSmrg if (max_size == 0) { 36801e04c3fSmrg max_size = 1024*1024*1024; 36901e04c3fSmrg } 37001e04c3fSmrg 37101e04c3fSmrg cache->max_size = max_size; 37201e04c3fSmrg 37301e04c3fSmrg /* 1 thread was chosen because we don't really care about getting things 37401e04c3fSmrg * to disk quickly just that it's not blocking other tasks. 37501e04c3fSmrg * 37601e04c3fSmrg * The queue will resize automatically when it's full, so adding new jobs 37701e04c3fSmrg * doesn't stall. 37801e04c3fSmrg */ 37901e04c3fSmrg util_queue_init(&cache->cache_queue, "disk$", 32, 1, 38001e04c3fSmrg UTIL_QUEUE_INIT_RESIZE_IF_FULL | 38101e04c3fSmrg UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY | 38201e04c3fSmrg UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY); 38301e04c3fSmrg 38401e04c3fSmrg cache->path_init_failed = false; 38501e04c3fSmrg 38601e04c3fSmrg path_fail: 38701e04c3fSmrg 38801e04c3fSmrg cache->driver_keys_blob_size = cv_size; 38901e04c3fSmrg 39001e04c3fSmrg /* Create driver id keys */ 39101e04c3fSmrg size_t id_size = strlen(driver_id) + 1; 39201e04c3fSmrg size_t gpu_name_size = strlen(gpu_name) + 1; 39301e04c3fSmrg cache->driver_keys_blob_size += id_size; 39401e04c3fSmrg cache->driver_keys_blob_size += gpu_name_size; 39501e04c3fSmrg 39601e04c3fSmrg /* We sometimes store entire structs that contains a pointers in the cache, 39701e04c3fSmrg * use pointer size as a key to avoid hard to debug issues. 
39801e04c3fSmrg */ 39901e04c3fSmrg uint8_t ptr_size = sizeof(void *); 40001e04c3fSmrg size_t ptr_size_size = sizeof(ptr_size); 40101e04c3fSmrg cache->driver_keys_blob_size += ptr_size_size; 40201e04c3fSmrg 40301e04c3fSmrg size_t driver_flags_size = sizeof(driver_flags); 40401e04c3fSmrg cache->driver_keys_blob_size += driver_flags_size; 40501e04c3fSmrg 40601e04c3fSmrg cache->driver_keys_blob = 40701e04c3fSmrg ralloc_size(cache, cache->driver_keys_blob_size); 40801e04c3fSmrg if (!cache->driver_keys_blob) 40901e04c3fSmrg goto fail; 41001e04c3fSmrg 41101e04c3fSmrg uint8_t *drv_key_blob = cache->driver_keys_blob; 41201e04c3fSmrg DRV_KEY_CPY(drv_key_blob, &cache_version, cv_size) 41301e04c3fSmrg DRV_KEY_CPY(drv_key_blob, driver_id, id_size) 41401e04c3fSmrg DRV_KEY_CPY(drv_key_blob, gpu_name, gpu_name_size) 41501e04c3fSmrg DRV_KEY_CPY(drv_key_blob, &ptr_size, ptr_size_size) 41601e04c3fSmrg DRV_KEY_CPY(drv_key_blob, &driver_flags, driver_flags_size) 41701e04c3fSmrg 41801e04c3fSmrg /* Seed our rand function */ 41901e04c3fSmrg s_rand_xorshift128plus(cache->seed_xorshift128plus, true); 42001e04c3fSmrg 42101e04c3fSmrg ralloc_free(local); 42201e04c3fSmrg 42301e04c3fSmrg return cache; 42401e04c3fSmrg 42501e04c3fSmrg fail: 42601e04c3fSmrg if (fd != -1) 42701e04c3fSmrg close(fd); 42801e04c3fSmrg if (cache) 42901e04c3fSmrg ralloc_free(cache); 43001e04c3fSmrg ralloc_free(local); 43101e04c3fSmrg 43201e04c3fSmrg return NULL; 43301e04c3fSmrg} 43401e04c3fSmrg 43501e04c3fSmrgvoid 43601e04c3fSmrgdisk_cache_destroy(struct disk_cache *cache) 43701e04c3fSmrg{ 43801e04c3fSmrg if (cache && !cache->path_init_failed) { 43901e04c3fSmrg util_queue_destroy(&cache->cache_queue); 44001e04c3fSmrg munmap(cache->index_mmap, cache->index_mmap_size); 44101e04c3fSmrg } 44201e04c3fSmrg 44301e04c3fSmrg ralloc_free(cache); 44401e04c3fSmrg} 44501e04c3fSmrg 44601e04c3fSmrg/* Return a filename within the cache's directory corresponding to 'key'. 
The 44701e04c3fSmrg * returned filename is ralloced with 'cache' as the parent context. 44801e04c3fSmrg * 44901e04c3fSmrg * Returns NULL if out of memory. 45001e04c3fSmrg */ 45101e04c3fSmrgstatic char * 45201e04c3fSmrgget_cache_file(struct disk_cache *cache, const cache_key key) 45301e04c3fSmrg{ 45401e04c3fSmrg char buf[41]; 45501e04c3fSmrg char *filename; 45601e04c3fSmrg 45701e04c3fSmrg if (cache->path_init_failed) 45801e04c3fSmrg return NULL; 45901e04c3fSmrg 46001e04c3fSmrg _mesa_sha1_format(buf, key); 46101e04c3fSmrg if (asprintf(&filename, "%s/%c%c/%s", cache->path, buf[0], 46201e04c3fSmrg buf[1], buf + 2) == -1) 46301e04c3fSmrg return NULL; 46401e04c3fSmrg 46501e04c3fSmrg return filename; 46601e04c3fSmrg} 46701e04c3fSmrg 46801e04c3fSmrg/* Create the directory that will be needed for the cache file for \key. 46901e04c3fSmrg * 47001e04c3fSmrg * Obviously, the implementation here must closely match 47101e04c3fSmrg * _get_cache_file above. 47201e04c3fSmrg*/ 47301e04c3fSmrgstatic void 47401e04c3fSmrgmake_cache_file_directory(struct disk_cache *cache, const cache_key key) 47501e04c3fSmrg{ 47601e04c3fSmrg char *dir; 47701e04c3fSmrg char buf[41]; 47801e04c3fSmrg 47901e04c3fSmrg _mesa_sha1_format(buf, key); 48001e04c3fSmrg if (asprintf(&dir, "%s/%c%c", cache->path, buf[0], buf[1]) == -1) 48101e04c3fSmrg return; 48201e04c3fSmrg 48301e04c3fSmrg mkdir_if_needed(dir); 48401e04c3fSmrg free(dir); 48501e04c3fSmrg} 48601e04c3fSmrg 48701e04c3fSmrg/* Given a directory path and predicate function, find the entry with 48801e04c3fSmrg * the oldest access time in that directory for which the predicate 48901e04c3fSmrg * returns true. 49001e04c3fSmrg * 49101e04c3fSmrg * Returns: A malloc'ed string for the path to the chosen file, (or 49201e04c3fSmrg * NULL on any error). The caller should free the string when 49301e04c3fSmrg * finished. 
49401e04c3fSmrg */ 49501e04c3fSmrgstatic char * 49601e04c3fSmrgchoose_lru_file_matching(const char *dir_path, 49701e04c3fSmrg bool (*predicate)(const char *dir_path, 49801e04c3fSmrg const struct stat *, 49901e04c3fSmrg const char *, const size_t)) 50001e04c3fSmrg{ 50101e04c3fSmrg DIR *dir; 50201e04c3fSmrg struct dirent *entry; 50301e04c3fSmrg char *filename; 50401e04c3fSmrg char *lru_name = NULL; 50501e04c3fSmrg time_t lru_atime = 0; 50601e04c3fSmrg 50701e04c3fSmrg dir = opendir(dir_path); 50801e04c3fSmrg if (dir == NULL) 50901e04c3fSmrg return NULL; 51001e04c3fSmrg 51101e04c3fSmrg while (1) { 51201e04c3fSmrg entry = readdir(dir); 51301e04c3fSmrg if (entry == NULL) 51401e04c3fSmrg break; 51501e04c3fSmrg 51601e04c3fSmrg struct stat sb; 51701e04c3fSmrg if (fstatat(dirfd(dir), entry->d_name, &sb, 0) == 0) { 51801e04c3fSmrg if (!lru_atime || (sb.st_atime < lru_atime)) { 51901e04c3fSmrg size_t len = strlen(entry->d_name); 52001e04c3fSmrg 52101e04c3fSmrg if (!predicate(dir_path, &sb, entry->d_name, len)) 52201e04c3fSmrg continue; 52301e04c3fSmrg 52401e04c3fSmrg char *tmp = realloc(lru_name, len + 1); 52501e04c3fSmrg if (tmp) { 52601e04c3fSmrg lru_name = tmp; 52701e04c3fSmrg memcpy(lru_name, entry->d_name, len + 1); 52801e04c3fSmrg lru_atime = sb.st_atime; 52901e04c3fSmrg } 53001e04c3fSmrg } 53101e04c3fSmrg } 53201e04c3fSmrg } 53301e04c3fSmrg 53401e04c3fSmrg if (lru_name == NULL) { 53501e04c3fSmrg closedir(dir); 53601e04c3fSmrg return NULL; 53701e04c3fSmrg } 53801e04c3fSmrg 53901e04c3fSmrg if (asprintf(&filename, "%s/%s", dir_path, lru_name) < 0) 54001e04c3fSmrg filename = NULL; 54101e04c3fSmrg 54201e04c3fSmrg free(lru_name); 54301e04c3fSmrg closedir(dir); 54401e04c3fSmrg 54501e04c3fSmrg return filename; 54601e04c3fSmrg} 54701e04c3fSmrg 54801e04c3fSmrg/* Is entry a regular file, and not having a name with a trailing 54901e04c3fSmrg * ".tmp" 55001e04c3fSmrg */ 55101e04c3fSmrgstatic bool 55201e04c3fSmrgis_regular_non_tmp_file(const char *path, const struct stat *sb, 
55301e04c3fSmrg const char *d_name, const size_t len) 55401e04c3fSmrg{ 55501e04c3fSmrg if (!S_ISREG(sb->st_mode)) 55601e04c3fSmrg return false; 55701e04c3fSmrg 55801e04c3fSmrg if (len >= 4 && strcmp(&d_name[len-4], ".tmp") == 0) 55901e04c3fSmrg return false; 56001e04c3fSmrg 56101e04c3fSmrg return true; 56201e04c3fSmrg} 56301e04c3fSmrg 56401e04c3fSmrg/* Returns the size of the deleted file, (or 0 on any error). */ 56501e04c3fSmrgstatic size_t 56601e04c3fSmrgunlink_lru_file_from_directory(const char *path) 56701e04c3fSmrg{ 56801e04c3fSmrg struct stat sb; 56901e04c3fSmrg char *filename; 57001e04c3fSmrg 57101e04c3fSmrg filename = choose_lru_file_matching(path, is_regular_non_tmp_file); 57201e04c3fSmrg if (filename == NULL) 57301e04c3fSmrg return 0; 57401e04c3fSmrg 57501e04c3fSmrg if (stat(filename, &sb) == -1) { 57601e04c3fSmrg free (filename); 57701e04c3fSmrg return 0; 57801e04c3fSmrg } 57901e04c3fSmrg 58001e04c3fSmrg unlink(filename); 58101e04c3fSmrg free (filename); 58201e04c3fSmrg 58301e04c3fSmrg return sb.st_blocks * 512; 58401e04c3fSmrg} 58501e04c3fSmrg 58601e04c3fSmrg/* Is entry a directory with a two-character name, (and not the 58701e04c3fSmrg * special name of ".."). We also return false if the dir is empty. 
58801e04c3fSmrg */ 58901e04c3fSmrgstatic bool 59001e04c3fSmrgis_two_character_sub_directory(const char *path, const struct stat *sb, 59101e04c3fSmrg const char *d_name, const size_t len) 59201e04c3fSmrg{ 59301e04c3fSmrg if (!S_ISDIR(sb->st_mode)) 59401e04c3fSmrg return false; 59501e04c3fSmrg 59601e04c3fSmrg if (len != 2) 59701e04c3fSmrg return false; 59801e04c3fSmrg 59901e04c3fSmrg if (strcmp(d_name, "..") == 0) 60001e04c3fSmrg return false; 60101e04c3fSmrg 60201e04c3fSmrg char *subdir; 60301e04c3fSmrg if (asprintf(&subdir, "%s/%s", path, d_name) == -1) 60401e04c3fSmrg return false; 60501e04c3fSmrg DIR *dir = opendir(subdir); 60601e04c3fSmrg free(subdir); 60701e04c3fSmrg 60801e04c3fSmrg if (dir == NULL) 60901e04c3fSmrg return false; 61001e04c3fSmrg 61101e04c3fSmrg unsigned subdir_entries = 0; 61201e04c3fSmrg struct dirent *d; 61301e04c3fSmrg while ((d = readdir(dir)) != NULL) { 61401e04c3fSmrg if(++subdir_entries > 2) 61501e04c3fSmrg break; 61601e04c3fSmrg } 61701e04c3fSmrg closedir(dir); 61801e04c3fSmrg 61901e04c3fSmrg /* If dir only contains '.' and '..' it must be empty */ 62001e04c3fSmrg if (subdir_entries <= 2) 62101e04c3fSmrg return false; 62201e04c3fSmrg 62301e04c3fSmrg return true; 62401e04c3fSmrg} 62501e04c3fSmrg 62601e04c3fSmrgstatic void 62701e04c3fSmrgevict_lru_item(struct disk_cache *cache) 62801e04c3fSmrg{ 62901e04c3fSmrg char *dir_path; 63001e04c3fSmrg 63101e04c3fSmrg /* With a reasonably-sized, full cache, (and with keys generated 63201e04c3fSmrg * from a cryptographic hash), we can choose two random hex digits 63301e04c3fSmrg * and reasonably expect the directory to exist with a file in it. 63401e04c3fSmrg * Provides pseudo-LRU eviction to reduce checking all cache files. 
63501e04c3fSmrg */ 63601e04c3fSmrg uint64_t rand64 = rand_xorshift128plus(cache->seed_xorshift128plus); 63701e04c3fSmrg if (asprintf(&dir_path, "%s/%02" PRIx64 , cache->path, rand64 & 0xff) < 0) 63801e04c3fSmrg return; 63901e04c3fSmrg 64001e04c3fSmrg size_t size = unlink_lru_file_from_directory(dir_path); 64101e04c3fSmrg 64201e04c3fSmrg free(dir_path); 64301e04c3fSmrg 64401e04c3fSmrg if (size) { 64501e04c3fSmrg p_atomic_add(cache->size, - (uint64_t)size); 64601e04c3fSmrg return; 64701e04c3fSmrg } 64801e04c3fSmrg 64901e04c3fSmrg /* In the case where the random choice of directory didn't find 65001e04c3fSmrg * something, we choose the least recently accessed from the 65101e04c3fSmrg * existing directories. 65201e04c3fSmrg * 65301e04c3fSmrg * Really, the only reason this code exists is to allow the unit 65401e04c3fSmrg * tests to work, (which use an artificially-small cache to be able 65501e04c3fSmrg * to force a single cached item to be evicted). 65601e04c3fSmrg */ 65701e04c3fSmrg dir_path = choose_lru_file_matching(cache->path, 65801e04c3fSmrg is_two_character_sub_directory); 65901e04c3fSmrg if (dir_path == NULL) 66001e04c3fSmrg return; 66101e04c3fSmrg 66201e04c3fSmrg size = unlink_lru_file_from_directory(dir_path); 66301e04c3fSmrg 66401e04c3fSmrg free(dir_path); 66501e04c3fSmrg 66601e04c3fSmrg if (size) 66701e04c3fSmrg p_atomic_add(cache->size, - (uint64_t)size); 66801e04c3fSmrg} 66901e04c3fSmrg 67001e04c3fSmrgvoid 67101e04c3fSmrgdisk_cache_remove(struct disk_cache *cache, const cache_key key) 67201e04c3fSmrg{ 67301e04c3fSmrg struct stat sb; 67401e04c3fSmrg 67501e04c3fSmrg char *filename = get_cache_file(cache, key); 67601e04c3fSmrg if (filename == NULL) { 67701e04c3fSmrg return; 67801e04c3fSmrg } 67901e04c3fSmrg 68001e04c3fSmrg if (stat(filename, &sb) == -1) { 68101e04c3fSmrg free(filename); 68201e04c3fSmrg return; 68301e04c3fSmrg } 68401e04c3fSmrg 68501e04c3fSmrg unlink(filename); 68601e04c3fSmrg free(filename); 68701e04c3fSmrg 68801e04c3fSmrg if (sb.st_blocks) 
68901e04c3fSmrg p_atomic_add(cache->size, - (uint64_t)sb.st_blocks * 512); 69001e04c3fSmrg} 69101e04c3fSmrg 69201e04c3fSmrgstatic ssize_t 69301e04c3fSmrgread_all(int fd, void *buf, size_t count) 69401e04c3fSmrg{ 69501e04c3fSmrg char *in = buf; 69601e04c3fSmrg ssize_t read_ret; 69701e04c3fSmrg size_t done; 69801e04c3fSmrg 69901e04c3fSmrg for (done = 0; done < count; done += read_ret) { 70001e04c3fSmrg read_ret = read(fd, in + done, count - done); 70101e04c3fSmrg if (read_ret == -1 || read_ret == 0) 70201e04c3fSmrg return -1; 70301e04c3fSmrg } 70401e04c3fSmrg return done; 70501e04c3fSmrg} 70601e04c3fSmrg 70701e04c3fSmrgstatic ssize_t 70801e04c3fSmrgwrite_all(int fd, const void *buf, size_t count) 70901e04c3fSmrg{ 71001e04c3fSmrg const char *out = buf; 71101e04c3fSmrg ssize_t written; 71201e04c3fSmrg size_t done; 71301e04c3fSmrg 71401e04c3fSmrg for (done = 0; done < count; done += written) { 71501e04c3fSmrg written = write(fd, out + done, count - done); 71601e04c3fSmrg if (written == -1) 71701e04c3fSmrg return -1; 71801e04c3fSmrg } 71901e04c3fSmrg return done; 72001e04c3fSmrg} 72101e04c3fSmrg 72201e04c3fSmrg/* From the zlib docs: 72301e04c3fSmrg * "If the memory is available, buffers sizes on the order of 128K or 256K 72401e04c3fSmrg * bytes should be used." 72501e04c3fSmrg */ 72601e04c3fSmrg#define BUFSIZE 256 * 1024 72701e04c3fSmrg 72801e04c3fSmrg/** 72901e04c3fSmrg * Compresses cache entry in memory and writes it to disk. Returns the size 73001e04c3fSmrg * of the data written to disk. 
73101e04c3fSmrg */ 73201e04c3fSmrgstatic size_t 73301e04c3fSmrgdeflate_and_write_to_disk(const void *in_data, size_t in_data_size, int dest, 73401e04c3fSmrg const char *filename) 73501e04c3fSmrg{ 73601e04c3fSmrg unsigned char out[BUFSIZE]; 73701e04c3fSmrg 73801e04c3fSmrg /* allocate deflate state */ 73901e04c3fSmrg z_stream strm; 74001e04c3fSmrg strm.zalloc = Z_NULL; 74101e04c3fSmrg strm.zfree = Z_NULL; 74201e04c3fSmrg strm.opaque = Z_NULL; 74301e04c3fSmrg strm.next_in = (uint8_t *) in_data; 74401e04c3fSmrg strm.avail_in = in_data_size; 74501e04c3fSmrg 74601e04c3fSmrg int ret = deflateInit(&strm, Z_BEST_COMPRESSION); 74701e04c3fSmrg if (ret != Z_OK) 74801e04c3fSmrg return 0; 74901e04c3fSmrg 75001e04c3fSmrg /* compress until end of in_data */ 75101e04c3fSmrg size_t compressed_size = 0; 75201e04c3fSmrg int flush; 75301e04c3fSmrg do { 75401e04c3fSmrg int remaining = in_data_size - BUFSIZE; 75501e04c3fSmrg flush = remaining > 0 ? Z_NO_FLUSH : Z_FINISH; 75601e04c3fSmrg in_data_size -= BUFSIZE; 75701e04c3fSmrg 75801e04c3fSmrg /* Run deflate() on input until the output buffer is not full (which 75901e04c3fSmrg * means there is no more data to deflate). 
76001e04c3fSmrg */ 76101e04c3fSmrg do { 76201e04c3fSmrg strm.avail_out = BUFSIZE; 76301e04c3fSmrg strm.next_out = out; 76401e04c3fSmrg 76501e04c3fSmrg ret = deflate(&strm, flush); /* no bad return value */ 76601e04c3fSmrg assert(ret != Z_STREAM_ERROR); /* state not clobbered */ 76701e04c3fSmrg 76801e04c3fSmrg size_t have = BUFSIZE - strm.avail_out; 76901e04c3fSmrg compressed_size += have; 77001e04c3fSmrg 77101e04c3fSmrg ssize_t written = write_all(dest, out, have); 77201e04c3fSmrg if (written == -1) { 77301e04c3fSmrg (void)deflateEnd(&strm); 77401e04c3fSmrg return 0; 77501e04c3fSmrg } 77601e04c3fSmrg } while (strm.avail_out == 0); 77701e04c3fSmrg 77801e04c3fSmrg /* all input should be used */ 77901e04c3fSmrg assert(strm.avail_in == 0); 78001e04c3fSmrg 78101e04c3fSmrg } while (flush != Z_FINISH); 78201e04c3fSmrg 78301e04c3fSmrg /* stream should be complete */ 78401e04c3fSmrg assert(ret == Z_STREAM_END); 78501e04c3fSmrg 78601e04c3fSmrg /* clean up and return */ 78701e04c3fSmrg (void)deflateEnd(&strm); 78801e04c3fSmrg return compressed_size; 78901e04c3fSmrg} 79001e04c3fSmrg 79101e04c3fSmrgstatic struct disk_cache_put_job * 79201e04c3fSmrgcreate_put_job(struct disk_cache *cache, const cache_key key, 79301e04c3fSmrg const void *data, size_t size, 79401e04c3fSmrg struct cache_item_metadata *cache_item_metadata) 79501e04c3fSmrg{ 79601e04c3fSmrg struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) 79701e04c3fSmrg malloc(sizeof(struct disk_cache_put_job) + size); 79801e04c3fSmrg 79901e04c3fSmrg if (dc_job) { 80001e04c3fSmrg dc_job->cache = cache; 80101e04c3fSmrg memcpy(dc_job->key, key, sizeof(cache_key)); 80201e04c3fSmrg dc_job->data = dc_job + 1; 80301e04c3fSmrg memcpy(dc_job->data, data, size); 80401e04c3fSmrg dc_job->size = size; 80501e04c3fSmrg 80601e04c3fSmrg /* Copy the cache item metadata */ 80701e04c3fSmrg if (cache_item_metadata) { 80801e04c3fSmrg dc_job->cache_item_metadata.type = cache_item_metadata->type; 80901e04c3fSmrg if 
(cache_item_metadata->type == CACHE_ITEM_TYPE_GLSL) {
            dc_job->cache_item_metadata.num_keys =
               cache_item_metadata->num_keys;
            /* Deep-copy the key list so the queued job owns its metadata
             * independently of the caller's buffer lifetime.
             */
            dc_job->cache_item_metadata.keys = (cache_key *)
               malloc(cache_item_metadata->num_keys * sizeof(cache_key));

            if (!dc_job->cache_item_metadata.keys)
               goto fail;

            memcpy(dc_job->cache_item_metadata.keys,
                   cache_item_metadata->keys,
                   sizeof(cache_key) * cache_item_metadata->num_keys);
         }
      } else {
         dc_job->cache_item_metadata.type = CACHE_ITEM_TYPE_UNKNOWN;
         dc_job->cache_item_metadata.keys = NULL;
      }
   }

   return dc_job;

fail:
   free(dc_job);

   return NULL;
}

/* util_queue destructor for a put job: releases the deep-copied metadata
 * key list (free(NULL) is a no-op) and then the job allocation itself.
 */
static void
destroy_put_job(void *job, int thread_index)
{
   if (job) {
      struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;
      free(dc_job->cache_item_metadata.keys);

      free(job);
   }
}

/* Fixed-size footer stored in every cache file just before the deflated
 * payload: a CRC of the uncompressed data (used as a corruption check on
 * read) and the uncompressed size (used to size the inflate buffer).
 */
struct cache_entry_file_data {
   uint32_t crc32;
   uint32_t uncompressed_size;
};

/* Queue worker that writes one cache entry to disk.
 *
 * File layout produced here (and parsed back by disk_cache_get()):
 *   driver_keys_blob | metadata type [| num_keys | keys] |
 *   struct cache_entry_file_data | deflated payload
 *
 * The entry is first written to "<filename>.tmp" under an exclusive
 * flock and then rename()d into place, so readers never observe a
 * partially written entry.
 */
static void
cache_put(void *job, int thread_index)
{
   assert(job);

   int fd = -1, fd_final = -1, err, ret;
   unsigned i = 0;
   char *filename = NULL, *filename_tmp = NULL;
   struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;

   filename = get_cache_file(dc_job->cache, dc_job->key);
   if (filename == NULL)
      goto done;

   /* If the cache is too large, evict something else first.  The number
    * of evictions is capped (8) so one put cannot stall indefinitely.
    */
   while (*dc_job->cache->size + dc_job->size > dc_job->cache->max_size &&
          i < 8) {
      evict_lru_item(dc_job->cache);
      i++;
   }

   /* Write to a temporary file to allow for an atomic rename to the
    * final destination filename, (to prevent any readers from seeing
    * a partially written file).
    */
   if (asprintf(&filename_tmp, "%s.tmp", filename) == -1)
      goto done;

   fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);

   /* Make the two-character subdirectory within the cache as needed. */
   if (fd == -1) {
      if (errno != ENOENT)
         goto done;

      make_cache_file_directory(dc_job->cache, dc_job->key);

      fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);
      if (fd == -1)
         goto done;
   }

   /* With the temporary file open, we take an exclusive flock on
    * it. If the flock fails, then another process still has the file
    * open with the flock held. So just let that file be responsible
    * for writing the file.
    */
   err = flock(fd, LOCK_EX | LOCK_NB);
   if (err == -1)
      goto done;

   /* Now that we have the lock on the open temporary file, we can
    * check to see if the destination file already exists. If so,
    * another process won the race between when we saw that the file
    * didn't exist and now. In this case, we don't do anything more,
    * (to ensure the size accounting of the cache doesn't get off).
    */
   fd_final = open(filename, O_RDONLY | O_CLOEXEC);
   if (fd_final != -1) {
      unlink(filename_tmp);
      goto done;
   }

   /* OK, we're now on the hook to write out a file that we know is
    * not in the cache, and is also not being written out to the cache
    * by some other process.
    */

   /* Write the driver_keys_blob, this can be used find information about the
    * mesa version that produced the entry or deal with hash collisions,
    * should that ever become a real problem.
    */
   ret = write_all(fd, dc_job->cache->driver_keys_blob,
                   dc_job->cache->driver_keys_blob_size);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   /* Write the cache item metadata. This data can be used to deal with
    * hash collisions, as well as providing useful information to 3rd party
    * tools reading the cache files.
    */
   ret = write_all(fd, &dc_job->cache_item_metadata.type,
                   sizeof(uint32_t));
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   if (dc_job->cache_item_metadata.type == CACHE_ITEM_TYPE_GLSL) {
      ret = write_all(fd, &dc_job->cache_item_metadata.num_keys,
                      sizeof(uint32_t));
      if (ret == -1) {
         unlink(filename_tmp);
         goto done;
      }

      /* keys[] is a contiguous array, so one write covers all entries. */
      ret = write_all(fd, dc_job->cache_item_metadata.keys[0],
                      dc_job->cache_item_metadata.num_keys *
                      sizeof(cache_key));
      if (ret == -1) {
         unlink(filename_tmp);
         goto done;
      }
   }

   /* Create CRC of the data. We will read this when restoring the cache and
    * use it to check for corruption.
    */
   struct cache_entry_file_data cf_data;
   cf_data.crc32 = util_hash_crc32(dc_job->data, dc_job->size);
   cf_data.uncompressed_size = dc_job->size;

   size_t cf_data_size = sizeof(cf_data);
   ret = write_all(fd, &cf_data, cf_data_size);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   /* Now, finally, write out the contents to the temporary file, then
    * rename them atomically to the destination filename, and also
    * perform an atomic increment of the total cache size.
    */
   size_t file_size = deflate_and_write_to_disk(dc_job->data, dc_job->size,
                                                fd, filename_tmp);
   if (file_size == 0) {
      unlink(filename_tmp);
      goto done;
   }
   ret = rename(filename_tmp, filename);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   struct stat sb;
   if (stat(filename, &sb) == -1) {
      /* Something went wrong remove the file */
      unlink(filename);
      goto done;
   }

   /* st_blocks is in 512-byte units (hence the * 512): account the
    * on-disk footprint of the compressed file, not the logical size.
    */
   p_atomic_add(dc_job->cache->size, sb.st_blocks * 512);

 done:
   if (fd_final != -1)
      close(fd_final);
   /* This close finally releases the flock, (now that the final file
    * has been renamed into place and the size has been added).
    */
   if (fd != -1)
      close(fd);
   free(filename_tmp);
   free(filename);
}

/* Store (key, data) in the cache.  If a blob_put_cb callback is installed
 * the data is handed to it synchronously; otherwise an asynchronous
 * cache_put() job is queued (a no-op if cache path setup failed).
 */
void
disk_cache_put(struct disk_cache *cache, const cache_key key,
               const void *data, size_t size,
               struct cache_item_metadata *cache_item_metadata)
{
   if (cache->blob_put_cb) {
      cache->blob_put_cb(key, CACHE_KEY_SIZE, data, size);
      return;
   }

   if (cache->path_init_failed)
      return;

   struct disk_cache_put_job *dc_job =
      create_put_job(cache, key, data, size, cache_item_metadata);

   if (dc_job) {
      util_queue_fence_init(&dc_job->fence);
      util_queue_add_job(&cache->cache_queue, dc_job, &dc_job->fence,
                         cache_put, destroy_put_job);
   }
}

/**
 * Decompresses cache entry, returns true if successful.
 */
static bool
inflate_cache_data(uint8_t *in_data, size_t in_data_size,
                   uint8_t *out_data, size_t out_data_size)
{
   z_stream strm;

   /* allocate inflate state */
   strm.zalloc = Z_NULL;
   strm.zfree = Z_NULL;
   strm.opaque = Z_NULL;
   strm.next_in = in_data;
   strm.avail_in = in_data_size;
   strm.next_out = out_data;
   strm.avail_out = out_data_size;

   int ret = inflateInit(&strm);
   if (ret != Z_OK)
      return false;

   ret = inflate(&strm, Z_NO_FLUSH);
   assert(ret != Z_STREAM_ERROR);  /* state not clobbered */

   /* Unless there was an error we should have decompressed everything in one
    * go as we know the uncompressed file size.
    */
   if (ret != Z_STREAM_END) {
      (void)inflateEnd(&strm);
      return false;
   }
   assert(strm.avail_out == 0);

   /* clean up and return */
   (void)inflateEnd(&strm);
   return true;
}

/* Look up a cache entry.  On a hit, returns a malloc()ed buffer holding
 * the uncompressed entry contents (ownership passes to the caller) and,
 * if size is non-NULL, stores the payload size there; returns NULL (with
 * *size == 0) on a miss or any error.
 */
void *
disk_cache_get(struct disk_cache *cache, const cache_key key, size_t *size)
{
   int fd = -1, ret;
   struct stat sb;
   char *filename = NULL;
   uint8_t *data = NULL;
   uint8_t *uncompressed_data = NULL;
   uint8_t *file_header = NULL;

   if (size)
      *size = 0;

   if (cache->blob_get_cb) {
      /* This is what Android EGL defines as the maxValueSize in egl_cache_t
       * class implementation.
       */
      const signed long max_blob_size = 64 * 1024;
      void *blob = malloc(max_blob_size);
      if (!blob)
         return NULL;

      signed long bytes =
         cache->blob_get_cb(key, CACHE_KEY_SIZE, blob, max_blob_size);

      if (!bytes) {
         free(blob);
         return NULL;
      }

      if (size)
         *size = bytes;
      return blob;
   }

   filename = get_cache_file(cache, key);
   if (filename == NULL)
      goto fail;

   fd = open(filename, O_RDONLY | O_CLOEXEC);
   if (fd == -1)
      goto fail;

   if (fstat(fd, &sb) == -1)
      goto fail;

   /* Worst-case buffer for the compressed payload; the file also contains
    * headers, so the payload is strictly smaller than st_size.
    */
   data = malloc(sb.st_size);
   if (data == NULL)
      goto fail;

   size_t ck_size = cache->driver_keys_blob_size;
   file_header = malloc(ck_size);
   if (!file_header)
      goto fail;

   /* A valid entry can never be smaller than the driver keys header. */
   if (sb.st_size < ck_size)
      goto fail;

   ret = read_all(fd, file_header, ck_size);
   if (ret == -1)
      goto fail;

   /* Check for extremely unlikely hash collisions */
   if (memcmp(cache->driver_keys_blob, file_header, ck_size) != 0) {
      assert(!"Mesa cache keys mismatch!");
      goto fail;
   }

   /* Metadata section: a type tag, optionally followed by a key list. */
   size_t cache_item_md_size = sizeof(uint32_t);
   uint32_t md_type;
   ret = read_all(fd, &md_type, cache_item_md_size);
   if (ret == -1)
      goto fail;
114501e04c3fSmrg 114601e04c3fSmrg if (md_type == CACHE_ITEM_TYPE_GLSL) { 114701e04c3fSmrg uint32_t num_keys; 114801e04c3fSmrg cache_item_md_size += sizeof(uint32_t); 114901e04c3fSmrg ret = read_all(fd, &num_keys, sizeof(uint32_t)); 115001e04c3fSmrg if (ret == -1) 115101e04c3fSmrg goto fail; 115201e04c3fSmrg 115301e04c3fSmrg /* The cache item metadata is currently just used for distributing 115401e04c3fSmrg * precompiled shaders, they are not used by Mesa so just skip them for 115501e04c3fSmrg * now. 115601e04c3fSmrg * TODO: pass the metadata back to the caller and do some basic 115701e04c3fSmrg * validation. 115801e04c3fSmrg */ 115901e04c3fSmrg cache_item_md_size += num_keys * sizeof(cache_key); 116001e04c3fSmrg ret = lseek(fd, num_keys * sizeof(cache_key), SEEK_CUR); 116101e04c3fSmrg if (ret == -1) 116201e04c3fSmrg goto fail; 116301e04c3fSmrg } 116401e04c3fSmrg 116501e04c3fSmrg /* Load the CRC that was created when the file was written. */ 116601e04c3fSmrg struct cache_entry_file_data cf_data; 116701e04c3fSmrg size_t cf_data_size = sizeof(cf_data); 116801e04c3fSmrg ret = read_all(fd, &cf_data, cf_data_size); 116901e04c3fSmrg if (ret == -1) 117001e04c3fSmrg goto fail; 117101e04c3fSmrg 117201e04c3fSmrg /* Load the actual cache data. 
*/ 117301e04c3fSmrg size_t cache_data_size = 117401e04c3fSmrg sb.st_size - cf_data_size - ck_size - cache_item_md_size; 117501e04c3fSmrg ret = read_all(fd, data, cache_data_size); 117601e04c3fSmrg if (ret == -1) 117701e04c3fSmrg goto fail; 117801e04c3fSmrg 117901e04c3fSmrg /* Uncompress the cache data */ 118001e04c3fSmrg uncompressed_data = malloc(cf_data.uncompressed_size); 118101e04c3fSmrg if (!inflate_cache_data(data, cache_data_size, uncompressed_data, 118201e04c3fSmrg cf_data.uncompressed_size)) 118301e04c3fSmrg goto fail; 118401e04c3fSmrg 118501e04c3fSmrg /* Check the data for corruption */ 118601e04c3fSmrg if (cf_data.crc32 != util_hash_crc32(uncompressed_data, 118701e04c3fSmrg cf_data.uncompressed_size)) 118801e04c3fSmrg goto fail; 118901e04c3fSmrg 119001e04c3fSmrg free(data); 119101e04c3fSmrg free(filename); 119201e04c3fSmrg free(file_header); 119301e04c3fSmrg close(fd); 119401e04c3fSmrg 119501e04c3fSmrg if (size) 119601e04c3fSmrg *size = cf_data.uncompressed_size; 119701e04c3fSmrg 119801e04c3fSmrg return uncompressed_data; 119901e04c3fSmrg 120001e04c3fSmrg fail: 120101e04c3fSmrg if (data) 120201e04c3fSmrg free(data); 120301e04c3fSmrg if (uncompressed_data) 120401e04c3fSmrg free(uncompressed_data); 120501e04c3fSmrg if (filename) 120601e04c3fSmrg free(filename); 120701e04c3fSmrg if (file_header) 120801e04c3fSmrg free(file_header); 120901e04c3fSmrg if (fd != -1) 121001e04c3fSmrg close(fd); 121101e04c3fSmrg 121201e04c3fSmrg return NULL; 121301e04c3fSmrg} 121401e04c3fSmrg 121501e04c3fSmrgvoid 121601e04c3fSmrgdisk_cache_put_key(struct disk_cache *cache, const cache_key key) 121701e04c3fSmrg{ 121801e04c3fSmrg const uint32_t *key_chunk = (const uint32_t *) key; 121901e04c3fSmrg int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK; 122001e04c3fSmrg unsigned char *entry; 122101e04c3fSmrg 122201e04c3fSmrg if (cache->blob_put_cb) { 122301e04c3fSmrg cache->blob_put_cb(key, CACHE_KEY_SIZE, key_chunk, sizeof(uint32_t)); 122401e04c3fSmrg return; 122501e04c3fSmrg } 
122601e04c3fSmrg 122701e04c3fSmrg if (cache->path_init_failed) 122801e04c3fSmrg return; 122901e04c3fSmrg 123001e04c3fSmrg entry = &cache->stored_keys[i * CACHE_KEY_SIZE]; 123101e04c3fSmrg 123201e04c3fSmrg memcpy(entry, key, CACHE_KEY_SIZE); 123301e04c3fSmrg} 123401e04c3fSmrg 123501e04c3fSmrg/* This function lets us test whether a given key was previously 123601e04c3fSmrg * stored in the cache with disk_cache_put_key(). The implement is 123701e04c3fSmrg * efficient by not using syscalls or hitting the disk. It's not 123801e04c3fSmrg * race-free, but the races are benign. If we race with someone else 123901e04c3fSmrg * calling disk_cache_put_key, then that's just an extra cache miss and an 124001e04c3fSmrg * extra recompile. 124101e04c3fSmrg */ 124201e04c3fSmrgbool 124301e04c3fSmrgdisk_cache_has_key(struct disk_cache *cache, const cache_key key) 124401e04c3fSmrg{ 124501e04c3fSmrg const uint32_t *key_chunk = (const uint32_t *) key; 124601e04c3fSmrg int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK; 124701e04c3fSmrg unsigned char *entry; 124801e04c3fSmrg 124901e04c3fSmrg if (cache->blob_get_cb) { 125001e04c3fSmrg uint32_t blob; 125101e04c3fSmrg return cache->blob_get_cb(key, CACHE_KEY_SIZE, &blob, sizeof(uint32_t)); 125201e04c3fSmrg } 125301e04c3fSmrg 125401e04c3fSmrg if (cache->path_init_failed) 125501e04c3fSmrg return false; 125601e04c3fSmrg 125701e04c3fSmrg entry = &cache->stored_keys[i * CACHE_KEY_SIZE]; 125801e04c3fSmrg 125901e04c3fSmrg return memcmp(entry, key, CACHE_KEY_SIZE) == 0; 126001e04c3fSmrg} 126101e04c3fSmrg 126201e04c3fSmrgvoid 126301e04c3fSmrgdisk_cache_compute_key(struct disk_cache *cache, const void *data, size_t size, 126401e04c3fSmrg cache_key key) 126501e04c3fSmrg{ 126601e04c3fSmrg struct mesa_sha1 ctx; 126701e04c3fSmrg 126801e04c3fSmrg _mesa_sha1_init(&ctx); 126901e04c3fSmrg _mesa_sha1_update(&ctx, cache->driver_keys_blob, 127001e04c3fSmrg cache->driver_keys_blob_size); 127101e04c3fSmrg _mesa_sha1_update(&ctx, data, size); 127201e04c3fSmrg 
_mesa_sha1_final(&ctx, key); 127301e04c3fSmrg} 127401e04c3fSmrg 127501e04c3fSmrgvoid 127601e04c3fSmrgdisk_cache_set_callbacks(struct disk_cache *cache, disk_cache_put_cb put, 127701e04c3fSmrg disk_cache_get_cb get) 127801e04c3fSmrg{ 127901e04c3fSmrg cache->blob_put_cb = put; 128001e04c3fSmrg cache->blob_get_cb = get; 128101e04c3fSmrg} 128201e04c3fSmrg 128301e04c3fSmrg#endif /* ENABLE_SHADER_CACHE */ 1284