disk_cache.c revision d8407755
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifdef ENABLE_SHADER_CACHE

#include <ctype.h>
#include <ftw.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/file.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <pwd.h>
#include <errno.h>
#include <dirent.h>
#include "zlib.h"

#include "util/crc32.h"
#include "util/debug.h"
#include "util/rand_xor.h"
#include "util/u_atomic.h"
#include "util/u_queue.h"
#include "util/mesa-sha1.h"
#include "util/ralloc.h"
#include "main/compiler.h"
#include "main/errors.h"

#include "disk_cache.h"

/* Number of bits to mask off from a cache key to get an index. */
#define CACHE_INDEX_KEY_BITS 16

/* Mask for computing an index from a key. */
#define CACHE_INDEX_KEY_MASK ((1 << CACHE_INDEX_KEY_BITS) - 1)

/* The number of keys that can be stored in the index. */
#define CACHE_INDEX_MAX_KEYS (1 << CACHE_INDEX_KEY_BITS)
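
/* Illustration only (not part of this revision): the index slot for a key is
 * just its first 32 bits, taken little-endian and masked down to
 * CACHE_INDEX_KEY_BITS, mirroring disk_cache_put_key()/disk_cache_has_key()
 * near the end of this file.
 */
#if 0
static unsigned
example_index_slot(const cache_key key)
{
   const uint32_t *key_chunk = (const uint32_t *) key;
   return CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
}
#endif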

/* The cache version should be bumped whenever a change is made to the
 * structure of cache entries or the index. This will give any 3rd party
 * applications reading the cache entries a chance to adjust to the changes.
 *
 * - The cache version is checked internally when reading a cache entry. If we
 *   ever have a mismatch we are in big trouble as this means we had a cache
 *   collision. In case of such an event please check the skies for giant
 *   asteroids and that the entire Mesa team hasn't been eaten by wolves.
 *
 * - There is no strict requirement that cache versions be backwards
 *   compatible but effort should be taken to limit disruption where possible.
 */
#define CACHE_VERSION 1

struct disk_cache {
   /* The path to the cache directory. */
   char *path;
   bool path_init_failed;

   /* Thread queue for compressing and writing cache entries to disk */
   struct util_queue cache_queue;

   /* Seed for rand, which is used to pick a random directory */
   uint64_t seed_xorshift128plus[2];

   /* A pointer to the mmapped index file within the cache directory. */
   uint8_t *index_mmap;
   size_t index_mmap_size;

   /* Pointer to total size of all objects in cache (within index_mmap) */
   uint64_t *size;

   /* Pointer to stored keys, (within index_mmap). */
   uint8_t *stored_keys;

   /* Maximum size of all cached objects (in bytes). */
   uint64_t max_size;

   /* Driver cache keys. */
   uint8_t *driver_keys_blob;
   size_t driver_keys_blob_size;

   disk_cache_put_cb blob_put_cb;
   disk_cache_get_cb blob_get_cb;
};

struct disk_cache_put_job {
   struct util_queue_fence fence;

   struct disk_cache *cache;

   cache_key key;

   /* Copy of cache data to be compressed and written. */
   void *data;

   /* Size of data to be compressed and written. */
   size_t size;

   struct cache_item_metadata cache_item_metadata;
};

#ifdef __HAVE_ATOMIC64_OPS
#define cache_size_adjust(size, val) p_atomic_add(size, val)
#else
/* Without 64-bit atomics this fallback is a plain, unlocked add: concurrent
 * updates from multiple processes may race and misreport the cache size.
 */
#define cache_size_adjust(size, val) (size) += (val)
#endif

/* Create a directory named 'path' if it does not already exist.
 *
 * Returns: 0 if path already exists as a directory or if created.
 *         -1 in all other cases.
 */
static int
mkdir_if_needed(const char *path)
{
   struct stat sb;

   /* If the path exists already, then our work is done if it's a
    * directory, but it's an error if it is not.
    */
   if (stat(path, &sb) == 0) {
      if (S_ISDIR(sb.st_mode)) {
         return 0;
      } else {
         fprintf(stderr, "Cannot use %s for shader cache (not a directory)"
                 "---disabling.\n", path);
         return -1;
      }
   }

   int ret = mkdir(path, 0755);
   if (ret == 0 || (ret == -1 && errno == EEXIST))
      return 0;

   fprintf(stderr, "Failed to create %s for shader cache (%s)---disabling.\n",
           path, strerror(errno));

   return -1;
}

/* Concatenate an existing path and a new name to form a new path. If the new
 * path does not exist as a directory, create it then return the resulting
 * name of the new path (ralloc'ed off of 'ctx').
 *
 * Returns NULL on any error, such as:
 *
 *      <path> does not exist or is not a directory
 *      <path>/<name> exists but is not a directory
 *      <path>/<name> cannot be created as a directory
 */
static char *
concatenate_and_mkdir(void *ctx, const char *path, const char *name)
{
   char *new_path;
   struct stat sb;

   if (stat(path, &sb) != 0 || !S_ISDIR(sb.st_mode))
      return NULL;

   new_path = ralloc_asprintf(ctx, "%s/%s", path, name);

   if (mkdir_if_needed(new_path) == 0)
      return new_path;
   else
      return NULL;
}
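
/* Usage sketch (illustrative, not from this revision): building the default
 * cache directory under $XDG_CACHE_HOME the way disk_cache_create() does
 * below; 'local' stands in for a transient ralloc context.
 */
#if 0
void *local = ralloc_context(NULL);
const char *xdg = getenv("XDG_CACHE_HOME");
char *path = xdg ? concatenate_and_mkdir(local, xdg, CACHE_DIR_NAME) : NULL;
/* path is NULL if $XDG_CACHE_HOME is unset, not a directory, or the
 * subdirectory could not be created.
 */
#endif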

#define DRV_KEY_CPY(_dst, _src, _src_size) \
do {                                       \
   memcpy(_dst, _src, _src_size);          \
   _dst += _src_size;                      \
} while (0)

struct disk_cache *
disk_cache_create(const char *gpu_name, const char *driver_id,
                  uint64_t driver_flags)
{
   void *local;
   struct disk_cache *cache = NULL;
   char *path, *max_size_str;
   uint64_t max_size;
   int fd = -1;
   struct stat sb;
   size_t size;

   uint8_t cache_version = CACHE_VERSION;
   size_t cv_size = sizeof(cache_version);

   /* If running as a user other than the real user, disable the cache. */
   if (geteuid() != getuid())
      return NULL;

   /* A ralloc context for transient data during this invocation. */
   local = ralloc_context(NULL);
   if (local == NULL)
      goto fail;

   /* At user request, disable shader cache entirely. */
   if (env_var_as_boolean("MESA_GLSL_CACHE_DISABLE", false))
      goto fail;

   cache = rzalloc(NULL, struct disk_cache);
   if (cache == NULL)
      goto fail;

   /* Assume failure. */
   cache->path_init_failed = true;

   /* Determine path for cache based on the first defined name as follows:
    *
    *   $MESA_GLSL_CACHE_DIR
    *   $XDG_CACHE_HOME/mesa_shader_cache
    *   <pwd.pw_dir>/.cache/mesa_shader_cache
    */
   path = getenv("MESA_GLSL_CACHE_DIR");
   if (path) {
      if (mkdir_if_needed(path) == -1)
         goto path_fail;

      path = concatenate_and_mkdir(local, path, CACHE_DIR_NAME);
      if (path == NULL)
         goto path_fail;
   }

   if (path == NULL) {
      char *xdg_cache_home = getenv("XDG_CACHE_HOME");

      if (xdg_cache_home) {
         if (mkdir_if_needed(xdg_cache_home) == -1)
            goto path_fail;

         path = concatenate_and_mkdir(local, xdg_cache_home, CACHE_DIR_NAME);
         if (path == NULL)
            goto path_fail;
      }
   }

   if (path == NULL) {
      char *buf;
      size_t buf_size;
      struct passwd pwd, *result;

      buf_size = sysconf(_SC_GETPW_R_SIZE_MAX);
      if (buf_size == -1)
         buf_size = 512;

      /* Loop until buf_size is large enough to hold the passwd entry. */
      while (1) {
         buf = ralloc_size(local, buf_size);

         getpwuid_r(getuid(), &pwd, buf, buf_size, &result);
         if (result)
            break;

         if (errno == ERANGE) {
            ralloc_free(buf);
            buf = NULL;
            buf_size *= 2;
         } else {
            goto path_fail;
         }
      }

      path = concatenate_and_mkdir(local, pwd.pw_dir, ".cache");
      if (path == NULL)
         goto path_fail;

      path = concatenate_and_mkdir(local, path, CACHE_DIR_NAME);
      if (path == NULL)
         goto path_fail;
   }

   cache->path = ralloc_strdup(cache, path);
   if (cache->path == NULL)
      goto path_fail;

   path = ralloc_asprintf(local, "%s/index", cache->path);
   if (path == NULL)
      goto path_fail;

   fd = open(path, O_RDWR | O_CREAT | O_CLOEXEC, 0644);
   if (fd == -1)
      goto path_fail;

   if (fstat(fd, &sb) == -1)
      goto path_fail;

   /* Force the index file to be the expected size. */
   size = sizeof(*cache->size) + CACHE_INDEX_MAX_KEYS * CACHE_KEY_SIZE;
   if (sb.st_size != size) {
      if (ftruncate(fd, size) == -1)
         goto path_fail;
   }

   /* We map this shared so that other processes see updates that we
    * make.
    *
    * Note: We do use atomic addition to ensure that multiple
    * processes don't scramble the cache size recorded in the
    * index. But we don't use any locking to prevent multiple
    * processes from updating the same entry simultaneously. The idea
    * is that if either result lands entirely in the index, then
    * that's equivalent to a well-ordered write followed by an
    * eviction and a write. On the other hand, if the simultaneous
    * writes result in a corrupt entry, that's not really any
    * different than both entries being evicted, (since within the
    * guarantees of the cryptographic hash, a corrupt entry is
    * unlikely to ever match a real cache key).
    */
   cache->index_mmap = mmap(NULL, size, PROT_READ | PROT_WRITE,
                            MAP_SHARED, fd, 0);
   if (cache->index_mmap == MAP_FAILED)
      goto path_fail;
   cache->index_mmap_size = size;

   cache->size = (uint64_t *) cache->index_mmap;
   cache->stored_keys = cache->index_mmap + sizeof(uint64_t);
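
   /* Reader's note (illustration, not in the original revision): after the
    * two assignments above, the mmapped index file is viewed as:
    *
    *    bytes 0..7 : uint64_t running total of all cached object sizes
    *    bytes 8..  : uint8_t  stored_keys[CACHE_INDEX_MAX_KEYS][CACHE_KEY_SIZE]
    */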

   max_size = 0;

   max_size_str = getenv("MESA_GLSL_CACHE_MAX_SIZE");
   if (max_size_str) {
      char *end;
      max_size = strtoul(max_size_str, &end, 10);
      if (end == max_size_str) {
         max_size = 0;
      } else {
         switch (*end) {
         case 'K':
         case 'k':
            max_size *= 1024;
            break;
         case 'M':
         case 'm':
            max_size *= 1024*1024;
            break;
         /* Note: a bare number (or an unrecognized suffix) is treated as
          * gigabytes.
          */
         case '\0':
         case 'G':
         case 'g':
         default:
            max_size *= 1024*1024*1024;
            break;
         }
      }
   }

   /* Default to 1GB for maximum cache size. */
   if (max_size == 0) {
      max_size = 1024*1024*1024;
   }

   cache->max_size = max_size;

   /* A single thread is enough: we don't really care about getting things
    * to disk quickly, just that it's not blocking other tasks.
    *
    * The queue will resize automatically when it's full, so adding new jobs
    * doesn't stall.
    */
   util_queue_init(&cache->cache_queue, "disk$", 32, 1,
                   UTIL_QUEUE_INIT_RESIZE_IF_FULL |
                   UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY |
                   UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY);

   cache->path_init_failed = false;

 path_fail:

   if (fd != -1)
      close(fd);

   cache->driver_keys_blob_size = cv_size;

   /* Create driver id keys */
   size_t id_size = strlen(driver_id) + 1;
   size_t gpu_name_size = strlen(gpu_name) + 1;
   cache->driver_keys_blob_size += id_size;
   cache->driver_keys_blob_size += gpu_name_size;

   /* We sometimes store entire structs that contain pointers in the cache;
    * include the pointer size in the key to avoid hard-to-debug issues.
    */
   uint8_t ptr_size = sizeof(void *);
   size_t ptr_size_size = sizeof(ptr_size);
   cache->driver_keys_blob_size += ptr_size_size;

   size_t driver_flags_size = sizeof(driver_flags);
   cache->driver_keys_blob_size += driver_flags_size;

   cache->driver_keys_blob =
      ralloc_size(cache, cache->driver_keys_blob_size);
   if (!cache->driver_keys_blob)
      goto fail;

   uint8_t *drv_key_blob = cache->driver_keys_blob;
   DRV_KEY_CPY(drv_key_blob, &cache_version, cv_size);
   DRV_KEY_CPY(drv_key_blob, driver_id, id_size);
   DRV_KEY_CPY(drv_key_blob, gpu_name, gpu_name_size);
   DRV_KEY_CPY(drv_key_blob, &ptr_size, ptr_size_size);
   DRV_KEY_CPY(drv_key_blob, &driver_flags, driver_flags_size);

   /* Seed our rand function */
   s_rand_xorshift128plus(cache->seed_xorshift128plus, true);

   ralloc_free(local);

   return cache;

 fail:
   if (cache)
      ralloc_free(cache);
   ralloc_free(local);

   return NULL;
}

void
disk_cache_destroy(struct disk_cache *cache)
{
   if (cache && !cache->path_init_failed) {
      util_queue_destroy(&cache->cache_queue);
      munmap(cache->index_mmap, cache->index_mmap_size);
   }

   ralloc_free(cache);
}
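
/* Illustration only (not part of this revision): the driver_keys_blob built
 * in disk_cache_create() above is a plain concatenation, so a hypothetical
 * reader is just its mirror image (given a struct disk_cache *cache):
 */
#if 0
const uint8_t *p = cache->driver_keys_blob;
uint8_t version;       memcpy(&version, p, 1);   p += 1;
const char *driver_id = (const char *) p;        p += strlen(driver_id) + 1;
const char *gpu_name = (const char *) p;         p += strlen(gpu_name) + 1;
uint8_t ptr_size;      memcpy(&ptr_size, p, 1);  p += 1;
uint64_t driver_flags; memcpy(&driver_flags, p, sizeof(driver_flags));
#endif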

/* Return a filename within the cache's directory corresponding to 'key'. The
 * returned filename is malloc'ed (via asprintf); the caller should free() it
 * when finished.
 *
 * Returns NULL if out of memory.
 */
static char *
get_cache_file(struct disk_cache *cache, const cache_key key)
{
   char buf[41];
   char *filename;

   if (cache->path_init_failed)
      return NULL;

   _mesa_sha1_format(buf, key);
   if (asprintf(&filename, "%s/%c%c/%s", cache->path, buf[0],
                buf[1], buf + 2) == -1)
      return NULL;

   return filename;
}

/* Create the directory that will be needed for the cache file for 'key'.
 *
 * Obviously, the implementation here must closely match
 * get_cache_file above.
 */
static void
make_cache_file_directory(struct disk_cache *cache, const cache_key key)
{
   char *dir;
   char buf[41];

   _mesa_sha1_format(buf, key);
   if (asprintf(&dir, "%s/%c%c", cache->path, buf[0], buf[1]) == -1)
      return;

   mkdir_if_needed(dir);
   free(dir);
}
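
/* Worked example (illustrative key, not from this revision): a key whose
 * SHA-1 hex form is "0123456789abcdef0123456789abcdef01234567" maps to
 *
 *    <cache->path>/01/23456789abcdef0123456789abcdef01234567
 *
 * The first two hex digits pick one of 256 subdirectories; the remaining
 * 38 digits name the file, which keeps any one directory from growing
 * unreasonably large.
 */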

/* Given a directory path and predicate function, find the entry with
 * the oldest access time in that directory for which the predicate
 * returns true.
 *
 * Returns: A malloc'ed string for the path to the chosen file, (or
 * NULL on any error). The caller should free the string when
 * finished.
 */
static char *
choose_lru_file_matching(const char *dir_path,
                         bool (*predicate)(const char *dir_path,
                                           const struct stat *,
                                           const char *, const size_t))
{
   DIR *dir;
   struct dirent *entry;
   char *filename;
   char *lru_name = NULL;
   time_t lru_atime = 0;

   dir = opendir(dir_path);
   if (dir == NULL)
      return NULL;

   while (1) {
      entry = readdir(dir);
      if (entry == NULL)
         break;

      struct stat sb;
      if (fstatat(dirfd(dir), entry->d_name, &sb, 0) == 0) {
         if (!lru_atime || (sb.st_atime < lru_atime)) {
            size_t len = strlen(entry->d_name);

            if (!predicate(dir_path, &sb, entry->d_name, len))
               continue;

            char *tmp = realloc(lru_name, len + 1);
            if (tmp) {
               lru_name = tmp;
               memcpy(lru_name, entry->d_name, len + 1);
               lru_atime = sb.st_atime;
            }
         }
      }
   }

   if (lru_name == NULL) {
      closedir(dir);
      return NULL;
   }

   if (asprintf(&filename, "%s/%s", dir_path, lru_name) < 0)
      filename = NULL;

   free(lru_name);
   closedir(dir);

   return filename;
}

/* Is entry a regular file whose name does not end in ".tmp"? */
static bool
is_regular_non_tmp_file(const char *path, const struct stat *sb,
                        const char *d_name, const size_t len)
{
   if (!S_ISREG(sb->st_mode))
      return false;

   if (len >= 4 && strcmp(&d_name[len-4], ".tmp") == 0)
      return false;

   return true;
}

/* Returns the size of the deleted file, (or 0 on any error). */
static size_t
unlink_lru_file_from_directory(const char *path)
{
   struct stat sb;
   char *filename;

   filename = choose_lru_file_matching(path, is_regular_non_tmp_file);
   if (filename == NULL)
      return 0;

   if (stat(filename, &sb) == -1) {
      free(filename);
      return 0;
   }

   unlink(filename);
   free(filename);

   return sb.st_blocks * 512;
}

/* Is entry a directory with a two-character name, (and not the
 * special name of "..")? We also return false if the dir is empty.
 */
static bool
is_two_character_sub_directory(const char *path, const struct stat *sb,
                               const char *d_name, const size_t len)
{
   if (!S_ISDIR(sb->st_mode))
      return false;

   if (len != 2)
      return false;

   if (strcmp(d_name, "..") == 0)
      return false;

   char *subdir;
   if (asprintf(&subdir, "%s/%s", path, d_name) == -1)
      return false;
   DIR *dir = opendir(subdir);
   free(subdir);

   if (dir == NULL)
      return false;

   unsigned subdir_entries = 0;
   struct dirent *d;
   while ((d = readdir(dir)) != NULL) {
      if (++subdir_entries > 2)
         break;
   }
   closedir(dir);

   /* If dir only contains '.' and '..' it must be empty. */
   if (subdir_entries <= 2)
      return false;

   return true;
}

static void
evict_lru_item(struct disk_cache *cache)
{
   char *dir_path;

   /* With a reasonably-sized, full cache, (and with keys generated
    * from a cryptographic hash), we can choose two random hex digits
    * and reasonably expect the directory to exist with a file in it.
    * This provides pseudo-LRU eviction without having to check the
    * access time of every cache file.
    */
   uint64_t rand64 = rand_xorshift128plus(cache->seed_xorshift128plus);
   if (asprintf(&dir_path, "%s/%02" PRIx64, cache->path, rand64 & 0xff) < 0)
      return;

   size_t size = unlink_lru_file_from_directory(dir_path);

   free(dir_path);

   if (size) {
      cache_size_adjust(cache->size, - (uint64_t)size);
      return;
   }

   /* In the case where the random choice of directory didn't find
    * something, we choose the least recently accessed from the
    * existing directories.
    *
    * Really, the only reason this code exists is to allow the unit
    * tests to work, (which use an artificially-small cache to be able
    * to force a single cached item to be evicted).
    */
   dir_path = choose_lru_file_matching(cache->path,
                                       is_two_character_sub_directory);
   if (dir_path == NULL)
      return;

   size = unlink_lru_file_from_directory(dir_path);

   free(dir_path);

   if (size)
      cache_size_adjust(cache->size, - (uint64_t)size);
}

void
disk_cache_remove(struct disk_cache *cache, const cache_key key)
{
   struct stat sb;

   char *filename = get_cache_file(cache, key);
   if (filename == NULL) {
      return;
   }

   if (stat(filename, &sb) == -1) {
      free(filename);
      return;
   }

   unlink(filename);
   free(filename);

   if (sb.st_blocks)
      cache_size_adjust(cache->size, - (uint64_t)sb.st_blocks * 512);
}

static ssize_t
read_all(int fd, void *buf, size_t count)
{
   char *in = buf;
   ssize_t read_ret;
   size_t done;

   for (done = 0; done < count; done += read_ret) {
      read_ret = read(fd, in + done, count - done);
      if (read_ret == -1 || read_ret == 0)
         return -1;
   }
   return done;
}

static ssize_t
write_all(int fd, const void *buf, size_t count)
{
   const char *out = buf;
   ssize_t written;
   size_t done;

   for (done = 0; done < count; done += written) {
      written = write(fd, out + done, count - done);
      if (written == -1)
         return -1;
   }
   return done;
}

/* From the zlib docs:
 * "If the memory is available, buffer sizes on the order of 128K or 256K
 * bytes should be used."
 */
#define BUFSIZE (256 * 1024)

/**
 * Compresses cache entry in memory and writes it to disk. Returns the size
 * of the data written to disk.
 */
static size_t
deflate_and_write_to_disk(const void *in_data, size_t in_data_size, int dest,
                          const char *filename)
{
   unsigned char *out;

   /* allocate deflate state */
   z_stream strm;
   strm.zalloc = Z_NULL;
   strm.zfree = Z_NULL;
   strm.opaque = Z_NULL;
   strm.next_in = (uint8_t *) in_data;
   strm.avail_in = in_data_size;

   int ret = deflateInit(&strm, Z_BEST_COMPRESSION);
   if (ret != Z_OK)
      return 0;

   /* compress until end of in_data */
   size_t compressed_size = 0;
   int flush;

   out = malloc(BUFSIZE * sizeof(unsigned char));
   if (out == NULL) {
      /* Don't leak the deflate state on allocation failure. */
      (void)deflateEnd(&strm);
      return 0;
   }

   do {
      int remaining = in_data_size - BUFSIZE;
      flush = remaining > 0 ? Z_NO_FLUSH : Z_FINISH;
      in_data_size -= BUFSIZE;

      /* Run deflate() on input until the output buffer is not full (which
       * means there is no more data to deflate).
       */
      do {
         strm.avail_out = BUFSIZE;
         strm.next_out = out;

         ret = deflate(&strm, flush);    /* no bad return value */
         assert(ret != Z_STREAM_ERROR);  /* state not clobbered */

         size_t have = BUFSIZE - strm.avail_out;
         compressed_size += have;

         ssize_t written = write_all(dest, out, have);
         if (written == -1) {
            (void)deflateEnd(&strm);
            free(out);
            return 0;
         }
      } while (strm.avail_out == 0);

      /* all input should be used */
      assert(strm.avail_in == 0);

   } while (flush != Z_FINISH);

   /* stream should be complete */
   assert(ret == Z_STREAM_END);

   /* clean up and return */
   (void)deflateEnd(&strm);
   free(out);
   return compressed_size;
}

static struct disk_cache_put_job *
create_put_job(struct disk_cache *cache, const cache_key key,
               const void *data, size_t size,
               struct cache_item_metadata *cache_item_metadata)
{
   struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *)
      malloc(sizeof(struct disk_cache_put_job) + size);

   if (dc_job) {
      dc_job->cache = cache;
      memcpy(dc_job->key, key, sizeof(cache_key));
      dc_job->data = dc_job + 1;
      memcpy(dc_job->data, data, size);
      dc_job->size = size;

      /* Copy the cache item metadata */
      if (cache_item_metadata) {
         dc_job->cache_item_metadata.type = cache_item_metadata->type;
         /* Initialize keys so destroy_put_job() can free() it
          * unconditionally, even for non-GLSL metadata types.
          */
         dc_job->cache_item_metadata.keys = NULL;
         if (cache_item_metadata->type == CACHE_ITEM_TYPE_GLSL) {
            dc_job->cache_item_metadata.num_keys =
               cache_item_metadata->num_keys;
            dc_job->cache_item_metadata.keys = (cache_key *)
               malloc(cache_item_metadata->num_keys * sizeof(cache_key));

            if (!dc_job->cache_item_metadata.keys)
               goto fail;

            memcpy(dc_job->cache_item_metadata.keys,
                   cache_item_metadata->keys,
                   sizeof(cache_key) * cache_item_metadata->num_keys);
         }
      } else {
         dc_job->cache_item_metadata.type = CACHE_ITEM_TYPE_UNKNOWN;
         dc_job->cache_item_metadata.keys = NULL;
      }
   }

   return dc_job;

fail:
   free(dc_job);

   return NULL;
}

static void
destroy_put_job(void *job, int thread_index)
{
   if (job) {
      struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;
      free(dc_job->cache_item_metadata.keys);

      free(job);
   }
}

struct cache_entry_file_data {
   uint32_t crc32;
   uint32_t uncompressed_size;
};
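
/* Illustration only (not part of this revision): the complete on-disk layout
 * of a cache entry, as written by cache_put() below and parsed again by
 * disk_cache_get():
 *
 *    uint8_t   driver_keys_blob[driver_keys_blob_size]
 *    uint32_t  metadata type
 *    uint32_t  num_keys                  (CACHE_ITEM_TYPE_GLSL only)
 *    cache_key keys[num_keys]            (CACHE_ITEM_TYPE_GLSL only)
 *    struct cache_entry_file_data        (crc32 + uncompressed_size)
 *    compressed data                     (zlib deflate stream)
 */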

static void
cache_put(void *job, int thread_index)
{
   assert(job);

   int fd = -1, fd_final = -1, err, ret;
   unsigned i = 0;
   char *filename = NULL, *filename_tmp = NULL;
   struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;

   filename = get_cache_file(dc_job->cache, dc_job->key);
   if (filename == NULL)
      goto done;

   /* If the cache is too large, evict something else first. */
   while (*dc_job->cache->size + dc_job->size > dc_job->cache->max_size &&
          i < 8) {
      evict_lru_item(dc_job->cache);
      i++;
   }

   /* Write to a temporary file to allow for an atomic rename to the
    * final destination filename, (to prevent any readers from seeing
    * a partially written file).
    */
   if (asprintf(&filename_tmp, "%s.tmp", filename) == -1)
      goto done;

   fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);

   /* Make the two-character subdirectory within the cache as needed. */
   if (fd == -1) {
      if (errno != ENOENT)
         goto done;

      make_cache_file_directory(dc_job->cache, dc_job->key);

      fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);
      if (fd == -1)
         goto done;
   }

   /* With the temporary file open, we take an exclusive flock on
    * it. If the flock fails, then another process still has the file
    * open with the flock held. So just let that file be responsible
    * for writing the file.
    */
   err = flock(fd, LOCK_EX | LOCK_NB);
   if (err == -1)
      goto done;

   /* Now that we have the lock on the open temporary file, we can
    * check to see if the destination file already exists. If so,
    * another process won the race between when we saw that the file
    * didn't exist and now. In this case, we don't do anything more,
    * (to ensure the size accounting of the cache doesn't get off).
    */
   fd_final = open(filename, O_RDONLY | O_CLOEXEC);
   if (fd_final != -1) {
      unlink(filename_tmp);
      goto done;
   }

   /* OK, we're now on the hook to write out a file that we know is
    * not in the cache, and is also not being written out to the cache
    * by some other process.
    */

   /* Write the driver_keys_blob. This can be used to find information about
    * the Mesa version that produced the entry or to deal with hash
    * collisions, should that ever become a real problem.
    */
   ret = write_all(fd, dc_job->cache->driver_keys_blob,
                   dc_job->cache->driver_keys_blob_size);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   /* Write the cache item metadata. This data can be used to deal with
    * hash collisions, as well as providing useful information to 3rd party
    * tools reading the cache files.
    */
   ret = write_all(fd, &dc_job->cache_item_metadata.type,
                   sizeof(uint32_t));
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   if (dc_job->cache_item_metadata.type == CACHE_ITEM_TYPE_GLSL) {
      ret = write_all(fd, &dc_job->cache_item_metadata.num_keys,
                      sizeof(uint32_t));
      if (ret == -1) {
         unlink(filename_tmp);
         goto done;
      }

      ret = write_all(fd, dc_job->cache_item_metadata.keys[0],
                      dc_job->cache_item_metadata.num_keys *
                      sizeof(cache_key));
      if (ret == -1) {
         unlink(filename_tmp);
         goto done;
      }
   }

   /* Create CRC of the data. We will read this when restoring the cache and
    * use it to check for corruption.
    */
   struct cache_entry_file_data cf_data;
   cf_data.crc32 = util_hash_crc32(dc_job->data, dc_job->size);
   cf_data.uncompressed_size = dc_job->size;

   size_t cf_data_size = sizeof(cf_data);
   ret = write_all(fd, &cf_data, cf_data_size);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   /* Now, finally, write out the contents to the temporary file, then
    * rename them atomically to the destination filename, and also
    * perform an atomic increment of the total cache size.
    */
   size_t file_size = deflate_and_write_to_disk(dc_job->data, dc_job->size,
                                                fd, filename_tmp);
   if (file_size == 0) {
      unlink(filename_tmp);
      goto done;
   }
   ret = rename(filename_tmp, filename);
   if (ret == -1) {
      unlink(filename_tmp);
      goto done;
   }

   struct stat sb;
   if (stat(filename, &sb) == -1) {
      /* Something went wrong; remove the file. */
      unlink(filename);
      goto done;
   }

   cache_size_adjust(dc_job->cache->size, sb.st_blocks * 512);

 done:
   if (fd_final != -1)
      close(fd_final);
   /* This close finally releases the flock, (now that the final file
    * has been renamed into place and the size has been added).
    */
   if (fd != -1)
      close(fd);
   free(filename_tmp);
   free(filename);
}
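
/* The write protocol used by cache_put() above, in outline (added summary,
 * not part of the original revision):
 *
 *    open("<name>.tmp")              create a private staging file
 *    flock(fd, LOCK_EX | LOCK_NB)    at most one writer per entry; if the
 *                                    lock is held, another process is
 *                                    already writing this entry, so bail
 *    open("<name>", O_RDONLY)        re-check for a racing winner
 *    write header + compressed data  never visible to readers
 *    rename("<name>.tmp", "<name>")  atomic publish
 */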

void
disk_cache_put(struct disk_cache *cache, const cache_key key,
               const void *data, size_t size,
               struct cache_item_metadata *cache_item_metadata)
{
   if (cache->blob_put_cb) {
      cache->blob_put_cb(key, CACHE_KEY_SIZE, data, size);
      return;
   }

   if (cache->path_init_failed)
      return;

   struct disk_cache_put_job *dc_job =
      create_put_job(cache, key, data, size, cache_item_metadata);

   if (dc_job) {
      util_queue_fence_init(&dc_job->fence);
      util_queue_add_job(&cache->cache_queue, dc_job, &dc_job->fence,
                         cache_put, destroy_put_job);
   }
}

/**
 * Decompresses cache entry, returns true if successful.
 */
static bool
inflate_cache_data(uint8_t *in_data, size_t in_data_size,
                   uint8_t *out_data, size_t out_data_size)
{
   z_stream strm;

   /* allocate inflate state */
   strm.zalloc = Z_NULL;
   strm.zfree = Z_NULL;
   strm.opaque = Z_NULL;
   strm.next_in = in_data;
   strm.avail_in = in_data_size;
   strm.next_out = out_data;
   strm.avail_out = out_data_size;

   int ret = inflateInit(&strm);
   if (ret != Z_OK)
      return false;

   ret = inflate(&strm, Z_NO_FLUSH);
   assert(ret != Z_STREAM_ERROR);  /* state not clobbered */

   /* Unless there was an error we should have decompressed everything in one
    * go as we know the uncompressed file size.
    */
   if (ret != Z_STREAM_END) {
      (void)inflateEnd(&strm);
      return false;
   }
   assert(strm.avail_out == 0);

   /* clean up and return */
   (void)inflateEnd(&strm);
   return true;
}

void *
disk_cache_get(struct disk_cache *cache, const cache_key key, size_t *size)
{
   int fd = -1, ret;
   struct stat sb;
   char *filename = NULL;
   uint8_t *data = NULL;
   uint8_t *uncompressed_data = NULL;
   uint8_t *file_header = NULL;

   if (size)
      *size = 0;

   if (cache->blob_get_cb) {
      /* This is what Android EGL defines as the maxValueSize in the
       * egl_cache_t class implementation.
       */
      const signed long max_blob_size = 64 * 1024;
      void *blob = malloc(max_blob_size);
      if (!blob)
         return NULL;

      signed long bytes =
         cache->blob_get_cb(key, CACHE_KEY_SIZE, blob, max_blob_size);

      if (!bytes) {
         free(blob);
         return NULL;
      }

      if (size)
         *size = bytes;
      return blob;
   }

   filename = get_cache_file(cache, key);
   if (filename == NULL)
      goto fail;

   fd = open(filename, O_RDONLY | O_CLOEXEC);
   if (fd == -1)
      goto fail;

   if (fstat(fd, &sb) == -1)
      goto fail;

   data = malloc(sb.st_size);
   if (data == NULL)
      goto fail;

   size_t ck_size = cache->driver_keys_blob_size;
   file_header = malloc(ck_size);
   if (!file_header)
      goto fail;

   if (sb.st_size < ck_size)
      goto fail;

   ret = read_all(fd, file_header, ck_size);
   if (ret == -1)
      goto fail;

   /* Check for extremely unlikely hash collisions */
   if (memcmp(cache->driver_keys_blob, file_header, ck_size) != 0) {
      assert(!"Mesa cache keys mismatch!");
      goto fail;
   }

   size_t cache_item_md_size = sizeof(uint32_t);
   uint32_t md_type;
   ret = read_all(fd, &md_type, cache_item_md_size);
   if (ret == -1)
      goto fail;

   if (md_type == CACHE_ITEM_TYPE_GLSL) {
      uint32_t num_keys;
      cache_item_md_size += sizeof(uint32_t);
      ret = read_all(fd, &num_keys, sizeof(uint32_t));
      if (ret == -1)
         goto fail;

      /* The cache item metadata is currently just used for distributing
       * precompiled shaders. It is not used by Mesa itself, so just skip
       * it for now.
       * TODO: pass the metadata back to the caller and do some basic
       * validation.
       */
      cache_item_md_size += num_keys * sizeof(cache_key);
      ret = lseek(fd, num_keys * sizeof(cache_key), SEEK_CUR);
      if (ret == -1)
         goto fail;
   }

   /* Load the CRC that was created when the file was written. */
   struct cache_entry_file_data cf_data;
   size_t cf_data_size = sizeof(cf_data);
   ret = read_all(fd, &cf_data, cf_data_size);
   if (ret == -1)
      goto fail;

   /* Load the actual cache data. */
   size_t cache_data_size =
      sb.st_size - cf_data_size - ck_size - cache_item_md_size;
   ret = read_all(fd, data, cache_data_size);
   if (ret == -1)
      goto fail;

   /* Uncompress the cache data */
   uncompressed_data = malloc(cf_data.uncompressed_size);
   if (uncompressed_data == NULL)
      goto fail;

   if (!inflate_cache_data(data, cache_data_size, uncompressed_data,
                           cf_data.uncompressed_size))
      goto fail;

   /* Check the data for corruption */
   if (cf_data.crc32 != util_hash_crc32(uncompressed_data,
                                        cf_data.uncompressed_size))
      goto fail;

   free(data);
   free(filename);
   free(file_header);
   close(fd);

   if (size)
      *size = cf_data.uncompressed_size;

   return uncompressed_data;

 fail:
   if (data)
      free(data);
   if (uncompressed_data)
      free(uncompressed_data);
   if (filename)
      free(filename);
   if (file_header)
      free(file_header);
   if (fd != -1)
      close(fd);

   return NULL;
}
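
/* Usage sketch (hypothetical caller, not from this revision): a typical
 * round trip through the public API, using disk_cache_compute_key(),
 * disk_cache_put_key() and disk_cache_has_key() defined just below.
 * 'source' is an assumed input and compile_shader() an assumed helper;
 * error handling omitted.
 */
#if 0
cache_key key;
disk_cache_compute_key(cache, source, strlen(source), key);

size_t size;
void *binary = disk_cache_get(cache, key, &size);
if (binary == NULL) {
   binary = compile_shader(source, &size);   /* hypothetical compiler */
   disk_cache_put(cache, key, binary, size, NULL);
   disk_cache_put_key(cache, key);
}
#endif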

void
disk_cache_put_key(struct disk_cache *cache, const cache_key key)
{
   const uint32_t *key_chunk = (const uint32_t *) key;
   int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
   unsigned char *entry;

   if (cache->blob_put_cb) {
      cache->blob_put_cb(key, CACHE_KEY_SIZE, key_chunk, sizeof(uint32_t));
      return;
   }

   if (cache->path_init_failed)
      return;

   entry = &cache->stored_keys[i * CACHE_KEY_SIZE];

   memcpy(entry, key, CACHE_KEY_SIZE);
}

/* This function lets us test whether a given key was previously
 * stored in the cache with disk_cache_put_key(). The implementation is
 * efficient by not using syscalls or hitting the disk. It's not
 * race-free, but the races are benign. If we race with someone else
 * calling disk_cache_put_key, then that's just an extra cache miss and an
 * extra recompile.
 */
bool
disk_cache_has_key(struct disk_cache *cache, const cache_key key)
{
   const uint32_t *key_chunk = (const uint32_t *) key;
   int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
   unsigned char *entry;

   if (cache->blob_get_cb) {
      uint32_t blob;
      return cache->blob_get_cb(key, CACHE_KEY_SIZE, &blob, sizeof(uint32_t));
   }

   if (cache->path_init_failed)
      return false;

   entry = &cache->stored_keys[i * CACHE_KEY_SIZE];

   return memcmp(entry, key, CACHE_KEY_SIZE) == 0;
}

void
disk_cache_compute_key(struct disk_cache *cache, const void *data, size_t size,
                       cache_key key)
{
   struct mesa_sha1 ctx;

   _mesa_sha1_init(&ctx);
   _mesa_sha1_update(&ctx, cache->driver_keys_blob,
                     cache->driver_keys_blob_size);
   _mesa_sha1_update(&ctx, data, size);
   _mesa_sha1_final(&ctx, key);
}

void
disk_cache_set_callbacks(struct disk_cache *cache, disk_cache_put_cb put,
                         disk_cache_get_cb get)
{
   cache->blob_put_cb = put;
   cache->blob_get_cb = get;
}

#endif /* ENABLE_SHADER_CACHE */