/*
 * Copyright © 2017 Timothy Arceri
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <stdio.h>
#include "st_debug.h"
#include "st_program.h"
#include "st_shader_cache.h"
#include "st_util.h"
#include "compiler/glsl/program.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_serialize.h"
#include "pipe/p_shader_tokens.h"
#include "program/ir_to_mesa.h"
#include "util/u_memory.h"

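/**
 * Compute the driver SHA-1 used to validate GL program binaries.  Hashing an
 * empty payload means the result depends only on the disk cache's
 * driver-specific keys (assumption: disk_cache_compute_key() folds those
 * keys into every hash).
 */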
void
st_get_program_binary_driver_sha1(struct gl_context *ctx, uint8_t *sha1)
{
   disk_cache_compute_key(ctx->Cache, NULL, 0, sha1);
}

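/* Serialise the stream output (transform feedback) state. */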
static void
write_stream_out_to_cache(struct blob *blob,
                          struct pipe_shader_state *tgsi)
{
   blob_write_bytes(blob, &tgsi->stream_output,
                    sizeof(tgsi->stream_output));
}

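/* Stash a copy of the serialised blob on the gl_program; this is the data
 * the shader cache stores and later hands back to the deserialise path.
 */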
static void
copy_blob_to_driver_cache_blob(struct blob *blob, struct gl_program *prog)
{
   prog->driver_cache_blob = ralloc_size(NULL, blob->size);
   memcpy(prog->driver_cache_blob, blob->data, blob->size);
   prog->driver_cache_blob_size = blob->size;
}

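/* Write the TGSI token count followed by the tokens, then snapshot the blob
 * into prog->driver_cache_blob.
 */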
static void
write_tgsi_to_cache(struct blob *blob, const struct tgsi_token *tokens,
                    struct gl_program *prog, unsigned num_tokens)
{
   blob_write_uint32(blob, num_tokens);
   blob_write_bytes(blob, tokens, num_tokens * sizeof(struct tgsi_token));
   copy_blob_to_driver_cache_blob(blob, prog);
}

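/* Serialise the NIR shader, then snapshot the blob into
 * prog->driver_cache_blob.
 */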
static void
write_nir_to_cache(struct blob *blob, struct gl_program *prog)
{
   nir_serialize(blob, prog->nir);
   copy_blob_to_driver_cache_blob(blob, prog);
}

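/**
 * Serialise the state tracker IR (TGSI or NIR) together with the per-stage
 * data needed to rebuild the st_*_program on a cache hit, e.g. the vertex
 * shader input/output mapping tables and the stream output state.
 */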
static void
st_serialise_ir_program(struct gl_context *ctx, struct gl_program *prog,
                        bool nir)
{
   if (prog->driver_cache_blob)
      return;

   struct blob blob;
   blob_init(&blob);

   switch (prog->info.stage) {
   case MESA_SHADER_VERTEX: {
      struct st_vertex_program *stvp = (struct st_vertex_program *) prog;

      blob_write_uint32(&blob, stvp->num_inputs);
      blob_write_bytes(&blob, stvp->index_to_input,
                       sizeof(stvp->index_to_input));
      blob_write_bytes(&blob, stvp->input_to_index,
                       sizeof(stvp->input_to_index));
      blob_write_bytes(&blob, stvp->result_to_output,
                       sizeof(stvp->result_to_output));

      write_stream_out_to_cache(&blob, &stvp->tgsi);

      if (nir)
         write_nir_to_cache(&blob, prog);
      else
         write_tgsi_to_cache(&blob, stvp->tgsi.tokens, prog,
                             stvp->num_tgsi_tokens);
      break;
   }
   case MESA_SHADER_TESS_CTRL:
   case MESA_SHADER_TESS_EVAL:
   case MESA_SHADER_GEOMETRY: {
      struct st_common_program *stcp = (struct st_common_program *) prog;

      write_stream_out_to_cache(&blob, &stcp->tgsi);

      if (nir)
         write_nir_to_cache(&blob, prog);
      else
         write_tgsi_to_cache(&blob, stcp->tgsi.tokens, prog,
                             stcp->num_tgsi_tokens);
      break;
   }
   case MESA_SHADER_FRAGMENT: {
      struct st_fragment_program *stfp = (struct st_fragment_program *) prog;

      if (nir)
         write_nir_to_cache(&blob, prog);
      else
         write_tgsi_to_cache(&blob, stfp->tgsi.tokens, prog,
                             stfp->num_tgsi_tokens);
      break;
   }
   case MESA_SHADER_COMPUTE: {
      struct st_compute_program *stcp = (struct st_compute_program *) prog;

      if (nir)
         write_nir_to_cache(&blob, prog);
      else
         write_tgsi_to_cache(&blob, stcp->tgsi.prog, prog,
                             stcp->num_tgsi_tokens);
      break;
   }
   default:
      unreachable("Unsupported stage");
   }

   blob_finish(&blob);
}

/**
 * Store the IR (TGSI or NIR) and any other required state in the on-disk
 * shader cache.
 */
void
st_store_ir_in_disk_cache(struct st_context *st, struct gl_program *prog,
                          bool nir)
{
   if (!st->ctx->Cache)
      return;

   /* Exit early when we are dealing with a fixed-function shader that has
    * no GLSL source, and therefore no SHA-1 to use as a cache key.
    */
   static const char zero[sizeof(prog->sh.data->sha1)] = {0};
   if (memcmp(prog->sh.data->sha1, zero, sizeof(prog->sh.data->sha1)) == 0)
      return;

   st_serialise_ir_program(st->ctx, prog, nir);

   if (st->ctx->_Shader->Flags & GLSL_CACHE_INFO) {
      fprintf(stderr, "putting %s state tracker IR in cache\n",
              _mesa_shader_stage_to_string(prog->info.stage));
   }
}

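/* Counterpart of write_stream_out_to_cache(). */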
static void
read_stream_out_from_cache(struct blob_reader *blob_reader,
                           struct pipe_shader_state *tgsi)
{
   blob_copy_bytes(blob_reader, (uint8_t *) &tgsi->stream_output,
                   sizeof(tgsi->stream_output));
}

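/* Read back the token count and TGSI tokens written by
 * write_tgsi_to_cache().  The token array is allocated with MALLOC and
 * owned by the caller.
 */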
static void
read_tgsi_from_cache(struct blob_reader *blob_reader,
                     const struct tgsi_token **tokens,
                     unsigned *num_tokens)
{
   *num_tokens = blob_read_uint32(blob_reader);
   unsigned tokens_size = *num_tokens * sizeof(struct tgsi_token);
   *tokens = (const struct tgsi_token*) MALLOC(tokens_size);
   blob_copy_bytes(blob_reader, (uint8_t *) *tokens, tokens_size);
}

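/**
 * Rebuild a gl_program's state tracker data from prog->driver_cache_blob:
 * release any existing shader variants, restore the per-stage metadata and
 * the IR (TGSI tokens or deserialised NIR), and mark the relevant state
 * dirty if the program is currently bound.
 */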
static void
st_deserialise_ir_program(struct gl_context *ctx,
                          struct gl_shader_program *shProg,
                          struct gl_program *prog, bool nir)
{
   struct st_context *st = st_context(ctx);
   size_t size = prog->driver_cache_blob_size;
   uint8_t *buffer = (uint8_t *) prog->driver_cache_blob;
   const struct nir_shader_compiler_options *options =
      ctx->Const.ShaderCompilerOptions[prog->info.stage].NirOptions;

   assert(prog->driver_cache_blob && prog->driver_cache_blob_size > 0);

   struct blob_reader blob_reader;
   blob_reader_init(&blob_reader, buffer, size);

   switch (prog->info.stage) {
   case MESA_SHADER_VERTEX: {
      struct st_vertex_program *stvp = (struct st_vertex_program *) prog;

      st_release_vp_variants(st, stvp);

      stvp->num_inputs = blob_read_uint32(&blob_reader);
      blob_copy_bytes(&blob_reader, (uint8_t *) stvp->index_to_input,
                      sizeof(stvp->index_to_input));
      blob_copy_bytes(&blob_reader, (uint8_t *) stvp->input_to_index,
                      sizeof(stvp->input_to_index));
      blob_copy_bytes(&blob_reader, (uint8_t *) stvp->result_to_output,
                      sizeof(stvp->result_to_output));

      read_stream_out_from_cache(&blob_reader, &stvp->tgsi);

      if (nir) {
         stvp->tgsi.type = PIPE_SHADER_IR_NIR;
         stvp->shader_program = shProg;
         stvp->tgsi.ir.nir = nir_deserialize(NULL, options, &blob_reader);
         prog->nir = stvp->tgsi.ir.nir;
      } else {
         read_tgsi_from_cache(&blob_reader, &stvp->tgsi.tokens,
                              &stvp->num_tgsi_tokens);
      }

      if (st->vp == stvp)
         st->dirty |= ST_NEW_VERTEX_PROGRAM(st, stvp);

      break;
   }
   case MESA_SHADER_TESS_CTRL: {
      struct st_common_program *sttcp = st_common_program(prog);

      st_release_basic_variants(st, sttcp->Base.Target,
                                &sttcp->variants, &sttcp->tgsi);

      read_stream_out_from_cache(&blob_reader, &sttcp->tgsi);

      if (nir) {
         sttcp->tgsi.type = PIPE_SHADER_IR_NIR;
         sttcp->shader_program = shProg;
         sttcp->tgsi.ir.nir = nir_deserialize(NULL, options, &blob_reader);
         prog->nir = sttcp->tgsi.ir.nir;
      } else {
         read_tgsi_from_cache(&blob_reader, &sttcp->tgsi.tokens,
                              &sttcp->num_tgsi_tokens);
      }

      if (st->tcp == sttcp)
         st->dirty |= sttcp->affected_states;

      break;
   }
   case MESA_SHADER_TESS_EVAL: {
      struct st_common_program *sttep = st_common_program(prog);

      st_release_basic_variants(st, sttep->Base.Target,
                                &sttep->variants, &sttep->tgsi);

      read_stream_out_from_cache(&blob_reader, &sttep->tgsi);

      if (nir) {
         sttep->tgsi.type = PIPE_SHADER_IR_NIR;
         sttep->shader_program = shProg;
         sttep->tgsi.ir.nir = nir_deserialize(NULL, options, &blob_reader);
         prog->nir = sttep->tgsi.ir.nir;
      } else {
         read_tgsi_from_cache(&blob_reader, &sttep->tgsi.tokens,
                              &sttep->num_tgsi_tokens);
      }

      if (st->tep == sttep)
         st->dirty |= sttep->affected_states;

      break;
   }
   case MESA_SHADER_GEOMETRY: {
      struct st_common_program *stgp = st_common_program(prog);

      st_release_basic_variants(st, stgp->Base.Target, &stgp->variants,
                                &stgp->tgsi);

      read_stream_out_from_cache(&blob_reader, &stgp->tgsi);

      if (nir) {
         stgp->tgsi.type = PIPE_SHADER_IR_NIR;
         stgp->shader_program = shProg;
         stgp->tgsi.ir.nir = nir_deserialize(NULL, options, &blob_reader);
         prog->nir = stgp->tgsi.ir.nir;
      } else {
         read_tgsi_from_cache(&blob_reader, &stgp->tgsi.tokens,
                              &stgp->num_tgsi_tokens);
      }

      if (st->gp == stgp)
         st->dirty |= stgp->affected_states;

      break;
   }
   case MESA_SHADER_FRAGMENT: {
      struct st_fragment_program *stfp = (struct st_fragment_program *) prog;

      st_release_fp_variants(st, stfp);

      if (nir) {
         stfp->tgsi.type = PIPE_SHADER_IR_NIR;
         stfp->shader_program = shProg;
         stfp->tgsi.ir.nir = nir_deserialize(NULL, options, &blob_reader);
         prog->nir = stfp->tgsi.ir.nir;
      } else {
         read_tgsi_from_cache(&blob_reader, &stfp->tgsi.tokens,
                              &stfp->num_tgsi_tokens);
      }

      if (st->fp == stfp)
         st->dirty |= stfp->affected_states;

      break;
   }
   case MESA_SHADER_COMPUTE: {
      struct st_compute_program *stcp = (struct st_compute_program *) prog;

      st_release_cp_variants(st, stcp);

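      /* Compute programs use pipe_compute_state, so the IR pointer and its
       * type live in tgsi.prog / tgsi.ir_type rather than in a
       * pipe_shader_state's tokens / ir.nir fields.
       */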
      if (nir) {
         stcp->tgsi.ir_type = PIPE_SHADER_IR_NIR;
         stcp->shader_program = shProg;
         stcp->tgsi.prog = nir_deserialize(NULL, options, &blob_reader);
         prog->nir = (nir_shader *) stcp->tgsi.prog;
      } else {
         read_tgsi_from_cache(&blob_reader,
                              (const struct tgsi_token**) &stcp->tgsi.prog,
                              &stcp->num_tgsi_tokens);
      }

      stcp->tgsi.req_local_mem = stcp->Base.info.cs.shared_size;
      stcp->tgsi.req_private_mem = 0;
      stcp->tgsi.req_input_mem = 0;

      if (st->cp == stcp)
         st->dirty |= stcp->affected_states;

      break;
   }
   default:
      unreachable("Unsupported stage");
   }

   /* Make sure we don't try to read more data than we wrote. This should
    * never happen in release builds, but it's useful to have this check to
    * catch development bugs.
    */
   if (blob_reader.current != blob_reader.end || blob_reader.overrun) {
      assert(!"Invalid TGSI shader disk cache item!");

      if (ctx->_Shader->Flags & GLSL_CACHE_INFO) {
         fprintf(stderr, "Error reading program from cache (invalid "
                 "TGSI cache item)\n");
      }
   }

   st_set_prog_affected_state_flags(prog);
   _mesa_associate_uniform_storage(ctx, shProg, prog);

   /* Create Gallium shaders now instead of on demand. */
   if (ST_DEBUG & DEBUG_PRECOMPILE ||
       st->shader_has_one_variant[prog->info.stage])
      st_precompile_shader_variant(st, prog);
}

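/**
 * Deserialise the state tracker IR for every linked stage of a program whose
 * GLSL metadata was restored from the shader cache (LINKING_SKIPPED).
 * Returns false when there is nothing to load.
 */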
bool
st_load_ir_from_disk_cache(struct gl_context *ctx,
                           struct gl_shader_program *prog,
                           bool nir)
{
   if (!ctx->Cache)
      return false;

   /* If we didn't load the GLSL metadata from the cache then we could not
    * have loaded the TGSI or NIR either.
    */
   if (prog->data->LinkStatus != LINKING_SKIPPED)
      return false;

   for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
      if (prog->_LinkedShaders[i] == NULL)
         continue;

      struct gl_program *glprog = prog->_LinkedShaders[i]->Program;
      st_deserialise_ir_program(ctx, prog, glprog, nir);

      /* We don't need the cached blob anymore, so free it. */
      ralloc_free(glprog->driver_cache_blob);
      glprog->driver_cache_blob = NULL;
      glprog->driver_cache_blob_size = 0;

      if (ctx->_Shader->Flags & GLSL_CACHE_INFO) {
         fprintf(stderr, "%s state tracker IR retrieved from cache\n",
                 _mesa_shader_stage_to_string(i));
      }
   }

   return true;
}

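/* Thin wrappers over st_(de)serialise_ir_program() that select the TGSI or
 * NIR path.
 */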
void
st_serialise_tgsi_program(struct gl_context *ctx, struct gl_program *prog)
{
   st_serialise_ir_program(ctx, prog, false);
}

void
st_serialise_tgsi_program_binary(struct gl_context *ctx,
                                 struct gl_shader_program *shProg,
                                 struct gl_program *prog)
{
   st_serialise_ir_program(ctx, prog, false);
}

void
st_deserialise_tgsi_program(struct gl_context *ctx,
                            struct gl_shader_program *shProg,
                            struct gl_program *prog)
{
   st_deserialise_ir_program(ctx, shProg, prog, false);
}

void
st_serialise_nir_program(struct gl_context *ctx, struct gl_program *prog)
{
   st_serialise_ir_program(ctx, prog, true);
}

void
st_serialise_nir_program_binary(struct gl_context *ctx,
                                struct gl_shader_program *shProg,
                                struct gl_program *prog)
{
   st_serialise_ir_program(ctx, prog, true);
}

void
st_deserialise_nir_program(struct gl_context *ctx,
                           struct gl_shader_program *shProg,
                           struct gl_program *prog)
{
   st_deserialise_ir_program(ctx, shProg, prog, true);
}