/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#ifndef _VTN_PRIVATE_H_
#define _VTN_PRIVATE_H_

#include <setjmp.h>

#include "nir/nir.h"
#include "nir/nir_builder.h"
#include "util/u_dynarray.h"
#include "nir_spirv.h"
#include "spirv.h"
#include "vtn_generator_ids.h"

struct vtn_builder;
struct vtn_decoration;

/* setjmp/longjmp is broken on MinGW: https://sourceforge.net/p/mingw-w64/bugs/406/ */
#ifdef __MINGW32__
  #define vtn_setjmp __builtin_setjmp
  #define vtn_longjmp __builtin_longjmp
#else
  #define vtn_setjmp setjmp
  #define vtn_longjmp longjmp
#endif

void vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
             size_t spirv_offset, const char *message);

void vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
              size_t spirv_offset, const char *fmt, ...) PRINTFLIKE(4, 5);

#define vtn_info(...) vtn_logf(b, NIR_SPIRV_DEBUG_LEVEL_INFO, 0, __VA_ARGS__)

void _vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
               const char *fmt, ...) PRINTFLIKE(4, 5);
#define vtn_warn(...) _vtn_warn(b, __FILE__, __LINE__, __VA_ARGS__)

void _vtn_err(struct vtn_builder *b, const char *file, unsigned line,
              const char *fmt, ...) PRINTFLIKE(4, 5);
#define vtn_err(...) _vtn_err(b, __FILE__, __LINE__, __VA_ARGS__)
/** Fail SPIR-V parsing
 *
 * This function logs an error and then bails out of the shader compile using
 * longjmp.  This being safe relies on three things:
 *
 *  1) We must guarantee that setjmp is called after allocating the builder
 *     and setting up b->debug (so that logging works) but before any errors
 *     have a chance to occur.
 *
 *  2) While doing the SPIR-V -> NIR conversion, we need to be careful to
 *     ensure that all heap allocations happen through ralloc and are parented
 *     to the builder.  This way they will get properly cleaned up on error.
 *
 *  3) We must ensure that _vtn_fail is never called while a mutex lock or a
 *     reference to any other resource is held, with the exception of ralloc
 *     objects which are parented to the builder.
 *
 * So long as these three things continue to hold, we can easily longjmp back
 * to spirv_to_nir(), clean up the builder, and return NULL.
 */
NORETURN void
_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...) PRINTFLIKE(4, 5);

#define vtn_fail(...) _vtn_fail(b, __FILE__, __LINE__, __VA_ARGS__)

/** Fail if the given expression evaluates to true */
#define vtn_fail_if(expr, ...) \
   do { \
      if (unlikely(expr)) \
         vtn_fail(__VA_ARGS__); \
   } while (0)
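
/* A minimal usage sketch (the helper and operand layout are hypothetical,
 * not actual parser code).  vtn_fail_if expands to a vtn_fail call that
 * longjmps out of the compile, so it may only be used where a builder named
 * "b" is in scope:
 *
 *    static void
 *    check_result_id(struct vtn_builder *b, const uint32_t *w)
 *    {
 *       vtn_fail_if(w[2] >= b->value_id_bound,
 *                   "Result id %u is out of bounds", w[2]);
 *    }
 */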

#define _vtn_fail_with(t, msg, v) \
   vtn_fail("%s: %s (%u)\n", msg, spirv_ ## t ## _to_string(v), v)

#define vtn_fail_with_decoration(msg, v) _vtn_fail_with(decoration, msg, v)
#define vtn_fail_with_opcode(msg, v)     _vtn_fail_with(op, msg, v)

/** Assert that a condition is true and, if it isn't, vtn_fail
 *
 * This macro is transitional only and should not be used in new code.  Use
 * vtn_fail_if and provide a real message instead.
 */
#define vtn_assert(expr) \
   do { \
      if (!likely(expr)) \
         vtn_fail("%s", #expr); \
   } while (0)

enum vtn_value_type {
   vtn_value_type_invalid = 0,
   vtn_value_type_undef,
   vtn_value_type_string,
   vtn_value_type_decoration_group,
   vtn_value_type_type,
   vtn_value_type_constant,
   vtn_value_type_pointer,
   vtn_value_type_function,
   vtn_value_type_block,
   vtn_value_type_ssa,
   vtn_value_type_extension,
   vtn_value_type_image_pointer,
};

enum vtn_branch_type {
   vtn_branch_type_none,
   vtn_branch_type_if_merge,
   vtn_branch_type_switch_break,
   vtn_branch_type_switch_fallthrough,
   vtn_branch_type_loop_break,
   vtn_branch_type_loop_continue,
   vtn_branch_type_loop_back_edge,
   vtn_branch_type_discard,
   vtn_branch_type_terminate_invocation,
   vtn_branch_type_ignore_intersection,
   vtn_branch_type_terminate_ray,
   vtn_branch_type_return,
};

enum vtn_cf_node_type {
   vtn_cf_node_type_block,
   vtn_cf_node_type_if,
   vtn_cf_node_type_loop,
   vtn_cf_node_type_case,
   vtn_cf_node_type_switch,
   vtn_cf_node_type_function,
};

struct vtn_cf_node {
   struct list_head link;
   struct vtn_cf_node *parent;
   enum vtn_cf_node_type type;
};

struct vtn_loop {
   struct vtn_cf_node node;

   /* The main body of the loop */
   struct list_head body;

   /* The "continue" part of the loop.  This gets executed after the body
    * and is where you go when you hit a continue.
    */
   struct list_head cont_body;

   struct vtn_block *header_block;
   struct vtn_block *cont_block;
   struct vtn_block *break_block;

   SpvLoopControlMask control;
};

struct vtn_if {
   struct vtn_cf_node node;

   enum vtn_branch_type then_type;
   struct list_head then_body;

   enum vtn_branch_type else_type;
   struct list_head else_body;

   struct vtn_block *header_block;
   struct vtn_block *merge_block;

   SpvSelectionControlMask control;
};

struct vtn_case {
   struct vtn_cf_node node;

   struct vtn_block *block;

   enum vtn_branch_type type;
   struct list_head body;

   /* The fallthrough case, if any */
   struct vtn_case *fallthrough;

   /* The uint32_t values that map to this case */
   struct util_dynarray values;

   /* True if this is the default case */
   bool is_default;

   /* Initialized to false; used when sorting the list of cases */
   bool visited;
};

struct vtn_switch {
   struct vtn_cf_node node;

   uint32_t selector;

   struct list_head cases;

   struct vtn_block *break_block;
};

struct vtn_block {
   struct vtn_cf_node node;

   /** A pointer to the label instruction */
   const uint32_t *label;

   /** A pointer to the merge instruction (or NULL if none exists) */
234   const uint32_t *merge;
235
236   /** A pointer to the branch instruction that ends this block */
237   const uint32_t *branch;
238
239   enum vtn_branch_type branch_type;
240
241   /* The CF node for which this is a merge target
242    *
243    * The SPIR-V spec requires that any given block can be the merge target
244    * for at most one merge instruction.  If this block is a merge target,
245    * this points back to the block containing that merge instruction.
246    */
247   struct vtn_cf_node *merge_cf_node;
248
249   /** Points to the loop that this block starts (if it starts a loop) */
250   struct vtn_loop *loop;
251
252   /** Points to the switch case started by this block (if any) */
253   struct vtn_case *switch_case;
254
255   /** Every block ends in a nop intrinsic so that we can find it again */
256   nir_intrinsic_instr *end_nop;
257
258   /** attached nir_block */
259   struct nir_block *block;
260};
261
262struct vtn_function {
263   struct vtn_cf_node node;
264
265   struct vtn_type *type;
266
267   bool referenced;
268   bool emitted;
269
270   nir_function *nir_func;
271   struct vtn_block *start_block;
272
273   struct list_head body;
274
275   const uint32_t *end;
276
277   SpvFunctionControlMask control;
278};
279
280#define VTN_DECL_CF_NODE_CAST(_type)               \
281static inline struct vtn_##_type *                 \
282vtn_cf_node_as_##_type(struct vtn_cf_node *node)   \
283{                                                  \
284   assert(node->type == vtn_cf_node_type_##_type); \
285   return (struct vtn_##_type *)node;              \
286}
287
288VTN_DECL_CF_NODE_CAST(block)
289VTN_DECL_CF_NODE_CAST(loop)
290VTN_DECL_CF_NODE_CAST(if)
291VTN_DECL_CF_NODE_CAST(case)
292VTN_DECL_CF_NODE_CAST(switch)
293VTN_DECL_CF_NODE_CAST(function)
294
295#define vtn_foreach_cf_node(node, cf_list) \
296   list_for_each_entry(struct vtn_cf_node, node, cf_list, link)
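
/* A minimal sketch of walking a CF list and downcasting, assuming a
 * populated body list (e.g. from a vtn_loop):
 *
 *    vtn_foreach_cf_node(node, &loop->body) {
 *       if (node->type == vtn_cf_node_type_block) {
 *          struct vtn_block *block = vtn_cf_node_as_block(node);
 *          // ... process the block ...
 *       }
 *    }
 */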

typedef bool (*vtn_instruction_handler)(struct vtn_builder *, SpvOp,
                                        const uint32_t *, unsigned);

void vtn_build_cfg(struct vtn_builder *b, const uint32_t *words,
                   const uint32_t *end);
void vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
                       vtn_instruction_handler instruction_handler);
void vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
                              const uint32_t *w, unsigned count);

const uint32_t *
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler);
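
/* A sketch of a handler passed to vtn_foreach_instruction (the name and the
 * opcodes handled are hypothetical).  Returning false stops the iteration:
 *
 *    static bool
 *    handle_instruction(struct vtn_builder *b, SpvOp opcode,
 *                       const uint32_t *w, unsigned count)
 *    {
 *       switch (opcode) {
 *       case SpvOpNop:
 *          return true;
 *       default:
 *          vtn_fail_with_opcode("Unhandled opcode", opcode);
 *       }
 *    }
 */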

struct vtn_ssa_value {
   union {
      nir_ssa_def *def;
      struct vtn_ssa_value **elems;
   };

   /* For matrices, if this is non-NULL, then this value is actually the
    * transpose of some other value.  The value that `transposed` points to
    * always dominates this value.
    */
   struct vtn_ssa_value *transposed;

   const struct glsl_type *type;
};

enum vtn_base_type {
   vtn_base_type_void,
   vtn_base_type_scalar,
   vtn_base_type_vector,
   vtn_base_type_matrix,
   vtn_base_type_array,
   vtn_base_type_struct,
   vtn_base_type_pointer,
   vtn_base_type_image,
   vtn_base_type_sampler,
   vtn_base_type_sampled_image,
   vtn_base_type_accel_struct,
   vtn_base_type_function,
   vtn_base_type_event,
};

struct vtn_type {
   enum vtn_base_type base_type;

   const struct glsl_type *type;

   /* The SPIR-V id of the given type. */
   uint32_t id;

   /* Specifies the length of complex types.
    *
    * For Workgroup pointers, this is the size of the referenced type.
    */
   unsigned length;

   /* for arrays, matrices and pointers, the array stride */
   unsigned stride;

   /* Access qualifiers */
   enum gl_access_qualifier access;

   union {
      /* Members for scalar, vector, and array-like types */
      struct {
         /* for arrays, the vtn_type for the elements of the array */
         struct vtn_type *array_element;

         /* for matrices, whether the matrix is stored row-major */
         bool row_major:1;

         /* Whether this type, or a parent type, has been decorated as a
          * builtin
          */
         bool is_builtin:1;

         /* Which built-in to use */
         SpvBuiltIn builtin;
      };

      /* Members for struct types */
      struct {
         /* for structures, the vtn_type for each member */
         struct vtn_type **members;

         /* for structs, the offset of each member */
         unsigned *offsets;

         /* for structs, whether it was decorated as a "non-SSBO-like" block */
         bool block:1;

         /* for structs, whether it was decorated as an "SSBO-like" block */
         bool buffer_block:1;

         /* for structs with block == true, whether this is a builtin block
          * (i.e. a block that contains only builtins).
          */
         bool builtin_block:1;

         /* for structs and unions, whether the members are tightly packed
          * with no implicit padding (i.e. the minimum member alignment is 1).
          *
          * Set by the CPacked decoration in kernels.
          */
         bool packed:1;
      };

      /* Members for pointer types */
      struct {
         /* For pointers, the vtn_type for the dereferenced type */
         struct vtn_type *deref;

         /* Storage class for pointers */
         SpvStorageClass storage_class;

         /* Required alignment for pointers */
         uint32_t align;
      };

      /* Members for image types */
      struct {
         /* GLSL image type for this type.  This is not to be confused with
          * vtn_type::type which is actually going to be the GLSL type for a
          * pointer to an image, likely a uint32_t.
          */
         const struct glsl_type *glsl_image;

         /* Image format for image_load_store type images */
         unsigned image_format;

         /* Access qualifier for storage images */
         SpvAccessQualifier access_qualifier;
      };

      /* Members for sampled image types */
      struct {
         /* For sampled images, the image type */
         struct vtn_type *image;
      };

      /* Members for function types */
      struct {
         /* For functions, the vtn_type for each parameter */
         struct vtn_type **params;

         /* Return type for functions */
         struct vtn_type *return_type;
      };
   };
};

bool vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type);

bool vtn_types_compatible(struct vtn_builder *b,
                          struct vtn_type *t1, struct vtn_type *t2);

struct vtn_type *vtn_type_without_array(struct vtn_type *type);

struct vtn_variable;

enum vtn_access_mode {
   vtn_access_mode_id,
   vtn_access_mode_literal,
};

struct vtn_access_link {
   enum vtn_access_mode mode;
   int64_t id;
};

struct vtn_access_chain {
   uint32_t length;

   /** Whether or not to treat the base pointer as an array.  This is only
    * true if this access chain came from an OpPtrAccessChain.
    */
   bool ptr_as_array;

   /* Access qualifiers */
   enum gl_access_qualifier access;

   /** Struct elements and array offsets.
    *
    * This is an array of 1 so that it can conveniently be created on the
    * stack but the real length is given by the length field.
    */
   struct vtn_access_link link[1];
};
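
/* Because link[] is declared with length 1, a heap-allocated chain with N
 * links needs extra trailing space.  A sketch using Mesa's rzalloc_size,
 * parented to the builder per the _vtn_fail contract above (num_links is a
 * hypothetical local):
 *
 *    size_t size = offsetof(struct vtn_access_chain, link) +
 *                  num_links * sizeof(struct vtn_access_link);
 *    struct vtn_access_chain *chain = rzalloc_size(b, size);
 *    chain->length = num_links;
 */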

enum vtn_variable_mode {
   vtn_variable_mode_function,
   vtn_variable_mode_private,
   vtn_variable_mode_uniform,
   vtn_variable_mode_atomic_counter,
   vtn_variable_mode_ubo,
   vtn_variable_mode_ssbo,
   vtn_variable_mode_phys_ssbo,
   vtn_variable_mode_push_constant,
   vtn_variable_mode_workgroup,
   vtn_variable_mode_cross_workgroup,
   vtn_variable_mode_generic,
   vtn_variable_mode_constant,
   vtn_variable_mode_input,
   vtn_variable_mode_output,
   vtn_variable_mode_image,
   vtn_variable_mode_accel_struct,
   vtn_variable_mode_call_data,
   vtn_variable_mode_call_data_in,
   vtn_variable_mode_ray_payload,
   vtn_variable_mode_ray_payload_in,
   vtn_variable_mode_hit_attrib,
   vtn_variable_mode_shader_record,
};

struct vtn_pointer {
   /** The variable mode for the referenced data */
   enum vtn_variable_mode mode;

   /** The dereferenced type of this pointer */
   struct vtn_type *type;

   /** The pointer type of this pointer
    *
    * This may be NULL for some temporary pointers constructed as part of a
    * large load, store, or copy.  It MUST be valid for all pointers which are
    * stored as SPIR-V SSA values.
    */
   struct vtn_type *ptr_type;

   /** The referenced variable, if known
    *
    * This field may be NULL if the pointer uses a (block_index, offset) pair
    * instead of an access chain or if the access chain starts at a deref.
    */
   struct vtn_variable *var;

   /** The NIR deref corresponding to this pointer */
   nir_deref_instr *deref;

   /** A (block_index, offset) pair representing a UBO or SSBO position. */
   struct nir_ssa_def *block_index;
   struct nir_ssa_def *offset;

   /* Access qualifiers */
   enum gl_access_qualifier access;
};

struct vtn_variable {
   enum vtn_variable_mode mode;

   struct vtn_type *type;

   unsigned descriptor_set;
   unsigned binding;
   bool explicit_binding;
   unsigned offset;
   unsigned input_attachment_index;

   nir_variable *var;

   /* If the variable is a struct with a location set on it then this will be
    * stored here. This will be used to calculate locations for members that
    * don't have their own explicit location.
    */
   int base_location;

   /**
    * Some early released versions of GLSLang implemented all function calls
    * by making copies of all parameters into temporary variables and passing
    * those variables into the function.  It even did so for samplers and
    * images, which violates the SPIR-V spec.  Unfortunately, two games
    * (Talos Principle and Doom) shipped with this old version of GLSLang and
    * also happen to pass samplers into functions.  Talos Principle received
    * an update fairly shortly after release with an updated GLSLang.  Doom,
    * on the other hand, has never received an update so we need to work
    * around this GLSLang issue in SPIR-V -> NIR.  Hopefully, we can drop this
    * hack at some point in the future.
    */
   struct vtn_pointer *copy_prop_sampler;

   /* Access qualifiers. */
   enum gl_access_qualifier access;
};

const struct glsl_type *
vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
                      enum vtn_variable_mode mode);

struct vtn_image_pointer {
   nir_deref_instr *image;
   nir_ssa_def *coord;
   nir_ssa_def *sample;
   nir_ssa_def *lod;
};

struct vtn_value {
   enum vtn_value_type value_type;

   /* Workaround for https://gitlab.freedesktop.org/mesa/mesa/-/issues/3406
    * Only set for OpImage / OpSampledImage.  Note that this is in addition
    * to the existence of a NonUniform decoration on this value.
    */
   uint32_t propagated_non_uniform : 1;

   /* Valid for vtn_value_type_constant to indicate the value is OpConstantNull. */
   bool is_null_constant:1;

   /* Valid when all the members of the value are undef. */
   bool is_undef_constant:1;

   const char *name;
   struct vtn_decoration *decoration;
   struct vtn_type *type;
   union {
      const char *str;
      nir_constant *constant;
      struct vtn_pointer *pointer;
      struct vtn_image_pointer *image;
      struct vtn_function *func;
      struct vtn_block *block;
      struct vtn_ssa_value *ssa;
      vtn_instruction_handler ext_handler;
   };
};

#define VTN_DEC_DECORATION -1
#define VTN_DEC_EXECUTION_MODE -2
#define VTN_DEC_STRUCT_MEMBER0 0

struct vtn_decoration {
   struct vtn_decoration *next;

   /* Specifies how to apply this decoration.  Negative values represent a
    * decoration or execution mode. (See the VTN_DEC_ #defines above.)
    * Non-negative values specify that it applies to a structure member.
    */
   int scope;

   const uint32_t *operands;
   struct vtn_value *group;

   union {
      SpvDecoration decoration;
      SpvExecutionMode exec_mode;
   };
};

struct vtn_builder {
   nir_builder nb;

   /* Used by vtn_fail to jump back to the beginning of SPIR-V compilation */
   jmp_buf fail_jump;

   const uint32_t *spirv;
   size_t spirv_word_count;
   uint32_t version;

   nir_shader *shader;
   struct spirv_to_nir_options *options;
   struct vtn_block *block;

   /* Current offset, file, line, and column.  Useful for debugging.  Set
    * automatically by vtn_foreach_instruction.
    */
   size_t spirv_offset;
   const char *file;
   int line, col;

   /*
    * In SPIR-V, constants are global, whereas in NIR, the load_const
    * instruction we use is per-function.  So while we parse each function,
    * we keep a hash table of constants we've resolved to nir_ssa_defs so
    * far, and we lazily resolve them when we see them used in a function.
    */
   struct hash_table *const_table;

   /*
    * Map from phi instructions (pointer to the start of the instruction)
    * to the variable corresponding to it.
    */
   struct hash_table *phi_table;

   /* In Vulkan, when lowering variable access for some modes, the derefs of
    * the variables are replaced with resource index intrinsics, leaving the
    * variable hanging.  This set keeps track of them so they can be filtered
    * (and not removed) in nir_remove_dead_variables.
    */
   struct set *vars_used_indirectly;

   unsigned num_specializations;
   struct nir_spirv_specialization *specializations;

   unsigned value_id_bound;
   struct vtn_value *values;

   /* Information on the origin of the SPIR-V */
   enum vtn_generator generator_id;
   SpvSourceLanguage source_lang;

   /* True if we need to fix up CS OpControlBarrier */
   bool wa_glslang_cs_barrier;

   /* True if we need to ignore undef initializers */
   bool wa_llvm_spirv_ignore_workgroup_initializer;

   /* Work around discard bugs in HLSL -> SPIR-V compilers */
   bool uses_demote_to_helper_invocation;
   bool convert_discard_to_demote;

   gl_shader_stage entry_point_stage;
   const char *entry_point_name;
   struct vtn_value *entry_point;
   struct vtn_value *workgroup_size_builtin;
   bool variable_pointers;

   uint32_t *interface_ids;
   size_t interface_ids_count;

   struct vtn_function *func;
   struct list_head functions;

   /* Current function parameter index */
   unsigned func_param_idx;

   /* false by default, set to true by the ContractionOff execution mode */
   bool exact;

   /* true when a physical memory model is chosen */
   bool physical_ptrs;

   /* memory model specified by OpMemoryModel */
   unsigned mem_model;
};

nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr);
struct vtn_pointer *
vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
                     struct vtn_type *ptr_type);

struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type);

static inline struct vtn_value *
vtn_untyped_value(struct vtn_builder *b, uint32_t value_id)
{
   vtn_fail_if(value_id >= b->value_id_bound,
               "SPIR-V id %u is out-of-bounds", value_id);
   return &b->values[value_id];
}

static inline uint32_t
vtn_id_for_value(struct vtn_builder *b, struct vtn_value *value)
{
   vtn_fail_if(value <= b->values, "vtn_value pointer outside the range of valid values");
   uint32_t value_id = value - b->values;
   vtn_fail_if(value_id >= b->value_id_bound, "vtn_value pointer outside the range of valid values");
   return value_id;
}

/* Consider not using this function directly; instead use vtn_push_ssa_value
 * or vtn_push_pointer so that decorations are applied by common code.
 */
static inline struct vtn_value *
vtn_push_value(struct vtn_builder *b, uint32_t value_id,
               enum vtn_value_type value_type)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);

   vtn_fail_if(value_type == vtn_value_type_ssa,
               "Do not call vtn_push_value for value_type_ssa.  Use "
               "vtn_push_ssa_value instead.");

   vtn_fail_if(val->value_type != vtn_value_type_invalid,
               "SPIR-V id %u has already been written by another instruction",
               value_id);

   val->value_type = value_type;

   return &b->values[value_id];
}

static inline struct vtn_value *
vtn_value(struct vtn_builder *b, uint32_t value_id,
          enum vtn_value_type value_type)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   vtn_fail_if(val->value_type != value_type,
               "SPIR-V id %u is the wrong kind of value", value_id);
   return val;
}

static inline struct vtn_value *
vtn_pointer_value(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   vtn_fail_if(val->value_type != vtn_value_type_pointer &&
               !val->is_null_constant,
               "SPIR-V id %u is the wrong kind of value", value_id);
   return val;
}

static inline struct vtn_pointer *
vtn_value_to_pointer(struct vtn_builder *b, struct vtn_value *value)
{
   if (value->is_null_constant) {
      vtn_assert(glsl_type_is_vector_or_scalar(value->type->type));
      nir_ssa_def *const_ssa =
         vtn_const_ssa_value(b, value->constant, value->type->type)->def;
      return vtn_pointer_from_ssa(b, const_ssa, value->type);
   }
   vtn_assert(value->value_type == vtn_value_type_pointer);
   return value->pointer;
}

static inline struct vtn_pointer *
vtn_pointer(struct vtn_builder *b, uint32_t value_id)
{
   return vtn_value_to_pointer(b, vtn_pointer_value(b, value_id));
}

bool
vtn_set_instruction_result_type(struct vtn_builder *b, SpvOp opcode,
                                const uint32_t *w, unsigned count);

static inline uint64_t
vtn_constant_uint(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_value(b, value_id, vtn_value_type_constant);

   vtn_fail_if(val->type->base_type != vtn_base_type_scalar ||
               !glsl_type_is_integer(val->type->type),
               "Expected id %u to be an integer constant", value_id);

   switch (glsl_get_bit_size(val->type->type)) {
   case 8:  return val->constant->values[0].u8;
   case 16: return val->constant->values[0].u16;
   case 32: return val->constant->values[0].u32;
   case 64: return val->constant->values[0].u64;
   default: unreachable("Invalid bit size");
   }
}

static inline int64_t
vtn_constant_int(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_value(b, value_id, vtn_value_type_constant);

   vtn_fail_if(val->type->base_type != vtn_base_type_scalar ||
               !glsl_type_is_integer(val->type->type),
               "Expected id %u to be an integer constant", value_id);

   switch (glsl_get_bit_size(val->type->type)) {
   case 8:  return val->constant->values[0].i8;
   case 16: return val->constant->values[0].i16;
   case 32: return val->constant->values[0].i32;
   case 64: return val->constant->values[0].i64;
   default: unreachable("Invalid bit size");
   }
}

static inline struct vtn_type *
vtn_get_value_type(struct vtn_builder *b, uint32_t value_id)
{
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   vtn_fail_if(val->type == NULL, "Value %u does not have a type", value_id);
   return val->type;
}

static inline struct vtn_type *
vtn_get_type(struct vtn_builder *b, uint32_t value_id)
{
   return vtn_value(b, value_id, vtn_value_type_type)->type;
}

struct vtn_ssa_value *vtn_ssa_value(struct vtn_builder *b, uint32_t value_id);
struct vtn_value *vtn_push_ssa_value(struct vtn_builder *b, uint32_t value_id,
                                     struct vtn_ssa_value *ssa);

nir_ssa_def *vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id);
struct vtn_value *vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id,
                                   nir_ssa_def *def);

struct vtn_value *vtn_push_pointer(struct vtn_builder *b,
                                   uint32_t value_id,
                                   struct vtn_pointer *ptr);

struct vtn_sampled_image {
   nir_deref_instr *image;
   nir_deref_instr *sampler;
};

nir_ssa_def *vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
                                          struct vtn_sampled_image si);

void
vtn_copy_value(struct vtn_builder *b, uint32_t src_value_id,
               uint32_t dst_value_id);

struct vtn_ssa_value *vtn_create_ssa_value(struct vtn_builder *b,
                                           const struct glsl_type *type);

struct vtn_ssa_value *vtn_ssa_transpose(struct vtn_builder *b,
                                        struct vtn_ssa_value *src);

nir_deref_instr *vtn_nir_deref(struct vtn_builder *b, uint32_t id);

nir_deref_instr *vtn_pointer_to_deref(struct vtn_builder *b,
                                      struct vtn_pointer *ptr);
nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out);

nir_deref_instr *
vtn_get_call_payload_for_location(struct vtn_builder *b, uint32_t location_id);

struct vtn_ssa_value *
vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
               enum gl_access_qualifier access);

void vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                     nir_deref_instr *dest,
                     enum gl_access_qualifier access);

struct vtn_ssa_value *
vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src,
                  enum gl_access_qualifier access);

void vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                        struct vtn_pointer *dest, enum gl_access_qualifier access);

void vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                          const uint32_t *w, unsigned count);

typedef void (*vtn_decoration_foreach_cb)(struct vtn_builder *,
                                          struct vtn_value *,
                                          int member,
                                          const struct vtn_decoration *,
                                          void *);

void vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                            vtn_decoration_foreach_cb cb, void *data);

typedef void (*vtn_execution_mode_foreach_cb)(struct vtn_builder *,
                                              struct vtn_value *,
                                              const struct vtn_decoration *,
                                              void *);

void vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                                vtn_execution_mode_foreach_cb cb, void *data);

nir_op vtn_nir_alu_op_for_spirv_opcode(struct vtn_builder *b,
                                       SpvOp opcode, bool *swap, bool *exact,
                                       unsigned src_bit_size, unsigned dst_bit_size);

void vtn_handle_alu(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count);

void vtn_handle_integer_dot(struct vtn_builder *b, SpvOp opcode,
                            const uint32_t *w, unsigned count);

void vtn_handle_bitcast(struct vtn_builder *b, const uint32_t *w,
                        unsigned count);

void vtn_handle_no_contraction(struct vtn_builder *b, struct vtn_value *val);

void vtn_handle_subgroup(struct vtn_builder *b, SpvOp opcode,
                         const uint32_t *w, unsigned count);

bool vtn_handle_glsl450_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                    const uint32_t *words, unsigned count);

bool vtn_handle_opencl_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                   const uint32_t *words, unsigned count);
bool vtn_handle_opencl_core_instruction(struct vtn_builder *b, SpvOp opcode,
                                        const uint32_t *w, unsigned count);

struct vtn_builder* vtn_create_builder(const uint32_t *words, size_t word_count,
                                       gl_shader_stage stage, const char *entry_point_name,
                                       const struct spirv_to_nir_options *options);

void vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
                            unsigned count);

void vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count);

enum vtn_variable_mode vtn_storage_class_to_mode(struct vtn_builder *b,
                                                 SpvStorageClass class,
                                                 struct vtn_type *interface_type,
                                                 nir_variable_mode *nir_mode_out);

nir_address_format vtn_mode_to_address_format(struct vtn_builder *b,
                                              enum vtn_variable_mode);

nir_rounding_mode vtn_rounding_mode_to_nir(struct vtn_builder *b,
                                           SpvFPRoundingMode mode);

static inline uint32_t
vtn_align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -((int32_t) a)));
   return (v + a - 1) & ~(a - 1);
}
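
/* vtn_align_u32 rounds v up to the next multiple of a, which the assert
 * requires to be a nonzero power of two.  For example:
 *
 *    vtn_align_u32(13, 8) == 16
 *    vtn_align_u32(16, 8) == 16
 */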

static inline uint64_t
vtn_u64_literal(const uint32_t *w)
{
   return (uint64_t)w[1] << 32 | w[0];
}
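
/* SPIR-V stores 64-bit literals as two words with the low word first, so
 * for w[0] == 0x89abcdef and w[1] == 0x01234567,
 * vtn_u64_literal(w) == 0x0123456789abcdefull.
 */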

bool vtn_handle_amd_gcn_shader_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                           const uint32_t *words, unsigned count);

bool vtn_handle_amd_shader_ballot_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                              const uint32_t *w, unsigned count);

bool vtn_handle_amd_shader_trinary_minmax_instruction(struct vtn_builder *b, SpvOp ext_opcode,
                                                      const uint32_t *words, unsigned count);

bool vtn_handle_amd_shader_explicit_vertex_parameter_instruction(struct vtn_builder *b,
                                                                 SpvOp ext_opcode,
                                                                 const uint32_t *words,
                                                                 unsigned count);

SpvMemorySemanticsMask vtn_mode_to_memory_semantics(enum vtn_variable_mode mode);

void vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
                             SpvMemorySemanticsMask semantics);

static inline int
cmp_uint32_t(const void *pa, const void *pb)
{
   uint32_t a = *((const uint32_t *)pa);
   uint32_t b = *((const uint32_t *)pb);
   if (a < b)
      return -1;
   if (a > b)
      return 1;
   return 0;
}
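
/* cmp_uint32_t is a qsort-style comparator.  A hypothetical sketch of
 * sorting the values stored in a vtn_case's util_dynarray:
 *
 *    qsort(util_dynarray_begin(&cs->values),
 *          util_dynarray_num_elements(&cs->values, uint32_t),
 *          sizeof(uint32_t), cmp_uint32_t);
 */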

#endif /* _VTN_PRIVATE_H_ */