#
# Copyright (C) 2018 Red Hat
# Copyright (C) 2014 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#

# This file defines all the available intrinsics in one place.
#
# The Intrinsic class corresponds one-to-one with nir_intrinsic_info
# structure.

class Intrinsic(object):
   """Class that represents all the information about an intrinsic opcode.
   NOTE: this must be kept in sync with nir_intrinsic_info.
   """
   def __init__(self, name, src_components, dest_components,
                indices, flags, sysval, bit_sizes):
       """Parameters:

       - name: the intrinsic name
       - src_components: list of the number of components per src, 0 means
         vectorized instruction with number of components given in the
         num_components field in nir_intrinsic_instr.
       - dest_components: number of destination components, -1 means no
         dest, 0 means number of components given in num_components field
         in nir_intrinsic_instr.
       - indices: list of constant indices
       - flags: list of semantic flags
       - sysval: is this a system-value intrinsic
       - bit_sizes: allowed dest bit_sizes
       """
       assert isinstance(name, str)
       assert isinstance(src_components, list)
       if src_components:
           assert isinstance(src_components[0], int)
       assert isinstance(dest_components, int)
       assert isinstance(indices, list)
       if indices:
           assert isinstance(indices[0], str)
       assert isinstance(flags, list)
       if flags:
           assert isinstance(flags[0], str)
       assert isinstance(sysval, bool)
       # Consistency with the other list parameters above: bit_sizes must
       # itself be a list (previously only the element type was checked).
       assert isinstance(bit_sizes, list)
       if bit_sizes:
           assert isinstance(bit_sizes[0], int)

       self.name = name
       self.num_srcs = len(src_components)
       self.src_components = src_components
       self.has_dest = (dest_components >= 0)
       self.dest_components = dest_components
       self.num_indices = len(indices)
       self.indices = indices
       self.flags = flags
       self.sysval = sysval
       self.bit_sizes = bit_sizes

#
# Possible indices:
#

# A constant 'base' value that is added to an offset src:
BASE = "NIR_INTRINSIC_BASE"
# For store instructions, a writemask:
WRMASK = "NIR_INTRINSIC_WRMASK"
# The stream-id for GS emit_vertex/end_primitive intrinsics:
STREAM_ID = "NIR_INTRINSIC_STREAM_ID"
# The clip-plane id for load_user_clip_plane intrinsics:
UCP_ID = "NIR_INTRINSIC_UCP_ID"
# The amount of data, starting from BASE, that this instruction
# may access.  This is used to provide bounds if the offset is
# not constant.
RANGE = "NIR_INTRINSIC_RANGE"
# The vulkan descriptor set for the vulkan_resource_index intrinsic
DESC_SET = "NIR_INTRINSIC_DESC_SET"
# The vulkan descriptor set binding for the vulkan_resource_index intrinsic
BINDING = "NIR_INTRINSIC_BINDING"
# Component offset
COMPONENT = "NIR_INTRINSIC_COMPONENT"
# Interpolation mode (only meaningful for FS inputs)
INTERP_MODE = "NIR_INTRINSIC_INTERP_MODE"
# A binary nir_op to use when performing a reduction or scan operation
REDUCTION_OP = "NIR_INTRINSIC_REDUCTION_OP"
# Cluster size for reduction operations
CLUSTER_SIZE = "NIR_INTRINSIC_CLUSTER_SIZE"
# Parameter index for a load_param intrinsic
PARAM_IDX = "NIR_INTRINSIC_PARAM_IDX"
# Image dimensionality for image intrinsics
IMAGE_DIM = "NIR_INTRINSIC_IMAGE_DIM"
# Non-zero if we are accessing an array image
IMAGE_ARRAY = "NIR_INTRINSIC_IMAGE_ARRAY"
# Access qualifiers for image and memory access intrinsics
ACCESS = "NIR_INTRINSIC_ACCESS"
DST_ACCESS = "NIR_INTRINSIC_DST_ACCESS"
SRC_ACCESS = "NIR_INTRINSIC_SRC_ACCESS"
# Image format for image intrinsics
FORMAT = "NIR_INTRINSIC_FORMAT"
# Offset or address alignment
ALIGN_MUL = "NIR_INTRINSIC_ALIGN_MUL"
ALIGN_OFFSET = "NIR_INTRINSIC_ALIGN_OFFSET"
# The vulkan descriptor type for vulkan_resource_index
DESC_TYPE = "NIR_INTRINSIC_DESC_TYPE"

#
# Possible flags:
#

CAN_ELIMINATE = "NIR_INTRINSIC_CAN_ELIMINATE"
CAN_REORDER   = "NIR_INTRINSIC_CAN_REORDER"

# Table of every known intrinsic, keyed by name; populated by intrinsic().
INTR_OPCODES = {}

# Defines a new NIR intrinsic.  By default, the intrinsic will have no sources
# and no destination.
#
# You can set dest_comp=n to enable a destination for the intrinsic, in which
# case it will have that many components, or =0 for "as many components as the
# NIR destination value."
#
# Set src_comp=n to enable sources for the instruction.  It can be an array of
# component counts, or (for convenience) a scalar component count if there's
# only one source.  If a component count is 0, it will be as many components as
# the intrinsic has based on the dest_comp.
def intrinsic(name, src_comp=[], dest_comp=-1, indices=[],
              flags=[], sysval=False, bit_sizes=[]):
    # NOTE: the mutable list defaults are safe only because they are stored,
    # never mutated; do not append to these arguments.
    assert name not in INTR_OPCODES
    INTR_OPCODES[name] = Intrinsic(name, src_comp, dest_comp,
                                   indices, flags, sysval, bit_sizes)

intrinsic("nop", flags=[CAN_ELIMINATE])

intrinsic("load_param", dest_comp=0, indices=[PARAM_IDX], flags=[CAN_ELIMINATE])

intrinsic("load_deref", dest_comp=0, src_comp=[-1],
          indices=[ACCESS], flags=[CAN_ELIMINATE])
intrinsic("store_deref", src_comp=[-1, 0], indices=[WRMASK, ACCESS])
intrinsic("copy_deref", src_comp=[-1, -1], indices=[DST_ACCESS, SRC_ACCESS])

# Interpolation of input.  The interp_deref_at* intrinsics are similar to the
# load_var intrinsic acting on a shader input except that they interpolate the
# input differently.  The at_sample and at_offset intrinsics take an
# additional source that is an integer sample id or a vec2 position offset
# respectively.

intrinsic("interp_deref_at_centroid", dest_comp=0, src_comp=[1],
          flags=[CAN_ELIMINATE, CAN_REORDER])
intrinsic("interp_deref_at_sample", src_comp=[1, 1], dest_comp=0,
          flags=[CAN_ELIMINATE, CAN_REORDER])
intrinsic("interp_deref_at_offset", src_comp=[1, 2], dest_comp=0,
          flags=[CAN_ELIMINATE, CAN_REORDER])

# Gets the length of an unsized array at the end of a buffer
intrinsic("deref_buffer_array_length", src_comp=[-1], dest_comp=1,
          flags=[CAN_ELIMINATE, CAN_REORDER])

# Ask the driver for the size of a given buffer. It takes the buffer index
# as source.
intrinsic("get_buffer_size", src_comp=[-1], dest_comp=1,
          flags=[CAN_ELIMINATE, CAN_REORDER])

# a barrier is an intrinsic with no inputs/outputs but which can't be moved
# around/optimized in general
def barrier(name):
    intrinsic(name)

barrier("barrier")
barrier("discard")

# Memory barrier with semantics analogous to the memoryBarrier() GLSL
# intrinsic.
barrier("memory_barrier")

# Shader clock intrinsic with semantics analogous to the clock2x32ARB()
# GLSL intrinsic.
# The latter can be used as code motion barrier, which is currently not
# feasible with NIR.
intrinsic("shader_clock", dest_comp=2, flags=[CAN_ELIMINATE])

# Shader ballot intrinsics with semantics analogous to the
#
#    ballotARB()
#    readInvocationARB()
#    readFirstInvocationARB()
#
# GLSL functions from ARB_shader_ballot.
intrinsic("ballot", src_comp=[1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("read_invocation", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("read_first_invocation", src_comp=[0], dest_comp=0, flags=[CAN_ELIMINATE])

# Additional SPIR-V ballot intrinsics
#
# These correspond to the SPIR-V opcodes
#
#    OpGroupUniformElect
#    OpSubgroupFirstInvocationKHR
intrinsic("elect", dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("first_invocation", dest_comp=1, flags=[CAN_ELIMINATE])

# Memory barrier with semantics analogous to the compute shader
# groupMemoryBarrier(), memoryBarrierAtomicCounter(), memoryBarrierBuffer(),
# memoryBarrierImage() and memoryBarrierShared() GLSL intrinsics.
barrier("group_memory_barrier")
barrier("memory_barrier_atomic_counter")
barrier("memory_barrier_buffer")
barrier("memory_barrier_image")
barrier("memory_barrier_shared")
barrier("begin_invocation_interlock")
barrier("end_invocation_interlock")

# A conditional discard, with a single boolean source.
intrinsic("discard_if", src_comp=[1])

# ARB_shader_group_vote intrinsics
intrinsic("vote_any", src_comp=[1], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("vote_all", src_comp=[1], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("vote_feq", src_comp=[0], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("vote_ieq", src_comp=[0], dest_comp=1, flags=[CAN_ELIMINATE])

# Ballot ALU operations from SPIR-V.
#
# These operations work like their ALU counterparts except that the operate
# on a uvec4 which is treated as a 128bit integer.  Also, they are, in
# general, free to ignore any bits which are above the subgroup size.
intrinsic("ballot_bitfield_extract", src_comp=[4, 1], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("ballot_bit_count_reduce", src_comp=[4], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("ballot_bit_count_inclusive", src_comp=[4], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("ballot_bit_count_exclusive", src_comp=[4], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("ballot_find_lsb", src_comp=[4], dest_comp=1, flags=[CAN_ELIMINATE])
intrinsic("ballot_find_msb", src_comp=[4], dest_comp=1, flags=[CAN_ELIMINATE])

# Shuffle operations from SPIR-V.
intrinsic("shuffle", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("shuffle_xor", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("shuffle_up", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("shuffle_down", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])

# Quad operations from SPIR-V.
intrinsic("quad_broadcast", src_comp=[0, 1], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("quad_swap_horizontal", src_comp=[0], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("quad_swap_vertical", src_comp=[0], dest_comp=0, flags=[CAN_ELIMINATE])
intrinsic("quad_swap_diagonal", src_comp=[0], dest_comp=0, flags=[CAN_ELIMINATE])

intrinsic("reduce", src_comp=[0], dest_comp=0, indices=[REDUCTION_OP, CLUSTER_SIZE],
          flags=[CAN_ELIMINATE])
intrinsic("inclusive_scan", src_comp=[0], dest_comp=0, indices=[REDUCTION_OP],
          flags=[CAN_ELIMINATE])
intrinsic("exclusive_scan", src_comp=[0], dest_comp=0, indices=[REDUCTION_OP],
          flags=[CAN_ELIMINATE])

# Basic Geometry Shader intrinsics.
#
# emit_vertex implements GLSL's EmitStreamVertex() built-in.  It takes a single
# index, which is the stream ID to write to.
#
# end_primitive implements GLSL's EndPrimitive() built-in.
intrinsic("emit_vertex",   indices=[STREAM_ID])
intrinsic("end_primitive", indices=[STREAM_ID])

# Geometry Shader intrinsics with a vertex count.
#
# Alternatively, drivers may implement these intrinsics, and use
# nir_lower_gs_intrinsics() to convert from the basic intrinsics.
#
# These maintain a count of the number of vertices emitted, as an additional
# unsigned integer source.
intrinsic("emit_vertex_with_counter", src_comp=[1], indices=[STREAM_ID])
intrinsic("end_primitive_with_counter", src_comp=[1], indices=[STREAM_ID])
intrinsic("set_vertex_count", src_comp=[1])

# Atomic counters
#
# The *_var variants take an atomic_uint nir_variable, while the other,
# lowered, variants take a constant buffer index and register offset.

# Declares the deref and lowered forms of a zero-data-operand atomic counter op.
def atomic(name, flags=[]):
    intrinsic(name + "_deref", src_comp=[-1], dest_comp=1, flags=flags)
    intrinsic(name, src_comp=[1], dest_comp=1, indices=[BASE], flags=flags)

# Declares the deref and lowered forms of a one-data-operand atomic counter op.
def atomic2(name):
    intrinsic(name + "_deref", src_comp=[-1, 1], dest_comp=1)
    intrinsic(name, src_comp=[1, 1], dest_comp=1, indices=[BASE])

# Declares the deref and lowered forms of a two-data-operand atomic counter op
# (i.e. compare-and-swap).
def atomic3(name):
    intrinsic(name + "_deref", src_comp=[-1, 1, 1], dest_comp=1)
    intrinsic(name, src_comp=[1, 1, 1], dest_comp=1, indices=[BASE])

atomic("atomic_counter_inc")
atomic("atomic_counter_pre_dec")
atomic("atomic_counter_post_dec")
atomic("atomic_counter_read", flags=[CAN_ELIMINATE])
atomic2("atomic_counter_add")
atomic2("atomic_counter_min")
atomic2("atomic_counter_max")
atomic2("atomic_counter_and")
atomic2("atomic_counter_or")
atomic2("atomic_counter_xor")
atomic2("atomic_counter_exchange")
atomic3("atomic_counter_comp_swap")

# Image load, store and atomic intrinsics.
#
# All image intrinsics come in three versions.  One which take an image target
# passed as a deref chain as the first source, one which takes an index as the
# first source, and one which takes a bindless handle as the first source.
# In the first version, the image variable contains the memory and layout
# qualifiers that influence the semantics of the intrinsic.  In the second and
# third, the image format and access qualifiers are provided as constant
# indices.
#
# All image intrinsics take a four-coordinate vector and a sample index as
# 2nd and 3rd sources, determining the location within the image that will be
# accessed by the intrinsic.  Components not applicable to the image target
# in use are undefined.  Image store takes an additional four-component
# argument with the value to be written, and image atomic operations take
# either one or two additional scalar arguments with the same meaning as in
# the ARB_shader_image_load_store specification.
def image(name, src_comp=[], **kwargs):
    # The one-component image source (deref/index/handle) is prepended here,
    # so callers pass only the coordinate/sample/data component counts.
    intrinsic("image_deref_" + name, src_comp=[1] + src_comp,
              indices=[ACCESS], **kwargs)
    intrinsic("image_" + name, src_comp=[1] + src_comp,
              indices=[IMAGE_DIM, IMAGE_ARRAY, FORMAT, ACCESS], **kwargs)
    intrinsic("bindless_image_" + name, src_comp=[1] + src_comp,
              indices=[IMAGE_DIM, IMAGE_ARRAY, FORMAT, ACCESS], **kwargs)

image("load", src_comp=[4, 1], dest_comp=0, flags=[CAN_ELIMINATE])
image("store", src_comp=[4, 1, 0])
image("atomic_add",  src_comp=[4, 1, 1], dest_comp=1)
image("atomic_min",  src_comp=[4, 1, 1], dest_comp=1)
image("atomic_max",  src_comp=[4, 1, 1], dest_comp=1)
image("atomic_and",  src_comp=[4, 1, 1], dest_comp=1)
image("atomic_or",   src_comp=[4, 1, 1], dest_comp=1)
image("atomic_xor",  src_comp=[4, 1, 1], dest_comp=1)
image("atomic_exchange",  src_comp=[4, 1, 1], dest_comp=1)
image("atomic_comp_swap", src_comp=[4, 1, 1, 1], dest_comp=1)
# BUGFIX: was [1, 4, 1, 1], which declared a spurious extra source; the
# image() helper already prepends the one-component image source, so
# atomic_fadd takes the same [coord, sample, data] sources as the other
# single-data image atomics.
image("atomic_fadd",  src_comp=[4, 1, 1], dest_comp=1)
image("size",    dest_comp=0, flags=[CAN_ELIMINATE, CAN_REORDER])
image("samples", dest_comp=1, flags=[CAN_ELIMINATE, CAN_REORDER])

# Intel-specific query for loading from the brw_image_param struct passed
# into the shader as a uniform.  The variable is a deref to the image
# variable. The const index specifies which of the six parameters to load.
intrinsic("image_deref_load_param_intel", src_comp=[1], dest_comp=0,
          indices=[BASE], flags=[CAN_ELIMINATE, CAN_REORDER])
image("load_raw_intel", src_comp=[1], dest_comp=0,
      flags=[CAN_ELIMINATE])
image("store_raw_intel", src_comp=[1, 0])

# Vulkan descriptor set intrinsics
#
# The Vulkan API uses a different binding model from GL.  In the Vulkan
# API, all external resources are represented by a tuple:
#
# (descriptor set, binding, array index)
#
# where the array index is the only thing allowed to be indirect.  The
# vulkan_surface_index intrinsic takes the descriptor set and binding as
# its first two indices and the array index as its source.  The third
# index is a nir_variable_mode in case that's useful to the backend.
#
# The intended usage is that the shader will call vulkan_surface_index to
# get an index and then pass that as the buffer index ubo/ssbo calls.
#
# The vulkan_resource_reindex intrinsic takes a resource index in src0
# (the result of a vulkan_resource_index or vulkan_resource_reindex) which
# corresponds to the tuple (set, binding, index) and computes an index
# corresponding to tuple (set, binding, idx + src1).
intrinsic("vulkan_resource_index", src_comp=[1], dest_comp=0,
          indices=[DESC_SET, BINDING, DESC_TYPE],
          flags=[CAN_ELIMINATE, CAN_REORDER])
intrinsic("vulkan_resource_reindex", src_comp=[0, 1], dest_comp=0,
          indices=[DESC_TYPE], flags=[CAN_ELIMINATE, CAN_REORDER])
intrinsic("load_vulkan_descriptor", src_comp=[-1], dest_comp=0,
          indices=[DESC_TYPE], flags=[CAN_ELIMINATE, CAN_REORDER])

# variable atomic intrinsics
#
# All of these variable atomic memory operations read a value from memory,
# compute a new value using one of the operations below, write the new value
# to memory, and return the original value read.
#
# All operations take 2 sources except CompSwap that takes 3. These sources
# represent:
#
# 0: A deref to the memory on which to perform the atomic
# 1: The data parameter to the atomic function (i.e. the value to add
#    in shared_atomic_add, etc).
# 2: For CompSwap only: the second data parameter.
intrinsic("deref_atomic_add",  src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_imin", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_umin", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_imax", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_umax", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_and",  src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_or",   src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_xor",  src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_exchange", src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_comp_swap", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_fadd",  src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_fmin",  src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_fmax",  src_comp=[-1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("deref_atomic_fcomp_swap", src_comp=[-1, 1, 1], dest_comp=1, indices=[ACCESS])

# SSBO atomic intrinsics
#
# All of the SSBO atomic memory operations read a value from memory,
# compute a new value using one of the operations below, write the new
# value to memory, and return the original value read.
#
# All operations take 3 sources except CompSwap that takes 4. These
# sources represent:
#
# 0: The SSBO buffer index.
# 1: The offset into the SSBO buffer of the variable that the atomic
#    operation will operate on.
# 2: The data parameter to the atomic function (i.e. the value to add
#    in ssbo_atomic_add, etc).
# 3: For CompSwap only: the second data parameter.
intrinsic("ssbo_atomic_add",  src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_imin", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_umin", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_imax", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_umax", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_and",  src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_or",   src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_xor",  src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_exchange", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_comp_swap", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_fadd", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_fmin", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_fmax", src_comp=[1, 1, 1], dest_comp=1, indices=[ACCESS])
intrinsic("ssbo_atomic_fcomp_swap", src_comp=[1, 1, 1, 1], dest_comp=1, indices=[ACCESS])

# CS shared variable atomic intrinsics
#
# All of the shared variable atomic memory operations read a value from
# memory, compute a new value using one of the operations below, write the
# new value to memory, and return the original value read.
#
# All operations take 2 sources except CompSwap that takes 3. These
# sources represent:
#
# 0: The offset into the shared variable storage region that the atomic
#    operation will operate on.
# 1: The data parameter to the atomic function (i.e. the value to add
#    in shared_atomic_add, etc).
# 2: For CompSwap only: the second data parameter.
intrinsic("shared_atomic_add",  src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_imin", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_umin", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_imax", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_umax", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_and",  src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_or",   src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_xor",  src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_exchange", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_comp_swap", src_comp=[1, 1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_fadd",  src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_fmin",  src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_fmax",  src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("shared_atomic_fcomp_swap", src_comp=[1, 1, 1], dest_comp=1, indices=[BASE])

# Global atomic intrinsics
#
# All of the global atomic memory operations read a value from memory,
# compute a new value using one of the operations below, write the new
# value to memory, and return the original value read.
#
# All operations take 2 sources except CompSwap that takes 3. These
# sources represent:
#
# 0: The memory address that the atomic operation will operate on.
# 1: The data parameter to the atomic function (i.e. the value to add
#    in global_atomic_add, etc).
# 2: For CompSwap only: the second data parameter.
intrinsic("global_atomic_add",  src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_imin", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_umin", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_imax", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_umax", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_and",  src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_or",   src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_xor",  src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_exchange", src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_comp_swap", src_comp=[1, 1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_fadd",  src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_fmin",  src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_fmax",  src_comp=[1, 1], dest_comp=1, indices=[BASE])
intrinsic("global_atomic_fcomp_swap", src_comp=[1, 1, 1], dest_comp=1, indices=[BASE])

# Declares a load_<name> system-value intrinsic: no sources, eliminable,
# reorderable, with the given destination component count and bit sizes.
def system_value(name, dest_comp, indices=[], bit_sizes=[32]):
    intrinsic("load_" + name, [], dest_comp, indices,
              flags=[CAN_ELIMINATE, CAN_REORDER], sysval=True,
              bit_sizes=bit_sizes)

system_value("frag_coord", 4)
system_value("front_face", 1, bit_sizes=[1, 32])
# Scalar vertex-/instance-/draw-related identifiers (default 32-bit).
for _sv in ("vertex_id", "vertex_id_zero_base", "first_vertex",
            "is_indexed_draw", "base_vertex", "instance_id",
            "base_instance", "draw_id", "sample_id"):
    system_value(_sv, 1)
# sample_id_no_per_sample is like sample_id but does not imply per-
# sample shading.  See the lower_helper_invocation option.
system_value("sample_id_no_per_sample", 1)
system_value("sample_pos", 2)
system_value("sample_mask_in", 1)
system_value("primitive_id", 1)
system_value("invocation_id", 1)
# Tessellation-related values.
system_value("tess_coord", 3)
system_value("tess_level_outer", 4)
system_value("tess_level_inner", 2)
system_value("patch_vertices_in", 1)
# Compute-shader invocation identifiers.
system_value("local_invocation_id", 3)
system_value("local_invocation_index", 1)
system_value("work_group_id", 3)
system_value("user_clip_plane", 4, indices=[UCP_ID])
system_value("num_work_groups", 3)
system_value("helper_invocation", 1, bit_sizes=[1, 32])
system_value("alpha_ref_float", 1)
system_value("layer_id", 1)
system_value("view_index", 1)
system_value("subgroup_size", 1)
system_value("subgroup_invocation", 1)
# Subgroup comparison masks share a shape: 32- or 64-bit components.
for _rel in ("eq", "ge", "gt", "le", "lt"):
    system_value("subgroup_%s_mask" % _rel, 0, bit_sizes=[32, 64])
system_value("num_subgroups", 1)
system_value("subgroup_id", 1)
system_value("local_group_size", 3)
system_value("global_invocation_id", 3, bit_sizes=[32, 64])
system_value("global_invocation_index", 1, bit_sizes=[32, 64])
system_value("work_dim", 1)
# Driver-specific viewport scale/offset parameters.
#
# VC4 and V3D need to emit a scaled version of the position in the vertex
# shaders for binning, and having system values lets us move the math for
# that into NIR.
#
# Panfrost needs to implement all coordinate transformation in the
# vertex shader; system values allow us to share this routine in NIR.
for _axis in ("x", "y", "z"):
    system_value("viewport_%s_scale" % _axis, 1)
system_value("viewport_z_offset", 1)
system_value("viewport_scale", 3)
system_value("viewport_offset", 3)

# Blend constant color values.  Float values are clamped.
for _chan in ("r", "g", "b", "a"):
    system_value("blend_const_color_%s_float" % _chan, 1)
system_value("blend_const_color_rgba8888_unorm", 1)
system_value("blend_const_color_aaaa8888_unorm", 1)

# Barycentric coordinate intrinsics.
#
# These set up the barycentric coordinates for a particular interpolation.
# The first three are for the simple cases: pixel, centroid, or per-sample
# (at gl_SampleID).  The next two handle interpolating at a specified
# sample location, or interpolating with a vec2 offset.
#
# The interp_mode index should be either the INTERP_MODE_SMOOTH or
# INTERP_MODE_NOPERSPECTIVE enum values.
#
# The vec2 value produced by these intrinsics is intended for use as the
# barycoord source of a load_interpolated_input intrinsic.

def barycentric(name, src_comp=None):
    """Define a "load_barycentric_<name>" intrinsic producing a vec2.

    src_comp previously defaulted to a mutable list ([]) shared across
    calls; use a None sentinel and build a fresh list per call instead.
    """
    intrinsic("load_barycentric_" + name,
              src_comp=[] if src_comp is None else src_comp, dest_comp=2,
              indices=[INTERP_MODE], flags=[CAN_ELIMINATE, CAN_REORDER])

# The simple cases take no sources.
for _mode in ("pixel", "centroid", "sample"):
    barycentric(_mode)
# src[] = { sample_id }.
barycentric("at_sample", [1])
# src[] = { offset.xy }.
barycentric("at_offset", [2])

# Load sample position:
#
# Takes a sample # and returns a sample position.  Used for lowering
# interpolateAtSample() to interpolateAtOffset()
intrinsic("load_sample_pos_from_id", dest_comp=2, src_comp=[1],
          flags=[CAN_ELIMINATE, CAN_REORDER])

# Loads what I believe is the primitive size, for scaling ij to pixel size:
intrinsic("load_size_ir3", flags=[CAN_ELIMINATE, CAN_REORDER], dest_comp=1)

# Load operations pull data from some piece of GPU memory.  All load
# operations operate in terms of offsets into some piece of theoretical
# memory.  Loads from externally visible memory (UBO and SSBO) simply take a
# byte offset as a source.  Loads from opaque memory (uniforms, inputs, etc.)
# take a base+offset pair where the nir_intrinsic_base() gives the location
# of the start of the variable being loaded and the offset source is an
# offset into that variable.
#
# Uniform load operations have a nir_intrinsic_range() index that specifies the
# range (starting at base) of the data from which we are loading.  If
# range == 0, then the range is unknown.
#
# Some load operations such as UBO/SSBO load and per_vertex loads take an
# additional source to specify which UBO/SSBO/vertex to load from.
#
# The exact address type depends on the lowering pass that generates the
# load/store intrinsics.  Typically, this is vec4 units for things such as
# varying slots and float units for fragment shader inputs.  UBO and SSBO
# offsets are always in bytes.

def load(name, num_srcs, indices=None, flags=None):
    """Define a "load_<name>" intrinsic with num_srcs single-component
    sources and a variable-sized destination (dest_comp=0).

    indices/flags previously used mutable list defaults shared across
    calls; use None sentinels and build a fresh list per call instead.
    """
    intrinsic("load_" + name, [1] * num_srcs, dest_comp=0,
              indices=[] if indices is None else indices,
              flags=[] if flags is None else flags)

# Consistency fix: the calls below previously mixed positional and
# `flags=` keyword usage for the same parameter; they now all pass
# (name, num_srcs, indices, flags) positionally.  Behavior is unchanged.
# src[] = { offset }.
load("uniform", 1, [BASE, RANGE], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { buffer_index, offset }.
load("ubo", 2, [ACCESS, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { offset }.
load("input", 1, [BASE, COMPONENT], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { vertex, offset }.
load("per_vertex_input", 2, [BASE, COMPONENT], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { barycoord, offset }.
intrinsic("load_interpolated_input", src_comp=[2, 1], dest_comp=0,
          indices=[BASE, COMPONENT], flags=[CAN_ELIMINATE, CAN_REORDER])

# src[] = { buffer_index, offset }.
load("ssbo", 2, [ACCESS, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
# src[] = { offset }.
load("output", 1, [BASE, COMPONENT], [CAN_ELIMINATE])
# src[] = { vertex, offset }.
load("per_vertex_output", 2, [BASE, COMPONENT], [CAN_ELIMINATE])
# src[] = { offset }.
load("shared", 1, [BASE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
# src[] = { offset }.
load("push_constant", 1, [BASE, RANGE], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { offset }.
load("constant", 1, [BASE, RANGE], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { address }.
load("global", 1, [ACCESS, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])
# src[] = { address }.
load("kernel_input", 1, [BASE, RANGE, ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE, CAN_REORDER])
# src[] = { offset }.
load("scratch", 1, [ALIGN_MUL, ALIGN_OFFSET], [CAN_ELIMINATE])

# Stores work the same way as loads, except now the first source is the value
# to store and the second (and possibly third) source specify where to store
# the value.  SSBO and shared memory stores also have a
# nir_intrinsic_write_mask()

def store(name, num_srcs, indices=None, flags=None):
    """Define a "store_<name>" intrinsic: src[0] is the (variable-sized)
    value to store and the remaining num_srcs - 1 sources are scalars.

    indices/flags previously used mutable list defaults shared across
    calls; use None sentinels and build a fresh list per call instead.
    """
    intrinsic("store_" + name, [0] + ([1] * (num_srcs - 1)),
              indices=[] if indices is None else indices,
              flags=[] if flags is None else flags)

# NOTE: the order of each indices list is semantic (it fixes the index
# slots), so it is reproduced exactly.
# src[] = { value, offset }.
store("output", 2, indices=[BASE, WRMASK, COMPONENT])
# src[] = { value, vertex, offset }.
store("per_vertex_output", 3, indices=[BASE, WRMASK, COMPONENT])
# src[] = { value, block_index, offset }
store("ssbo", 3, indices=[WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET])
# src[] = { value, offset }.
store("shared", 2, indices=[BASE, WRMASK, ALIGN_MUL, ALIGN_OFFSET])
# src[] = { value, address }.
store("global", 2, indices=[WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET])
# src[] = { value, offset }.
store("scratch", 2, indices=[ALIGN_MUL, ALIGN_OFFSET, WRMASK])

# IR3-specific versions of most SSBO intrinsics.  The only difference
# compared to the originals is an extra source holding the dword-offset,
# which the backend needs in addition to the byte-offset NIR already
# provides in one of the sources.
#
# The 'ir3_nir_lower_io_offset' NIR lowering pass replaces the original
# SSBO intrinsics with these, always placing the computed dword-offset
# in the last source.
#
# The float versions are not handled because those are not supported
# by the backend.
intrinsic("store_ssbo_ir3", src_comp=[0, 1, 1, 1],
          indices=[WRMASK, ACCESS, ALIGN_MUL, ALIGN_OFFSET])
intrinsic("load_ssbo_ir3", src_comp=[1, 1, 1], dest_comp=0,
          indices=[ACCESS, ALIGN_MUL, ALIGN_OFFSET], flags=[CAN_ELIMINATE])
# All atomics share the shape of the original SSBO atomics plus the
# trailing dword-offset source.
for _op in ("add", "imin", "umin", "imax", "umax", "and", "or", "xor",
            "exchange"):
    intrinsic("ssbo_atomic_%s_ir3" % _op, src_comp=[1, 1, 1, 1], dest_comp=1)
intrinsic("ssbo_atomic_comp_swap_ir3", src_comp=[1, 1, 1, 1, 1], dest_comp=1)
719