/*
 * Copyright © 2019 Valve Corporation
 * Copyright © 2018 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_meta.h"
#include "radv_private.h"
#include "vk_format.h"

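/* Builds a compute shader that reads back every sample of a multisampled
 * color image through a sampled-image binding and writes it to the same image
 * through a storage-image binding, so that the color data ends up matching a
 * fully expanded FMASK. One shader variant is built per sample count.
 */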
static nir_shader *
build_fmask_expand_compute_shader(struct radv_device *device, int samples)
{
   const struct glsl_type *type =
      glsl_sampler_type(GLSL_SAMPLER_DIM_MS, false, true, GLSL_TYPE_FLOAT);
   const struct glsl_type *img_type = glsl_image_type(GLSL_SAMPLER_DIM_MS, true, GLSL_TYPE_FLOAT);

   nir_builder b =
      nir_builder_init_simple_shader(MESA_SHADER_COMPUTE, NULL, "meta_fmask_expand_cs-%d", samples);
   b.shader->info.workgroup_size[0] = 8;
   b.shader->info.workgroup_size[1] = 8;
   b.shader->info.workgroup_size[2] = 1;

   nir_variable *input_img = nir_variable_create(b.shader, nir_var_uniform, type, "s_tex");
   input_img->data.descriptor_set = 0;
   input_img->data.binding = 0;

   nir_variable *output_img = nir_variable_create(b.shader, nir_var_uniform, img_type, "out_img");
   output_img->data.descriptor_set = 0;
   output_img->data.binding = 1;
   output_img->data.access = ACCESS_NON_READABLE;

   nir_ssa_def *input_img_deref = &nir_build_deref_var(&b, input_img)->dest.ssa;
   nir_ssa_def *output_img_deref = &nir_build_deref_var(&b, output_img)->dest.ssa;

   nir_ssa_def *tex_coord = get_global_ids(&b, 3);

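   /* Fetch every sample with txf_ms through the sampled-image binding, whose
    * read path resolves samples through FMASK.
    */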
   nir_tex_instr *tex_instr[8];
   for (uint32_t i = 0; i < samples; i++) {
      tex_instr[i] = nir_tex_instr_create(b.shader, 3);

      nir_tex_instr *tex = tex_instr[i];
      tex->sampler_dim = GLSL_SAMPLER_DIM_MS;
      tex->op = nir_texop_txf_ms;
      tex->src[0].src_type = nir_tex_src_coord;
      tex->src[0].src = nir_src_for_ssa(tex_coord);
      tex->src[1].src_type = nir_tex_src_ms_index;
      tex->src[1].src = nir_src_for_ssa(nir_imm_int(&b, i));
      tex->src[2].src_type = nir_tex_src_texture_deref;
      tex->src[2].src = nir_src_for_ssa(input_img_deref);
      tex->dest_type = nir_type_float32;
      tex->is_array = true;
      tex->coord_components = 3;

      nir_ssa_dest_init(&tex->instr, &tex->dest, 4, 32, "tex");
      nir_builder_instr_insert(&b, &tex->instr);
   }

   nir_ssa_def *img_coord =
      nir_vec4(&b, nir_channel(&b, tex_coord, 0), nir_channel(&b, tex_coord, 1),
               nir_channel(&b, tex_coord, 2), nir_ssa_undef(&b, 1, 32));

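   /* Write every fetched sample back through the storage-image binding, which
    * does not go through FMASK, leaving the color data fully expanded.
    */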
   for (uint32_t i = 0; i < samples; i++) {
      nir_ssa_def *outval = &tex_instr[i]->dest.ssa;

      nir_image_deref_store(&b, output_img_deref, img_coord, nir_imm_int(&b, i), outval,
                            nir_imm_int(&b, 0), .image_dim = GLSL_SAMPLER_DIM_MS, .image_array = true);
   }

   return b.shader;
}

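/* Expands the FMASK-compressed color data of the given image in place by
 * dispatching the expand compute shader over all pixels and layers, then
 * re-initializes FMASK to the fully expanded state.
 */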
void
radv_expand_fmask_image_inplace(struct radv_cmd_buffer *cmd_buffer, struct radv_image *image,
                                const VkImageSubresourceRange *subresourceRange)
{
   struct radv_device *device = cmd_buffer->device;
   struct radv_meta_saved_state saved_state;
   const uint32_t samples = image->info.samples;
   const uint32_t samples_log2 = ffs(samples) - 1;
   unsigned layer_count = radv_get_layerCount(image, subresourceRange);
   struct radv_image_view iview;

   radv_meta_save(&saved_state, cmd_buffer,
                  RADV_META_SAVE_COMPUTE_PIPELINE | RADV_META_SAVE_DESCRIPTORS);

   VkPipeline pipeline = device->meta_state.fmask_expand.pipeline[samples_log2];

   radv_CmdBindPipeline(radv_cmd_buffer_to_handle(cmd_buffer), VK_PIPELINE_BIND_POINT_COMPUTE,
                        pipeline);

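   /* Make pending writes to the image visible to the expand shader. */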
   cmd_buffer->state.flush_bits |= radv_dst_access_flush(
      cmd_buffer, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT, image);

   radv_image_view_init(&iview, device,
                        &(VkImageViewCreateInfo){
                           .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
                           .image = radv_image_to_handle(image),
                           .viewType = radv_meta_get_view_type(image),
                           .format = vk_format_no_srgb(image->vk_format),
                           .subresourceRange =
                              {
                                 .aspectMask = subresourceRange->aspectMask,
                                 .baseMipLevel = 0,
                                 .levelCount = 1,
                                 .baseArrayLayer = subresourceRange->baseArrayLayer,
                                 .layerCount = layer_count,
                              },
                        },
                        NULL);

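   /* Bind the same view as both the sampled source (binding 0) and the
    * storage destination (binding 1) via a push descriptor set.
    */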
   radv_meta_push_descriptor_set(
      cmd_buffer, VK_PIPELINE_BIND_POINT_COMPUTE,
      cmd_buffer->device->meta_state.fmask_expand.p_layout, 0, /* set */
      2,                                                       /* descriptorWriteCount */
      (VkWriteDescriptorSet[]){{.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                                .dstBinding = 0,
                                .dstArrayElement = 0,
                                .descriptorCount = 1,
                                .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
                                .pImageInfo =
                                   (VkDescriptorImageInfo[]){
                                      {.sampler = VK_NULL_HANDLE,
                                       .imageView = radv_image_view_to_handle(&iview),
                                       .imageLayout = VK_IMAGE_LAYOUT_GENERAL},
                                   }},
                               {.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
                                .dstBinding = 1,
                                .dstArrayElement = 0,
                                .descriptorCount = 1,
                                .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
                                .pImageInfo = (VkDescriptorImageInfo[]){
                                   {.sampler = VK_NULL_HANDLE,
                                    .imageView = radv_image_view_to_handle(&iview),
                                    .imageLayout = VK_IMAGE_LAYOUT_GENERAL},
                                }}});

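   /* One invocation per pixel and layer. */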
   radv_unaligned_dispatch(cmd_buffer, image->info.width, image->info.height, layer_count);

   radv_image_view_finish(&iview);

   radv_meta_restore(&saved_state, cmd_buffer);

   cmd_buffer->state.flush_bits |=
      RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
      radv_src_access_flush(cmd_buffer, VK_ACCESS_SHADER_WRITE_BIT, image);

   /* Re-initialize FMASK in fully expanded mode. */
   cmd_buffer->state.flush_bits |= radv_init_fmask(cmd_buffer, image, subresourceRange);
}

void
radv_device_finish_meta_fmask_expand_state(struct radv_device *device)
{
   struct radv_meta_state *state = &device->meta_state;

   for (uint32_t i = 0; i < MAX_SAMPLES_LOG2; ++i) {
      radv_DestroyPipeline(radv_device_to_handle(device), state->fmask_expand.pipeline[i],
                           &state->alloc);
   }
   radv_DestroyPipelineLayout(radv_device_to_handle(device), state->fmask_expand.p_layout,
                              &state->alloc);

   radv_DestroyDescriptorSetLayout(radv_device_to_handle(device), state->fmask_expand.ds_layout,
                                   &state->alloc);
}

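/* Creates the compute pipeline for one sample count from the expand shader. */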
static VkResult
create_fmask_expand_pipeline(struct radv_device *device, int samples, VkPipeline *pipeline)
{
   struct radv_meta_state *state = &device->meta_state;
   VkResult result;
   nir_shader *cs = build_fmask_expand_compute_shader(device, samples);

   VkPipelineShaderStageCreateInfo pipeline_shader_stage = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
      .stage = VK_SHADER_STAGE_COMPUTE_BIT,
      .module = vk_shader_module_handle_from_nir(cs),
      .pName = "main",
      .pSpecializationInfo = NULL,
   };

   VkComputePipelineCreateInfo vk_pipeline_info = {
      .sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
      .stage = pipeline_shader_stage,
      .flags = 0,
      .layout = state->fmask_expand.p_layout,
   };

   result = radv_CreateComputePipelines(radv_device_to_handle(device),
                                        radv_pipeline_cache_to_handle(&state->cache), 1,
                                        &vk_pipeline_info, NULL, pipeline);

   ralloc_free(cs);
   return result;
}

VkResult
radv_device_init_meta_fmask_expand_state(struct radv_device *device)
{
   struct radv_meta_state *state = &device->meta_state;
   VkResult result;

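   /* Push-descriptor set layout: binding 0 is the sampled source image,
    * binding 1 the storage destination image.
    */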
   VkDescriptorSetLayoutCreateInfo ds_create_info = {
      .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
      .flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR,
      .bindingCount = 2,
      .pBindings = (VkDescriptorSetLayoutBinding[]){
         {.binding = 0,
          .descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE,
          .descriptorCount = 1,
          .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
          .pImmutableSamplers = NULL},
         {.binding = 1,
          .descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
          .descriptorCount = 1,
          .stageFlags = VK_SHADER_STAGE_COMPUTE_BIT,
          .pImmutableSamplers = NULL},
      }};

   result = radv_CreateDescriptorSetLayout(radv_device_to_handle(device), &ds_create_info,
                                           &state->alloc, &state->fmask_expand.ds_layout);
   if (result != VK_SUCCESS)
      goto fail;

   VkPipelineLayoutCreateInfo color_create_info = {
      .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
      .setLayoutCount = 1,
      .pSetLayouts = &state->fmask_expand.ds_layout,
      .pushConstantRangeCount = 0,
      .pPushConstantRanges = NULL,
   };

   result = radv_CreatePipelineLayout(radv_device_to_handle(device), &color_create_info,
                                      &state->alloc, &state->fmask_expand.p_layout);
   if (result != VK_SUCCESS)
      goto fail;

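   /* Build one expand pipeline per supported sample count. */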
   for (uint32_t i = 0; i < MAX_SAMPLES_LOG2; i++) {
      uint32_t samples = 1 << i;
      result = create_fmask_expand_pipeline(device, samples, &state->fmask_expand.pipeline[i]);
      if (result != VK_SUCCESS)
         goto fail;
   }

   return VK_SUCCESS;
fail:
   radv_device_finish_meta_fmask_expand_state(device);
   return result;
}