//
// Copyright 2019 Karol Herbst
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
//

#include "invocation.hpp"

#include <tuple>

#include "core/device.hpp"
#include "core/error.hpp"
#include "core/binary.hpp"
#include "pipe/p_state.h"
#include "util/algorithm.hpp"
#include "util/functional.hpp"

#include <compiler/glsl_types.h>
#include <compiler/nir/nir_builder.h>
#include <compiler/nir/nir_serialize.h>
#include <compiler/spirv/nir_spirv.h>
#include <util/u_math.h>

using namespace clover;

#ifdef HAVE_CLOVER_SPIRV

// Refs and unrefs the glsl_type_singleton.
static class glsl_type_ref {
public:
   glsl_type_ref() {
      glsl_type_singleton_init_or_ref();
   }

   ~glsl_type_ref() {
      glsl_type_singleton_decref();
   }
} glsl_type_ref;

static const nir_shader_compiler_options *
dev_get_nir_compiler_options(const device &dev)
{
   const void *co = dev.get_compiler_options(PIPE_SHADER_IR_NIR);
   return static_cast<const nir_shader_compiler_options*>(co);
}

static void debug_function(void *private_data,
                           enum nir_spirv_debug_level level, size_t spirv_offset,
                           const char *message)
{
   assert(private_data);
   auto r_log = reinterpret_cast<std::string *>(private_data);
   *r_log += message;
}

static void
clover_arg_size_align(const glsl_type *type, unsigned *size, unsigned *align)
{
   if (type == glsl_type::sampler_type) {
      *size = 0;
      *align = 1;
   } else if (type->is_image()) {
      *size = *align = sizeof(cl_mem);
   } else {
      *size = type->cl_size();
      *align = type->cl_alignment();
   }
}

/* Assign driver_locations to image and sampler uniforms and rewrite derefs,
 * tex instructions and image intrinsics to use those indices directly. */
static bool
clover_nir_lower_images(nir_shader *shader)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(shader);

   ASSERTED int last_loc = -1;
   int num_rd_images = 0, num_wr_images = 0, num_samplers = 0;
   nir_foreach_uniform_variable(var, shader) {
      if (glsl_type_is_image(var->type) || glsl_type_is_sampler(var->type)) {
         /* Assume they come in order */
         assert(var->data.location > last_loc);
         last_loc = var->data.location;
      }

      /* TODO: Constant samplers */
      if (var->type == glsl_bare_sampler_type()) {
         var->data.driver_location = num_samplers++;
      } else if (glsl_type_is_image(var->type)) {
         if (var->data.access & ACCESS_NON_WRITEABLE)
            var->data.driver_location = num_rd_images++;
         else
            var->data.driver_location = num_wr_images++;
      } else {
         /* CL shouldn't have any sampled images */
         assert(!glsl_type_is_sampler(var->type));
      }
   }
   shader->info.num_textures = num_rd_images;
   BITSET_ZERO(shader->info.textures_used);
   if (num_rd_images)
      BITSET_SET_RANGE_INSIDE_WORD(shader->info.textures_used, 0, num_rd_images - 1);
   shader->info.num_images = num_wr_images;

   nir_builder b;
   nir_builder_init(&b, impl);

   bool progress = false;
   nir_foreach_block_reverse(block, impl) {
      nir_foreach_instr_reverse_safe(instr, block) {
         switch (instr->type) {
         case nir_instr_type_deref: {
            nir_deref_instr *deref = nir_instr_as_deref(instr);
            if (deref->deref_type != nir_deref_type_var)
               break;

            if (!glsl_type_is_image(deref->type) &&
                !glsl_type_is_sampler(deref->type))
               break;

            b.cursor = nir_instr_remove(&deref->instr);
            nir_ssa_def *loc =
               nir_imm_intN_t(&b, deref->var->data.driver_location,
                              deref->dest.ssa.bit_size);
            nir_ssa_def_rewrite_uses(&deref->dest.ssa, loc);
            progress = true;
            break;
         }

         case nir_instr_type_tex: {
            nir_tex_instr *tex = nir_instr_as_tex(instr);
            unsigned count = 0;
            for (unsigned i = 0; i < tex->num_srcs; i++) {
               if (tex->src[i].src_type == nir_tex_src_texture_deref ||
                   tex->src[i].src_type == nir_tex_src_sampler_deref) {
                  nir_deref_instr *deref = nir_src_as_deref(tex->src[i].src);
                  if (deref->deref_type == nir_deref_type_var) {
                     /* In this case, we know the actual variable */
                     if (tex->src[i].src_type == nir_tex_src_texture_deref)
                        tex->texture_index = deref->var->data.driver_location;
                     else
                        tex->sampler_index = deref->var->data.driver_location;
                     /* This source gets discarded */
                     nir_instr_rewrite_src(&tex->instr, &tex->src[i].src,
                                           NIR_SRC_INIT);
                     continue;
                  } else {
                     assert(tex->src[i].src.is_ssa);
                     b.cursor = nir_before_instr(&tex->instr);
                     /* Back-ends expect a 32-bit thing, not 64-bit */
                     nir_ssa_def *offset = nir_u2u32(&b, tex->src[i].src.ssa);
                     if (tex->src[i].src_type == nir_tex_src_texture_deref)
                        tex->src[count].src_type = nir_tex_src_texture_offset;
                     else
                        tex->src[count].src_type = nir_tex_src_sampler_offset;
                     nir_instr_rewrite_src(&tex->instr, &tex->src[count].src,
                                           nir_src_for_ssa(offset));
                  }
               } else {
                  /* If we've removed a source, move this one down */
                  if (count != i) {
                     assert(count < i);
                     tex->src[count].src_type = tex->src[i].src_type;
                     nir_instr_move_src(&tex->instr, &tex->src[count].src,
                                        &tex->src[i].src);
                  }
               }
               count++;
            }
            tex->num_srcs = count;
            progress = true;
            break;
         }

         case nir_instr_type_intrinsic: {
            nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
            switch (intrin->intrinsic) {
            case nir_intrinsic_image_deref_load:
            case nir_intrinsic_image_deref_store:
            case nir_intrinsic_image_deref_atomic_add:
            case nir_intrinsic_image_deref_atomic_imin:
            case nir_intrinsic_image_deref_atomic_umin:
            case nir_intrinsic_image_deref_atomic_imax:
            case nir_intrinsic_image_deref_atomic_umax:
            case nir_intrinsic_image_deref_atomic_and:
            case nir_intrinsic_image_deref_atomic_or:
            case nir_intrinsic_image_deref_atomic_xor:
            case nir_intrinsic_image_deref_atomic_exchange:
            case nir_intrinsic_image_deref_atomic_comp_swap:
            case nir_intrinsic_image_deref_atomic_fadd:
            case nir_intrinsic_image_deref_atomic_inc_wrap:
            case nir_intrinsic_image_deref_atomic_dec_wrap:
            case nir_intrinsic_image_deref_size:
            case nir_intrinsic_image_deref_samples: {
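               /* src[0] of these image_deref_* intrinsics is the image deref;
                * convert each one to its index-based (non-deref) form with a
                * 32-bit handle.  The deref itself becomes an immediate
                * driver_location when the reverse walk reaches it. */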
               assert(intrin->src[0].is_ssa);
               b.cursor = nir_before_instr(&intrin->instr);
               /* Back-ends expect a 32-bit thing, not 64-bit */
               nir_ssa_def *offset = nir_u2u32(&b, intrin->src[0].ssa);
               nir_rewrite_image_intrinsic(intrin, offset, false);
               progress = true;
               break;
            }

            default:
               break;
            }
            break;
         }

         default:
            break;
         }
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

struct clover_lower_nir_state {
   std::vector<binary::argument> &args;
   uint32_t global_dims;
   nir_variable *constant_var;
   nir_variable *printf_buffer;
   nir_variable *offset_vars[3];
};

static bool
clover_lower_nir_filter(const nir_instr *instr, const void *)
{
   return instr->type == nir_instr_type_intrinsic;
}

static nir_ssa_def *
clover_lower_nir_instr(nir_builder *b, nir_instr *instr, void *_state)
{
   clover_lower_nir_state *state = reinterpret_cast<clover_lower_nir_state*>(_state);
   nir_intrinsic_instr *intrinsic = nir_instr_as_intrinsic(instr);

   switch (intrinsic->intrinsic) {
   case nir_intrinsic_load_printf_buffer_address: {
      if (!state->printf_buffer) {
         unsigned location = state->args.size();
         state->args.emplace_back(binary::argument::global, sizeof(size_t),
                                  8, 8, binary::argument::zero_ext,
                                  binary::argument::printf_buffer);

         const glsl_type *type = glsl_uint64_t_type();
         state->printf_buffer = nir_variable_create(b->shader, nir_var_uniform,
                                                    type, "global_printf_buffer");
         state->printf_buffer->data.location = location;
      }
      return nir_load_var(b, state->printf_buffer);
   }
   case nir_intrinsic_load_base_global_invocation_id: {
      nir_ssa_def *loads[3];

      /* create variables if we didn't do so already */
      if (!state->offset_vars[0]) {
         /* TODO: fix for 64 bit */
         /* Even though we only place one scalar argument, clover will bind up to
          * three 32 bit values
          */
         unsigned location = state->args.size();
         state->args.emplace_back(binary::argument::scalar, 4, 4, 4,
                                  binary::argument::zero_ext,
                                  binary::argument::grid_offset);

         const glsl_type *type = glsl_uint_type();
         for (uint32_t i = 0; i < 3; i++) {
            state->offset_vars[i] =
               nir_variable_create(b->shader, nir_var_uniform, type,
                                   "global_invocation_id_offsets");
            state->offset_vars[i]->data.location = location + i;
         }
      }

      for (int i = 0; i < 3; i++) {
         nir_variable *var = state->offset_vars[i];
         loads[i] = var ? nir_load_var(b, var) : nir_imm_int(b, 0);
      }

      return nir_u2u(b, nir_vec(b, loads, state->global_dims),
                     nir_dest_bit_size(intrinsic->dest));
   }
   case nir_intrinsic_load_constant_base_ptr: {
      return nir_load_var(b, state->constant_var);
   }

   default:
      return NULL;
   }
}

static bool
clover_lower_nir(nir_shader *nir, std::vector<binary::argument> &args,
                 uint32_t dims, uint32_t pointer_bit_size)
{
   nir_variable *constant_var = NULL;
   if (nir->constant_data_size) {
      const glsl_type *type = pointer_bit_size == 64 ?
         glsl_uint64_t_type() : glsl_uint_type();

      constant_var = nir_variable_create(nir, nir_var_uniform, type,
                                         "constant_buffer_addr");
      constant_var->data.location = args.size();

      args.emplace_back(binary::argument::global, sizeof(cl_mem),
                        pointer_bit_size / 8, pointer_bit_size / 8,
                        binary::argument::zero_ext,
                        binary::argument::constant_buffer);
   }

   clover_lower_nir_state state = { args, dims, constant_var };
   return nir_shader_lower_instructions(nir,
      clover_lower_nir_filter, clover_lower_nir_instr, &state);
}

static spirv_to_nir_options
create_spirv_options(const device &dev, std::string &r_log)
{
   struct spirv_to_nir_options spirv_options = {};
   spirv_options.environment = NIR_SPIRV_OPENCL;
   if (dev.address_bits() == 32u) {
      spirv_options.shared_addr_format = nir_address_format_32bit_offset;
      spirv_options.global_addr_format = nir_address_format_32bit_global;
      spirv_options.temp_addr_format = nir_address_format_32bit_offset;
      spirv_options.constant_addr_format = nir_address_format_32bit_global;
   } else {
      spirv_options.shared_addr_format = nir_address_format_32bit_offset_as_64bit;
      spirv_options.global_addr_format = nir_address_format_64bit_global;
      spirv_options.temp_addr_format = nir_address_format_32bit_offset_as_64bit;
      spirv_options.constant_addr_format = nir_address_format_64bit_global;
   }
   spirv_options.caps.address = true;
   spirv_options.caps.float64 = true;
   spirv_options.caps.int8 = true;
   spirv_options.caps.int16 = true;
   spirv_options.caps.int64 = true;
   spirv_options.caps.kernel = true;
   spirv_options.caps.kernel_image = dev.image_support();
   spirv_options.caps.int64_atomics = dev.has_int64_atomics();
   spirv_options.debug.func = &debug_function;
   spirv_options.debug.private_data = &r_log;
   spirv_options.caps.printf = true;
   return spirv_options;
}

struct disk_cache *clover::nir::create_clc_disk_cache(void)
{
   struct mesa_sha1 ctx;
   unsigned char sha1[20];
   char cache_id[20 * 2 + 1];
   _mesa_sha1_init(&ctx);

   if (!disk_cache_get_function_identifier((void *)clover::nir::create_clc_disk_cache, &ctx))
      return NULL;

   _mesa_sha1_final(&ctx, sha1);

   disk_cache_format_hex_id(cache_id, sha1, 20 * 2);
   return disk_cache_create("clover-clc", cache_id, 0);
}

void clover::nir::check_for_libclc(const device &dev)
{
   if (!nir_can_find_libclc(dev.address_bits()))
      throw error(CL_COMPILER_NOT_AVAILABLE);
}

nir_shader *clover::nir::load_libclc_nir(const device &dev, std::string &r_log)
{
   spirv_to_nir_options spirv_options = create_spirv_options(dev, r_log);
   auto *compiler_options = dev_get_nir_compiler_options(dev);

   return nir_load_libclc_shader(dev.address_bits(), dev.clc_cache,
                                 &spirv_options, compiler_options);
}

static bool
can_remove_var(nir_variable *var, void *data)
{
   return !(var->type->is_sampler() || var->type->is_image());
}

binary clover::nir::spirv_to_nir(const binary &mod, const device &dev,
                                 std::string &r_log)
{
   spirv_to_nir_options spirv_options = create_spirv_options(dev, r_log);
   std::shared_ptr<nir_shader> nir = dev.clc_nir;
   spirv_options.clc_shader = nir.get();

   binary b;
   // We only insert one section.
   assert(mod.secs.size() == 1);
   auto &section = mod.secs[0];

   binary::resource_id section_id = 0;
   for (const auto &sym : mod.syms) {
      assert(sym.section == 0);

      const auto *binary =
         reinterpret_cast<const pipe_binary_program_header *>(section.data.data());
      const uint32_t *data = reinterpret_cast<const uint32_t *>(binary->blob);
      const size_t num_words = binary->num_bytes / 4;
      const char *name = sym.name.c_str();
      auto *compiler_options = dev_get_nir_compiler_options(dev);

      nir_shader *nir = spirv_to_nir(data, num_words, nullptr, 0,
                                     MESA_SHADER_KERNEL, name,
                                     &spirv_options, compiler_options);
      if (!nir) {
         r_log += "Translation from SPIR-V to NIR for kernel \"" + sym.name +
                  "\" failed.\n";
         throw build_error();
      }

      nir->info.workgroup_size_variable = sym.reqd_work_group_size[0] == 0;
      nir->info.workgroup_size[0] = sym.reqd_work_group_size[0];
      nir->info.workgroup_size[1] = sym.reqd_work_group_size[1];
      nir->info.workgroup_size[2] = sym.reqd_work_group_size[2];
      nir_validate_shader(nir, "clover");

      // Inline all functions first, running the preparatory lowering passes
      // listed in the comment on nir_inline_functions.
      NIR_PASS_V(nir, nir_lower_variable_initializers, nir_var_function_temp);
      NIR_PASS_V(nir, nir_lower_returns);
      NIR_PASS_V(nir, nir_lower_libclc, spirv_options.clc_shader);

      NIR_PASS_V(nir, nir_inline_functions);
      NIR_PASS_V(nir, nir_copy_prop);
      NIR_PASS_V(nir, nir_opt_deref);

      // Pick off the single entrypoint that we want.
      foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
         if (!func->is_entrypoint)
            exec_node_remove(&func->node);
      }
      assert(exec_list_length(&nir->functions) == 1);

      nir_validate_shader(nir, "clover after function inlining");

      NIR_PASS_V(nir, nir_lower_variable_initializers, ~nir_var_function_temp);

      struct nir_lower_printf_options printf_options;
      printf_options.treat_doubles_as_floats = false;
      printf_options.max_buffer_size = dev.max_printf_buffer_size();

      NIR_PASS_V(nir, nir_lower_printf, &printf_options);

      NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_function_temp, NULL);

      // copy propagate to prepare for lower_explicit_io
      NIR_PASS_V(nir, nir_split_var_copies);
      NIR_PASS_V(nir, nir_opt_copy_prop_vars);
      NIR_PASS_V(nir, nir_lower_var_copies);
      NIR_PASS_V(nir, nir_lower_vars_to_ssa);
      NIR_PASS_V(nir, nir_opt_dce);
      NIR_PASS_V(nir, nir_lower_convert_alu_types, NULL);

      NIR_PASS_V(nir, nir_lower_system_values);
      nir_lower_compute_system_values_options sysval_options = { 0 };
      sysval_options.has_base_global_invocation_id = true;
      NIR_PASS_V(nir, nir_lower_compute_system_values, &sysval_options);

      // constant fold before lowering mem constants
      NIR_PASS_V(nir, nir_opt_constant_folding);

      NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_mem_constant, NULL);
      NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_mem_constant,
                 glsl_get_cl_type_size_align);
      if (nir->constant_data_size > 0) {
         assert(nir->constant_data == NULL);
         nir->constant_data = rzalloc_size(nir, nir->constant_data_size);
         nir_gather_explicit_io_initializers(nir, nir->constant_data,
                                             nir->constant_data_size,
                                             nir_var_mem_constant);
      }
      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_constant,
                 spirv_options.constant_addr_format);

      auto args = sym.args;
      NIR_PASS_V(nir, clover_lower_nir, args,
                 dev.max_block_size().size(),
                 dev.address_bits());

      NIR_PASS_V(nir, nir_lower_vars_to_explicit_types,
                 nir_var_uniform, clover_arg_size_align);
      NIR_PASS_V(nir, nir_lower_vars_to_explicit_types,
                 nir_var_mem_shared | nir_var_mem_global |
                 nir_var_function_temp,
                 glsl_get_cl_type_size_align);

      NIR_PASS_V(nir, nir_opt_deref);
      NIR_PASS_V(nir, nir_lower_readonly_images_to_tex, false);
      NIR_PASS_V(nir, clover_nir_lower_images);
      NIR_PASS_V(nir, nir_lower_memcpy);

      /* use offsets for kernel inputs (uniform) */
      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_uniform,
                 nir->info.cs.ptr_size == 64 ?
                 nir_address_format_32bit_offset_as_64bit :
                 nir_address_format_32bit_offset);

      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_constant,
                 spirv_options.constant_addr_format);
      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_shared,
                 spirv_options.shared_addr_format);

      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_function_temp,
                 spirv_options.temp_addr_format);

      NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_global,
                 spirv_options.global_addr_format);

      struct nir_remove_dead_variables_options remove_dead_variables_options = {
         .can_remove_var = can_remove_var,
      };
      NIR_PASS_V(nir, nir_remove_dead_variables, nir_var_all, &remove_dead_variables_options);

      if (compiler_options->lower_int64_options)
         NIR_PASS_V(nir, nir_lower_int64);

      NIR_PASS_V(nir, nir_opt_dce);

      if (nir->constant_data_size) {
         const char *ptr = reinterpret_cast<const char *>(nir->constant_data);
         const binary::section constants {
            section_id,
            binary::section::data_constant,
            nir->constant_data_size,
            { ptr, ptr + nir->constant_data_size }
         };
         nir->constant_data = NULL;
         nir->constant_data_size = 0;
         b.secs.push_back(constants);
      }

      void *mem_ctx = ralloc_context(NULL);
      unsigned printf_info_count = nir->printf_info_count;
      nir_printf_info *printf_infos = nir->printf_info;

      ralloc_steal(mem_ctx, printf_infos);

      struct blob blob;
      blob_init(&blob);
      nir_serialize(&blob, nir, false);

      ralloc_free(nir);

      const pipe_binary_program_header header { uint32_t(blob.size) };
      binary::section text { section_id, binary::section::text_executable, header.num_bytes, {} };
      text.data.insert(text.data.end(), reinterpret_cast<const char *>(&header),
                       reinterpret_cast<const char *>(&header) + sizeof(header));
      text.data.insert(text.data.end(), blob.data, blob.data + blob.size);

      free(blob.data);

      b.printf_strings_in_buffer = false;
      b.printf_infos.reserve(printf_info_count);
      for (unsigned i = 0; i < printf_info_count; i++) {
         binary::printf_info info;

         info.arg_sizes.reserve(printf_infos[i].num_args);
         for (unsigned j = 0; j < printf_infos[i].num_args; j++)
            info.arg_sizes.push_back(printf_infos[i].arg_sizes[j]);

         info.strings.resize(printf_infos[i].string_size);
         memcpy(info.strings.data(), printf_infos[i].strings, printf_infos[i].string_size);
         b.printf_infos.push_back(info);
      }

      ralloc_free(mem_ctx);

      b.syms.emplace_back(sym.name, sym.attributes,
                          sym.reqd_work_group_size, section_id, 0, args);
      b.secs.push_back(text);
      section_id++;
   }
   return b;
}
#else
binary clover::nir::spirv_to_nir(const binary &mod, const device &dev, std::string &r_log)
{
   r_log += "SPIR-V support in clover is not enabled.\n";
   throw error(CL_LINKER_NOT_AVAILABLE);
}
#endif