/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif


/*
 * Reference counting helper functions.
 */


static inline void
pipe_reference_init(struct pipe_reference *dst, unsigned count)
{
   p_atomic_set(&dst->count, count);
}

static inline boolean
pipe_is_referenced(struct pipe_reference *src)
{
   return p_atomic_read(&src->count) != 0;
}

/**
 * Update reference counting.
 * The object previously pointed to, if any, will be unreferenced.
 * Both 'dst' and 'src' may be NULL.
 * \return TRUE if the old object's refcount hits zero and it should be destroyed.
 */
static inline boolean
pipe_reference_described(struct pipe_reference *dst,
                         struct pipe_reference *src,
                         debug_reference_descriptor get_desc)
{
   if (dst != src) {
      /* bump the src.count first */
      if (src) {
         MAYBE_UNUSED int count = p_atomic_inc_return(&src->count);
         assert(count != 1); /* src had to be referenced */
         debug_reference(src, get_desc, 1);
      }

      if (dst) {
         int count = p_atomic_dec_return(&dst->count);
         assert(count != -1); /* dst had to be referenced */
         debug_reference(dst, get_desc, -1);
         if (!count)
            return true;
      }
   }

   return false;
}

static inline boolean
pipe_reference(struct pipe_reference *dst, struct pipe_reference *src)
{
   return pipe_reference_described(dst, src,
                                   (debug_reference_descriptor)
                                   debug_describe_reference);
}
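
/*
 * Example (illustrative sketch, not part of this header): driver objects
 * typically embed a struct pipe_reference as their first member and wrap
 * pipe_reference() in a type-safe helper, mirroring the wrappers below.
 * "my_object" and "my_object_destroy()" are hypothetical names.
 *
 *    struct my_object {
 *       struct pipe_reference reference;
 *       ...
 *    };
 *
 *    static inline void
 *    my_object_reference(struct my_object **dst, struct my_object *src)
 *    {
 *       struct my_object *old_dst = *dst;
 *
 *       if (pipe_reference(&old_dst->reference, &src->reference))
 *          my_object_destroy(old_dst);
 *       *dst = src;
 *    }
 */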

static inline void
pipe_surface_reference(struct pipe_surface **dst, struct pipe_surface *src)
{
   struct pipe_surface *old_dst = *dst;

   if (pipe_reference_described(&old_dst->reference, &src->reference,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      old_dst->context->surface_destroy(old_dst->context, old_dst);
   *dst = src;
}

/**
 * Similar to pipe_surface_reference(), but always sets the pointer to NULL
 * and takes an explicit context.  The explicit context avoids the problem
 * of calling a deleted context's surface_destroy() method when freeing a
 * surface that is shared by multiple contexts.
 */
static inline void
pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
{
   struct pipe_surface *old = *ptr;

   if (pipe_reference_described(&old->reference, NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      pipe->surface_destroy(pipe, old);
   *ptr = NULL;
}


static inline void
pipe_resource_reference(struct pipe_resource **dst, struct pipe_resource *src)
{
   struct pipe_resource *old_dst = *dst;

   if (pipe_reference_described(&old_dst->reference, &src->reference,
                                (debug_reference_descriptor)
                                debug_describe_resource)) {
      /* Avoid recursion, which would prevent inlining this function */
      do {
         struct pipe_resource *next = old_dst->next;

         old_dst->screen->resource_destroy(old_dst->screen, old_dst);
         old_dst = next;
      } while (pipe_reference_described(&old_dst->reference, NULL,
                                        (debug_reference_descriptor)
                                        debug_describe_resource));
   }
   *dst = src;
}
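
/*
 * Example (illustrative only): the usual pattern for holding, replacing and
 * dropping a resource reference.  "state" and its "buf" member are
 * hypothetical; the pointer must be initialized (to NULL or to a referenced
 * resource) before the first call, since the old value is unreferenced.
 *
 *    Take a reference (any previously held reference is dropped):
 *       pipe_resource_reference(&state->buf, new_buf);
 *    Drop the reference:
 *       pipe_resource_reference(&state->buf, NULL);
 */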

/**
 * Same as pipe_surface_release(), but used when the pipe_context no longer
 * exists.
 */
static inline void
pipe_surface_release_no_context(struct pipe_surface **ptr)
{
   struct pipe_surface *surf = *ptr;

   if (pipe_reference_described(&surf->reference, NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface)) {
      /* trivially destroy pipe_surface */
      pipe_resource_reference(&surf->texture, NULL);
      free(surf);
   }
   *ptr = NULL;
}

/**
 * Set *dst to \p src with proper reference counting.
 *
 * The caller must guarantee that \p src and *dst were created in
 * the same context (if they exist), and that this context is the current one.
 */
static inline void
pipe_sampler_view_reference(struct pipe_sampler_view **dst,
                            struct pipe_sampler_view *src)
{
   struct pipe_sampler_view *old_dst = *dst;

   if (pipe_reference_described(&old_dst->reference, &src->reference,
                                (debug_reference_descriptor)
                                debug_describe_sampler_view))
      old_dst->context->sampler_view_destroy(old_dst->context, old_dst);
   *dst = src;
}

/**
 * Similar to pipe_sampler_view_reference(), but always sets the pointer to
 * NULL and takes the current context explicitly.
 *
 * If *ptr is non-NULL, it may refer to a view that was created in a different
 * context (however, that context must still be alive).
 */
static inline void
pipe_sampler_view_release(struct pipe_context *ctx,
                          struct pipe_sampler_view **ptr)
{
   struct pipe_sampler_view *old_view = *ptr;

   if (pipe_reference_described(&old_view->reference, NULL,
                    (debug_reference_descriptor)debug_describe_sampler_view)) {
      ctx->sampler_view_destroy(ctx, old_view);
   }
   *ptr = NULL;
}
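
/*
 * Example (illustrative only): a state tracker holding a sampler view and
 * releasing it at teardown with whatever context is current at that time.
 * "state" and its "view" member are hypothetical.
 *
 *    pipe_sampler_view_reference(&state->view, new_view);
 *    ...
 *    pipe_sampler_view_release(ctx, &state->view);
 */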

static inline void
pipe_so_target_reference(struct pipe_stream_output_target **dst,
                         struct pipe_stream_output_target *src)
{
   struct pipe_stream_output_target *old_dst = *dst;

   if (pipe_reference_described(&old_dst->reference, &src->reference,
                     (debug_reference_descriptor)debug_describe_so_target))
      old_dst->context->stream_output_target_destroy(old_dst->context, old_dst);
   *dst = src;
}

static inline void
pipe_vertex_buffer_unreference(struct pipe_vertex_buffer *dst)
{
   if (dst->is_user_buffer)
      dst->buffer.user = NULL;
   else
      pipe_resource_reference(&dst->buffer.resource, NULL);
}

static inline void
pipe_vertex_buffer_reference(struct pipe_vertex_buffer *dst,
                             const struct pipe_vertex_buffer *src)
{
   pipe_vertex_buffer_unreference(dst);
   if (!src->is_user_buffer)
      pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
   memcpy(dst, src, sizeof(*src));
}

static inline void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

static inline void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface* ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer)
{
   ps->texture = 0;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer);
}

/* Return true if the surfaces are equal. */
static inline boolean
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}

/*
 * Convenience wrappers for screen buffer functions.
 */


/**
 * Create a new buffer resource.
 * \param bind  bitmask of PIPE_BIND_x flags
 * \param usage  a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create(struct pipe_screen *screen,
                   unsigned bind,
                   enum pipe_resource_usage usage,
                   unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}
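
/*
 * Example (illustrative only): creating a 64 KB vertex buffer; the size and
 * the bind/usage choices here are arbitrary.
 *
 *    struct pipe_resource *vbuf =
 *       pipe_buffer_create(screen, PIPE_BIND_VERTEX_BUFFER,
 *                          PIPE_USAGE_DEFAULT, 64 * 1024);
 */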


static inline struct pipe_resource *
pipe_buffer_create_const0(struct pipe_screen *screen,
                          unsigned bind,
                          enum pipe_resource_usage usage,
                          unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM;
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = screen->get_param(screen, PIPE_CAP_CONSTBUF0_FLAGS);
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}


/**
 * Map a range of a resource.
 * \param offset  start of region, in bytes
 * \param length  size of region, in bytes
 * \param access  bitmask of PIPE_TRANSFER_x flags
 * \param transfer  returns a transfer object
 */
static inline void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned access,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;
   void *map;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   map = pipe->transfer_map(pipe, buffer, 0, access, &box, transfer);
   if (!map) {
      return NULL;
   }

   return map;
}


/**
 * Map whole resource.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 * \param transfer  returns a transfer object
 */
static inline void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned access,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0,
                                access, transfer);
}


static inline void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   pipe->transfer_unmap(pipe, transfer);
}
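
/*
 * Example (illustrative only): filling a sub-range of a buffer through an
 * explicit map/unmap pair.  "buf", "offset", "size" and "data" are
 * hypothetical.
 *
 *    struct pipe_transfer *transfer;
 *    void *map = pipe_buffer_map_range(pipe, buf, offset, size,
 *                                      PIPE_TRANSFER_WRITE, &transfer);
 *    if (map) {
 *       memcpy(map, data, size);
 *       pipe_buffer_unmap(pipe, transfer);
 *    }
 */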

static inline void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= (int) offset);
   assert((int) (offset + length) <= transfer->box.x + transfer->box.width);

   /* Match old screen->buffer_flush_mapped_range() behaviour, where the
    * offset parameter is relative to the start of the buffer, not to the
    * mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}

static inline void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   /* Don't set any other usage bits. Drivers should derive them. */
   pipe->buffer_subdata(pipe, buf, PIPE_TRANSFER_WRITE, offset, size, data);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing a range that has never
 * been written before.
 */
static inline void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   pipe->buffer_subdata(pipe, buf,
                        (PIPE_TRANSFER_WRITE |
                         PIPE_TRANSFER_UNSYNCHRONIZED),
                        offset, size, data);
}
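
/*
 * Example (illustrative only): appending to a streaming buffer, where every
 * write targets a range that has not been written before, so the
 * unsynchronized path is safe.  "stream_offset", "verts", "num_verts" and
 * "vertex_size" are hypothetical.
 *
 *    pipe_buffer_write_nooverlap(pipe, buf, stream_offset,
 *                                num_verts * vertex_size, verts);
 *    stream_offset += num_verts * vertex_size;
 */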


/**
 * Create a new resource and immediately put data into it.
 * \param bind  bitmask of PIPE_BIND_x flags
 * \param usage  a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create_with_data(struct pipe_context *pipe,
                             unsigned bind,
                             enum pipe_resource_usage usage,
                             unsigned size,
                             const void *ptr)
{
   struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                  bind, usage, size);
   pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
   return res;
}

static inline void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   ubyte *map;

   map = (ubyte *) pipe_buffer_map_range(pipe,
                                         buf,
                                         offset, size,
                                         PIPE_TRANSFER_READ,
                                         &src_transfer);
   if (!map)
      return;

   memcpy(data, map, size);
   pipe_buffer_unmap(pipe, src_transfer);
}


/**
 * Map a resource for reading/writing.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map(struct pipe_context *context,
                  struct pipe_resource *resource,
                  unsigned level, unsigned layer,
                  unsigned access,
                  unsigned x, unsigned y,
                  unsigned w, unsigned h,
                  struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_2d_zslice(x, y, layer, w, h, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}


/**
 * Map a 3D (texture) resource for reading/writing.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 */
static inline void *
pipe_transfer_map_3d(struct pipe_context *context,
                     struct pipe_resource *resource,
                     unsigned level,
                     unsigned access,
                     unsigned x, unsigned y, unsigned z,
                     unsigned w, unsigned h, unsigned d,
                     struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_3d(x, y, z, w, h, d, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}

static inline void
pipe_transfer_unmap(struct pipe_context *context,
                    struct pipe_transfer *transfer)
{
   context->transfer_unmap(context, transfer);
}
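
/*
 * Example (illustrative only): reading back a 2D region of level 0, layer 0
 * of a texture.  "tex", "width" and "height" are hypothetical and must lie
 * within the level's dimensions; rows in the mapping are transfer->stride
 * bytes apart.
 *
 *    struct pipe_transfer *transfer;
 *    void *map = pipe_transfer_map(pipe, tex, 0, 0, PIPE_TRANSFER_READ,
 *                                  0, 0, width, height, &transfer);
 *    if (map) {
 *       ... copy out the data ...
 *       pipe_transfer_unmap(pipe, transfer);
 *    }
 */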

static inline void
pipe_set_constant_buffer(struct pipe_context *pipe,
                         enum pipe_shader_type shader, uint index,
                         struct pipe_resource *buf)
{
   if (buf) {
      struct pipe_constant_buffer cb;
      cb.buffer = buf;
      cb.buffer_offset = 0;
      cb.buffer_size = buf->width0;
      cb.user_buffer = NULL;
      pipe->set_constant_buffer(pipe, shader, index, &cb);
   } else {
      pipe->set_constant_buffer(pipe, shader, index, NULL);
   }
}
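
/*
 * Example (illustrative only): binding a buffer as constant buffer slot 0 of
 * the fragment shader and unbinding it afterwards.  "constbuf" is
 * hypothetical.
 *
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, constbuf);
 *    ...
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
 */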


/**
 * Get the polygon offset enable/disable flag for the given polygon fill mode.
 * \param fill_mode  one of PIPE_POLYGON_MODE_POINT/LINE/FILL
 */
static inline boolean
util_get_offset(const struct pipe_rasterizer_state *templ,
                unsigned fill_mode)
{
   switch(fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return FALSE;
   }
}

static inline float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   /* The point size should be clamped to this value at the rasterizer stage.
    */
   return !state->point_quad_rasterization &&
          !state->point_smooth &&
          !state->multisample ? 1.0f : 0.0f;
}

static inline void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = FALSE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0, sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
      break;
   default:
      memset(result, 0, sizeof(*result));
   }
}

/** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
static inline enum tgsi_texture_type
util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
                          unsigned nr_samples)
{
   switch (pipe_tex_target) {
   case PIPE_BUFFER:
      return TGSI_TEXTURE_BUFFER;

   case PIPE_TEXTURE_1D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D;

   case PIPE_TEXTURE_2D:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_MSAA : TGSI_TEXTURE_2D;

   case PIPE_TEXTURE_RECT:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_RECT;

   case PIPE_TEXTURE_3D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_3D;

   case PIPE_TEXTURE_CUBE:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_CUBE;

   case PIPE_TEXTURE_1D_ARRAY:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D_ARRAY;

   case PIPE_TEXTURE_2D_ARRAY:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_ARRAY_MSAA :
                              TGSI_TEXTURE_2D_ARRAY;

   case PIPE_TEXTURE_CUBE_ARRAY:
      return TGSI_TEXTURE_CUBE_ARRAY;

   default:
      assert(0 && "unexpected texture target");
      return TGSI_TEXTURE_UNKNOWN;
   }
}


static inline void
util_copy_constant_buffer(struct pipe_constant_buffer *dst,
                          const struct pipe_constant_buffer *src)
{
   if (src) {
      pipe_resource_reference(&dst->buffer, src->buffer);
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
      dst->user_buffer = src->user_buffer;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
      dst->user_buffer = NULL;
   }
}

static inline void
util_copy_image_view(struct pipe_image_view *dst,
                     const struct pipe_image_view *src)
{
   if (src) {
      pipe_resource_reference(&dst->resource, src->resource);
      dst->format = src->format;
      dst->access = src->access;
      dst->u = src->u;
   } else {
      pipe_resource_reference(&dst->resource, NULL);
      dst->format = PIPE_FORMAT_NONE;
      dst->access = 0;
      memset(&dst->u, 0, sizeof(dst->u));
   }
}

static inline unsigned
util_max_layer(const struct pipe_resource *r, unsigned level)
{
   switch (r->target) {
   case PIPE_TEXTURE_3D:
      return u_minify(r->depth0, level) - 1;
   case PIPE_TEXTURE_CUBE:
      assert(r->array_size == 6);
      /* fall-through */
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return r->array_size - 1;
   default:
      return 0;
   }
}

static inline unsigned
util_num_layers(const struct pipe_resource *r, unsigned level)
{
   return util_max_layer(r, level) + 1;
}

static inline bool
util_texrange_covers_whole_level(const struct pipe_resource *tex,
                                 unsigned level, unsigned x, unsigned y,
                                 unsigned z, unsigned width,
                                 unsigned height, unsigned depth)
{
   return x == 0 && y == 0 && z == 0 &&
          width == u_minify(tex->width0, level) &&
          height == u_minify(tex->height0, level) &&
          depth == util_num_layers(tex, level);
}

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */