/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/compiler.h"
#include "util/format/u_format.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif


/*
 * Reference counting helper functions.
 */


static inline void
pipe_reference_init(struct pipe_reference *dst, unsigned count)
{
   dst->count = count;
}

static inline boolean
pipe_is_referenced(struct pipe_reference *src)
{
   return p_atomic_read(&src->count) != 0;
}

/**
 * Update reference counting.
 * The old thing pointed to, if any, will be unreferenced.
 * Both 'dst' and 'src' may be NULL.
 * \return TRUE if the object's refcount hits zero and should be destroyed.
 */
static inline boolean
pipe_reference_described(struct pipe_reference *dst,
                         struct pipe_reference *src,
                         debug_reference_descriptor get_desc)
{
   if (dst != src) {
      /* bump the src.count first */
      if (src) {
         ASSERTED int count = p_atomic_inc_return(&src->count);
         assert(count != 1); /* src had to be referenced */
         debug_reference(src, get_desc, 1);
      }

      if (dst) {
         int count = p_atomic_dec_return(&dst->count);
         assert(count != -1); /* dst had to be referenced */
         debug_reference(dst, get_desc, -1);
         if (!count)
            return true;
      }
   }

   return false;
}

static inline boolean
pipe_reference(struct pipe_reference *dst, struct pipe_reference *src)
{
   return pipe_reference_described(dst, src,
                                   (debug_reference_descriptor)
                                   debug_describe_reference);
}

static inline void
pipe_surface_reference(struct pipe_surface **dst, struct pipe_surface *src)
{
   struct pipe_surface *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      old_dst->context->surface_destroy(old_dst->context, old_dst);
   *dst = src;
}

/**
 * Similar to pipe_surface_reference() but always sets the pointer to NULL
 * and takes an explicit context.  The explicit context avoids the problem
 * of using a deleted context's surface_destroy() method when freeing a
 * surface that's shared by multiple contexts.
 */
static inline void
pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
{
   struct pipe_surface *old = *ptr;

   if (pipe_reference_described(&old->reference, NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface))
      pipe->surface_destroy(pipe, old);
   *ptr = NULL;
}

static inline void
pipe_resource_destroy(struct pipe_resource *res)
{
   /* Avoid recursion, which would prevent inlining this function */
   do {
      struct pipe_resource *next = res->next;

      res->screen->resource_destroy(res->screen, res);
      res = next;
   } while (pipe_reference_described(res ? &res->reference : NULL,
                                     NULL,
                                     (debug_reference_descriptor)
                                     debug_describe_resource));
}

static inline void
pipe_resource_reference(struct pipe_resource **dst, struct pipe_resource *src)
{
   struct pipe_resource *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)
                                debug_describe_resource)) {
      pipe_resource_destroy(old_dst);
   }
   *dst = src;
}
/**
 * Subtract the given number of references.
 */
static inline void
pipe_drop_resource_references(struct pipe_resource *dst, int num_refs)
{
   int count = p_atomic_add_return(&dst->reference.count, -num_refs);

   assert(count >= 0);
   /* Underflows shouldn't happen, but let's be safe. */
   if (count <= 0)
      pipe_resource_destroy(dst);
}

/**
 * Same as pipe_surface_release, but used when pipe_context doesn't exist
 * anymore.
 */
static inline void
pipe_surface_release_no_context(struct pipe_surface **ptr)
{
   struct pipe_surface *surf = *ptr;

   if (pipe_reference_described(&surf->reference, NULL,
                                (debug_reference_descriptor)
                                debug_describe_surface)) {
      /* trivially destroy pipe_surface */
      pipe_resource_reference(&surf->texture, NULL);
      free(surf);
   }
   *ptr = NULL;
}

/**
 * Set *dst to \p src with proper reference counting.
 *
 * The caller must guarantee that \p src and *dst were created in
 * the same context (if they exist), and that this context is current.
 */
static inline void
pipe_sampler_view_reference(struct pipe_sampler_view **dst,
                            struct pipe_sampler_view *src)
{
   struct pipe_sampler_view *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)
                                debug_describe_sampler_view))
      old_dst->context->sampler_view_destroy(old_dst->context, old_dst);
   *dst = src;
}

static inline void
pipe_so_target_reference(struct pipe_stream_output_target **dst,
                         struct pipe_stream_output_target *src)
{
   struct pipe_stream_output_target *old_dst = *dst;

   if (pipe_reference_described(old_dst ? &old_dst->reference : NULL,
                                src ? &src->reference : NULL,
                                (debug_reference_descriptor)
                                debug_describe_so_target))
      old_dst->context->stream_output_target_destroy(old_dst->context, old_dst);
   *dst = src;
}

static inline void
pipe_vertex_state_reference(struct pipe_vertex_state **dst,
                            struct pipe_vertex_state *src)
{
   struct pipe_vertex_state *old_dst = *dst;

   if (pipe_reference(old_dst ? &old_dst->reference : NULL,
                      src ? &src->reference : NULL))
      old_dst->screen->vertex_state_destroy(old_dst->screen, old_dst);
   *dst = src;
}

static inline void
pipe_vertex_buffer_unreference(struct pipe_vertex_buffer *dst)
{
   if (dst->is_user_buffer)
      dst->buffer.user = NULL;
   else
      pipe_resource_reference(&dst->buffer.resource, NULL);
}

static inline void
pipe_vertex_buffer_reference(struct pipe_vertex_buffer *dst,
                             const struct pipe_vertex_buffer *src)
{
   if (dst->buffer.resource == src->buffer.resource) {
      /* Just copy the fields, don't touch reference counts. */
      dst->stride = src->stride;
      dst->is_user_buffer = src->is_user_buffer;
      dst->buffer_offset = src->buffer_offset;
      return;
   }

   pipe_vertex_buffer_unreference(dst);
   /* Don't use memcpy: there is padding between the fields, and dst may
    * be used as a hash key.
    */
   dst->stride = src->stride;
   dst->is_user_buffer = src->is_user_buffer;
   dst->buffer_offset = src->buffer_offset;

   if (src->is_user_buffer)
      dst->buffer.user = src->buffer.user;
   else
      pipe_resource_reference(&dst->buffer.resource, src->buffer.resource);
}

static inline void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

static inline void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface* ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer)
{
   ps->texture = NULL;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer);
}

/* Return true if the surfaces are equal. */
static inline boolean
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}

/*
 * Convenience wrappers for screen buffer functions.
 */


/**
 * Create a new resource.
 * \param bind  bitmask of PIPE_BIND_x flags
 * \param usage  a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create(struct pipe_screen *screen,
                   unsigned bind,
                   enum pipe_resource_usage usage,
                   unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}


static inline struct pipe_resource *
pipe_buffer_create_const0(struct pipe_screen *screen,
                          unsigned bind,
                          enum pipe_resource_usage usage,
                          unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM;
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = screen->get_param(screen, PIPE_CAP_CONSTBUF0_FLAGS);
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}


/**
 * Map a range of a resource.
 * \param offset  start of region, in bytes
 * \param length  size of region, in bytes
 * \param access  bitmask of PIPE_MAP_x flags
 * \param transfer  returns a transfer object
 */
static inline void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned access,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   return pipe->buffer_map(pipe, buffer, 0, access, &box, transfer);
}


/**
 * Map whole resource.
 * \param access  bitmask of PIPE_MAP_x flags
 * \param transfer  returns a transfer object
 */
static inline void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned access,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0,
                                access, transfer);
}


static inline void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   pipe->buffer_unmap(pipe, transfer);
}
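
/*
 * A usage sketch for the map helpers above (illustrative only, not part
 * of the original header; the "u_inlines_example_*" name is made up):
 * map a sub-range for writing, copy data in, and unmap.
 */
static inline void
u_inlines_example_fill_range(struct pipe_context *pipe,
                             struct pipe_resource *buf,
                             unsigned offset, unsigned size,
                             const void *data)
{
   struct pipe_transfer *xfer;
   void *map = pipe_buffer_map_range(pipe, buf, offset, size,
                                     PIPE_MAP_WRITE, &xfer);

   if (!map)
      return;

   memcpy(map, data, size);   /* the mapping starts at 'offset' */
   pipe_buffer_unmap(pipe, xfer);
}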

static inline void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= (int) offset);
   assert((int) (offset + length) <= transfer->box.x + transfer->box.width);

   /* Match old screen->buffer_flush_mapped_range() behaviour, where the
    * offset parameter is relative to the start of the buffer, not the
    * mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}
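
/*
 * A sketch of explicit flushing (illustrative only, not part of the
 * original header; the example name is made up): with
 * PIPE_MAP_FLUSH_EXPLICIT, only the sub-ranges passed to
 * pipe_buffer_flush_mapped_range() are guaranteed to reach the device.
 * Per the note above, the offset is relative to the start of the
 * buffer, not to the mapped range.
 */
static inline void
u_inlines_example_explicit_flush(struct pipe_context *pipe,
                                 struct pipe_resource *buf,
                                 unsigned offset, unsigned size,
                                 const void *data)
{
   struct pipe_transfer *xfer;
   void *map = pipe_buffer_map_range(pipe, buf, offset, size,
                                     PIPE_MAP_WRITE |
                                     PIPE_MAP_FLUSH_EXPLICIT, &xfer);

   if (!map)
      return;

   memcpy(map, data, size);
   pipe_buffer_flush_mapped_range(pipe, xfer, offset, size);
   pipe_buffer_unmap(pipe, xfer);
}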

static inline void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   /* Don't set any other usage bits. Drivers should derive them. */
   pipe->buffer_subdata(pipe, buf, PIPE_MAP_WRITE, offset, size, data);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing a range that has
 * never been written before.
 */
static inline void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   pipe->buffer_subdata(pipe, buf,
                        (PIPE_MAP_WRITE |
                         PIPE_MAP_UNSYNCHRONIZED),
                        offset, size, data);
}

/**
 * Utility for simplifying pipe_context::resource_copy_region calls.
 */
static inline void
pipe_buffer_copy(struct pipe_context *pipe,
                 struct pipe_resource *dst,
                 struct pipe_resource *src,
                 unsigned dst_offset,
                 unsigned src_offset,
                 unsigned size)
{
   struct pipe_box box;
   /* only these fields are used */
   box.x = (int)src_offset;
   box.width = (int)size;
   pipe->resource_copy_region(pipe, dst, 0, dst_offset, 0, 0, src, 0, &box);
}

/**
 * Create a new resource and immediately put data into it.
 * \param bind  bitmask of PIPE_BIND_x flags
 * \param usage  a PIPE_USAGE_x value
 */
static inline struct pipe_resource *
pipe_buffer_create_with_data(struct pipe_context *pipe,
                             unsigned bind,
                             enum pipe_resource_usage usage,
                             unsigned size,
                             const void *ptr)
{
   struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                  bind, usage, size);
   pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
   return res;
}

static inline void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   ubyte *map;

   map = (ubyte *) pipe_buffer_map_range(pipe,
                                         buf,
                                         offset, size,
                                         PIPE_MAP_READ,
                                         &src_transfer);
   if (!map)
      return;

   memcpy(data, map, size);
   pipe_buffer_unmap(pipe, src_transfer);
}
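
/*
 * A small round-trip sketch tying the buffer helpers together
 * (illustrative only, not part of the original header; the example name
 * is made up): create a staging buffer, upload with pipe_buffer_write(),
 * read it back, and drop the reference.
 */
static inline bool
u_inlines_example_roundtrip(struct pipe_context *pipe,
                            const void *data, unsigned size, void *out)
{
   struct pipe_resource *buf =
      pipe_buffer_create(pipe->screen, PIPE_BIND_CUSTOM,
                         PIPE_USAGE_STAGING, size);

   if (!buf)
      return false;

   pipe_buffer_write(pipe, buf, 0, size, data);
   pipe_buffer_read(pipe, buf, 0, size, out);
   pipe_resource_reference(&buf, NULL);   /* release, likely destroys */
   return true;
}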


/**
 * Map a resource for reading/writing.
 * \param access  bitmask of PIPE_MAP_x flags
 */
static inline void *
pipe_texture_map(struct pipe_context *context,
                 struct pipe_resource *resource,
                 unsigned level, unsigned layer,
                 unsigned access,
                 unsigned x, unsigned y,
                 unsigned w, unsigned h,
                 struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_2d_zslice(x, y, layer, w, h, &box);
   return context->texture_map(context, resource, level, access,
                               &box, transfer);
}


/**
 * Map a 3D (texture) resource for reading/writing.
 * \param access  bitmask of PIPE_MAP_x flags
 */
static inline void *
pipe_texture_map_3d(struct pipe_context *context,
                    struct pipe_resource *resource,
                    unsigned level,
                    unsigned access,
                    unsigned x, unsigned y, unsigned z,
                    unsigned w, unsigned h, unsigned d,
                    struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_3d(x, y, z, w, h, d, &box);
   return context->texture_map(context, resource, level, access,
                               &box, transfer);
}

static inline void
pipe_texture_unmap(struct pipe_context *context,
                   struct pipe_transfer *transfer)
{
   context->texture_unmap(context, transfer);
}
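
/*
 * A texture-map sketch (illustrative only, not part of the original
 * header; the example name is made up): read back one row of bytes from
 * a 2D texture level.  The caller's row_bytes must not exceed the
 * level's row size; the transfer's 'stride' field would be used to step
 * between rows of a taller mapping.
 */
static inline void
u_inlines_example_read_row(struct pipe_context *ctx,
                           struct pipe_resource *tex,
                           unsigned level, unsigned y,
                           unsigned row_bytes, void *out)
{
   struct pipe_transfer *xfer;
   const void *map = pipe_texture_map(ctx, tex, level, 0, PIPE_MAP_READ,
                                      0, y, u_minify(tex->width0, level),
                                      1, &xfer);

   if (!map)
      return;

   memcpy(out, map, row_bytes);
   pipe_texture_unmap(ctx, xfer);
}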

static inline void
pipe_set_constant_buffer(struct pipe_context *pipe,
                         enum pipe_shader_type shader, uint index,
                         struct pipe_resource *buf)
{
   if (buf) {
      struct pipe_constant_buffer cb;
      cb.buffer = buf;
      cb.buffer_offset = 0;
      cb.buffer_size = buf->width0;
      cb.user_buffer = NULL;
      pipe->set_constant_buffer(pipe, shader, index, false, &cb);
   } else {
      pipe->set_constant_buffer(pipe, shader, index, false, NULL);
   }
}
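
/*
 * A usage sketch for the helper above (illustrative only, not part of
 * the original header; the example name is made up): bind a whole buffer
 * as constant buffer slot 0 of the fragment shader, then unbind it when
 * done.
 */
static inline void
u_inlines_example_bind_cb0(struct pipe_context *pipe,
                           struct pipe_resource *buf)
{
   pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, buf);
   /* ... issue draws that read the constants ... */
   pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
}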


/**
 * Get the polygon offset enable/disable flag for the given polygon fill mode.
 * \param fill_mode  one of PIPE_POLYGON_MODE_POINT/LINE/FILL
 */
static inline boolean
util_get_offset(const struct pipe_rasterizer_state *templ,
                unsigned fill_mode)
{
   switch (fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return FALSE;
   }
}

static inline float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   /* The point size should be clamped to this value at the rasterizer stage.
    */
   return !state->point_quad_rasterization &&
          !state->point_smooth &&
          !state->multisample ? 1.0f : 0.0f;
}

static inline void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = FALSE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0, sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
      break;
   default:
      memset(result, 0, sizeof(*result));
   }
}

/** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
static inline enum tgsi_texture_type
util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
                          unsigned nr_samples)
{
   switch (pipe_tex_target) {
   case PIPE_BUFFER:
      return TGSI_TEXTURE_BUFFER;

   case PIPE_TEXTURE_1D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D;

   case PIPE_TEXTURE_2D:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_MSAA : TGSI_TEXTURE_2D;

   case PIPE_TEXTURE_RECT:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_RECT;

   case PIPE_TEXTURE_3D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_3D;

   case PIPE_TEXTURE_CUBE:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_CUBE;

   case PIPE_TEXTURE_1D_ARRAY:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D_ARRAY;

   case PIPE_TEXTURE_2D_ARRAY:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_ARRAY_MSAA :
                              TGSI_TEXTURE_2D_ARRAY;

   case PIPE_TEXTURE_CUBE_ARRAY:
      return TGSI_TEXTURE_CUBE_ARRAY;

   default:
      assert(0 && "unexpected texture target");
      return TGSI_TEXTURE_UNKNOWN;
   }
}

static inline void
util_copy_constant_buffer(struct pipe_constant_buffer *dst,
                          const struct pipe_constant_buffer *src,
                          bool take_ownership)
{
   if (src) {
      if (take_ownership) {
         pipe_resource_reference(&dst->buffer, NULL);
         dst->buffer = src->buffer;
      } else {
         pipe_resource_reference(&dst->buffer, src->buffer);
      }
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
      dst->user_buffer = src->user_buffer;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
      dst->user_buffer = NULL;
   }
}

static inline void
util_copy_shader_buffer(struct pipe_shader_buffer *dst,
                        const struct pipe_shader_buffer *src)
{
   if (src) {
      pipe_resource_reference(&dst->buffer, src->buffer);
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
   }
}

static inline void
util_copy_image_view(struct pipe_image_view *dst,
                     const struct pipe_image_view *src)
{
   if (src) {
      pipe_resource_reference(&dst->resource, src->resource);
      dst->format = src->format;
      dst->access = src->access;
      dst->shader_access = src->shader_access;
      dst->u = src->u;
   } else {
      pipe_resource_reference(&dst->resource, NULL);
      dst->format = PIPE_FORMAT_NONE;
      dst->access = 0;
      dst->shader_access = 0;
      memset(&dst->u, 0, sizeof(dst->u));
   }
}

static inline unsigned
util_max_layer(const struct pipe_resource *r, unsigned level)
{
   switch (r->target) {
   case PIPE_TEXTURE_3D:
      return u_minify(r->depth0, level) - 1;
   case PIPE_TEXTURE_CUBE:
      assert(r->array_size == 6);
      FALLTHROUGH;
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return r->array_size - 1;
   default:
      return 0;
   }
}

static inline unsigned
util_num_layers(const struct pipe_resource *r, unsigned level)
{
   return util_max_layer(r, level) + 1;
}

static inline bool
util_texrange_covers_whole_level(const struct pipe_resource *tex,
                                 unsigned level, unsigned x, unsigned y,
                                 unsigned z, unsigned width,
                                 unsigned height, unsigned depth)
{
   return x == 0 && y == 0 && z == 0 &&
          width == u_minify(tex->width0, level) &&
          height == u_minify(tex->height0, level) &&
          depth == util_num_layers(tex, level);
}

/**
 * Returns true if the blit will fully initialize all pixels in the resource.
 */
static inline bool
util_blit_covers_whole_resource(const struct pipe_blit_info *info)
{
   /* No conditional rendering or scissoring.  (We assume the caller has
    * already dropped any redundant scissoring.)
    */
   if (info->scissor_enable || info->window_rectangle_include ||
       info->render_condition_enable || info->alpha_blend)
      return false;

   const struct pipe_resource *dst = info->dst.resource;
   /* A single blit can't initialize a whole miptree. */
   if (dst->last_level != 0)
      return false;

   assert(info->dst.level == 0);

   /* Make sure the dst box covers the whole resource. */
   if (!(util_texrange_covers_whole_level(dst, 0,
         0, 0, 0,
         info->dst.box.width, info->dst.box.height, info->dst.box.depth))) {
      return false;
   }

   /* Make sure the mask actually updates all the channels present in the
    * dst format.
    */
   if (info->mask & PIPE_MASK_RGBA) {
      if ((info->mask & PIPE_MASK_RGBA) != PIPE_MASK_RGBA)
         return false;
   }

   if (info->mask & PIPE_MASK_ZS) {
      const struct util_format_description *format_desc =
         util_format_description(info->dst.format);
      uint32_t dst_has = 0;
      if (util_format_has_depth(format_desc))
         dst_has |= PIPE_MASK_Z;
      if (util_format_has_stencil(format_desc))
         dst_has |= PIPE_MASK_S;
      if (dst_has & ~(info->mask & PIPE_MASK_ZS))
         return false;
   }

   return true;
}

static inline bool
util_logicop_reads_dest(enum pipe_logicop op)
{
   switch (op) {
   case PIPE_LOGICOP_NOR:
   case PIPE_LOGICOP_AND_INVERTED:
   case PIPE_LOGICOP_AND_REVERSE:
   case PIPE_LOGICOP_INVERT:
   case PIPE_LOGICOP_XOR:
   case PIPE_LOGICOP_NAND:
   case PIPE_LOGICOP_AND:
   case PIPE_LOGICOP_EQUIV:
   case PIPE_LOGICOP_NOOP:
   case PIPE_LOGICOP_OR_INVERTED:
   case PIPE_LOGICOP_OR_REVERSE:
   case PIPE_LOGICOP_OR:
      return true;
   case PIPE_LOGICOP_CLEAR:
   case PIPE_LOGICOP_COPY_INVERTED:
   case PIPE_LOGICOP_COPY:
   case PIPE_LOGICOP_SET:
      return false;
   }
   unreachable("bad logicop");
}

static inline bool
util_writes_stencil(const struct pipe_stencil_state *s)
{
   return s->enabled && s->writemask &&
        ((s->fail_op != PIPE_STENCIL_OP_KEEP) ||
         (s->zpass_op != PIPE_STENCIL_OP_KEEP) ||
         (s->zfail_op != PIPE_STENCIL_OP_KEEP));
}

static inline bool
util_writes_depth_stencil(const struct pipe_depth_stencil_alpha_state *zsa)
{
   if (zsa->depth_enabled && zsa->depth_writemask &&
       (zsa->depth_func != PIPE_FUNC_NEVER))
      return true;

   return util_writes_stencil(&zsa->stencil[0]) ||
          util_writes_stencil(&zsa->stencil[1]);
}

static inline struct pipe_context *
pipe_create_multimedia_context(struct pipe_screen *screen)
{
   unsigned flags = 0;

   if (!screen->get_param(screen, PIPE_CAP_GRAPHICS))
      flags |= PIPE_CONTEXT_COMPUTE_ONLY;

   return screen->context_create(screen, NULL, flags);
}

static inline unsigned util_res_sample_count(struct pipe_resource *res)
{
   return res->nr_samples > 0 ? res->nr_samples : 1;
}

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */