/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef U_INLINES_H
#define U_INLINES_H

#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_shader_tokens.h"
#include "pipe/p_state.h"
#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/u_debug_describe.h"
#include "util/u_debug_refcnt.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#include "util/u_math.h"


#ifdef __cplusplus
extern "C" {
#endif


/*
 * Reference counting helper functions.
 */

/** Set the initial reference count of a reference-counted object. */
static INLINE void
pipe_reference_init(struct pipe_reference *reference, unsigned count)
{
   p_atomic_set(&reference->count, count);
}

/** Return TRUE if the object is still referenced (count != 0). */
static INLINE boolean
pipe_is_referenced(struct pipe_reference *reference)
{
   return p_atomic_read(&reference->count) != 0;
}

/**
 * Update reference counting.
 * The old thing pointed to, if any, will be unreferenced.
 * Both 'ptr' and 'reference' may be NULL.
 * \return TRUE if the object's refcount hits zero and should be destroyed.
 */
static INLINE boolean
pipe_reference_described(struct pipe_reference *ptr,
                         struct pipe_reference *reference,
                         debug_reference_descriptor get_desc)
{
   boolean destroy = FALSE;

   if (ptr != reference) {
      /* bump the reference.count first */
      if (reference) {
         assert(pipe_is_referenced(reference));
         p_atomic_inc(&reference->count);
         debug_reference(reference, get_desc, 1);
      }

      if (ptr) {
         assert(pipe_is_referenced(ptr));
         if (p_atomic_dec_zero(&ptr->count)) {
            destroy = TRUE;
         }
         debug_reference(ptr, get_desc, -1);
      }
   }

   return destroy;
}

static INLINE boolean
pipe_reference(struct pipe_reference *ptr, struct pipe_reference *reference)
{
   return pipe_reference_described(ptr, reference,
                                   (debug_reference_descriptor)debug_describe_reference);
}
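
/*
 * Usage sketch (illustrative, not from the original file): a driver object
 * embeds a pipe_reference as its *first* member, so that &(*ptr)->reference
 * is NULL when *ptr is NULL, as the wrappers below rely on.  A type-safe
 * helper then wraps pipe_reference() and destroys the object once the count
 * drops to zero.  'my_object' and 'my_object_destroy' are hypothetical names.
 *
 *    struct my_object {
 *       struct pipe_reference reference;   // must be the first member
 *       ...
 *    };
 *
 *    static void
 *    my_object_reference(struct my_object **ptr, struct my_object *obj)
 *    {
 *       struct my_object *old = *ptr;
 *
 *       if (pipe_reference(&(*ptr)->reference, &obj->reference))
 *          my_object_destroy(old);
 *       *ptr = obj;
 *    }
 */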

static INLINE void
pipe_surface_reference(struct pipe_surface **ptr, struct pipe_surface *surf)
{
   struct pipe_surface *old_surf = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &surf->reference,
                                (debug_reference_descriptor)debug_describe_surface))
      old_surf->context->surface_destroy(old_surf->context, old_surf);
   *ptr = surf;
}

/**
 * Similar to pipe_surface_reference() but always sets the pointer to NULL
 * and takes an explicit context.  The explicit context avoids the problem
 * of using a deleted context's surface_destroy() method when freeing a
 * surface that's shared by multiple contexts.
 */
static INLINE void
pipe_surface_release(struct pipe_context *pipe, struct pipe_surface **ptr)
{
   if (pipe_reference_described(&(*ptr)->reference, NULL,
                                (debug_reference_descriptor)debug_describe_surface))
      pipe->surface_destroy(pipe, *ptr);
   *ptr = NULL;
}
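
/*
 * Example (a sketch, not part of the original header): holding a surface
 * across state changes and then dropping it.  'new_surf' and 'pipe' are
 * hypothetical.
 *
 *    struct pipe_surface *surf = NULL;
 *
 *    pipe_surface_reference(&surf, new_surf);  // ref new_surf, unref old
 *    ...
 *    pipe_surface_release(pipe, &surf);        // unref, destroy via 'pipe',
 *                                              // and set surf to NULL
 */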


static INLINE void
pipe_resource_reference(struct pipe_resource **ptr, struct pipe_resource *tex)
{
   struct pipe_resource *old_tex = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &tex->reference,
                                (debug_reference_descriptor)debug_describe_resource))
      old_tex->screen->resource_destroy(old_tex->screen, old_tex);
   *ptr = tex;
}

static INLINE void
pipe_sampler_view_reference(struct pipe_sampler_view **ptr, struct pipe_sampler_view *view)
{
   struct pipe_sampler_view *old_view = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &view->reference,
                                (debug_reference_descriptor)debug_describe_sampler_view))
      old_view->context->sampler_view_destroy(old_view->context, old_view);
   *ptr = view;
}

/**
 * Similar to pipe_sampler_view_reference() but always sets the pointer to
 * NULL and takes an explicit context.  Passing an explicit context is a
 * work-around for the dangling context pointer problem that arises when
 * textures are shared by multiple contexts.  XXX fix this someday.
 */
static INLINE void
pipe_sampler_view_release(struct pipe_context *ctx,
                          struct pipe_sampler_view **ptr)
{
   struct pipe_sampler_view *old_view = *ptr;
   if (*ptr && (*ptr)->context != ctx) {
      debug_printf_once(("context mis-match in pipe_sampler_view_release()\n"));
   }
   if (pipe_reference_described(&(*ptr)->reference, NULL,
                    (debug_reference_descriptor)debug_describe_sampler_view)) {
      ctx->sampler_view_destroy(ctx, old_view);
   }
   *ptr = NULL;
}
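
/*
 * Sketch of the intended usage (not from the original file): the view is
 * released through a context that is known to still be alive, so a deleted
 * context's sampler_view_destroy() is never called.  'tex' and 'templ' are
 * hypothetical.
 *
 *    struct pipe_sampler_view *view =
 *       ctx->create_sampler_view(ctx, tex, &templ);
 *    ...
 *    pipe_sampler_view_release(ctx, &view);   // unref and NULL the pointer
 */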


static INLINE void
pipe_so_target_reference(struct pipe_stream_output_target **ptr,
                         struct pipe_stream_output_target *target)
{
   struct pipe_stream_output_target *old = *ptr;

   if (pipe_reference_described(&(*ptr)->reference, &target->reference,
                     (debug_reference_descriptor)debug_describe_so_target))
      old->context->stream_output_target_destroy(old->context, old);
   *ptr = target;
}

/** (Re)initialize an existing pipe_surface to point at the given resource. */
static INLINE void
pipe_surface_reset(struct pipe_context *ctx, struct pipe_surface* ps,
                   struct pipe_resource *pt, unsigned level, unsigned layer)
{
   pipe_resource_reference(&ps->texture, pt);
   ps->format = pt->format;
   ps->width = u_minify(pt->width0, level);
   ps->height = u_minify(pt->height0, level);
   ps->u.tex.level = level;
   ps->u.tex.first_layer = ps->u.tex.last_layer = layer;
   ps->context = ctx;
}

/** Initialize a freshly-allocated pipe_surface with a refcount of one. */
static INLINE void
pipe_surface_init(struct pipe_context *ctx, struct pipe_surface* ps,
                  struct pipe_resource *pt, unsigned level, unsigned layer)
{
   ps->texture = NULL;
   pipe_reference_init(&ps->reference, 1);
   pipe_surface_reset(ctx, ps, pt, level, layer);
}

/** Return true if the surfaces are equal. */
static INLINE boolean
pipe_surface_equal(struct pipe_surface *s1, struct pipe_surface *s2)
{
   return s1->texture == s2->texture &&
          s1->format == s2->format &&
          (s1->texture->target != PIPE_BUFFER ||
           (s1->u.buf.first_element == s2->u.buf.first_element &&
            s1->u.buf.last_element == s2->u.buf.last_element)) &&
          (s1->texture->target == PIPE_BUFFER ||
           (s1->u.tex.level == s2->u.tex.level &&
            s1->u.tex.first_layer == s2->u.tex.first_layer &&
            s1->u.tex.last_layer == s2->u.tex.last_layer));
}

/*
 * Convenience wrappers for screen buffer functions.
 */


/**
 * Create a new buffer resource.
 * \param bind  bitmask of PIPE_BIND_x flags
 * \param usage  bitmask of PIPE_USAGE_x flags
 * \param size  size of the buffer, in bytes
 */
static INLINE struct pipe_resource *
pipe_buffer_create(struct pipe_screen *screen,
                   unsigned bind,
                   unsigned usage,
                   unsigned size)
{
   struct pipe_resource buffer;
   memset(&buffer, 0, sizeof buffer);
   buffer.target = PIPE_BUFFER;
   buffer.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
   buffer.bind = bind;
   buffer.usage = usage;
   buffer.flags = 0;
   buffer.width0 = size;
   buffer.height0 = 1;
   buffer.depth0 = 1;
   buffer.array_size = 1;
   return screen->resource_create(screen, &buffer);
}
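
/*
 * Example (a sketch, not part of the original header): creating a 64 KB
 * vertex buffer intended for frequent CPU updates.
 *
 *    struct pipe_resource *vbuf =
 *       pipe_buffer_create(screen, PIPE_BIND_VERTEX_BUFFER,
 *                          PIPE_USAGE_STREAM, 64 * 1024);
 */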


/**
 * Map a range of a resource.
 * \param offset  start of region, in bytes
 * \param length  size of region, in bytes
 * \param access  bitmask of PIPE_TRANSFER_x flags
 * \param transfer  returns a transfer object
 */
static INLINE void *
pipe_buffer_map_range(struct pipe_context *pipe,
                      struct pipe_resource *buffer,
                      unsigned offset,
                      unsigned length,
                      unsigned access,
                      struct pipe_transfer **transfer)
{
   struct pipe_box box;
   void *map;

   assert(offset < buffer->width0);
   assert(offset + length <= buffer->width0);
   assert(length);

   u_box_1d(offset, length, &box);

   map = pipe->transfer_map(pipe, buffer, 0, access, &box, transfer);
   if (map == NULL) {
      return NULL;
   }

   return map;
}


/**
 * Map a whole resource.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 * \param transfer  returns a transfer object
 */
static INLINE void *
pipe_buffer_map(struct pipe_context *pipe,
                struct pipe_resource *buffer,
                unsigned access,
                struct pipe_transfer **transfer)
{
   return pipe_buffer_map_range(pipe, buffer, 0, buffer->width0, access, transfer);
}


static INLINE void
pipe_buffer_unmap(struct pipe_context *pipe,
                  struct pipe_transfer *transfer)
{
   pipe->transfer_unmap(pipe, transfer);
}
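
/*
 * Typical map/modify/unmap cycle (illustrative, not from the original file;
 * 'vbuf' is a hypothetical buffer resource):
 *
 *    struct pipe_transfer *xfer;
 *    float *vf = (float *) pipe_buffer_map(pipe, vbuf,
 *                                          PIPE_TRANSFER_WRITE, &xfer);
 *    if (vf) {
 *       vf[0] = 1.0f;
 *       pipe_buffer_unmap(pipe, xfer);
 *    }
 */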

static INLINE void
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
                               struct pipe_transfer *transfer,
                               unsigned offset,
                               unsigned length)
{
   struct pipe_box box;
   int transfer_offset;

   assert(length);
   assert(transfer->box.x <= (int) offset);
   assert((int) (offset + length) <= transfer->box.x + transfer->box.width);

   /* Match the old screen->buffer_flush_mapped_range() behaviour, where the
    * offset parameter is relative to the start of the buffer, not the
    * mapped range.
    */
   transfer_offset = offset - transfer->box.x;

   u_box_1d(transfer_offset, length, &box);

   pipe->transfer_flush_region(pipe, transfer, &box);
}
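
/*
 * Sketch of the flush-explicit pattern this helper supports (an assumption,
 * not from the original file: the buffer was mapped with
 * PIPE_TRANSFER_FLUSH_EXPLICIT, and 'buf' and 'data' are hypothetical).
 * Note that 'offset' is relative to the start of the buffer, not to the
 * mapped range.
 *
 *    struct pipe_transfer *xfer;
 *    void *map = pipe_buffer_map_range(pipe, buf, 0, buf->width0,
 *                                      PIPE_TRANSFER_WRITE |
 *                                      PIPE_TRANSFER_FLUSH_EXPLICIT, &xfer);
 *    memcpy((ubyte *) map + 16, data, 64);
 *    pipe_buffer_flush_mapped_range(pipe, xfer, 16, 64);  // bytes 16..79
 *    pipe_buffer_unmap(pipe, xfer);
 */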

static INLINE void
pipe_buffer_write(struct pipe_context *pipe,
                  struct pipe_resource *buf,
                  unsigned offset,
                  unsigned size,
                  const void *data)
{
   struct pipe_box box;
   unsigned access = PIPE_TRANSFER_WRITE;

   if (offset == 0 && size == buf->width0) {
      /* Replacing the whole buffer: the old contents can be discarded
       * entirely, letting the driver rename the storage instead of stalling.
       */
      access |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
   } else {
      /* Only the written range is discarded; the rest is preserved. */
      access |= PIPE_TRANSFER_DISCARD_RANGE;
   }

   u_box_1d(offset, size, &box);

   pipe->transfer_inline_write(pipe,
                               buf,
                               0,
                               access,
                               &box,
                               data,
                               size,
                               0);
}

/**
 * Special case for writing non-overlapping ranges.
 *
 * We can avoid GPU/CPU synchronization when writing a range that has never
 * been written before, so the write is flagged PIPE_TRANSFER_UNSYNCHRONIZED.
 */
static INLINE void
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
                            struct pipe_resource *buf,
                            unsigned offset, unsigned size,
                            const void *data)
{
   struct pipe_box box;

   u_box_1d(offset, size, &box);

   pipe->transfer_inline_write(pipe,
                               buf,
                               0,
                               (PIPE_TRANSFER_WRITE |
                                PIPE_TRANSFER_UNSYNCHRONIZED),
                               &box,
                               data,
                               0, 0);
}
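
/*
 * Illustrative append pattern (not from the original file; 'buf', 'chunk0'
 * and 'chunk1' are hypothetical): successive writes to disjoint ranges
 * never need to wait for the GPU.
 *
 *    pipe_buffer_write_nooverlap(pipe, buf,   0, 256, chunk0);
 *    pipe_buffer_write_nooverlap(pipe, buf, 256, 256, chunk1);  // no stall
 */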


/**
 * Create a new buffer resource and immediately fill it with data.
 * \param bind  bitmask of PIPE_BIND_x flags
 * \param usage  bitmask of PIPE_USAGE_x flags
 */
static INLINE struct pipe_resource *
pipe_buffer_create_with_data(struct pipe_context *pipe,
                             unsigned bind,
                             unsigned usage,
                             unsigned size,
                             const void *ptr)
{
   struct pipe_resource *res = pipe_buffer_create(pipe->screen,
                                                  bind, usage, size);
   pipe_buffer_write_nooverlap(pipe, res, 0, size, ptr);
   return res;
}
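
/*
 * Example (a sketch, not part of the original header): uploading four
 * vertex positions at creation time.
 *
 *    static const float verts[4][2] = {
 *       { -1, -1 }, { 1, -1 }, { 1, 1 }, { -1, 1 }
 *    };
 *    struct pipe_resource *vbuf =
 *       pipe_buffer_create_with_data(pipe, PIPE_BIND_VERTEX_BUFFER,
 *                                    PIPE_USAGE_DEFAULT, sizeof(verts),
 *                                    verts);
 */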

/**
 * Read a range of a buffer into caller-provided memory.
 */
static INLINE void
pipe_buffer_read(struct pipe_context *pipe,
                 struct pipe_resource *buf,
                 unsigned offset,
                 unsigned size,
                 void *data)
{
   struct pipe_transfer *src_transfer;
   ubyte *map;

   map = (ubyte *) pipe_buffer_map_range(pipe,
                                         buf,
                                         offset, size,
                                         PIPE_TRANSFER_READ,
                                         &src_transfer);
   if (!map)
      return;

   memcpy(data, map, size);
   pipe_buffer_unmap(pipe, src_transfer);
}


/**
 * Map a 2D region of one layer of a resource for reading/writing.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 */
static INLINE void *
pipe_transfer_map(struct pipe_context *context,
                  struct pipe_resource *resource,
                  unsigned level, unsigned layer,
                  unsigned access,
                  unsigned x, unsigned y,
                  unsigned w, unsigned h,
                  struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_2d_zslice(x, y, layer, w, h, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}


/**
 * Map a 3D (texture) resource for reading/writing.
 * \param access  bitmask of PIPE_TRANSFER_x flags
 */
static INLINE void *
pipe_transfer_map_3d(struct pipe_context *context,
                     struct pipe_resource *resource,
                     unsigned level,
                     unsigned access,
                     unsigned x, unsigned y, unsigned z,
                     unsigned w, unsigned h, unsigned d,
                     struct pipe_transfer **transfer)
{
   struct pipe_box box;
   u_box_3d(x, y, z, w, h, d, &box);
   return context->transfer_map(context,
                                resource,
                                level,
                                access,
                                &box, transfer);
}

static INLINE void
pipe_transfer_unmap(struct pipe_context *context,
                    struct pipe_transfer *transfer)
{
   context->transfer_unmap(context, transfer);
}
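
/*
 * Example (a sketch, not part of the original header): reading back a
 * single texel from mipmap level 0, layer 0 of a 2D texture 'tex'
 * (hypothetical), assuming a 4-byte pixel format.
 *
 *    struct pipe_transfer *xfer;
 *    ubyte *map = (ubyte *)
 *       pipe_transfer_map(pipe, tex, 0, 0, PIPE_TRANSFER_READ,
 *                         x, y, 1, 1, &xfer);
 *    if (map) {
 *       memcpy(texel, map, 4);
 *       pipe_transfer_unmap(pipe, xfer);
 *    }
 */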

/**
 * Bind (or unbind, if buf is NULL) a whole buffer as a constant buffer for
 * the given shader stage and slot.
 */
static INLINE void
pipe_set_constant_buffer(struct pipe_context *pipe, uint shader, uint index,
                         struct pipe_resource *buf)
{
   if (buf) {
      struct pipe_constant_buffer cb;
      cb.buffer = buf;
      cb.buffer_offset = 0;
      cb.buffer_size = buf->width0;
      cb.user_buffer = NULL;
      pipe->set_constant_buffer(pipe, shader, index, &cb);
   } else {
      pipe->set_constant_buffer(pipe, shader, index, NULL);
   }
}
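
/*
 * Illustrative usage (not from the original file; 'constbuf' is a
 * hypothetical buffer resource): bind a buffer as fragment shader constant
 * slot 0, then unbind it.
 *
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, constbuf);
 *    ...
 *    pipe_set_constant_buffer(pipe, PIPE_SHADER_FRAGMENT, 0, NULL);
 */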


/**
 * Get the polygon offset enable/disable flag for the given polygon fill mode.
 * \param fill_mode  one of PIPE_POLYGON_MODE_POINT/LINE/FILL
 */
static INLINE boolean
util_get_offset(const struct pipe_rasterizer_state *templ,
                unsigned fill_mode)
{
   switch (fill_mode) {
   case PIPE_POLYGON_MODE_POINT:
      return templ->offset_point;
   case PIPE_POLYGON_MODE_LINE:
      return templ->offset_line;
   case PIPE_POLYGON_MODE_FILL:
      return templ->offset_tri;
   default:
      assert(0);
      return FALSE;
   }
}

/**
 * Return the minimum point size allowed by the given rasterizer state.
 * The point size should be clamped to this value at the rasterizer stage.
 */
static INLINE float
util_get_min_point_size(const struct pipe_rasterizer_state *state)
{
   return !state->point_quad_rasterization &&
          !state->point_smooth &&
          !state->multisample ? 1.0f : 0.0f;
}

/** Zero out a query result, according to the query type. */
static INLINE void
util_query_clear_result(union pipe_query_result *result, unsigned type)
{
   switch (type) {
   case PIPE_QUERY_OCCLUSION_PREDICATE:
   case PIPE_QUERY_SO_OVERFLOW_PREDICATE:
   case PIPE_QUERY_GPU_FINISHED:
      result->b = FALSE;
      break;
   case PIPE_QUERY_OCCLUSION_COUNTER:
   case PIPE_QUERY_TIMESTAMP:
   case PIPE_QUERY_TIME_ELAPSED:
   case PIPE_QUERY_PRIMITIVES_GENERATED:
   case PIPE_QUERY_PRIMITIVES_EMITTED:
      result->u64 = 0;
      break;
   case PIPE_QUERY_SO_STATISTICS:
      memset(&result->so_statistics, 0, sizeof(result->so_statistics));
      break;
   case PIPE_QUERY_TIMESTAMP_DISJOINT:
      memset(&result->timestamp_disjoint, 0, sizeof(result->timestamp_disjoint));
      break;
   case PIPE_QUERY_PIPELINE_STATISTICS:
      memset(&result->pipeline_statistics, 0, sizeof(result->pipeline_statistics));
      break;
   default:
      memset(result, 0, sizeof(*result));
   }
}

/** Convert PIPE_TEXTURE_x to TGSI_TEXTURE_x */
static INLINE unsigned
util_pipe_tex_to_tgsi_tex(enum pipe_texture_target pipe_tex_target,
                          unsigned nr_samples)
{
   switch (pipe_tex_target) {
   case PIPE_BUFFER:
      return TGSI_TEXTURE_BUFFER;

   case PIPE_TEXTURE_1D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D;

   case PIPE_TEXTURE_2D:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_MSAA : TGSI_TEXTURE_2D;

   case PIPE_TEXTURE_RECT:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_RECT;

   case PIPE_TEXTURE_3D:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_3D;

   case PIPE_TEXTURE_CUBE:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_CUBE;

   case PIPE_TEXTURE_1D_ARRAY:
      assert(nr_samples <= 1);
      return TGSI_TEXTURE_1D_ARRAY;

   case PIPE_TEXTURE_2D_ARRAY:
      return nr_samples > 1 ? TGSI_TEXTURE_2D_ARRAY_MSAA :
                              TGSI_TEXTURE_2D_ARRAY;

   case PIPE_TEXTURE_CUBE_ARRAY:
      return TGSI_TEXTURE_CUBE_ARRAY;

   default:
      assert(0 && "unexpected texture target");
      return TGSI_TEXTURE_UNKNOWN;
   }
}


/**
 * Copy a constant buffer binding, taking a reference to the underlying
 * resource.  If src is NULL, dst is reset to an empty binding.
 */
static INLINE void
util_copy_constant_buffer(struct pipe_constant_buffer *dst,
                          const struct pipe_constant_buffer *src)
{
   if (src) {
      pipe_resource_reference(&dst->buffer, src->buffer);
      dst->buffer_offset = src->buffer_offset;
      dst->buffer_size = src->buffer_size;
      dst->user_buffer = src->user_buffer;
   }
   else {
      pipe_resource_reference(&dst->buffer, NULL);
      dst->buffer_offset = 0;
      dst->buffer_size = 0;
      dst->user_buffer = NULL;
   }
}

/**
 * Return the index of the last addressable layer (zero-based) of the
 * resource at the given mipmap level.
 */
static INLINE unsigned
util_max_layer(const struct pipe_resource *r, unsigned level)
{
   switch (r->target) {
   case PIPE_TEXTURE_CUBE:
      return 6 - 1;
   case PIPE_TEXTURE_3D:
      return u_minify(r->depth0, level) - 1;
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      return r->array_size - 1;
   default:
      return 0;
   }
}

#ifdef __cplusplus
}
#endif

#endif /* U_INLINES_H */