/*
 * Copyright 2003 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * VMWARE AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keithw@vmware.com>
 */

#include <stdio.h>
#include "main/glheader.h"
#include "main/context.h"
#include "main/execmem.h"
#include "util/u_memory.h"
#include "swrast/s_chan.h"
#include "t_context.h"
#include "t_vertex.h"

#define DBG 0

/* Build and manage clipspace/ndc/window vertices.
 */

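/* Check whether the current vertex layout matches a cached fastpath:
 * attribute formats, input sizes and vertex offsets must all agree, and
 * input strides too when the fastpath was registered with match_strides.
 */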
static GLboolean match_fastpath( struct tnl_clipspace *vtx,
                                 const struct tnl_clipspace_fastpath *fp)
{
   GLuint j;

   if (vtx->attr_count != fp->attr_count)
      return GL_FALSE;

   for (j = 0; j < vtx->attr_count; j++)
      if (vtx->attr[j].format != fp->attr[j].format ||
          vtx->attr[j].inputsize != fp->attr[j].size ||
          vtx->attr[j].vertoffset != fp->attr[j].offset)
         return GL_FALSE;

   if (fp->match_strides) {
      if (vtx->vertex_size != fp->vertex_size)
         return GL_FALSE;

      for (j = 0; j < vtx->attr_count; j++)
         if (vtx->attr[j].inputstride != fp->attr[j].stride)
            return GL_FALSE;
   }

   return GL_TRUE;
}

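/* Search the cached fastpaths for one matching the current layout and,
 * on a hit, install its emit function.  Note the cached function may be
 * NULL for a layout already known to defeat codegen.
 */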
static GLboolean search_fastpath_emit( struct tnl_clipspace *vtx )
{
   struct tnl_clipspace_fastpath *fp = vtx->fastpath;

   for ( ; fp ; fp = fp->next) {
      if (match_fastpath(vtx, fp)) {
         vtx->emit = fp->func;
         return GL_TRUE;
      }
   }

   return GL_FALSE;
}

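/* Cache the current layout and emit function at the head of the
 * fastpath list for reuse by search_fastpath_emit().
 */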
void _tnl_register_fastpath( struct tnl_clipspace *vtx,
                             GLboolean match_strides )
{
   struct tnl_clipspace_fastpath *fastpath = CALLOC_STRUCT(tnl_clipspace_fastpath);
   GLuint i;

   if (fastpath == NULL) {
      _mesa_error_no_memory(__func__);
      return;
   }

   fastpath->vertex_size = vtx->vertex_size;
   fastpath->attr_count = vtx->attr_count;
   fastpath->match_strides = match_strides;
   fastpath->func = vtx->emit;
   fastpath->attr = malloc(vtx->attr_count * sizeof(fastpath->attr[0]));

   if (fastpath->attr == NULL) {
      free(fastpath);
      _mesa_error_no_memory(__func__);
      return;
   }

   for (i = 0; i < vtx->attr_count; i++) {
      fastpath->attr[i].format = vtx->attr[i].format;
      fastpath->attr[i].stride = vtx->attr[i].inputstride;
      fastpath->attr[i].size = vtx->attr[i].inputsize;
      fastpath->attr[i].offset = vtx->attr[i].vertoffset;
   }

   fastpath->next = vtx->fastpath;
   vtx->fastpath = fastpath;
}


/***********************************************************************
 * Build codegen functions or return generic ones:
 */
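/* vtx->emit initially points here (see invalidate_funcs()): capture the
 * current input strides and sizes, resolve the best available emit
 * function, install it in vtx->emit and call it.
 */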
static void choose_emit_func( struct gl_context *ctx, GLuint count, GLubyte *dest)
{
   struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   struct tnl_clipspace_attr *a = vtx->attr;
   const GLuint attr_count = vtx->attr_count;
   GLuint j;

   for (j = 0; j < attr_count; j++) {
      GLvector4f *vptr = VB->AttribPtr[a[j].attrib];
      a[j].inputstride = vptr->stride;
      a[j].inputsize = vptr->size;
      a[j].emit = a[j].insert[vptr->size - 1]; /* not always used */
   }

   vtx->emit = NULL;

   /* Does this match an existing (hardwired, codegen or known-bad)
    * fastpath?
    */
   if (search_fastpath_emit(vtx)) {
      /* Use this result.  If it is NULL, then it is already known
       * that the current state will fail for codegen and there is no
       * point trying again.
       */
   }
   else if (vtx->codegen_emit) {
      vtx->codegen_emit(ctx);
   }

   if (!vtx->emit) {
      _tnl_generate_hardwired_emit(ctx);
   }

   /* If neither codegen nor a hardwired fastpath produced an emit
    * function, fall back to the generic version:
    */
   if (!vtx->emit)
      vtx->emit = _tnl_generic_emit;

   vtx->emit( ctx, count, dest );
}


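/* Lazy chooser for the interpolation function: two-sided lighting and
 * unfilled polygons require the "extras" variant, which also handles
 * back-face colors and edge flags (see t_vertex_generic.c).
 */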
static void choose_interp_func( struct gl_context *ctx,
                                GLfloat t,
                                GLuint edst, GLuint eout, GLuint ein,
                                GLboolean force_boundary )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   GLboolean unfilled = (ctx->Polygon.FrontMode != GL_FILL ||
                         ctx->Polygon.BackMode != GL_FILL);
   GLboolean twosided = ctx->Light.Enabled && ctx->Light.Model.TwoSide;

   if (vtx->need_extras && (twosided || unfilled)) {
      vtx->interp = _tnl_generic_interp_extras;
   } else {
      vtx->interp = _tnl_generic_interp;
   }

   vtx->interp( ctx, t, edst, eout, ein, force_boundary );
}

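/* Lazy chooser for the provoking-vertex copy, with the same "extras"
 * criteria as choose_interp_func() above.
 */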
static void choose_copy_pv_func(  struct gl_context *ctx, GLuint edst, GLuint esrc )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   GLboolean unfilled = (ctx->Polygon.FrontMode != GL_FILL ||
                         ctx->Polygon.BackMode != GL_FILL);

   GLboolean twosided = ctx->Light.Enabled && ctx->Light.Model.TwoSide;

   if (vtx->need_extras && (twosided || unfilled)) {
      vtx->copy_pv = _tnl_generic_copy_pv_extras;
   } else {
      vtx->copy_pv = _tnl_generic_copy_pv;
   }

   vtx->copy_pv( ctx, edst, esrc );
}


/***********************************************************************
 * Public entrypoints, mostly dispatch to the above:
 */

/* Interpolate between two vertices to produce a third:
 */
void _tnl_interp( struct gl_context *ctx,
                  GLfloat t,
                  GLuint edst, GLuint eout, GLuint ein,
                  GLboolean force_boundary )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   vtx->interp( ctx, t, edst, eout, ein, force_boundary );
}

/* Copy colors from one vertex to another:
 */
void _tnl_copy_pv(  struct gl_context *ctx, GLuint edst, GLuint esrc )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   vtx->copy_pv( ctx, edst, esrc );
}

/* Extract a named attribute from a hardware vertex.  This has to
 * reverse any viewport transformation, swizzling or other conversions
 * which may have been applied:
 */
void _tnl_get_attr( struct gl_context *ctx, const void *vin,
                    GLenum attr, GLfloat *dest )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   const struct tnl_clipspace_attr *a = vtx->attr;
   const GLuint attr_count = vtx->attr_count;
   GLuint j;

   for (j = 0; j < attr_count; j++) {
      if (a[j].attrib == attr) {
         a[j].extract( &a[j], dest, (GLubyte *)vin + a[j].vertoffset );
         return;
      }
   }

   /* Else return the value from ctx->Current.
    */
   if (attr == _TNL_ATTRIB_POINTSIZE) {
      /* If the hardware vertex doesn't have point size then use the size
       * from struct gl_context.  XXX this will be wrong if drawing
       * attenuated points!
       */
      dest[0] = ctx->Point.Size;
   }
   else {
      memcpy( dest, ctx->Current.Attrib[attr], 4*sizeof(GLfloat));
   }
}

/* Complementary operation to _tnl_get_attr():
 */
void _tnl_set_attr( struct gl_context *ctx, void *vout,
                    GLenum attr, const GLfloat *src )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   const struct tnl_clipspace_attr *a = vtx->attr;
   const GLuint attr_count = vtx->attr_count;
   GLuint j;

   for (j = 0; j < attr_count; j++) {
      if (a[j].attrib == attr) {
         /* Always insert a full 4-component value: */
         a[j].insert[4-1]( &a[j], (GLubyte *)vout + a[j].vertoffset, src );
         return;
      }
   }
}

void *_tnl_get_vertex( struct gl_context *ctx, GLuint nr )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);

   return vtx->vertex_buf + nr * vtx->vertex_size;
}

void _tnl_invalidate_vertex_state( struct gl_context *ctx, GLuint new_state )
{
   /* if two-sided lighting changes or filled/unfilled polygon state changes */
   if (new_state & (_NEW_LIGHT | _NEW_POLYGON) ) {
      struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
      vtx->new_inputs = ~0;
      vtx->interp = choose_interp_func;
      vtx->copy_pv = choose_copy_pv_func;
   }
}

/* Reset the function pointers to the lazy "choose" stubs so they are
 * re-resolved on next use.
 */
static void invalidate_funcs( struct tnl_clipspace *vtx )
{
   vtx->emit = choose_emit_func;
   vtx->interp = choose_interp_func;
   vtx->copy_pv = choose_copy_pv_func;
   vtx->new_inputs = ~0;
}

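/* Install a new hardware vertex layout.  'map' describes 'nr' attributes
 * in emit order; EMIT_PAD entries insert map[i].offset bytes of padding.
 * If 'unpacked_size' is non-zero it gives the total vertex size and each
 * map[i].offset is honored; otherwise attributes are packed back-to-back
 * and the size is computed.  'vp' optionally supplies a viewport
 * transform to apply during emit.  Returns the vertex size in bytes.
 */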
GLuint _tnl_install_attrs( struct gl_context *ctx, const struct tnl_attr_map *map,
                           GLuint nr, const GLfloat *vp,
                           GLuint unpacked_size )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   GLuint offset = 0;
   GLuint i, j;

   assert(nr < _TNL_ATTRIB_MAX);
   assert(nr == 0 || map[0].attrib == VERT_ATTRIB_POS);

   vtx->new_inputs = ~0;
   vtx->need_viewport = GL_FALSE;

   if (vp) {
      vtx->need_viewport = GL_TRUE;
   }

   for (j = 0, i = 0; i < nr; i++) {
      const GLuint format = map[i].format;
      if (format == EMIT_PAD) {
         if (DBG)
            printf("%d: pad %d, offset %d\n", i,
                   map[i].offset, offset);

         offset += map[i].offset;
      }
      else {
         GLuint tmpoffset;

         if (unpacked_size)
            tmpoffset = map[i].offset;
         else
            tmpoffset = offset;

         if (vtx->attr_count != j ||
             vtx->attr[j].attrib != map[i].attrib ||
             vtx->attr[j].format != format ||
             vtx->attr[j].vertoffset != tmpoffset) {
            invalidate_funcs(vtx);

            vtx->attr[j].attrib = map[i].attrib;
            vtx->attr[j].format = format;
            vtx->attr[j].vp = vp;
            vtx->attr[j].insert = _tnl_format_info[format].insert;
            vtx->attr[j].extract = _tnl_format_info[format].extract;
            vtx->attr[j].vertattrsize = _tnl_format_info[format].attrsize;
            vtx->attr[j].vertoffset = tmpoffset;
         }

         if (DBG)
            printf("%d: %s, vp %p, offset %d\n", i,
                   _tnl_format_info[format].name, (void *)vp,
                   vtx->attr[j].vertoffset);

         offset += _tnl_format_info[format].attrsize;
         j++;
      }
   }

   vtx->attr_count = j;

   if (unpacked_size)
      vtx->vertex_size = unpacked_size;
   else
      vtx->vertex_size = offset;

   assert(vtx->vertex_size <= vtx->max_vertex_size);
   return vtx->vertex_size;
}


void _tnl_invalidate_vertices( struct gl_context *ctx, GLuint newinputs )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   vtx->new_inputs |= newinputs;
}


/* This event has broader use beyond this file - will move elsewhere
 * and probably invoke a driver callback.
 */
void _tnl_notify_pipeline_output_change( struct gl_context *ctx )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   invalidate_funcs(vtx);
}

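/* Advance the attribute input pointers to the element 'diff' positions
 * beyond the vertex just emitted.  The emit functions themselves step
 * each inputptr forward by one stride per vertex consumed (see
 * t_vertex_generic.c), so the pointers already sit one element ahead;
 * hence the diff -= 1 below.
 */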
static void adjust_input_ptrs( struct gl_context *ctx, GLint diff)
{
   struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   struct tnl_clipspace_attr *a = vtx->attr;
   const GLuint count = vtx->attr_count;
   GLuint j;

   diff -= 1;
   for (j = 0; j < count; ++j) {
      GLvector4f *vptr = VB->AttribPtr[a->attrib];
      (a++)->inputptr += diff * vptr->stride;
   }
}

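/* Point each attribute's inputptr at VB element 'start', and capture
 * the viewport scale/translate vectors if a viewport transform was
 * installed.
 */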
static void update_input_ptrs( struct gl_context *ctx, GLuint start )
{
   struct vertex_buffer *VB = &TNL_CONTEXT(ctx)->vb;
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   struct tnl_clipspace_attr *a = vtx->attr;
   const GLuint count = vtx->attr_count;
   GLuint j;

   for (j = 0; j < count; j++) {
      GLvector4f *vptr = VB->AttribPtr[a[j].attrib];

      if (vtx->emit != choose_emit_func) {
         assert(a[j].inputstride == vptr->stride);
         assert(a[j].inputsize == vptr->size);
      }

      a[j].inputptr = ((GLubyte *)vptr->data) + start * vptr->stride;
   }

   if (a->vp) {
      vtx->vp_scale[0] = a->vp[MAT_SX];
      vtx->vp_scale[1] = a->vp[MAT_SY];
      vtx->vp_scale[2] = a->vp[MAT_SZ];
      vtx->vp_scale[3] = 1.0;
      vtx->vp_xlate[0] = a->vp[MAT_TX];
      vtx->vp_xlate[1] = a->vp[MAT_TY];
      vtx->vp_xlate[2] = a->vp[MAT_TZ];
      vtx->vp_xlate[3] = 0.0;
   }
}

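/* Build hardware vertices for VB elements start..end into the
 * clipspace vertex buffer, at their own positions within it.
 */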
void _tnl_build_vertices( struct gl_context *ctx,
                          GLuint start,
                          GLuint end,
                          GLuint newinputs )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   update_input_ptrs( ctx, start );
   vtx->emit( ctx, end - start,
              (GLubyte *)(vtx->vertex_buf +
                          start * vtx->vertex_size));
}

/* Emit VB vertices start..end to dest.  Note that the VB vertex at
 * position start will be emitted to dest at position zero.
 */
void *_tnl_emit_vertices_to_buffer( struct gl_context *ctx,
                                    GLuint start,
                                    GLuint end,
                                    void *dest )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);

   update_input_ptrs(ctx, start);
   /* Note: dest should not be adjusted for non-zero 'start' values:
    */
   vtx->emit( ctx, end - start, (GLubyte*) dest );
   return (void *)((GLubyte *)dest + vtx->vertex_size * (end - start));
}

/* Emit indexed VB vertices start..end to dest.  Note that the VB vertex
 * at position start will be emitted to dest at position zero.
 */
void *_tnl_emit_indexed_vertices_to_buffer( struct gl_context *ctx,
                                            const GLuint *elts,
                                            GLuint start,
                                            GLuint end,
                                            void *dest )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
   GLuint oldIndex;
   GLubyte *cdest = dest;

   /* Emit the first vertex directly, then walk the remaining elements
    * by relative adjustment of the input pointers:
    */
   oldIndex = elts[start++];
   update_input_ptrs(ctx, oldIndex);
   vtx->emit( ctx, 1, cdest );
   cdest += vtx->vertex_size;

   for (; start < end; ++start) {
      adjust_input_ptrs(ctx, elts[start] - oldIndex);
      oldIndex = elts[start];
      vtx->emit( ctx, 1, cdest);
      cdest += vtx->vertex_size;
   }

   return (void *) cdest;
}

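/* One-time (or resize-time) setup: reset the layout, allocate the
 * clipspace vertex buffer and install the lazy chooser functions.
 */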
void _tnl_init_vertices( struct gl_context *ctx,
                         GLuint vb_size,
                         GLuint max_vertex_size )
{
   struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);

   _tnl_install_attrs( ctx, NULL, 0, NULL, 0 );

   vtx->need_extras = GL_TRUE;
   if (max_vertex_size > vtx->max_vertex_size) {
      _tnl_free_vertices( ctx );
      vtx->max_vertex_size = max_vertex_size;
      vtx->vertex_buf = align_calloc(vb_size * max_vertex_size, 32 );
      invalidate_funcs(vtx);
   }

   /* Scale factors for emitting normalized float attributes (e.g.
    * colors) into the software rasterizer's channel type:
    */
   switch(CHAN_TYPE) {
   case GL_UNSIGNED_BYTE:
      vtx->chan_scale[0] = 255.0;
      vtx->chan_scale[1] = 255.0;
      vtx->chan_scale[2] = 255.0;
      vtx->chan_scale[3] = 255.0;
      break;
   case GL_UNSIGNED_SHORT:
      vtx->chan_scale[0] = 65535.0;
      vtx->chan_scale[1] = 65535.0;
      vtx->chan_scale[2] = 65535.0;
      vtx->chan_scale[3] = 65535.0;
      break;
   default:
      vtx->chan_scale[0] = 1.0;
      vtx->chan_scale[1] = 1.0;
      vtx->chan_scale[2] = 1.0;
      vtx->chan_scale[3] = 1.0;
      break;
   }

   vtx->identity[0] = 0.0;
   vtx->identity[1] = 0.0;
   vtx->identity[2] = 0.0;
   vtx->identity[3] = 1.0;

   vtx->codegen_emit = NULL;

#ifdef USE_SSE_ASM
   if (!getenv("MESA_NO_CODEGEN"))
      vtx->codegen_emit = _tnl_generate_sse_emit;
#endif
}

void _tnl_free_vertices( struct gl_context *ctx )
{
   TNLcontext *tnl = TNL_CONTEXT(ctx);
   if (tnl) {
      struct tnl_clipspace *vtx = GET_VERTEX_STATE(ctx);
      struct tnl_clipspace_fastpath *fp, *tmp;

      align_free(vtx->vertex_buf);
      vtx->vertex_buf = NULL;

      for (fp = vtx->fastpath ; fp ; fp = tmp) {
         tmp = fp->next;
         free(fp->attr);

         /* KW: At the moment, fp->func is constrained to be allocated by
          * _mesa_exec_alloc(), as the hardwired fastpaths in
          * t_vertex_generic.c are handled specially.  It would be nice
          * to unify them, but this probably won't change until this
          * module gets another overhaul.
          */
         _mesa_exec_free((void *) fp->func);
         free(fp);
      }

      vtx->fastpath = NULL;
   }
}