/**************************************************************************
 *
 * Copyright 2008 VMware, Inc.
 * All Rights Reserved.
 * Copyright 2008 VMware, Inc.  All rights reserved.
 * Copyright 2009 Marek Olšák <maraeo@gmail.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Texture mapping utility functions.
 *
 * @author Brian Paul
 *         Marek Olšák
 */
37848b8605Smrg
38848b8605Smrg#include "pipe/p_defines.h"
39848b8605Smrg
40848b8605Smrg#include "util/u_debug.h"
41848b8605Smrg#include "util/u_texture.h"
42848b8605Smrg
43848b8605Smrgvoid util_map_texcoords2d_onto_cubemap(unsigned face,
44848b8605Smrg                                       const float *in_st, unsigned in_stride,
45848b8605Smrg                                       float *out_str, unsigned out_stride,
46848b8605Smrg                                       boolean allow_scale)
47848b8605Smrg{
48848b8605Smrg   int i;
49848b8605Smrg   float rx, ry, rz;
50848b8605Smrg
51848b8605Smrg   /* loop over quad verts */
52848b8605Smrg   for (i = 0; i < 4; i++) {
53848b8605Smrg      /* Compute sc = +/-scale and tc = +/-scale.
54848b8605Smrg       * Not +/-1 to avoid cube face selection ambiguity near the edges,
55848b8605Smrg       * though that can still sometimes happen with this scale factor...
56848b8605Smrg       *
57848b8605Smrg       * XXX: Yep, there is no safe scale factor that will prevent sampling
58848b8605Smrg       * the neighbouring face when stretching out.  A more reliable solution
59848b8605Smrg       * would be to clamp (sc, tc) against +/- 1.0-1.0/mipsize, in the shader.
60848b8605Smrg       *
61848b8605Smrg       * Also, this is not necessary when minifying, or 1:1 blits.
62848b8605Smrg       */
63848b8605Smrg      const float scale = allow_scale ? 0.9999f : 1.0f;
64848b8605Smrg      const float sc = (2 * in_st[0] - 1) * scale;
65848b8605Smrg      const float tc = (2 * in_st[1] - 1) * scale;
66848b8605Smrg
67848b8605Smrg      switch (face) {
68848b8605Smrg         case PIPE_TEX_FACE_POS_X:
69848b8605Smrg            rx = 1;
70848b8605Smrg            ry = -tc;
71848b8605Smrg            rz = -sc;
72848b8605Smrg            break;
73848b8605Smrg         case PIPE_TEX_FACE_NEG_X:
74848b8605Smrg            rx = -1;
75848b8605Smrg            ry = -tc;
76848b8605Smrg            rz = sc;
77848b8605Smrg            break;
78848b8605Smrg         case PIPE_TEX_FACE_POS_Y:
79848b8605Smrg            rx = sc;
80848b8605Smrg            ry = 1;
81848b8605Smrg            rz = tc;
82848b8605Smrg            break;
83848b8605Smrg         case PIPE_TEX_FACE_NEG_Y:
84848b8605Smrg            rx = sc;
85848b8605Smrg            ry = -1;
86848b8605Smrg            rz = -tc;
87848b8605Smrg            break;
88848b8605Smrg         case PIPE_TEX_FACE_POS_Z:
89848b8605Smrg            rx = sc;
90848b8605Smrg            ry = -tc;
91848b8605Smrg            rz = 1;
92848b8605Smrg            break;
93848b8605Smrg         case PIPE_TEX_FACE_NEG_Z:
94848b8605Smrg            rx = -sc;
95848b8605Smrg            ry = -tc;
96848b8605Smrg            rz = -1;
97848b8605Smrg            break;
98848b8605Smrg         default:
99848b8605Smrg            rx = ry = rz = 0;
100848b8605Smrg            assert(0);
101848b8605Smrg      }
102848b8605Smrg
103848b8605Smrg      out_str[0] = rx; /*s*/
104848b8605Smrg      out_str[1] = ry; /*t*/
105848b8605Smrg      out_str[2] = rz; /*r*/
106848b8605Smrg
107848b8605Smrg      in_st += in_stride;
108848b8605Smrg      out_str += out_stride;
109848b8605Smrg   }
110848b8605Smrg}
111