indirect_glx.c revision af69d88d
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Soft-
 * ware"), to deal in the Software without restriction, including without
 * limitation the rights to use, copy, modify, merge, publish, distribute,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, provided that the above copyright
 * notice(s) and this permission notice appear in all copies of the Soft-
 * ware and that both the above copyright notice(s) and this permission
 * notice appear in supporting documentation.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
 * ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY
 * RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN
 * THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSE-
 * QUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFOR-
 * MANCE OF THIS SOFTWARE.
 *
 * Except as contained in this notice, the name of a copyright holder shall
 * not be used in advertising or otherwise to promote the sale, use or
 * other dealings in this Software without prior written authorization of
 * the copyright holder.
 *
 * Authors:
 *   Kristian Høgsberg (krh@bitplanet.net)
 */

#include "glapi.h"
#include "glxclient.h"

#ifndef GLX_USE_APPLEGL

extern struct _glapi_table *__glXNewIndirectAPI(void);

/*
** All indirect rendering contexts will share the same indirect dispatch table.
*/
static struct _glapi_table *IndirectAPI = NULL;

static void
indirect_destroy_context(struct glx_context *gc)
{
   __glXFreeVertexArrayState(gc);

   free((char *) gc->vendor);
   free((char *) gc->renderer);
   free((char *) gc->version);
   free((char *) gc->extensions);
   __glFreeAttributeState(gc);
   free((char *) gc->buf);
   free((char *) gc->client_state_private);
   free((char *) gc);
}
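
/* Issue the GLX request that makes the indirect context gc_id current for
 * this thread, releasing the binding identified by gc_tag.  When draw and
 * read differ, the GLX 1.3 MakeContextCurrent request is used on servers
 * that advertise GLX 1.3 or newer, and the GLX_SGI_make_current_read vendor
 * request otherwise.  On success the tag the server assigned to the new
 * context is returned through out_tag.
 */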
static Bool
SendMakeCurrentRequest(Display * dpy, CARD8 opcode,
                       GLXContextID gc_id, GLXContextTag gc_tag,
                       GLXDrawable draw, GLXDrawable read,
                       GLXContextTag *out_tag)
{
   xGLXMakeCurrentReply reply;
   Bool ret;

   LockDisplay(dpy);

   if (draw == read) {
      xGLXMakeCurrentReq *req;

      GetReq(GLXMakeCurrent, req);
      req->reqType = opcode;
      req->glxCode = X_GLXMakeCurrent;
      req->drawable = draw;
      req->context = gc_id;
      req->oldContextTag = gc_tag;
   }
   else {
      struct glx_display *priv = __glXInitialize(dpy);

      /* If the server can support the GLX 1.3 version, we should
       * prefer that.  Not only that, some servers support GLX 1.3 but
       * not the SGI extension.
       */

      if ((priv->majorVersion > 1) || (priv->minorVersion >= 3)) {
         xGLXMakeContextCurrentReq *req;

         GetReq(GLXMakeContextCurrent, req);
         req->reqType = opcode;
         req->glxCode = X_GLXMakeContextCurrent;
         req->drawable = draw;
         req->readdrawable = read;
         req->context = gc_id;
         req->oldContextTag = gc_tag;
      }
      else {
         xGLXVendorPrivateWithReplyReq *vpreq;
         xGLXMakeCurrentReadSGIReq *req;

         GetReqExtra(GLXVendorPrivateWithReply,
                     sz_xGLXMakeCurrentReadSGIReq -
                     sz_xGLXVendorPrivateWithReplyReq, vpreq);
         req = (xGLXMakeCurrentReadSGIReq *) vpreq;
         req->reqType = opcode;
         req->glxCode = X_GLXVendorPrivateWithReply;
         req->vendorCode = X_GLXvop_MakeCurrentReadSGI;
         req->drawable = draw;
         req->readable = read;
         req->context = gc_id;
         req->oldContextTag = gc_tag;
      }
   }

   ret = _XReply(dpy, (xReply *) &reply, 0, False);

   /* The reply contents are only valid if the request succeeded. */
   if (ret && out_tag)
      *out_tag = reply.contextTag;

   UnlockDisplay(dpy);
   SyncHandle();

   return ret;
}

static int
indirect_bind_context(struct glx_context *gc, struct glx_context *old,
                      GLXDrawable draw, GLXDrawable read)
{
   GLXContextTag tag;
   __GLXattribute *state;
   Display *dpy = gc->psc->dpy;
   int opcode = __glXSetupForCommand(dpy);
   Bool sent;

   if (old != &dummyContext && !old->isDirect && old->psc->dpy == dpy) {
      tag = old->currentContextTag;
      old->currentContextTag = 0;
   } else {
      tag = 0;
   }

   sent = SendMakeCurrentRequest(dpy, opcode, gc->xid, tag, draw, read,
                                 &gc->currentContextTag);

   if (!IndirectAPI)
      IndirectAPI = __glXNewIndirectAPI();
   _glapi_set_dispatch(IndirectAPI);

   state = gc->client_state_private;
   if (state->array_state == NULL) {
      glGetString(GL_EXTENSIONS);
      glGetString(GL_VERSION);
      __glXInitVertexArrayState(gc);
   }

   return !sent;
}

static void
indirect_unbind_context(struct glx_context *gc, struct glx_context *new)
{
   Display *dpy = gc->psc->dpy;
   int opcode = __glXSetupForCommand(dpy);

   if (gc == new)
      return;

   /* We are either switching to no context, away from an indirect
    * context to a direct context, or from one dpy to another, and have
    * to send a request to the dpy to unbind the previous context.
    */
   if (!new || new->isDirect || new->psc->dpy != dpy) {
      SendMakeCurrentRequest(dpy, opcode, None,
                             gc->currentContextTag, None, None, NULL);
      gc->currentContextTag = 0;
   }
}

static void
indirect_wait_gl(struct glx_context *gc)
{
   xGLXWaitGLReq *req;
   Display *dpy = gc->currentDpy;

   /* Flush any pending commands out */
   __glXFlushRenderBuffer(gc, gc->pc);

   /* Send the glXWaitGL request */
   LockDisplay(dpy);
   GetReq(GLXWaitGL, req);
   req->reqType = gc->majorOpcode;
   req->glxCode = X_GLXWaitGL;
   req->contextTag = gc->currentContextTag;
   UnlockDisplay(dpy);
   SyncHandle();
}

static void
indirect_wait_x(struct glx_context *gc)
{
   xGLXWaitXReq *req;
   Display *dpy = gc->currentDpy;

   /* Flush any pending commands out */
   __glXFlushRenderBuffer(gc, gc->pc);

   LockDisplay(dpy);
   GetReq(GLXWaitX, req);
   req->reqType = gc->majorOpcode;
   req->glxCode = X_GLXWaitX;
   req->contextTag = gc->currentContextTag;
   UnlockDisplay(dpy);
   SyncHandle();
}

static void
indirect_use_x_font(struct glx_context *gc,
                    Font font, int first, int count, int listBase)
{
   xGLXUseXFontReq *req;
   Display *dpy = gc->currentDpy;

   /* Flush any pending commands out */
   __glXFlushRenderBuffer(gc, gc->pc);

   /* Send the glXUseXFont request */
   LockDisplay(dpy);
   GetReq(GLXUseXFont, req);
   req->reqType = gc->majorOpcode;
   req->glxCode = X_GLXUseXFont;
   req->contextTag = gc->currentContextTag;
   req->font = font;
   req->first = first;
   req->count = count;
   req->listBase = listBase;
   UnlockDisplay(dpy);
   SyncHandle();
}
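
/* glXBindTexImageEXT for indirect contexts (GLX_EXT_texture_from_pixmap).
 * The vendor-private payload that follows the request header is laid out
 * as:
 *
 *     CARD32 drawable;
 *     INT32  buffer;
 *     CARD32 num_attribs;
 *     CARD32 attribs[num_attribs * 2];    -- <attribute, value> pairs
 *
 * copied from the None-terminated attrib_list, which is why the request
 * below reserves 12 + 8 * i extra bytes.
 */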
static void
indirect_bind_tex_image(Display * dpy,
                        GLXDrawable drawable,
                        int buffer, const int *attrib_list)
{
   xGLXVendorPrivateReq *req;
   struct glx_context *gc = __glXGetCurrentContext();
   CARD32 *drawable_ptr;
   INT32 *buffer_ptr;
   CARD32 *num_attrib_ptr;
   CARD32 *attrib_ptr;
   CARD8 opcode;
   unsigned int i;

   i = 0;
   if (attrib_list) {
      while (attrib_list[i * 2] != None)
         i++;
   }

   opcode = __glXSetupForCommand(dpy);
   if (!opcode)
      return;

   LockDisplay(dpy);
   GetReqExtra(GLXVendorPrivate, 12 + 8 * i, req);
   req->reqType = opcode;
   req->glxCode = X_GLXVendorPrivate;
   req->vendorCode = X_GLXvop_BindTexImageEXT;
   req->contextTag = gc->currentContextTag;

   drawable_ptr = (CARD32 *) (req + 1);
   buffer_ptr = (INT32 *) (drawable_ptr + 1);
   num_attrib_ptr = (CARD32 *) (buffer_ptr + 1);
   attrib_ptr = (CARD32 *) (num_attrib_ptr + 1);

   *drawable_ptr = drawable;
   *buffer_ptr = buffer;
   *num_attrib_ptr = (CARD32) i;

   i = 0;
   if (attrib_list) {
      while (attrib_list[i * 2] != None) {
         *attrib_ptr++ = (CARD32) attrib_list[i * 2 + 0];
         *attrib_ptr++ = (CARD32) attrib_list[i * 2 + 1];
         i++;
      }
   }

   UnlockDisplay(dpy);
   SyncHandle();
}

static void
indirect_release_tex_image(Display * dpy, GLXDrawable drawable, int buffer)
{
   xGLXVendorPrivateReq *req;
   struct glx_context *gc = __glXGetCurrentContext();
   CARD32 *drawable_ptr;
   INT32 *buffer_ptr;
   CARD8 opcode;

   opcode = __glXSetupForCommand(dpy);
   if (!opcode)
      return;

   LockDisplay(dpy);
   GetReqExtra(GLXVendorPrivate, sizeof(CARD32) + sizeof(INT32), req);
   req->reqType = opcode;
   req->glxCode = X_GLXVendorPrivate;
   req->vendorCode = X_GLXvop_ReleaseTexImageEXT;
   req->contextTag = gc->currentContextTag;

   drawable_ptr = (CARD32 *) (req + 1);
   buffer_ptr = (INT32 *) (drawable_ptr + 1);

   *drawable_ptr = drawable;
   *buffer_ptr = buffer;

   UnlockDisplay(dpy);
   SyncHandle();
}

static const struct glx_context_vtable indirect_context_vtable = {
   .destroy = indirect_destroy_context,
   .bind = indirect_bind_context,
   .unbind = indirect_unbind_context,
   .wait_gl = indirect_wait_gl,
   .wait_x = indirect_wait_x,
   .use_x_font = indirect_use_x_font,
   .bind_tex_image = indirect_bind_tex_image,
   .release_tex_image = indirect_release_tex_image,
   .get_proc_address = NULL,
};

/**
 * \todo Eliminate \c __glXInitVertexArrayState.  Replace it with a new
 * function called \c __glXAllocateClientState that allocates the memory and
 * does all the initialization (including the pixel pack / unpack).
 *
 * \note
 * This function is \b not the place to validate the context creation
 * parameters.  It is just the allocator for the \c glx_context.
 */
_X_HIDDEN struct glx_context *
indirect_create_context(struct glx_screen *psc,
                        struct glx_config *mode,
                        struct glx_context *shareList, int renderType)
{
   struct glx_context *gc;
   int bufSize;
   CARD8 opcode;
   __GLXattribute *state;

   opcode = __glXSetupForCommand(psc->dpy);
   if (!opcode) {
      return NULL;
   }

   /* Allocate our context record */
   gc = calloc(1, sizeof *gc);
   if (!gc) {
      /* Out of memory */
      return NULL;
   }

   glx_context_init(gc, psc, mode);
   gc->isDirect = GL_FALSE;
   gc->vtable = &indirect_context_vtable;
   state = calloc(1, sizeof(struct __GLXattributeRec));
   gc->renderType = renderType;

   if (state == NULL) {
      /* Out of memory */
      free(gc);
      return NULL;
   }
   gc->client_state_private = state;
   state->NoDrawArraysProtocol = (getenv("LIBGL_NO_DRAWARRAYS") != NULL);

   /*
   ** Create a temporary buffer to hold GLX rendering commands.  The size
   ** of the buffer is selected so that the maximum number of GLX rendering
   ** commands can fit in a single X packet and still have room in the X
   ** packet for the GLXRenderReq header.
   */
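   /* For example (illustrative numbers): if XMaxRequestSize() reports a
    * maximum request length of N 4-byte units, the buffer is sized to
    * 4 * N - sz_xGLXRenderReq bytes, so a full buffer plus the 8-byte
    * X_GLXRender header exactly fills one maximum-size X request.
    */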
   bufSize = (XMaxRequestSize(psc->dpy) * 4) - sz_xGLXRenderReq;
   gc->buf = malloc(bufSize);
   if (!gc->buf) {
      free(gc->client_state_private);
      free(gc);
      return NULL;
   }
   gc->bufSize = bufSize;

   /* Fill in the new context */
   gc->renderMode = GL_RENDER;

   state->storePack.alignment = 4;
   state->storeUnpack.alignment = 4;

   gc->attributes.stackPointer = &gc->attributes.stack[0];

   /*
   ** PERFORMANCE NOTE: A mode dependent fill image can speed things up.
   */
   gc->fillImage = __glFillImage;
   gc->pc = gc->buf;
   gc->bufEnd = gc->buf + bufSize;
   gc->isDirect = GL_FALSE;
   if (__glXDebug) {
      /*
      ** Set limit register so that there will be one command per packet
      */
      gc->limit = gc->buf;
   }
   else {
      gc->limit = gc->buf + bufSize - __GLX_BUFFER_LIMIT_SIZE;
   }
   gc->majorOpcode = opcode;

   /*
   ** Constrain the maximum drawing command size allowed to be
   ** transferred using the X_GLXRender protocol request.  First
   ** constrain by a software limit, then constrain by the protocol
   ** limit.
   */
   if (bufSize > __GLX_RENDER_CMD_SIZE_LIMIT) {
      bufSize = __GLX_RENDER_CMD_SIZE_LIMIT;
   }
   if (bufSize > __GLX_MAX_RENDER_CMD_SIZE) {
      bufSize = __GLX_MAX_RENDER_CMD_SIZE;
   }
   gc->maxSmallRenderCommandSize = bufSize;


   return gc;
}
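
/* Screen-vtable hook used on the glXCreateContextAttribsARB path for
 * indirect contexts.  The attribs array holds num_attribs <name, value>
 * pairs; the only pair acted on here is GLX_RENDER_TYPE -- for example
 * (illustrative) the pair { GLX_RENDER_TYPE, GLX_COLOR_INDEX_TYPE } would
 * override the default GLX_RGBA_TYPE.  Everything else is left for the
 * server to validate.
 */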
_X_HIDDEN struct glx_context *
indirect_create_context_attribs(struct glx_screen *base,
                                struct glx_config *config_base,
                                struct glx_context *shareList,
                                unsigned num_attribs,
                                const uint32_t *attribs,
                                unsigned *error)
{
   int renderType = GLX_RGBA_TYPE;
   unsigned i;

   /* The error parameter is only used on the server so that correct GLX
    * protocol errors can be generated.  On the client, it can be ignored.
    */
   (void) error;

   /* All of the attribute validation for indirect contexts is handled on the
    * server, so there's not much to do here.  Still, we need to parse the
    * attributes to correctly set renderType.
    */
   for (i = 0; i < num_attribs; i++) {
      if (attribs[i * 2] == GLX_RENDER_TYPE)
         renderType = attribs[i * 2 + 1];
   }

   return indirect_create_context(base, config_base, shareList, renderType);
}

static const struct glx_screen_vtable indirect_screen_vtable = {
   .create_context = indirect_create_context,
   .create_context_attribs = indirect_create_context_attribs,
   .query_renderer_integer = NULL,
   .query_renderer_string = NULL,
};

_X_HIDDEN struct glx_screen *
indirect_create_screen(int screen, struct glx_display * priv)
{
   struct glx_screen *psc;

   psc = calloc(1, sizeof *psc);
   if (psc == NULL)
      return NULL;

   glx_screen_init(psc, screen, priv);
   psc->vtable = &indirect_screen_vtable;

   return psc;
}

#endif