xcb_io.c revision d4a3aaf4
/* Copyright (C) 2003-2006 Jamey Sharp, Josh Triplett
 * This file is licensed under the MIT license. See the file COPYING. */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "Xlibint.h"
#include "locking.h"
#include "Xprivate.h"
#include "Xxcbint.h"
#include <xcb/xcbext.h>

#include <assert.h>
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

#define xcb_fail_assert(_message, _var) { \
    unsigned int _var = 1; \
    fprintf(stderr, "[xcb] Aborting, sorry about that.\n"); \
    assert(!_var); \
}

#define throw_thread_fail_assert(_message, _var) { \
    fprintf(stderr, "[xcb] " _message "\n"); \
    fprintf(stderr, "[xcb] Most likely this is a multi-threaded client " \
                    "and XInitThreads has not been called\n"); \
    xcb_fail_assert(_message, _var); \
}

/* XXX: It would probably be most useful if we stored the last-processed
 * request, so we could find the offender from the message. */
#define throw_extlib_fail_assert(_message, _var) { \
    fprintf(stderr, "[xcb] " _message "\n"); \
    fprintf(stderr, "[xcb] This is most likely caused by a broken X " \
                    "extension library\n"); \
    xcb_fail_assert(_message, _var); \
}

static void return_socket(void *closure)
{
    Display *dpy = closure;
    InternalLockDisplay(dpy, /* don't skip user locks */ 0);
    _XSend(dpy, NULL, 0);
    dpy->bufmax = dpy->buffer;
    UnlockDisplay(dpy);
}
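/* require_socket() ensures that Xlib currently owns the write side of the
 * connection.  If it does not (bufmax == buffer, as left behind by
 * return_socket() above), it takes the socket back from XCB with
 * xcb_take_socket(), registering return_socket() as the callback XCB
 * invokes when it needs the socket again, and resynchronizes Xlib's
 * request sequence number from the count of requests XCB has already
 * sent. */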
static Bool require_socket(Display *dpy)
{
    if(dpy->bufmax == dpy->buffer)
    {
        uint64_t sent;
        int flags = 0;
        /* if we don't own the event queue, we have to ask XCB
         * to set our errors aside for us. */
        if(dpy->xcb->event_owner != XlibOwnsEventQueue)
            flags = XCB_REQUEST_CHECKED;
        if(!xcb_take_socket(dpy->xcb->connection, return_socket, dpy,
                            flags, &sent)) {
            _XIOError(dpy);
            return False;
        }
        dpy->xcb->last_flushed = sent;
        X_DPY_SET_REQUEST(dpy, sent);
        dpy->bufmax = dpy->xcb->real_bufmax;
    }
    return True;
}

/* Call internal connection callbacks for any fds that are currently
 * ready to read. This function will not block unless one of the
 * callbacks blocks.
 *
 * This code borrowed from _XWaitForReadable. Inverse call tree:
 * _XRead
 * _XWaitForWritable
 * _XFlush
 * _XSend
 * _XEventsQueued
 * _XReadEvents
 * _XRead[0-9]+
 * _XAllocIDs
 * _XReply
 * _XEatData
 * _XReadPad
 */
static Bool check_internal_connections(Display *dpy)
{
    struct _XConnectionInfo *ilist;
    fd_set r_mask;
    struct timeval tv;
    int result;
    int highest_fd = -1;

    if(dpy->flags & XlibDisplayProcConni || !dpy->im_fd_info)
        return True;

    FD_ZERO(&r_mask);
    for(ilist = dpy->im_fd_info; ilist; ilist = ilist->next)
    {
        assert(ilist->fd >= 0);
        FD_SET(ilist->fd, &r_mask);
        if(ilist->fd > highest_fd)
            highest_fd = ilist->fd;
    }
    assert(highest_fd >= 0);

    tv.tv_sec = 0;
    tv.tv_usec = 0;
    result = select(highest_fd + 1, &r_mask, NULL, NULL, &tv);

    if(result == -1)
    {
        if(errno != EINTR) {
            _XIOError(dpy);
            return False;
        }

        return True;
    }

    for(ilist = dpy->im_fd_info; result && ilist; ilist = ilist->next)
        if(FD_ISSET(ilist->fd, &r_mask))
        {
            _XProcessInternalConnection(dpy, ilist);
            --result;
        }

    return True;
}

static PendingRequest *append_pending_request(Display *dpy, uint64_t sequence)
{
    PendingRequest *node = malloc(sizeof(PendingRequest));
    assert(node);
    node->next = NULL;
    node->sequence = sequence;
    node->reply_waiter = 0;
    if(dpy->xcb->pending_requests_tail)
    {
        if (XLIB_SEQUENCE_COMPARE(dpy->xcb->pending_requests_tail->sequence,
                                  >=, node->sequence))
            throw_thread_fail_assert("Unknown sequence number "
                                     "while appending request",
                                     xcb_xlib_unknown_seq_number);
        if (dpy->xcb->pending_requests_tail->next != NULL)
            throw_thread_fail_assert("Unknown request in queue "
                                     "while appending request",
                                     xcb_xlib_unknown_req_pending);
        dpy->xcb->pending_requests_tail->next = node;
    }
    else
        dpy->xcb->pending_requests = node;
    dpy->xcb->pending_requests_tail = node;
    return node;
}

static void dequeue_pending_request(Display *dpy, PendingRequest *req)
{
    if (req != dpy->xcb->pending_requests)
        throw_thread_fail_assert("Unknown request in queue while "
                                 "dequeuing",
                                 xcb_xlib_unknown_req_in_deq);

    dpy->xcb->pending_requests = req->next;
    if(!dpy->xcb->pending_requests)
    {
        if (req != dpy->xcb->pending_requests_tail)
            throw_thread_fail_assert("Unknown request in queue "
                                     "while dequeuing",
                                     xcb_xlib_unknown_req_in_deq);
        dpy->xcb->pending_requests_tail = NULL;
    }
    else if (XLIB_SEQUENCE_COMPARE(req->sequence, >=,
                                   dpy->xcb->pending_requests->sequence))
        throw_thread_fail_assert("Unknown sequence number while "
                                 "dequeuing request",
                                 xcb_xlib_threads_sequence_lost);

    free(req);
}

static int handle_error(Display *dpy, xError *err, Bool in_XReply)
{
    _XExtension *ext;
    int ret_code;
    /* Oddly, Xlib only allows extensions to suppress errors when
     * those errors were seen by _XReply. */
    if(in_XReply)
        /*
         * we had better see if there is an extension that may
         * want to suppress the error.
         */
        for(ext = dpy->ext_procs; ext; ext = ext->next)
            if(ext->error && (*ext->error)(dpy, err, &ext->codes, &ret_code))
                return ret_code;
    _XError(dpy, err);
    return 0;
}

/* Widen a 32-bit sequence number into a 64-bit (uint64_t) sequence number.
 * Treating the comparison as a 1 and shifting it avoids a conditional branch.
 */
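/* Worked example (values chosen purely for illustration): if *wide is
 * 0x1FFFFFFFE (high word 1, low word 0xFFFFFFFE) and narrow is 3, then
 * new is 0x100000003.  Since new < *wide, the low 32 bits must have
 * wrapped, so (new < *wide) evaluates to 1 and adding 1 << 32 yields
 * 0x200000003, the correctly widened sequence number. */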
static void widen(uint64_t *wide, unsigned int narrow)
{
    uint64_t new = (*wide & ~((uint64_t)0xFFFFFFFFUL)) | narrow;
    *wide = new + (((uint64_t)(new < *wide)) << 32);
}

/* Thread-safety rules:
 *
 * At most one thread can be reading from XCB's event queue at a time.
 * If you are not the current event-reading thread and you need to find
 * out if an event is available, you must wait.
 *
 * The same rule applies for reading replies.
 *
 * A single thread cannot be both the event-reading and the
 * reply-reading thread at the same time.
 *
 * We always look at both the current event and the first pending reply
 * to decide which to process next.
 *
 * We always process all responses in sequence-number order, which may
 * mean waiting for another thread (either the event_waiter or the
 * reply_waiter) to handle an earlier response before we can process or
 * return a later one. If so, we wait on the corresponding condition
 * variable for that thread to process the response and wake us up.
 */

static xcb_generic_reply_t *poll_for_event(Display *dpy, Bool queued_only)
{
    /* Make sure the Display's sequence numbers are valid */
    if (!require_socket(dpy))
        return NULL;

    /* Precondition: This thread can safely get events from XCB. */
    assert(dpy->xcb->event_owner == XlibOwnsEventQueue && !dpy->xcb->event_waiter);

    if(!dpy->xcb->next_event) {
        if(queued_only)
            dpy->xcb->next_event = xcb_poll_for_queued_event(dpy->xcb->connection);
        else
            dpy->xcb->next_event = xcb_poll_for_event(dpy->xcb->connection);
    }

    if(dpy->xcb->next_event)
    {
        PendingRequest *req = dpy->xcb->pending_requests;
        xcb_generic_event_t *event = dpy->xcb->next_event;
        uint64_t event_sequence = X_DPY_GET_LAST_REQUEST_READ(dpy);
        widen(&event_sequence, event->full_sequence);
        if(!req || XLIB_SEQUENCE_COMPARE(event_sequence, <, req->sequence)
               || (event->response_type != X_Error && event_sequence == req->sequence))
        {
            uint64_t request = X_DPY_GET_REQUEST(dpy);
            if (XLIB_SEQUENCE_COMPARE(event_sequence, >, request))
            {
                throw_thread_fail_assert("Unknown sequence "
                                         "number while "
                                         "processing queue",
                                         xcb_xlib_threads_sequence_lost);
            }
            X_DPY_SET_LAST_REQUEST_READ(dpy, event_sequence);
            dpy->xcb->next_event = NULL;
            return (xcb_generic_reply_t *) event;
        }
    }
    return NULL;
}
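/* poll_for_response() returns the next response (event, reply, or error)
 * without blocking, or NULL if nothing can be returned yet.  Responses are
 * handed out strictly in sequence order: if a reply or error comes off the
 * wire while an older event is still queued, the event is returned first
 * and the reply/error is parked in dpy->xcb->next_response for the next
 * call. */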
static xcb_generic_reply_t *poll_for_response(Display *dpy)
{
    void *response;
    xcb_generic_reply_t *event;
    PendingRequest *req;

    while(1)
    {
        xcb_generic_error_t *error = NULL;
        uint64_t request;
        Bool poll_queued_only = dpy->xcb->next_response != NULL;

        /* Step 1: is there an event in our queue before the next
         * reply/error? Return that first.
         *
         * If we don't have a reply/error saved from an earlier
         * invocation we check incoming events too, otherwise only
         * the ones already queued.
         */
        response = poll_for_event(dpy, poll_queued_only);
        if(response)
            break;

        /* Step 2:
         * Response is NULL, i.e. we have no events.
         * If we are not waiting for a reply or some other thread
         * had dibs on the next reply, exit.
         */
        req = dpy->xcb->pending_requests;
        if(!req || req->reply_waiter)
            break;

        /* Step 3:
         * We have some response (error or reply) related to req
         * saved from an earlier invocation of this function. Let's
         * use that one.
         */
        if(dpy->xcb->next_response)
        {
            if (((xcb_generic_reply_t*)dpy->xcb->next_response)->response_type == X_Error)
            {
                error = dpy->xcb->next_response;
                response = NULL;
            }
            else
            {
                response = dpy->xcb->next_response;
                error = NULL;
            }
            dpy->xcb->next_response = NULL;
        }
        else
        {
            /* Step 4: pull down the next response from the wire. This
             * should be the 99% case.
             * xcb_poll_for_reply64() may also pull down events that
             * happened before the reply.
             */
            if(!xcb_poll_for_reply64(dpy->xcb->connection, req->sequence,
                                     &response, &error)) {
                /* if there is no reply/error, xcb_poll_for_reply64
                 * may have read events. Return that. */
                response = poll_for_event(dpy, True);
                break;
            }

            /* Step 5: we have a new response, but we may also have some
             * events that happened before that response. Return those
             * first and save our reply/error for the next invocation.
             */
            event = poll_for_event(dpy, True);
            if(event)
            {
                dpy->xcb->next_response = error ? error : response;
                response = event;
                break;
            }
        }

        /* Step 6: actually handle the reply/error now... */
        request = X_DPY_GET_REQUEST(dpy);
        if(XLIB_SEQUENCE_COMPARE(req->sequence, >, request))
        {
            throw_thread_fail_assert("Unknown sequence number "
                                     "while awaiting reply",
                                     xcb_xlib_threads_sequence_lost);
        }
        X_DPY_SET_LAST_REQUEST_READ(dpy, req->sequence);
        if(response)
            break;
        dequeue_pending_request(dpy, req);
        if(error)
            return (xcb_generic_reply_t *) error;
    }
    return response;
}

static void handle_response(Display *dpy, xcb_generic_reply_t *response, Bool in_XReply)
{
    _XAsyncHandler *async, *next;
    switch(response->response_type)
    {
    case X_Reply:
        for(async = dpy->async_handlers; async; async = next)
        {
            next = async->next;
            if(async->handler(dpy, (xReply *) response, (char *) response, sizeof(xReply) + (response->length << 2), async->data))
                break;
        }
        break;

    case X_Error:
        handle_error(dpy, (xError *) response, in_XReply);
        break;

    default: /* event */
        /* GenericEvents may be > 32 bytes. In this case, the
         * event struct is trailed by the additional bytes. the
         * xcb_generic_event_t struct uses 4 bytes for internal
         * numbering, so we need to shift the trailing data to
         * be after the first 32 bytes. */
        if(response->response_type == GenericEvent && ((xcb_ge_event_t *) response)->length)
        {
            xcb_ge_event_t *event = (xcb_ge_event_t *) response;
            memmove(&event->full_sequence, &event[1], event->length * 4);
        }
        _XEnq(dpy, (xEvent *) response);
        break;
    }
    free(response);
}
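/* After the memmove above, a GenericEvent is laid out as the 32-byte wire
 * event immediately followed by its length * 4 bytes of extra data: the
 * 4-byte full_sequence field that XCB inserts for its own numbering has
 * been overwritten, which restores the layout the Xlib wire-to-event path
 * expects. */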
int _XEventsQueued(Display *dpy, int mode)
{
    xcb_generic_reply_t *response;
    if(dpy->flags & XlibDisplayIOError)
        return 0;
    if(dpy->xcb->event_owner != XlibOwnsEventQueue)
        return 0;

    if(mode == QueuedAfterFlush)
        _XSend(dpy, NULL, 0);
    else if (!check_internal_connections(dpy))
        return 0;

    /* If another thread is blocked waiting for events, then we must
     * let that thread pick up the next event. Since it blocked, we
     * can reasonably claim there are no new events right now. */
    if(!dpy->xcb->event_waiter)
    {
        while((response = poll_for_response(dpy)))
            handle_response(dpy, response, False);
        if(xcb_connection_has_error(dpy->xcb->connection)) {
            _XIOError(dpy);
            return 0;
        }
    }
    return dpy->qlen;
}

/* _XReadEvents - Flush the output queue,
 * then read as many events as possible (but at least 1) and enqueue them
 */
void _XReadEvents(Display *dpy)
{
    xcb_generic_reply_t *response;
    unsigned long serial;

    if(dpy->flags & XlibDisplayIOError)
        return;
    _XSend(dpy, NULL, 0);
    if(dpy->xcb->event_owner != XlibOwnsEventQueue)
        return;
    if (!check_internal_connections(dpy))
        return;

    serial = dpy->next_event_serial_num;
    while(serial == dpy->next_event_serial_num || dpy->qlen == 0)
    {
        if(dpy->xcb->event_waiter)
        {
            ConditionWait(dpy, dpy->xcb->event_notify);
            /* Maybe the other thread got us an event. */
            continue;
        }

        if(!dpy->xcb->next_event)
        {
            xcb_generic_event_t *event;
            dpy->xcb->event_waiter = 1;
            UnlockDisplay(dpy);
            event = xcb_wait_for_event(dpy->xcb->connection);
            /* It appears that classic Xlib respected user
             * locks when waking up after waiting for
             * events. However, if this thread did not have
             * any user locks, and another thread takes a
             * user lock and tries to read events, then we'd
             * deadlock. So we'll choose to let the thread
             * that got in first consume events, despite the
             * later thread's user locks. */
            InternalLockDisplay(dpy, /* ignore user locks */ 1);
            dpy->xcb->event_waiter = 0;
            ConditionBroadcast(dpy, dpy->xcb->event_notify);
            if(!event)
            {
                _XIOError(dpy);
                return;
            }
            dpy->xcb->next_event = event;
        }

        /* We've established most of the conditions for
         * poll_for_response to return non-NULL. The exceptions
         * are connection shutdown, and finding that another
         * thread is waiting for the next reply we'd like to
         * process. */

        response = poll_for_response(dpy);
        if(response)
            handle_response(dpy, response, False);
        else if(dpy->xcb->pending_requests->reply_waiter)
        { /* need braces around ConditionWait */
            ConditionWait(dpy, dpy->xcb->reply_notify);
        }
        else
        {
            _XIOError(dpy);
            return;
        }
    }

    /* The preceding loop established that there is no
     * event_waiter--unless we just called ConditionWait because of
     * a reply_waiter, in which case another thread may have become
     * the event_waiter while we slept unlocked. */
    if(!dpy->xcb->event_waiter)
        while((response = poll_for_response(dpy)))
            handle_response(dpy, response, False);
    if(xcb_connection_has_error(dpy->xcb->connection))
        _XIOError(dpy);
}
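/* The event_waiter handshake used above: only one thread may block inside
 * xcb_wait_for_event() at a time.  It drops the Display lock while it
 * blocks, re-takes the lock ignoring user locks (see the comment in the
 * loop), clears event_waiter, and broadcasts event_notify so that any
 * threads parked in ConditionWait() re-check the queue. */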
/*
 * _XSend - Flush the buffer and send the client data. 32-bit word aligned
 * transmission is used; if size is not 0 mod 4, extra bytes are transmitted.
 *
 * Note that the connection must not be read from once the data currently
 * in the buffer has been written.
 */
void _XSend(Display *dpy, const char *data, long size)
{
    static const xReq dummy_request;
    static char const pad[3];
    struct iovec vec[3];
    uint64_t requests;
    uint64_t dpy_request;
    _XExtension *ext;
    xcb_connection_t *c = dpy->xcb->connection;
    if(dpy->flags & XlibDisplayIOError)
        return;

    if(dpy->bufptr == dpy->buffer && !size)
        return;

    /* append_pending_request does not alter the dpy request number,
     * therefore we can get it outside of the loop and the if.
     */
    dpy_request = X_DPY_GET_REQUEST(dpy);
    /* iff we asked XCB to set aside errors, we must pick those up
     * eventually. iff there are async handlers, we may have just
     * issued requests that will generate replies. in either case,
     * we need to remember to check later. */
    if(dpy->xcb->event_owner != XlibOwnsEventQueue || dpy->async_handlers)
    {
        uint64_t sequence;
        for(sequence = dpy->xcb->last_flushed + 1; sequence <= dpy_request; ++sequence)
            append_pending_request(dpy, sequence);
    }
    requests = dpy_request - dpy->xcb->last_flushed;
    dpy->xcb->last_flushed = dpy_request;

    vec[0].iov_base = dpy->buffer;
    vec[0].iov_len = dpy->bufptr - dpy->buffer;
    vec[1].iov_base = (char *)data;
    vec[1].iov_len = size;
    vec[2].iov_base = (char *)pad;
    vec[2].iov_len = -size & 3;

    for(ext = dpy->flushes; ext; ext = ext->next_flush)
    {
        int i;
        for(i = 0; i < 3; ++i)
            if(vec[i].iov_len)
                ext->before_flush(dpy, &ext->codes, vec[i].iov_base, vec[i].iov_len);
    }

    if(xcb_writev(c, vec, 3, requests) < 0) {
        _XIOError(dpy);
        return;
    }
    dpy->bufptr = dpy->buffer;
    dpy->last_req = (char *) &dummy_request;

    if (!check_internal_connections(dpy))
        return;

    _XSetSeqSyncFunction(dpy);
}

/*
 * _XFlush - Flush the X request buffer. If the buffer is empty, no
 * action is taken.
 */
void _XFlush(Display *dpy)
{
    if (!require_socket(dpy))
        return;

    _XSend(dpy, NULL, 0);

    _XEventsQueued(dpy, QueuedAfterReading);
}

static const XID inval_id = ~0UL;

void _XIDHandler(Display *dpy)
{
    if (dpy->xcb->next_xid == inval_id)
        _XAllocIDs(dpy, &dpy->xcb->next_xid, 1);
}

/* _XAllocID - resource ID allocation routine. */
XID _XAllocID(Display *dpy)
{
    XID ret = dpy->xcb->next_xid;
    assert (ret != inval_id);
    dpy->xcb->next_xid = inval_id;
    _XSetPrivSyncFunction(dpy);
    return ret;
}

/* _XAllocIDs - multiple resource ID allocation routine. */
void _XAllocIDs(Display *dpy, XID *ids, int count)
{
    int i;
#ifdef XTHREADS
    if (dpy->lock)
        (*dpy->lock->user_lock_display)(dpy);
    UnlockDisplay(dpy);
#endif
    for (i = 0; i < count; i++)
        ids[i] = xcb_generate_id(dpy->xcb->connection);
#ifdef XTHREADS
    InternalLockDisplay(dpy, /* don't skip user locks */ 0);
    if (dpy->lock)
        (*dpy->lock->user_unlock_display)(dpy);
#endif
}

static void _XFreeReplyData(Display *dpy, Bool force)
{
    if(!force && dpy->xcb->reply_consumed < dpy->xcb->reply_length)
        return;
    free(dpy->xcb->reply_data);
    dpy->xcb->reply_data = NULL;
}

/*
 * _XReply - Wait for a reply packet and copy its contents into the
 * specified rep.
 * extra: number of 32-bit words expected after the reply
 * discard: should I discard data following "extra" words?
 */
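/* Typical caller pattern, sketched here for orientation (this is what
 * request functions such as XSync() effectively do; it is not code from
 * this file):
 *
 *     xGetInputFocusReply rep;
 *     xReq *req;
 *
 *     LockDisplay(dpy);
 *     GetEmptyReq(GetInputFocus, req);
 *     (void) _XReply(dpy, (xReply *)&rep, 0, xTrue);
 *     UnlockDisplay(dpy);
 *     SyncHandle();
 *
 * 'extra' counts the 32-bit words the caller expects beyond the fixed
 * 32-byte reply header; with discard == xTrue anything left over is
 * thrown away instead of being left for _XRead()/_XReadPad(). */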
Status _XReply(Display *dpy, xReply *rep, int extra, Bool discard)
{
    xcb_generic_error_t *error;
    xcb_connection_t *c = dpy->xcb->connection;
    char *reply;
    PendingRequest *current;
    uint64_t dpy_request;

    if (dpy->xcb->reply_data)
        throw_extlib_fail_assert("Extra reply data still left in queue",
                                 xcb_xlib_extra_reply_data_left);

    if(dpy->flags & XlibDisplayIOError)
        return 0;

    _XSend(dpy, NULL, 0);
    dpy_request = X_DPY_GET_REQUEST(dpy);
    if(dpy->xcb->pending_requests_tail
       && dpy->xcb->pending_requests_tail->sequence == dpy_request)
        current = dpy->xcb->pending_requests_tail;
    else
        current = append_pending_request(dpy, dpy_request);
    /* Don't let any other thread get this reply. */
    current->reply_waiter = 1;

    while(1)
    {
        PendingRequest *req = dpy->xcb->pending_requests;
        xcb_generic_reply_t *response;

        if(req != current && req->reply_waiter)
        {
            ConditionWait(dpy, dpy->xcb->reply_notify);
            /* Another thread got this reply. */
            continue;
        }
        req->reply_waiter = 1;
        UnlockDisplay(dpy);
        response = xcb_wait_for_reply64(c, req->sequence, &error);
        /* Any user locks on another thread must have been taken
         * while we slept in xcb_wait_for_reply64. Classic Xlib
         * ignored those user locks in this case, so we do too. */
        InternalLockDisplay(dpy, /* ignore user locks */ 1);

        /* We have the response we're looking for. Now, before
         * letting anyone else process this sequence number, we
         * need to process any events that should have come
         * earlier. */

        if(dpy->xcb->event_owner == XlibOwnsEventQueue)
        {
            xcb_generic_reply_t *event;
            /* If some thread is already waiting for events,
             * it will get the first one. That thread must
             * process that event before we can continue. */
            /* FIXME: That event might be after this reply,
             * and might never even come--or there might be
             * multiple threads trying to get events. */
            while(dpy->xcb->event_waiter)
            { /* need braces around ConditionWait */
                ConditionWait(dpy, dpy->xcb->event_notify);
            }
            while((event = poll_for_event(dpy, True)))
                handle_response(dpy, event, True);
        }

        req->reply_waiter = 0;
        ConditionBroadcast(dpy, dpy->xcb->reply_notify);
        dpy_request = X_DPY_GET_REQUEST(dpy);
        if(XLIB_SEQUENCE_COMPARE(req->sequence, >, dpy_request)) {
            throw_thread_fail_assert("Unknown sequence number "
                                     "while processing reply",
                                     xcb_xlib_threads_sequence_lost);
        }
        X_DPY_SET_LAST_REQUEST_READ(dpy, req->sequence);
        if(!response)
            dequeue_pending_request(dpy, req);

        if(req == current)
        {
            reply = (char *) response;
            break;
        }

        if(error)
            handle_response(dpy, (xcb_generic_reply_t *) error, True);
        else if(response)
            handle_response(dpy, response, True);
    }
    if (!check_internal_connections(dpy))
        return 0;
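    /* An error for this request may have been picked up by the event
     * machinery and parked in next_event rather than delivered through
     * xcb_wait_for_reply64.  If the queued "event" is really an X_Error
     * whose widened sequence number matches the request we just finished,
     * claim it as this request's error. */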
    if(dpy->xcb->next_event && dpy->xcb->next_event->response_type == X_Error)
    {
        xcb_generic_event_t *event = dpy->xcb->next_event;
        uint64_t last_request_read = X_DPY_GET_LAST_REQUEST_READ(dpy);
        uint64_t event_sequence = last_request_read;
        widen(&event_sequence, event->full_sequence);
        if(event_sequence == last_request_read)
        {
            error = (xcb_generic_error_t *) event;
            dpy->xcb->next_event = NULL;
        }
    }

    if(error)
    {
        int ret_code;

        /* Xlib is evil and assumes that even errors will be
         * copied into rep. */
        memcpy(rep, error, 32);

        /* do not die on "no such font", "can't allocate",
           "can't grab" failures */
        switch(error->error_code)
        {
        case BadName:
            switch(error->major_code)
            {
            case X_LookupColor:
            case X_AllocNamedColor:
                free(error);
                return 0;
            }
            break;
        case BadFont:
            if(error->major_code == X_QueryFont) {
                free(error);
                return 0;
            }
            break;
        case BadAlloc:
        case BadAccess:
            free(error);
            return 0;
        }

        ret_code = handle_error(dpy, (xError *) error, True);
        free(error);
        return ret_code;
    }

    /* it's not an error, but we don't have a reply, so it's an I/O
     * error. */
    if(!reply) {
        _XIOError(dpy);
        return 0;
    }

    /* there's no error and we have a reply. */
    dpy->xcb->reply_data = reply;
    dpy->xcb->reply_consumed = sizeof(xReply) + (extra * 4);
    dpy->xcb->reply_length = sizeof(xReply);
    if(dpy->xcb->reply_data[0] == 1)
        dpy->xcb->reply_length += (((xcb_generic_reply_t *) dpy->xcb->reply_data)->length * 4);

    /* error: Xlib asks too much. give them what we can anyway. */
    if(dpy->xcb->reply_length < dpy->xcb->reply_consumed)
        dpy->xcb->reply_consumed = dpy->xcb->reply_length;

    memcpy(rep, dpy->xcb->reply_data, dpy->xcb->reply_consumed);
    _XFreeReplyData(dpy, discard);
    return 1;
}

int _XRead(Display *dpy, char *data, long size)
{
    assert(size >= 0);
    if(size == 0)
        return 0;
    if(dpy->xcb->reply_data == NULL ||
       dpy->xcb->reply_consumed + size > dpy->xcb->reply_length)
        throw_extlib_fail_assert("Too much data requested from _XRead",
                                 xcb_xlib_too_much_data_requested);
    memcpy(data, dpy->xcb->reply_data + dpy->xcb->reply_consumed, size);
    dpy->xcb->reply_consumed += size;
    _XFreeReplyData(dpy, False);
    return 0;
}

/*
 * _XReadPad - Read bytes from the socket taking into account incomplete
 * reads. If the number of bytes is not 0 mod 4, read additional pad
 * bytes.
 */
void _XReadPad(Display *dpy, char *data, long size)
{
    _XRead(dpy, data, size);
    dpy->xcb->reply_consumed += -size & 3;
    _XFreeReplyData(dpy, False);
}

/* Read and discard "n" 8-bit bytes of data */
void _XEatData(Display *dpy, unsigned long n)
{
    dpy->xcb->reply_consumed += n;
    _XFreeReplyData(dpy, False);
}

/*
 * Read and discard "n" 32-bit words of data
 * Matches the units of the length field in X protocol replies, and provides
 * a single implementation of overflow checking to avoid having to replicate
 * those checks in every caller.
 */
void _XEatDataWords(Display *dpy, unsigned long n)
{
    if (n < ((INT_MAX - dpy->xcb->reply_consumed) >> 2))
        dpy->xcb->reply_consumed += (n << 2);
    else
        /* Overflow would happen, so just eat the rest of the reply */
        dpy->xcb->reply_consumed = dpy->xcb->reply_length;
    _XFreeReplyData(dpy, False);
}

unsigned long
_XNextRequest(Display *dpy)
{
    /* This will update dpy->request. The assumption is that the next thing
     * that the application will do is make a request so there's little
     * overhead.
     */
    require_socket(dpy);
    return NextRequest(dpy);
}