/* Copyright (C) 2003-2006 Jamey Sharp, Josh Triplett
 * This file is licensed under the MIT license. See the file COPYING. */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "Xlibint.h"
#include "locking.h"
#include "Xprivate.h"
#include "Xxcbint.h"
#include <xcb/xcbext.h>

#include <assert.h>
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

#define xcb_fail_assert(_message, _var) do { \
    unsigned int _var = 1; \
    fprintf(stderr, "[xcb] Aborting, sorry about that.\n"); \
    assert(!_var); \
} while (0)

#define throw_thread_fail_assert(_message, _var) do { \
    fprintf(stderr, "[xcb] " _message "\n"); \
    if (_Xglobal_lock) { \
        fprintf(stderr, "[xcb] You called XInitThreads, this is not your fault\n"); \
    } else { \
        fprintf(stderr, "[xcb] Most likely this is a multi-threaded client " \
                        "and XInitThreads has not been called\n"); \
    } \
    xcb_fail_assert(_message, _var); \
} while (0)

/* XXX: It would probably be most useful if we stored the last-processed
 * request, so we could find the offender from the message. */
#define throw_extlib_fail_assert(_message, _var) do { \
    fprintf(stderr, "[xcb] " _message "\n"); \
    fprintf(stderr, "[xcb] This is most likely caused by a broken X " \
                    "extension library\n"); \
    xcb_fail_assert(_message, _var); \
} while (0)

static void return_socket(void *closure)
{
    Display *dpy = closure;
    InternalLockDisplay(dpy, /* don't skip user locks */ 0);
    _XSend(dpy, NULL, 0);
    dpy->bufmax = dpy->buffer;
    UnlockDisplay(dpy);
}

static Bool require_socket(Display *dpy)
{
    if(dpy->bufmax == dpy->buffer)
    {
        uint64_t sent;
        int flags = 0;
        /* if we don't own the event queue, we have to ask XCB
         * to set our errors aside for us. */
        if(dpy->xcb->event_owner != XlibOwnsEventQueue)
            flags = XCB_REQUEST_CHECKED;
        if(!xcb_take_socket(dpy->xcb->connection, return_socket, dpy,
                            flags, &sent)) {
            _XIOError(dpy);
            return False;
        }
        dpy->xcb->last_flushed = sent;
        X_DPY_SET_REQUEST(dpy, sent);
        dpy->bufmax = dpy->xcb->real_bufmax;
    }
    return True;
}
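
/* Roughly, the socket hand-off works like this: require_socket() asks XCB
 * for exclusive write access to the connection via xcb_take_socket(),
 * registering return_socket() as the callback XCB invokes whenever some
 * other XCB user needs the socket back.  "sent" comes back as the last
 * sequence number XCB itself has written, so Xlib can continue numbering
 * requests from there.  return_socket() flushes whatever Xlib has buffered
 * and resets bufmax to buffer, which is exactly the marker require_socket()
 * checks to know that the socket must be re-taken before the next request. */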
/* Call internal connection callbacks for any fds that are currently
 * ready to read. This function will not block unless one of the
 * callbacks blocks.
 *
 * This code borrowed from _XWaitForReadable. Inverse call tree:
 * _XRead
 *  _XWaitForWritable
 *   _XFlush
 *   _XSend
 *  _XEventsQueued
 *  _XReadEvents
 *  _XRead[0-9]+
 *   _XAllocIDs
 *  _XReply
 *  _XEatData
 *  _XReadPad
 */
static Bool check_internal_connections(Display *dpy)
{
    struct _XConnectionInfo *ilist;
    fd_set r_mask;
    struct timeval tv;
    int result;
    int highest_fd = -1;

    if(dpy->flags & XlibDisplayProcConni || !dpy->im_fd_info)
        return True;

    FD_ZERO(&r_mask);
    for(ilist = dpy->im_fd_info; ilist; ilist = ilist->next)
    {
        assert(ilist->fd >= 0);
        FD_SET(ilist->fd, &r_mask);
        if(ilist->fd > highest_fd)
            highest_fd = ilist->fd;
    }
    assert(highest_fd >= 0);

    tv.tv_sec = 0;
    tv.tv_usec = 0;
    result = select(highest_fd + 1, &r_mask, NULL, NULL, &tv);

    if(result == -1)
    {
        if(errno != EINTR) {
            _XIOError(dpy);
            return False;
        }

        return True;
    }

    for(ilist = dpy->im_fd_info; result && ilist; ilist = ilist->next)
        if(FD_ISSET(ilist->fd, &r_mask))
        {
            _XProcessInternalConnection(dpy, ilist);
            --result;
        }

    return True;
}

static PendingRequest *append_pending_request(Display *dpy, uint64_t sequence)
{
    PendingRequest *node = malloc(sizeof(PendingRequest));
    assert(node);
    node->next = NULL;
    node->sequence = sequence;
    node->reply_waiter = 0;
    if(dpy->xcb->pending_requests_tail)
    {
        if (XLIB_SEQUENCE_COMPARE(dpy->xcb->pending_requests_tail->sequence,
                                  >=, node->sequence))
            throw_thread_fail_assert("Unknown sequence number "
                                     "while appending request",
                                     xcb_xlib_unknown_seq_number);
        if (dpy->xcb->pending_requests_tail->next != NULL)
            throw_thread_fail_assert("Unknown request in queue "
                                     "while appending request",
                                     xcb_xlib_unknown_req_pending);
        dpy->xcb->pending_requests_tail->next = node;
    }
    else
        dpy->xcb->pending_requests = node;
    dpy->xcb->pending_requests_tail = node;
    return node;
}
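
/* A note on the queue shape assumed here: pending_requests is a singly
 * linked FIFO kept in strictly increasing sequence order, with
 * pending_requests_tail caching the last node so appends are O(1).  The
 * sequence and tail checks above and in dequeue_pending_request() below
 * exist to catch a queue corrupted by unlocked concurrent access, which
 * usually means XInitThreads() was never called. */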
static void dequeue_pending_request(Display *dpy, PendingRequest *req)
{
    if (req != dpy->xcb->pending_requests)
        throw_thread_fail_assert("Unknown request in queue while "
                                 "dequeuing",
                                 xcb_xlib_unknown_req_in_deq);

    dpy->xcb->pending_requests = req->next;
    if(!dpy->xcb->pending_requests)
    {
        if (req != dpy->xcb->pending_requests_tail)
            throw_thread_fail_assert("Unknown request in queue "
                                     "while dequeuing",
                                     xcb_xlib_unknown_req_in_deq);
        dpy->xcb->pending_requests_tail = NULL;
    }
    else if (XLIB_SEQUENCE_COMPARE(req->sequence, >=,
                                   dpy->xcb->pending_requests->sequence))
        throw_thread_fail_assert("Unknown sequence number while "
                                 "dequeuing request",
                                 xcb_xlib_threads_sequence_lost);

    free(req);
}

static int handle_error(Display *dpy, xError *err, Bool in_XReply)
{
    _XExtension *ext;
    int ret_code;
    /* Oddly, Xlib only allows extensions to suppress errors when
     * those errors were seen by _XReply. */
    if(in_XReply)
        /*
         * we better see if there is an extension who may
         * want to suppress the error.
         */
        for(ext = dpy->ext_procs; ext; ext = ext->next)
            if(ext->error && (*ext->error)(dpy, err, &ext->codes, &ret_code))
                return ret_code;
    _XError(dpy, err);
    return 0;
}

/* Widen a 32-bit sequence number into a 64-bit (uint64_t) sequence number.
 * Treating the comparison as a 1 and shifting it avoids a conditional branch.
 */
static void widen(uint64_t *wide, unsigned int narrow)
{
    uint64_t new = (*wide & ~((uint64_t)0xFFFFFFFFUL)) | narrow;
    /* If just copying the upper dword of *wide makes the number
     * go down by more than 2^31, then it means that the lower
     * dword has wrapped (or we have skipped 2^31 requests, which
     * is hopefully improbable), so we add a carry. */
    uint64_t wraps = new + (1UL << 31) < *wide;
    *wide = new + (wraps << 32);
}
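
/* A worked example of widen(), with arbitrarily chosen values: if *wide is
 * 0x00000001FFFFFFF0 and the wire gives narrow = 0x00000005, then
 * new = 0x0000000100000005.  Since new + 2^31 (0x0000000180000005) is still
 * below *wide, the low 32 bits must have wrapped, so wraps = 1 and *wide
 * becomes 0x0000000200000005. */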
/* Thread-safety rules:
 *
 * At most one thread can be reading from XCB's event queue at a time.
 * If you are not the current event-reading thread and you need to find
 * out if an event is available, you must wait.
 *
 * The same rule applies for reading replies.
 *
 * A single thread cannot be both the event-reading and the
 * reply-reading thread at the same time.
 *
 * We always look at both the current event and the first pending reply
 * to decide which to process next.
 *
 * We always process all responses in sequence-number order, which may
 * mean waiting for another thread (either the event_waiter or the
 * reply_waiter) to handle an earlier response before we can process or
 * return a later one. If so, we wait on the corresponding condition
 * variable for that thread to process the response and wake us up.
 */
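
/* As a concrete (simplified) illustration of the rules above: a thread
 * entering _XReadEvents() while another thread is already blocked as the
 * event_waiter sleeps on event_notify until that thread has queued the
 * event; if instead the next response in sequence order is a reply owned
 * by a thread sitting in _XReply() (the head pending request's
 * reply_waiter), it sleeps on reply_notify until that reply has been
 * handled. */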
static xcb_generic_reply_t *poll_for_event(Display *dpy, Bool queued_only)
{
    /* Make sure the Display's sequence numbers are valid */
    if (!require_socket(dpy))
        return NULL;

    /* Precondition: This thread can safely get events from XCB. */
    assert(dpy->xcb->event_owner == XlibOwnsEventQueue && !dpy->xcb->event_waiter);

    if(!dpy->xcb->next_event) {
        if(queued_only)
            dpy->xcb->next_event = xcb_poll_for_queued_event(dpy->xcb->connection);
        else
            dpy->xcb->next_event = xcb_poll_for_event(dpy->xcb->connection);
    }

    if(dpy->xcb->next_event)
    {
        PendingRequest *req = dpy->xcb->pending_requests;
        xcb_generic_event_t *event = dpy->xcb->next_event;
        uint64_t event_sequence = X_DPY_GET_LAST_REQUEST_READ(dpy);
        widen(&event_sequence, event->full_sequence);
        if(!req || XLIB_SEQUENCE_COMPARE(event_sequence, <, req->sequence)
                || (event->response_type != X_Error && event_sequence == req->sequence))
        {
            uint64_t request = X_DPY_GET_REQUEST(dpy);
            if (XLIB_SEQUENCE_COMPARE(event_sequence, >, request))
            {
                throw_thread_fail_assert("Unknown sequence "
                                         "number while "
                                         "processing queue",
                                         xcb_xlib_threads_sequence_lost);
            }
            X_DPY_SET_LAST_REQUEST_READ(dpy, event_sequence);
            dpy->xcb->next_event = NULL;
            return (xcb_generic_reply_t *) event;
        }
    }
    return NULL;
}

static xcb_generic_reply_t *poll_for_response(Display *dpy)
{
    void *response;
    xcb_generic_reply_t *event;
    PendingRequest *req;

    while(1)
    {
        xcb_generic_error_t *error = NULL;
        uint64_t request;
        Bool poll_queued_only = dpy->xcb->next_response != NULL;

        /* Step 1: is there an event in our queue before the next
         * reply/error? Return that first.
         *
         * If we don't have a reply/error saved from an earlier
         * invocation we check incoming events too, otherwise only
         * the ones already queued.
         */
        response = poll_for_event(dpy, poll_queued_only);
        if(response)
            break;

        /* Step 2:
         * Response is NULL, i.e. we have no events.
         * If we are not waiting for a reply or some other thread
         * had dibs on the next reply, exit.
         */
        req = dpy->xcb->pending_requests;
        if(!req || req->reply_waiter)
            break;

        /* Step 3:
         * We have some response (error or reply) related to req
         * saved from an earlier invocation of this function. Let's
         * use that one.
         */
        if(dpy->xcb->next_response)
        {
            if (((xcb_generic_reply_t*)dpy->xcb->next_response)->response_type == X_Error)
            {
                error = dpy->xcb->next_response;
                response = NULL;
            }
            else
            {
                response = dpy->xcb->next_response;
                error = NULL;
            }
            dpy->xcb->next_response = NULL;
        }
        else
        {
            /* Step 4: pull down the next response from the wire. This
             * should be the 99% case.
             * xcb_poll_for_reply64() may also pull down events that
             * happened before the reply.
             */
            if(!xcb_poll_for_reply64(dpy->xcb->connection, req->sequence,
                                     &response, &error)) {
                /* if there is no reply/error, xcb_poll_for_reply64
                 * may have read events. Return that. */
                response = poll_for_event(dpy, True);
                break;
            }

            /* Step 5: we have a new response, but we may also have some
             * events that happened before that response. Return those
             * first and save our reply/error for the next invocation.
             */
            event = poll_for_event(dpy, True);
            if(event)
            {
                dpy->xcb->next_response = error ? error : response;
                response = event;
                break;
            }
        }

        /* Step 6: actually handle the reply/error now... */
        request = X_DPY_GET_REQUEST(dpy);
        if(XLIB_SEQUENCE_COMPARE(req->sequence, >, request))
        {
            throw_thread_fail_assert("Unknown sequence number "
                                     "while awaiting reply",
                                     xcb_xlib_threads_sequence_lost);
        }
        X_DPY_SET_LAST_REQUEST_READ(dpy, req->sequence);
        if(response)
            break;
        dequeue_pending_request(dpy, req);
        if(error)
            return (xcb_generic_reply_t *) error;
    }
    return response;
}

static void handle_response(Display *dpy, xcb_generic_reply_t *response, Bool in_XReply)
{
    _XAsyncHandler *async, *next;
    switch(response->response_type)
    {
    case X_Reply:
        for(async = dpy->async_handlers; async; async = next)
        {
            next = async->next;
            if(async->handler(dpy, (xReply *) response, (char *) response, sizeof(xReply) + (response->length << 2), async->data))
                break;
        }
        break;

    case X_Error:
        handle_error(dpy, (xError *) response, in_XReply);
        break;

    default: /* event */
        /* GenericEvents may be > 32 bytes. In this case, the
         * event struct is trailed by the additional bytes. The
         * xcb_generic_event_t struct uses 4 bytes for internal
         * numbering, so we need to shift the trailing data to
         * be after the first 32 bytes.
         */
        if(response->response_type == GenericEvent && ((xcb_ge_event_t *) response)->length)
        {
            xcb_ge_event_t *event = (xcb_ge_event_t *) response;
            memmove(&event->full_sequence, &event[1], event->length * 4);
        }
        _XEnq(dpy, (xEvent *) response);
        break;
    }
    free(response);
}
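
/* Layout note for the GenericEvent memmove in handle_response() above: on
 * the wire an XGE event is 32 bytes plus "length" additional 4-byte units,
 * but xcb_ge_event_t inserts its synthesized full_sequence field at byte
 * offset 32, pushing the extra data to offset 36 (&event[1]).  Xlib's event
 * handlers expect the wire layout, so the trailing length * 4 bytes are
 * moved down over full_sequence before the event is queued with _XEnq(). */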
int _XEventsQueued(Display *dpy, int mode)
{
    xcb_generic_reply_t *response;
    if(dpy->flags & XlibDisplayIOError)
        return 0;
    if(dpy->xcb->event_owner != XlibOwnsEventQueue)
        return 0;

    if(mode == QueuedAfterFlush)
        _XSend(dpy, NULL, 0);
    else if (!check_internal_connections(dpy))
        return 0;

    /* If another thread is blocked waiting for events, then we must
     * let that thread pick up the next event. Since it blocked, we
     * can reasonably claim there are no new events right now. */
    if(!dpy->xcb->event_waiter)
    {
        while((response = poll_for_response(dpy)))
            handle_response(dpy, response, False);
        if(xcb_connection_has_error(dpy->xcb->connection)) {
            _XIOError(dpy);
            return 0;
        }
    }
    return dpy->qlen;
}
/* _XReadEvents - Flush the output queue,
 * then read as many events as possible (but at least 1) and enqueue them
 */
void _XReadEvents(Display *dpy)
{
    xcb_generic_reply_t *response;
    unsigned long serial;

    if(dpy->flags & XlibDisplayIOError)
        return;
    _XSend(dpy, NULL, 0);
    if(dpy->xcb->event_owner != XlibOwnsEventQueue)
        return;
    if (!check_internal_connections(dpy))
        return;

    serial = dpy->next_event_serial_num;
    while(serial == dpy->next_event_serial_num || dpy->qlen == 0)
    {
        if(dpy->xcb->event_waiter)
        {
            ConditionWait(dpy, dpy->xcb->event_notify);
            /* Maybe the other thread got us an event. */
            continue;
        }

        if(!dpy->xcb->next_event)
        {
            xcb_generic_event_t *event;
            dpy->xcb->event_waiter = 1;
            UnlockDisplay(dpy);
            event = xcb_wait_for_event(dpy->xcb->connection);
            /* It appears that classic Xlib respected user
             * locks when waking up after waiting for
             * events. However, if this thread did not have
             * any user locks, and another thread takes a
             * user lock and tries to read events, then we'd
             * deadlock. So we'll choose to let the thread
             * that got in first consume events, despite the
             * later thread's user locks.
             */
            InternalLockDisplay(dpy, /* ignore user locks */ 1);
            dpy->xcb->event_waiter = 0;
            ConditionBroadcast(dpy, dpy->xcb->event_notify);
            if(!event)
            {
                _XIOError(dpy);
                return;
            }
            dpy->xcb->next_event = event;
        }

        /* We've established most of the conditions for
         * poll_for_response to return non-NULL. The exceptions
         * are connection shutdown, and finding that another
         * thread is waiting for the next reply we'd like to
         * process. */

        response = poll_for_response(dpy);
        if(response)
            handle_response(dpy, response, False);
        else if(dpy->xcb->pending_requests->reply_waiter)
        { /* need braces around ConditionWait */
            ConditionWait(dpy, dpy->xcb->reply_notify);
        }
        else
        {
            _XIOError(dpy);
            return;
        }
    }

    /* The preceding loop established that there is no
     * event_waiter--unless we just called ConditionWait because of
     * a reply_waiter, in which case another thread may have become
     * the event_waiter while we slept unlocked. */
    if(!dpy->xcb->event_waiter)
        while((response = poll_for_response(dpy)))
            handle_response(dpy, response, False);
    if(xcb_connection_has_error(dpy->xcb->connection))
        _XIOError(dpy);
}
/*
 * _XSend - Flush the buffer and send the client data. 32-bit word aligned
 * transmission is used; if size is not 0 mod 4, extra pad bytes are
 * transmitted.
 *
 * Note that the connection must not be read from once the data currently
 * in the buffer has been written.
 */
void _XSend(Display *dpy, const char *data, long size)
{
    static const xReq dummy_request;
    static char const pad[3];
    struct iovec vec[3];
    uint64_t requests;
    uint64_t dpy_request;
    _XExtension *ext;
    xcb_connection_t *c = dpy->xcb->connection;
    if(dpy->flags & XlibDisplayIOError)
        return;

    if(dpy->bufptr == dpy->buffer && !size)
        return;

    /* append_pending_request does not alter the dpy request number,
     * therefore we can get it outside of the loop and the if
     */
    dpy_request = X_DPY_GET_REQUEST(dpy);
    /* iff we asked XCB to set aside errors, we must pick those up
     * eventually. iff there are async handlers, we may have just
     * issued requests that will generate replies. in either case,
     * we need to remember to check later.
     */
    if(dpy->xcb->event_owner != XlibOwnsEventQueue || dpy->async_handlers)
    {
        uint64_t sequence;
        for(sequence = dpy->xcb->last_flushed + 1; sequence <= dpy_request; ++sequence)
            append_pending_request(dpy, sequence);
    }
    requests = dpy_request - dpy->xcb->last_flushed;
    dpy->xcb->last_flushed = dpy_request;

    vec[0].iov_base = dpy->buffer;
    vec[0].iov_len = dpy->bufptr - dpy->buffer;
    vec[1].iov_base = (char *)data;
    vec[1].iov_len = size;
    vec[2].iov_base = (char *)pad;
    vec[2].iov_len = -size & 3;

    for(ext = dpy->flushes; ext; ext = ext->next_flush)
    {
        int i;
        for(i = 0; i < 3; ++i)
            if(vec[i].iov_len)
                ext->before_flush(dpy, &ext->codes, vec[i].iov_base, vec[i].iov_len);
    }

    if(xcb_writev(c, vec, 3, requests) < 0) {
        _XIOError(dpy);
        return;
    }
    dpy->bufptr = dpy->buffer;
    dpy->last_req = (char *) &dummy_request;

    if (!check_internal_connections(dpy))
        return;

    _XSetSeqSyncFunction(dpy);
}
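
/* The pad computation in _XSend() relies on X requests being padded to a
 * multiple of four bytes: -size & 3 is the number of pad bytes needed,
 * e.g. size = 5 gives 3, size = 6 gives 2, and any multiple of 4 gives 0. */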
/*
 * _XFlush - Flush the X request buffer. If the buffer is empty, no
 * action is taken.
 */
void _XFlush(Display *dpy)
{
    if (!require_socket(dpy))
        return;

    _XSend(dpy, NULL, 0);

    _XEventsQueued(dpy, QueuedAfterReading);
}

static const XID inval_id = ~0UL;

void _XIDHandler(Display *dpy)
{
    if (dpy->xcb->next_xid == inval_id)
        _XAllocIDs(dpy, &dpy->xcb->next_xid, 1);
}

/* _XAllocID - resource ID allocation routine. */
XID _XAllocID(Display *dpy)
{
    XID ret = dpy->xcb->next_xid;
    assert (ret != inval_id);
    dpy->xcb->next_xid = inval_id;
    _XSetPrivSyncFunction(dpy);
    return ret;
}

/* _XAllocIDs - multiple resource ID allocation routine. */
void _XAllocIDs(Display *dpy, XID *ids, int count)
{
    int i;
#ifdef XTHREADS
    if (dpy->lock)
        (*dpy->lock->user_lock_display)(dpy);
    UnlockDisplay(dpy);
#endif
    for (i = 0; i < count; i++)
        ids[i] = xcb_generate_id(dpy->xcb->connection);
#ifdef XTHREADS
    InternalLockDisplay(dpy, /* don't skip user locks */ 0);
    if (dpy->lock)
        (*dpy->lock->user_unlock_display)(dpy);
#endif
}

static void _XFreeReplyData(Display *dpy, Bool force)
{
    if(!force && dpy->xcb->reply_consumed < dpy->xcb->reply_length)
        return;
    free(dpy->xcb->reply_data);
    dpy->xcb->reply_data = NULL;
}
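
/* Bookkeeping used by _XReply() and the readers below: reply_data holds the
 * reply currently being handed out, reply_length is its total size in bytes,
 * and reply_consumed is how much the caller has taken so far.
 * _XFreeReplyData() drops the buffer once everything has been consumed, or
 * immediately when forced (the discard case in _XReply). */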
/*
 * _XReply - Wait for a reply packet and copy its contents into the
 * specified rep.
 * extra: number of 32-bit words expected after the reply
 * discard: should I discard data following "extra" words?
 */
Status _XReply(Display *dpy, xReply *rep, int extra, Bool discard)
{
    xcb_generic_error_t *error;
    xcb_connection_t *c = dpy->xcb->connection;
    char *reply;
    PendingRequest *current;
    uint64_t dpy_request;

    if (dpy->xcb->reply_data)
        throw_extlib_fail_assert("Extra reply data still left in queue",
                                 xcb_xlib_extra_reply_data_left);

    if(dpy->flags & XlibDisplayIOError)
        return 0;

    _XSend(dpy, NULL, 0);
    dpy_request = X_DPY_GET_REQUEST(dpy);
    if(dpy->xcb->pending_requests_tail
       && dpy->xcb->pending_requests_tail->sequence == dpy_request)
        current = dpy->xcb->pending_requests_tail;
    else
        current = append_pending_request(dpy, dpy_request);
    /* Don't let any other thread get this reply. */
    current->reply_waiter = 1;

    while(1)
    {
        PendingRequest *req = dpy->xcb->pending_requests;
        xcb_generic_reply_t *response;

        if(req != current && req->reply_waiter)
        {
            ConditionWait(dpy, dpy->xcb->reply_notify);
            /* Another thread got this reply. */
            continue;
        }
        req->reply_waiter = 1;
        UnlockDisplay(dpy);
        response = xcb_wait_for_reply64(c, req->sequence, &error);
        /* Any user locks on another thread must have been taken
         * while we slept in xcb_wait_for_reply64. Classic Xlib
         * ignored those user locks in this case, so we do too. */
        InternalLockDisplay(dpy, /* ignore user locks */ 1);

        /* We have the response we're looking for. Now, before
         * letting anyone else process this sequence number, we
         * need to process any events that should have come
         * earlier. */

        if(dpy->xcb->event_owner == XlibOwnsEventQueue)
        {
            xcb_generic_reply_t *event;

            /* Assume the event queue is empty if another thread is
             * blocked waiting for an event. */
            if(!dpy->xcb->event_waiter)
            {
                while((event = poll_for_response(dpy)))
                    handle_response(dpy, event, True);
            }
        }

        req->reply_waiter = 0;
        ConditionBroadcast(dpy, dpy->xcb->reply_notify);
        dpy_request = X_DPY_GET_REQUEST(dpy);
        if(XLIB_SEQUENCE_COMPARE(req->sequence, >, dpy_request)) {
            throw_thread_fail_assert("Unknown sequence number "
                                     "while processing reply",
                                     xcb_xlib_threads_sequence_lost);
        }
        X_DPY_SET_LAST_REQUEST_READ(dpy, req->sequence);
        if(!response)
            dequeue_pending_request(dpy, req);

        if(req == current)
        {
            reply = (char *) response;
            break;
        }

        if(error)
            handle_response(dpy, (xcb_generic_reply_t *) error, True);
        else if(response)
            handle_response(dpy, response, True);
    }
    if (!check_internal_connections(dpy))
        return 0;

    if(dpy->xcb->next_event && dpy->xcb->next_event->response_type == X_Error)
    {
        xcb_generic_event_t *event = dpy->xcb->next_event;
        uint64_t last_request_read = X_DPY_GET_LAST_REQUEST_READ(dpy);
        uint64_t event_sequence = last_request_read;
        widen(&event_sequence, event->full_sequence);
        if(event_sequence == last_request_read)
        {
            error = (xcb_generic_error_t *) event;
            dpy->xcb->next_event = NULL;
        }
    }

    if(error)
    {
        int ret_code;

        /* Xlib is evil and assumes that even errors will be
         * copied into rep. */
        memcpy(rep, error, 32);

        /* do not die on "no such font", "can't allocate",
           "can't grab" failures */
        switch(error->error_code)
        {
        case BadName:
            switch(error->major_code)
            {
            case X_LookupColor:
            case X_AllocNamedColor:
                free(error);
                return 0;
            }
            break;
        case BadFont:
            if(error->major_code == X_QueryFont) {
                free(error);
                return 0;
            }
            break;
        case BadAlloc:
        case BadAccess:
            free(error);
            return 0;
        }

        ret_code = handle_error(dpy, (xError *) error, True);
        free(error);
        return ret_code;
    }

    /* it's not an error, but we don't have a reply, so it's an I/O
     * error. */
    if(!reply) {
        _XIOError(dpy);
        return 0;
    }

    /* there's no error and we have a reply. */
    dpy->xcb->reply_data = reply;
    dpy->xcb->reply_consumed = sizeof(xReply) + (extra * 4);
    dpy->xcb->reply_length = sizeof(xReply);
    if(dpy->xcb->reply_data[0] == 1)
        dpy->xcb->reply_length += (((xcb_generic_reply_t *) dpy->xcb->reply_data)->length * 4);

    /* error: Xlib asks too much. give them what we can anyway. */
    if(dpy->xcb->reply_length < dpy->xcb->reply_consumed)
        dpy->xcb->reply_consumed = dpy->xcb->reply_length;

    memcpy(rep, dpy->xcb->reply_data, dpy->xcb->reply_consumed);
    _XFreeReplyData(dpy, discard);
    return 1;
}

int _XRead(Display *dpy, char *data, long size)
{
    assert(size >= 0);
    if(size == 0)
        return 0;
    if(dpy->xcb->reply_data == NULL ||
       dpy->xcb->reply_consumed + size > dpy->xcb->reply_length)
        throw_extlib_fail_assert("Too much data requested from _XRead",
                                 xcb_xlib_too_much_data_requested);
    memcpy(data, dpy->xcb->reply_data + dpy->xcb->reply_consumed, size);
    dpy->xcb->reply_consumed += size;
    _XFreeReplyData(dpy, False);
    return 0;
}

/*
 * _XReadPad - Read bytes from the socket taking into account incomplete
 * reads. If the number of bytes is not 0 mod 4, read additional pad
 * bytes.
 */
void _XReadPad(Display *dpy, char *data, long size)
{
    _XRead(dpy, data, size);
    dpy->xcb->reply_consumed += -size & 3;
    _XFreeReplyData(dpy, False);
}

/* Read and discard "n" 8-bit bytes of data */
void _XEatData(Display *dpy, unsigned long n)
{
    dpy->xcb->reply_consumed += n;
    _XFreeReplyData(dpy, False);
}

/*
 * Read and discard "n" 32-bit words of data
 * Matches the units of the length field in X protocol replies, and provides
 * a single implementation of overflow checking to avoid having to replicate
 * those checks in every caller.
 */
void _XEatDataWords(Display *dpy, unsigned long n)
{
    if (n < ((INT_MAX - dpy->xcb->reply_consumed) >> 2))
        dpy->xcb->reply_consumed += (n << 2);
    else
        /* Overflow would happen, so just eat the rest of the reply */
        dpy->xcb->reply_consumed = dpy->xcb->reply_length;
    _XFreeReplyData(dpy, False);
}
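
/* For example, with reply_consumed at 32 the guard in _XEatDataWords()
 * allows n up to (INT_MAX - 32) >> 2 = 536870903 words before n << 2 could
 * overflow the byte count; a bogus length larger than that simply consumes
 * the rest of the reply instead. */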
unsigned long
_XNextRequest(Display *dpy)
{
    /* This will update dpy->request. The assumption is that the next thing
     * that the application will do is make a request, so there's little
     * overhead.
     */
    require_socket(dpy);
    return NextRequest(dpy);
}