1/* Copyright (C) 2003-2006 Jamey Sharp, Josh Triplett
2 * This file is licensed under the MIT license. See the file COPYING. */
3
4#ifdef HAVE_CONFIG_H
5#include <config.h>
6#endif
7
8#include "Xlibint.h"
9#include "locking.h"
10#include "Xprivate.h"
11#include "Xxcbint.h"
12#include <xcb/xcbext.h>
13
14#include <assert.h>
15#ifdef HAVE_INTTYPES_H
16#include <inttypes.h>
17#endif
18#include <stdio.h>
19#include <stdint.h>
20#include <stdlib.h>
21#include <string.h>
22#include <limits.h>
23#ifdef HAVE_SYS_SELECT_H
24#include <sys/select.h>
25#endif
26
/* Abort with a diagnostic on a fatal internal inconsistency.
 * _message is unused in this macro itself (the throw_* wrappers below
 * print it); _var names the local so the failed assert's output
 * identifies which error code tripped. */
#define xcb_fail_assert(_message, _var) do { \
	unsigned int _var = 1; \
	fprintf(stderr, "[xcb] Aborting, sorry about that.\n"); \
	assert(!_var); \
} while (0)

/* Report a sequence-number/queue inconsistency, point the user at the
 * usual cause (multi-threaded use without XInitThreads), then abort. */
#define throw_thread_fail_assert(_message, _var) do { \
	fprintf(stderr, "[xcb] " _message "\n"); \
        if (_Xglobal_lock) { \
            fprintf(stderr, "[xcb] You called XInitThreads, this is not your fault\n"); \
        } else { \
            fprintf(stderr, "[xcb] Most likely this is a multi-threaded client " \
                            "and XInitThreads has not been called\n"); \
        } \
	xcb_fail_assert(_message, _var); \
} while (0)

/* XXX: It would probably be most useful if we stored the last-processed
 *      request, so we could find the offender from the message. */
/* Report reply-buffer misuse (an extension library that consumed the
 * wrong amount of reply data), then abort. */
#define throw_extlib_fail_assert(_message, _var) do { \
	fprintf(stderr, "[xcb] " _message "\n"); \
	fprintf(stderr, "[xcb] This is most likely caused by a broken X " \
	                "extension library\n"); \
	xcb_fail_assert(_message, _var); \
} while (0)
52
53static void return_socket(void *closure)
54{
55	Display *dpy = closure;
56	InternalLockDisplay(dpy, /* don't skip user locks */ 0);
57	_XSend(dpy, NULL, 0);
58	dpy->bufmax = dpy->buffer;
59	UnlockDisplay(dpy);
60}
61
/* Ensure Xlib currently owns the connection's write side.
 *
 * bufmax == buffer is the sentinel meaning "socket handed back to XCB"
 * (set by return_socket above).  In that case re-take the socket with
 * xcb_take_socket(), registering return_socket as the hand-back
 * callback, and resynchronize Xlib's request counters with the
 * sequence count XCB reports in `sent`.
 *
 * Returns True on success, False after raising _XIOError. */
static Bool require_socket(Display *dpy)
{
	if(dpy->bufmax == dpy->buffer)
	{
		uint64_t sent;
		int flags = 0;
		/* if we don't own the event queue, we have to ask XCB
		 * to set our errors aside for us. */
		if(dpy->xcb->event_owner != XlibOwnsEventQueue)
			flags = XCB_REQUEST_CHECKED;
		if(!xcb_take_socket(dpy->xcb->connection, return_socket, dpy,
		                    flags, &sent)) {
			_XIOError(dpy);
			return False;
		}
		/* Requests issued through XCB directly bypassed Xlib's
		 * buffer, so jump both counters to XCB's sequence. */
		dpy->xcb->last_flushed = sent;
		X_DPY_SET_REQUEST(dpy, sent);
		dpy->bufmax = dpy->xcb->real_bufmax;
	}
	return True;
}
83
84/* Call internal connection callbacks for any fds that are currently
85 * ready to read. This function will not block unless one of the
86 * callbacks blocks.
87 *
88 * This code borrowed from _XWaitForReadable. Inverse call tree:
89 * _XRead
90 *  _XWaitForWritable
91 *   _XFlush
92 *   _XSend
93 *  _XEventsQueued
94 *  _XReadEvents
95 *  _XRead[0-9]+
96 *   _XAllocIDs
97 *  _XReply
98 *  _XEatData
99 * _XReadPad
100 */
101static Bool check_internal_connections(Display *dpy)
102{
103	struct _XConnectionInfo *ilist;
104	fd_set r_mask;
105	struct timeval tv;
106	int result;
107	int highest_fd = -1;
108
109	if(dpy->flags & XlibDisplayProcConni || !dpy->im_fd_info)
110		return True;
111
112	FD_ZERO(&r_mask);
113	for(ilist = dpy->im_fd_info; ilist; ilist = ilist->next)
114	{
115		assert(ilist->fd >= 0);
116		FD_SET(ilist->fd, &r_mask);
117		if(ilist->fd > highest_fd)
118			highest_fd = ilist->fd;
119	}
120	assert(highest_fd >= 0);
121
122	tv.tv_sec = 0;
123	tv.tv_usec = 0;
124	result = select(highest_fd + 1, &r_mask, NULL, NULL, &tv);
125
126	if(result == -1)
127	{
128		if(errno != EINTR) {
129			_XIOError(dpy);
130			return False;
131		}
132
133		return True;
134	}
135
136	for(ilist = dpy->im_fd_info; result && ilist; ilist = ilist->next)
137		if(FD_ISSET(ilist->fd, &r_mask))
138		{
139			_XProcessInternalConnection(dpy, ilist);
140			--result;
141		}
142
143	return True;
144}
145
/* Allocate a PendingRequest for `sequence` and append it to the
 * display's pending-request list, validating the list invariants:
 * sequence numbers must be strictly increasing and the tail node must
 * really be last.  Violations abort via throw_thread_fail_assert, as
 * they indicate unsynchronized multi-threaded Display access.
 *
 * NOTE(review): a malloc failure is only caught by assert(), which is
 * compiled out under NDEBUG; node would then be dereferenced NULL. */
static PendingRequest *append_pending_request(Display *dpy, uint64_t sequence)
{
	PendingRequest *node = malloc(sizeof(PendingRequest));
	assert(node);
	node->next = NULL;
	node->sequence = sequence;
	node->reply_waiter = 0;
	if(dpy->xcb->pending_requests_tail)
	{
		/* The new sequence must sort after everything queued. */
		if (XLIB_SEQUENCE_COMPARE(dpy->xcb->pending_requests_tail->sequence,
		                          >=, node->sequence))
			throw_thread_fail_assert("Unknown sequence number "
			                         "while appending request",
			                         xcb_xlib_unknown_seq_number);
		if (dpy->xcb->pending_requests_tail->next != NULL)
			throw_thread_fail_assert("Unknown request in queue "
			                         "while appending request",
			                         xcb_xlib_unknown_req_pending);
		dpy->xcb->pending_requests_tail->next = node;
	}
	else
		dpy->xcb->pending_requests = node;
	dpy->xcb->pending_requests_tail = node;
	return node;
}
171
/* Remove `req` from the head of the pending-request list and free it.
 * `req` must be the current head, and the next node (if any) must
 * carry a strictly greater sequence number; violations abort via
 * throw_thread_fail_assert since they mean sequencing was lost. */
static void dequeue_pending_request(Display *dpy, PendingRequest *req)
{
	if (req != dpy->xcb->pending_requests)
		throw_thread_fail_assert("Unknown request in queue while "
		                         "dequeuing",
		                         xcb_xlib_unknown_req_in_deq);

	dpy->xcb->pending_requests = req->next;
	if(!dpy->xcb->pending_requests)
	{
		/* List is now empty, so req must also have been the tail. */
		if (req != dpy->xcb->pending_requests_tail)
			throw_thread_fail_assert("Unknown request in queue "
			                         "while dequeuing",
			                         xcb_xlib_unknown_req_in_deq);
		dpy->xcb->pending_requests_tail = NULL;
	}
	else if (XLIB_SEQUENCE_COMPARE(req->sequence, >=,
	                               dpy->xcb->pending_requests->sequence))
		throw_thread_fail_assert("Unknown sequence number while "
		                         "dequeuing request",
		                         xcb_xlib_threads_sequence_lost);

	free(req);
}
196
197static int handle_error(Display *dpy, xError *err, Bool in_XReply)
198{
199	_XExtension *ext;
200	int ret_code;
201	/* Oddly, Xlib only allows extensions to suppress errors when
202	 * those errors were seen by _XReply. */
203	if(in_XReply)
204		/*
205		 * we better see if there is an extension who may
206		 * want to suppress the error.
207		 */
208		for(ext = dpy->ext_procs; ext; ext = ext->next)
209			if(ext->error && (*ext->error)(dpy, err, &ext->codes, &ret_code))
210				return ret_code;
211	_XError(dpy, err);
212	return 0;
213}
214
/* Widen a 32-bit wire sequence number into the full 64-bit (uint64_t)
 * sequence number tracked in *wide.  The low dword of *wide is
 * replaced by `narrow`; if that makes the value drop by more than
 * 2^31, the low dword must have wrapped (or 2^31 requests were
 * skipped, which is hopefully improbable), so carry one into the high
 * dword.  The comparison yields 0 or 1 directly, which avoids a
 * conditional branch. */
static void widen(uint64_t *wide, unsigned int narrow)
{
	uint64_t merged = (*wide & ~((uint64_t)0xFFFFFFFFUL)) | narrow;
	uint64_t carry = merged + ((uint64_t)1 << 31) < *wide;

	*wide = merged + (carry << 32);
}
228
229/* Thread-safety rules:
230 *
231 * At most one thread can be reading from XCB's event queue at a time.
232 * If you are not the current event-reading thread and you need to find
233 * out if an event is available, you must wait.
234 *
235 * The same rule applies for reading replies.
236 *
 * A single thread cannot be both the event-reading and the
238 * reply-reading thread at the same time.
239 *
240 * We always look at both the current event and the first pending reply
241 * to decide which to process next.
242 *
243 * We always process all responses in sequence-number order, which may
244 * mean waiting for another thread (either the event_waiter or the
245 * reply_waiter) to handle an earlier response before we can process or
246 * return a later one. If so, we wait on the corresponding condition
247 * variable for that thread to process the response and wake us up.
248 */
249
/* Fetch the next XCB event if it should be handed to Xlib now.
 *
 * queued_only restricts the check to events XCB already read off the
 * wire (xcb_poll_for_queued_event); otherwise new data may be read.
 * An event is returned only when it is ordered before the first
 * pending reply: there is no pending request, the event's (widened)
 * sequence number precedes it, or it equals it and the response is
 * not an error (an error at that sequence belongs to the reply path).
 *
 * Returns NULL when no eligible event exists or on I/O error. */
static xcb_generic_reply_t *poll_for_event(Display *dpy, Bool queued_only)
{
	/* Make sure the Display's sequence numbers are valid */
	if (!require_socket(dpy))
		return NULL;

	/* Precondition: This thread can safely get events from XCB. */
	assert(dpy->xcb->event_owner == XlibOwnsEventQueue && !dpy->xcb->event_waiter);

	if(!dpy->xcb->next_event) {
		if(queued_only)
			dpy->xcb->next_event = xcb_poll_for_queued_event(dpy->xcb->connection);
		else
			dpy->xcb->next_event = xcb_poll_for_event(dpy->xcb->connection);
	}

	if(dpy->xcb->next_event)
	{
		PendingRequest *req = dpy->xcb->pending_requests;
		xcb_generic_event_t *event = dpy->xcb->next_event;
		uint64_t event_sequence = X_DPY_GET_LAST_REQUEST_READ(dpy);
		widen(&event_sequence, event->full_sequence);
		/* Deliver only if nothing pending precedes this event. */
		if(!req || XLIB_SEQUENCE_COMPARE(event_sequence, <, req->sequence)
		        || (event->response_type != X_Error && event_sequence == req->sequence))
		{
			uint64_t request = X_DPY_GET_REQUEST(dpy);
			/* An event cannot outrun the requests we sent. */
			if (XLIB_SEQUENCE_COMPARE(event_sequence, >, request))
			{
				throw_thread_fail_assert("Unknown sequence "
				                         "number while "
							 "processing queue",
				                xcb_xlib_threads_sequence_lost);
			}
			X_DPY_SET_LAST_REQUEST_READ(dpy, event_sequence);
			dpy->xcb->next_event = NULL;
			return (xcb_generic_reply_t *) event;
		}
	}
	return NULL;
}
290
/* Pull the next response (event, reply, or error) off the connection,
 * preserving strict sequence order between events and replies.
 *
 * Returns the response the caller should handle next, or NULL when
 * there is nothing to process right now (no responses available, or
 * the next reply is owned by another thread's _XReply).  A reply or
 * error that arrives together with earlier events is parked in
 * dpy->xcb->next_response so those events can be returned first. */
static xcb_generic_reply_t *poll_for_response(Display *dpy)
{
	void *response;
	xcb_generic_reply_t *event;
	PendingRequest *req;

	while(1)
	{
		xcb_generic_error_t *error = NULL;
		uint64_t request;
		Bool poll_queued_only = dpy->xcb->next_response != NULL;

		/* Step 1: is there an event in our queue before the next
		 * reply/error? Return that first.
		 *
		 * If we don't have a reply/error saved from an earlier
		 * invocation we check incoming events too, otherwise only
		 * the ones already queued.
		 */
		response = poll_for_event(dpy, poll_queued_only);
		if(response)
			break;

		/* Step 2:
		 * Response is NULL, i.e. we have no events.
		 * If we are not waiting for a reply or some other thread
		 * had dibs on the next reply, exit.
		 */
		req = dpy->xcb->pending_requests;
		if(!req || req->reply_waiter)
			break;

		/* Step 3:
		 * We have some response (error or reply) related to req
		 * saved from an earlier invocation of this function. Let's
		 * use that one.
		 */
		if(dpy->xcb->next_response)
		{
			if (((xcb_generic_reply_t*)dpy->xcb->next_response)->response_type == X_Error)
			{
				error = dpy->xcb->next_response;
				response = NULL;
			}
			else
			{
				response = dpy->xcb->next_response;
				error = NULL;
			}
			dpy->xcb->next_response = NULL;
		}
		else
		{
			/* Step 4: pull down the next response from the wire. This
			 * should be the 99% case.
			 * xcb_poll_for_reply64() may also pull down events that
			 * happened before the reply.
			 */
			if(!xcb_poll_for_reply64(dpy->xcb->connection, req->sequence,
						 &response, &error)) {
				/* if there is no reply/error, xcb_poll_for_reply64
				 * may have read events. Return that. */
				response = poll_for_event(dpy, True);
				break;
			}

			/* Step 5: we have a new response, but we may also have some
			 * events that happened before that response. Return those
			 * first and save our reply/error for the next invocation.
			 */
			event = poll_for_event(dpy, True);
			if(event)
			{
				dpy->xcb->next_response = error ? error : response;
				response = event;
				break;
			}
		}

		/* Step 6: actually handle the reply/error now... */
		request = X_DPY_GET_REQUEST(dpy);
		if(XLIB_SEQUENCE_COMPARE(req->sequence, >, request))
		{
			throw_thread_fail_assert("Unknown sequence number "
			                         "while awaiting reply",
			                        xcb_xlib_threads_sequence_lost);
		}
		X_DPY_SET_LAST_REQUEST_READ(dpy, req->sequence);
		if(response)
			break;
		dequeue_pending_request(dpy, req);
		if(error)
			return (xcb_generic_reply_t *) error;
	}
	return response;
}
387
/* Dispatch one response and free it.
 * Replies are offered to the async handler chain (first consumer
 * wins); errors go through handle_error(); anything else is enqueued
 * as an event via _XEnq.  in_XReply is forwarded to handle_error so
 * extensions may suppress errors seen during _XReply. */
static void handle_response(Display *dpy, xcb_generic_reply_t *response, Bool in_XReply)
{
	_XAsyncHandler *async, *next;
	switch(response->response_type)
	{
	case X_Reply:
		/* Grab `next` first: a handler may unregister itself. */
		for(async = dpy->async_handlers; async; async = next)
		{
			next = async->next;
			if(async->handler(dpy, (xReply *) response, (char *) response, sizeof(xReply) + (response->length << 2), async->data))
				break;
		}
		break;

	case X_Error:
		handle_error(dpy, (xError *) response, in_XReply);
		break;

	default: /* event */
		/* GenericEvents may be > 32 bytes. In this case, the
		 * event struct is trailed by the additional bytes. the
		 * xcb_generic_event_t struct uses 4 bytes for internal
		 * numbering, so we need to shift the trailing data to
		 * be after the first 32 bytes. */
		if(response->response_type == GenericEvent && ((xcb_ge_event_t *) response)->length)
		{
			xcb_ge_event_t *event = (xcb_ge_event_t *) response;
			memmove(&event->full_sequence, &event[1], event->length * 4);
		}
		_XEnq(dpy, (xEvent *) response);
		break;
	}
	free(response);
}
422
/* Return the number of events in Xlib's queue (dpy->qlen) after
 * draining whatever responses XCB already has.  mode selects whether
 * the output buffer is flushed first (QueuedAfterFlush).  Returns 0
 * on a dead connection or when Xlib does not own the event queue. */
int _XEventsQueued(Display *dpy, int mode)
{
	xcb_generic_reply_t *response;
	if(dpy->flags & XlibDisplayIOError)
		return 0;
	if(dpy->xcb->event_owner != XlibOwnsEventQueue)
		return 0;

	if(mode == QueuedAfterFlush)
		_XSend(dpy, NULL, 0);
	else if (!check_internal_connections(dpy))
		return 0;

	/* If another thread is blocked waiting for events, then we must
	 * let that thread pick up the next event. Since it blocked, we
	 * can reasonably claim there are no new events right now. */
	if(!dpy->xcb->event_waiter)
	{
		while((response = poll_for_response(dpy)))
			handle_response(dpy, response, False);
		if(xcb_connection_has_error(dpy->xcb->connection)) {
			_XIOError(dpy);
			return 0;
		}
	}
	return dpy->qlen;
}
450
/* _XReadEvents - Flush the output queue,
 * then read as many events as possible (but at least 1) and enqueue them
 */
void _XReadEvents(Display *dpy)
{
	xcb_generic_reply_t *response;
	unsigned long serial;

	if(dpy->flags & XlibDisplayIOError)
		return;
	_XSend(dpy, NULL, 0);
	if(dpy->xcb->event_owner != XlibOwnsEventQueue)
		return;
	if (!check_internal_connections(dpy))
		return;

	/* Loop until a new event has been enqueued (the event serial
	 * advances) and the queue is non-empty. */
	serial = dpy->next_event_serial_num;
	while(serial == dpy->next_event_serial_num || dpy->qlen == 0)
	{
		if(dpy->xcb->event_waiter)
		{
			ConditionWait(dpy, dpy->xcb->event_notify);
			/* Maybe the other thread got us an event. */
			continue;
		}

		if(!dpy->xcb->next_event)
		{
			xcb_generic_event_t *event;
			/* Become the event waiter; block inside XCB
			 * without holding the display lock. */
			dpy->xcb->event_waiter = 1;
			UnlockDisplay(dpy);
			event = xcb_wait_for_event(dpy->xcb->connection);
			/* It appears that classic Xlib respected user
			 * locks when waking up after waiting for
			 * events. However, if this thread did not have
			 * any user locks, and another thread takes a
			 * user lock and tries to read events, then we'd
			 * deadlock. So we'll choose to let the thread
			 * that got in first consume events, despite the
			 * later thread's user locks. */
			InternalLockDisplay(dpy, /* ignore user locks */ 1);
			dpy->xcb->event_waiter = 0;
			ConditionBroadcast(dpy, dpy->xcb->event_notify);
			if(!event)
			{
				_XIOError(dpy);
				return;
			}
			dpy->xcb->next_event = event;
		}

		/* We've established most of the conditions for
		 * poll_for_response to return non-NULL. The exceptions
		 * are connection shutdown, and finding that another
		 * thread is waiting for the next reply we'd like to
		 * process. */

		response = poll_for_response(dpy);
		if(response)
			handle_response(dpy, response, False);
		else if(dpy->xcb->pending_requests->reply_waiter)
		{ /* need braces around ConditionWait */
			ConditionWait(dpy, dpy->xcb->reply_notify);
		}
		else
		{
		        _XIOError(dpy);
		        return;
		}
	}

	/* The preceding loop established that there is no
	 * event_waiter--unless we just called ConditionWait because of
	 * a reply_waiter, in which case another thread may have become
	 * the event_waiter while we slept unlocked. */
	if(!dpy->xcb->event_waiter)
		while((response = poll_for_response(dpy)))
			handle_response(dpy, response, False);
	if(xcb_connection_has_error(dpy->xcb->connection))
		_XIOError(dpy);
}
532
/*
 * _XSend - Flush the buffer and send the client data. 32 bit word aligned
 * transmission is used, if size is not 0 mod 4, extra bytes are transmitted.
 *
 * Note that the connection must not be read from once the data currently
 * in the buffer has been written.
 */
void _XSend(Display *dpy, const char *data, long size)
{
	static const xReq dummy_request;
	static char const pad[3];
	struct iovec vec[3];
	uint64_t requests;
	uint64_t dpy_request;
	_XExtension *ext;
	xcb_connection_t *c = dpy->xcb->connection;
	if(dpy->flags & XlibDisplayIOError)
		return;

	/* Nothing buffered and nothing to append: nothing to do. */
	if(dpy->bufptr == dpy->buffer && !size)
		return;

	/* append_pending_request does not alter the dpy request number
	 * therefore we can get it outside of the loop and the if
	 */
	dpy_request = X_DPY_GET_REQUEST(dpy);
	/* iff we asked XCB to set aside errors, we must pick those up
	 * eventually. iff there are async handlers, we may have just
	 * issued requests that will generate replies. in either case,
	 * we need to remember to check later. */
	if(dpy->xcb->event_owner != XlibOwnsEventQueue || dpy->async_handlers)
	{
		uint64_t sequence;
		for(sequence = dpy->xcb->last_flushed + 1; sequence <= dpy_request; ++sequence)
			append_pending_request(dpy, sequence);
	}
	/* Tell XCB how many requests this write completes. */
	requests = dpy_request - dpy->xcb->last_flushed;
	dpy->xcb->last_flushed = dpy_request;

	/* Buffered bytes, caller data, then 0-3 bytes of padding. */
	vec[0].iov_base = dpy->buffer;
	vec[0].iov_len = dpy->bufptr - dpy->buffer;
	vec[1].iov_base = (char *)data;
	vec[1].iov_len = size;
	vec[2].iov_base = (char *)pad;
	vec[2].iov_len = -size & 3;

	/* Let registered extensions inspect the outgoing data first. */
	for(ext = dpy->flushes; ext; ext = ext->next_flush)
	{
		int i;
		for(i = 0; i < 3; ++i)
			if(vec[i].iov_len)
				ext->before_flush(dpy, &ext->codes, vec[i].iov_base, vec[i].iov_len);
	}

	if(xcb_writev(c, vec, 3, requests) < 0) {
		_XIOError(dpy);
		return;
	}
	/* Buffer is empty now; point last_req at a harmless dummy. */
	dpy->bufptr = dpy->buffer;
	dpy->last_req = (char *) &dummy_request;

	if (!check_internal_connections(dpy))
		return;

	_XSetSeqSyncFunction(dpy);
}
599
/*
 * _XFlush - Flush the X request buffer.  If the buffer is empty, no
 * action is taken.
 */
void _XFlush(Display *dpy)
{
	/* Make sure Xlib owns the write side before flushing. */
	if (!require_socket(dpy))
		return;

	_XSend(dpy, NULL, 0);

	/* Pick up any responses that arrived during the flush. */
	_XEventsQueued(dpy, QueuedAfterReading);
}
613
/* Sentinel XID meaning "no preallocated ID available". */
static const XID inval_id = ~0UL;

/* Refill the one-slot XID cache if _XAllocID consumed it.
 * NOTE(review): presumably registered via _XSetPrivSyncFunction (see
 * _XAllocID below) — confirm against Xprivate.h. */
void _XIDHandler(Display *dpy)
{
	if (dpy->xcb->next_xid == inval_id)
		_XAllocIDs(dpy, &dpy->xcb->next_xid, 1);
}
621
/* _XAllocID - resource ID allocation routine.
 * Hands out the cached XID, marks the cache empty (inval_id) and
 * installs the private sync function to arrange a refill. */
XID _XAllocID(Display *dpy)
{
	XID ret = dpy->xcb->next_xid;
	/* The cache must have been refilled since the last allocation. */
	assert (ret != inval_id);
	dpy->xcb->next_xid = inval_id;
	_XSetPrivSyncFunction(dpy);
	return ret;
}
631
/* _XAllocIDs - multiple resource ID allocation routine.
 * Fills ids[0..count-1] via xcb_generate_id. */
void _XAllocIDs(Display *dpy, XID *ids, int count)
{
	int i;
#ifdef XTHREADS
	/* Release the display lock around the xcb_generate_id calls,
	 * taking the user display lock first (released again below). */
	if (dpy->lock)
		(*dpy->lock->user_lock_display)(dpy);
	UnlockDisplay(dpy);
#endif
	for (i = 0; i < count; i++)
		ids[i] = xcb_generate_id(dpy->xcb->connection);
#ifdef XTHREADS
	InternalLockDisplay(dpy, /* don't skip user locks */ 0);
	if (dpy->lock)
		(*dpy->lock->user_unlock_display)(dpy);
#endif
}
649
650static void _XFreeReplyData(Display *dpy, Bool force)
651{
652	if(!force && dpy->xcb->reply_consumed < dpy->xcb->reply_length)
653		return;
654	free(dpy->xcb->reply_data);
655	dpy->xcb->reply_data = NULL;
656}
657
/*
 * _XReply - Wait for a reply packet and copy its contents into the
 * specified rep.
 * extra: number of 32-bit words expected after the reply
 * discard: should I discard data following "extra" words?
 *
 * Returns 1 on success (remaining reply data cached in
 * dpy->xcb->reply_data), an extension handler's code for a suppressed
 * error, or 0 otherwise (I/O error, or a protocol error delivered via
 * _XError / deliberately tolerated, e.g. BadAlloc).
 */
Status _XReply(Display *dpy, xReply *rep, int extra, Bool discard)
{
	xcb_generic_error_t *error;
	xcb_connection_t *c = dpy->xcb->connection;
	char *reply;
	PendingRequest *current;
	uint64_t dpy_request;

	if (dpy->xcb->reply_data)
		throw_extlib_fail_assert("Extra reply data still left in queue",
		                         xcb_xlib_extra_reply_data_left);

	if(dpy->flags & XlibDisplayIOError)
		return 0;

	/* The reply we want is for the last request sent; reuse the
	 * tail node if it already tracks that sequence number. */
	_XSend(dpy, NULL, 0);
	dpy_request = X_DPY_GET_REQUEST(dpy);
	if(dpy->xcb->pending_requests_tail
	   && dpy->xcb->pending_requests_tail->sequence == dpy_request)
		current = dpy->xcb->pending_requests_tail;
	else
		current = append_pending_request(dpy, dpy_request);
	/* Don't let any other thread get this reply. */
	current->reply_waiter = 1;

	/* Drain responses in sequence order until ours arrives; earlier
	 * replies/errors belong to async handlers or other threads. */
	while(1)
	{
		PendingRequest *req = dpy->xcb->pending_requests;
		xcb_generic_reply_t *response;

		if(req != current && req->reply_waiter)
		{
			ConditionWait(dpy, dpy->xcb->reply_notify);
			/* Another thread got this reply. */
			continue;
		}
		req->reply_waiter = 1;
		UnlockDisplay(dpy);
		response = xcb_wait_for_reply64(c, req->sequence, &error);
		/* Any user locks on another thread must have been taken
		 * while we slept in xcb_wait_for_reply64. Classic Xlib
		 * ignored those user locks in this case, so we do too. */
		InternalLockDisplay(dpy, /* ignore user locks */ 1);

		/* We have the response we're looking for. Now, before
		 * letting anyone else process this sequence number, we
		 * need to process any events that should have come
		 * earlier. */

		if(dpy->xcb->event_owner == XlibOwnsEventQueue)
		{
			xcb_generic_reply_t *event;

			/* Assume event queue is empty if another thread is blocking
			 * waiting for event. */
			if(!dpy->xcb->event_waiter)
			{
				while((event = poll_for_response(dpy)))
					handle_response(dpy, event, True);
                        }
		}

		req->reply_waiter = 0;
		ConditionBroadcast(dpy, dpy->xcb->reply_notify);
		dpy_request = X_DPY_GET_REQUEST(dpy);
		if(XLIB_SEQUENCE_COMPARE(req->sequence, >, dpy_request)) {
			throw_thread_fail_assert("Unknown sequence number "
			                         "while processing reply",
			                        xcb_xlib_threads_sequence_lost);
		}
		X_DPY_SET_LAST_REQUEST_READ(dpy, req->sequence);
		if(!response)
			dequeue_pending_request(dpy, req);

		if(req == current)
		{
			reply = (char *) response;
			break;
		}

		/* Not ours: dispatch it and keep draining. */
		if(error)
			handle_response(dpy, (xcb_generic_reply_t *) error, True);
		else if(response)
			handle_response(dpy, response, True);
	}
	if (!check_internal_connections(dpy))
		return 0;

	/* An error already sitting in next_event with our sequence
	 * number is really the response to this request. */
	if(dpy->xcb->next_event && dpy->xcb->next_event->response_type == X_Error)
	{
		xcb_generic_event_t *event = dpy->xcb->next_event;
		uint64_t last_request_read = X_DPY_GET_LAST_REQUEST_READ(dpy);
		uint64_t event_sequence = last_request_read;
		widen(&event_sequence, event->full_sequence);
		if(event_sequence == last_request_read)
		{
			error = (xcb_generic_error_t *) event;
			dpy->xcb->next_event = NULL;
		}
	}

	if(error)
	{
		int ret_code;

		/* Xlib is evil and assumes that even errors will be
		 * copied into rep. */
		memcpy(rep, error, 32);

		/* do not die on "no such font", "can't allocate",
		   "can't grab" failures */
		switch(error->error_code)
		{
			case BadName:
				switch(error->major_code)
				{
					case X_LookupColor:
					case X_AllocNamedColor:
						free(error);
						return 0;
				}
				break;
			case BadFont:
				if(error->major_code == X_QueryFont) {
					free(error);
					return 0;
				}
				break;
			case BadAlloc:
			case BadAccess:
				free(error);
				return 0;
		}

		ret_code = handle_error(dpy, (xError *) error, True);
		free(error);
		return ret_code;
	}

	/* it's not an error, but we don't have a reply, so it's an I/O
	 * error. */
	if(!reply) {
		_XIOError(dpy);
		return 0;
	}

	/* there's no error and we have a reply. */
	dpy->xcb->reply_data = reply;
	dpy->xcb->reply_consumed = sizeof(xReply) + (extra * 4);
	dpy->xcb->reply_length = sizeof(xReply);
	/* first byte 1 == X_Reply; its length field counts additional
	 * 32-bit words beyond the fixed 32-byte header. */
	if(dpy->xcb->reply_data[0] == 1)
		dpy->xcb->reply_length += (((xcb_generic_reply_t *) dpy->xcb->reply_data)->length * 4);

	/* error: Xlib asks too much. give them what we can anyway. */
	if(dpy->xcb->reply_length < dpy->xcb->reply_consumed)
		dpy->xcb->reply_consumed = dpy->xcb->reply_length;

	memcpy(rep, dpy->xcb->reply_data, dpy->xcb->reply_consumed);
	_XFreeReplyData(dpy, discard);
	return 1;
}
825
/* Copy `size` bytes from the cached reply buffer into data.
 * Aborts if no reply is buffered or the request overruns the reply
 * (a broken extension library asked for too much).  Returns 0. */
int _XRead(Display *dpy, char *data, long size)
{
	assert(size >= 0);
	if(size == 0)
		return 0;
	if(dpy->xcb->reply_data == NULL ||
	   dpy->xcb->reply_consumed + size > dpy->xcb->reply_length)
		throw_extlib_fail_assert("Too much data requested from _XRead",
		                         xcb_xlib_too_much_data_requested);
	memcpy(data, dpy->xcb->reply_data + dpy->xcb->reply_consumed, size);
	dpy->xcb->reply_consumed += size;
	/* Frees the buffer if this read consumed the rest of it. */
	_XFreeReplyData(dpy, False);
	return 0;
}
840
/*
 * _XReadPad - Read bytes from the socket taking into account incomplete
 * reads.  If the number of bytes is not 0 mod 4, read additional pad
 * bytes.
 */
void _XReadPad(Display *dpy, char *data, long size)
{
	_XRead(dpy, data, size);
	/* -size & 3 is the pad needed to round size up to a multiple of
	 * 4; skip it in the cached reply rather than reading it. */
	dpy->xcb->reply_consumed += -size & 3;
	_XFreeReplyData(dpy, False);
}
852
/* Read and discard "n" 8-bit bytes of data */
void _XEatData(Display *dpy, unsigned long n)
{
	/* The reply is already buffered: just advance the cursor and
	 * let _XFreeReplyData release the buffer once it's consumed. */
	dpy->xcb->reply_consumed += n;
	_XFreeReplyData(dpy, False);
}
859
/*
 * Read and discard "n" 32-bit words of data
 * Matches the units of the length field in X protocol replies, and provides
 * a single implementation of overflow checking to avoid having to replicate
 * those checks in every caller.
 */
void _XEatDataWords(Display *dpy, unsigned long n)
{
	/* n is in words, reply_consumed is in bytes, hence the << 2. */
	if (n < ((INT_MAX - dpy->xcb->reply_consumed) >> 2))
		dpy->xcb->reply_consumed += (n << 2);
	else
		/* Overflow would happen, so just eat the rest of the reply */
		dpy->xcb->reply_consumed = dpy->xcb->reply_length;
	_XFreeReplyData(dpy, False);
}
875
/* Return the sequence number the next request will be assigned,
 * reacquiring the socket from XCB first so the counter is current. */
unsigned long
_XNextRequest(Display *dpy)
{
    /* This will update dpy->request. The assumption is that the next thing
     * that the application will do is make a request so there's little
     * overhead.
     */
    require_socket(dpy);
    return NextRequest(dpy);
}
886