/* xcb_io.c, revision 57f47464 */
/* Copyright (C) 2003-2006 Jamey Sharp, Josh Triplett
 * This file is licensed under the MIT license. See the file COPYING. */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "Xlibint.h"
#include "locking.h"
#include "Xprivate.h"
#include "Xxcbint.h"
#include <xcb/xcbext.h>

#include <assert.h>
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

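/* Callback passed to xcb_take_socket: XCB calls this when it needs the
 * write side of the connection back.  Flush any requests Xlib has
 * buffered and mark the output buffer as unusable until the socket is
 * reacquired. */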
static void return_socket(void *closure)
{
	Display *dpy = closure;
	InternalLockDisplay(dpy, /* don't skip user locks */ 0);
	_XSend(dpy, NULL, 0);
	dpy->bufmax = dpy->buffer;
	UnlockDisplay(dpy);
}

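/* Reacquire the write side of the connection from XCB if Xlib does not
 * currently hold it, and resynchronize Xlib's sequence numbers with the
 * requests XCB has already sent. */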
static void require_socket(Display *dpy)
{
	if(dpy->bufmax == dpy->buffer)
	{
		uint64_t sent;
		int flags = 0;
		/* If we don't own the event queue, we have to ask XCB
		 * to set our errors aside for us. */
		if(dpy->xcb->event_owner != XlibOwnsEventQueue)
			flags = XCB_REQUEST_CHECKED;
		if(!xcb_take_socket(dpy->xcb->connection, return_socket, dpy,
		                    flags, &sent))
			_XIOError(dpy);
		/* Xlib uses unsigned long for sequence numbers.  XCB
		 * uses 64-bit sequence numbers internally, but currently
		 * exposes only an unsigned int API.  If unsigned long is
		 * wider than unsigned int, Xlib cannot track the full
		 * sequence number across a 32-bit wrap that happens
		 * while Xlib does not own the socket.  A complete fix
		 * would be to make XCB's public API use 64-bit sequence
		 * numbers. */
		assert(!(sizeof(unsigned long) > sizeof(unsigned int)
		         && dpy->xcb->event_owner == XlibOwnsEventQueue
		         && (sent - dpy->last_request_read >= (UINT64_C(1) << 32))));
		dpy->xcb->last_flushed = dpy->request = sent;
		dpy->bufmax = dpy->xcb->real_bufmax;
	}
}

/* Call internal connection callbacks for any fds that are currently
 * ready to read. This function will not block unless one of the
 * callbacks blocks.
 *
 * This code was borrowed from _XWaitForReadable. Inverse call tree:
 * _XRead
 *  _XWaitForWritable
 *   _XFlush
 *   _XSend
 *  _XEventsQueued
 *  _XReadEvents
 *  _XRead[0-9]+
 *   _XAllocIDs
 *  _XReply
 *  _XEatData
 * _XReadPad
 */
static void check_internal_connections(Display *dpy)
{
	struct _XConnectionInfo *ilist;
	fd_set r_mask;
	struct timeval tv;
	int result;
	int highest_fd = -1;

	if(dpy->flags & XlibDisplayProcConni || !dpy->im_fd_info)
		return;

	FD_ZERO(&r_mask);
	for(ilist = dpy->im_fd_info; ilist; ilist = ilist->next)
	{
		assert(ilist->fd >= 0);
		FD_SET(ilist->fd, &r_mask);
		if(ilist->fd > highest_fd)
			highest_fd = ilist->fd;
	}
	assert(highest_fd >= 0);

	tv.tv_sec = 0;
	tv.tv_usec = 0;
	result = select(highest_fd + 1, &r_mask, NULL, NULL, &tv);

	if(result == -1)
	{
		if(errno == EINTR)
			return;
		_XIOError(dpy);
	}

	for(ilist = dpy->im_fd_info; result && ilist; ilist = ilist->next)
		if(FD_ISSET(ilist->fd, &r_mask))
		{
			_XProcessInternalConnection(dpy, ilist);
			--result;
		}
}

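/* Append a node for the request with the given sequence number to the
 * tail of the pending-request list.  Sequence numbers must arrive in
 * strictly increasing order. */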
static PendingRequest *append_pending_request(Display *dpy, unsigned long sequence)
{
	PendingRequest *node = malloc(sizeof(PendingRequest));
	assert(node);
	node->next = NULL;
	node->sequence = sequence;
	node->reply_waiter = 0;
	if(dpy->xcb->pending_requests_tail)
	{
		assert(XLIB_SEQUENCE_COMPARE(dpy->xcb->pending_requests_tail->sequence, <, node->sequence));
		assert(dpy->xcb->pending_requests_tail->next == NULL);
		dpy->xcb->pending_requests_tail->next = node;
	}
	else
		dpy->xcb->pending_requests = node;
	dpy->xcb->pending_requests_tail = node;
	return node;
}

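/* Remove and free the head of the pending-request list.  The caller
 * must pass the current head; the list stays sorted by sequence
 * number. */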
static void dequeue_pending_request(Display *dpy, PendingRequest *req)
{
	assert(req == dpy->xcb->pending_requests);
	dpy->xcb->pending_requests = req->next;
	if(!dpy->xcb->pending_requests)
	{
		assert(req == dpy->xcb->pending_requests_tail);
		dpy->xcb->pending_requests_tail = NULL;
	}
	else
		assert(XLIB_SEQUENCE_COMPARE(req->sequence, <, dpy->xcb->pending_requests->sequence));
	free(req);
}

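/* Deliver an X protocol error, giving extensions a chance to suppress
 * it when the error was picked up inside _XReply. */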
static int handle_error(Display *dpy, xError *err, Bool in_XReply)
{
	_XExtension *ext;
	int ret_code;
	/* Oddly, Xlib only allows extensions to suppress errors when
	 * those errors were seen by _XReply. */
	if(in_XReply)
		/*
		 * We'd better check whether there is an extension that
		 * wants to suppress the error.
		 */
		for(ext = dpy->ext_procs; ext; ext = ext->next)
			if(ext->error && (*ext->error)(dpy, err, &ext->codes, &ret_code))
				return ret_code;
	_XError(dpy, err);
	return 0;
}

/* Widen a 32-bit sequence number into a native-word-size (unsigned long)
 * sequence number.  Treating the comparison result as a 0-or-1 value and
 * shifting it avoids a conditional branch, and shifting by 16 twice
 * avoids a compiler warning when sizeof(unsigned long) == 4. */
static void widen(unsigned long *wide, unsigned int narrow)
{
	unsigned long new = (*wide & ~0xFFFFFFFFUL) | narrow;
	*wide = new + ((unsigned long) (new < *wide) << 16 << 16);
}

/* Thread-safety rules:
 *
 * At most one thread can be reading from XCB's event queue at a time.
 * If you are not the current event-reading thread and you need to find
 * out if an event is available, you must wait.
 *
 * The same rule applies for reading replies.
 *
 * A single thread cannot be both the event-reading and the
 * reply-reading thread at the same time.
 *
 * We always look at both the current event and the first pending reply
 * to decide which to process next.
 *
 * We always process all responses in sequence-number order, which may
 * mean waiting for another thread (either the event_waiter or the
 * reply_waiter) to handle an earlier response before we can process or
 * return a later one. If so, we wait on the corresponding condition
 * variable for that thread to process the response and wake us up.
 */

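/* Return XCB's next event if it can be processed without violating
 * sequence order relative to the first pending request, updating
 * last_request_read accordingly; otherwise (or if no event is ready)
 * return NULL. */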
static xcb_generic_reply_t *poll_for_event(Display *dpy)
{
	/* Make sure the Display's sequence numbers are valid */
	require_socket(dpy);

	/* Precondition: This thread can safely get events from XCB. */
	assert(dpy->xcb->event_owner == XlibOwnsEventQueue && !dpy->xcb->event_waiter);

	if(!dpy->xcb->next_event)
		dpy->xcb->next_event = xcb_poll_for_event(dpy->xcb->connection);

	if(dpy->xcb->next_event)
	{
		PendingRequest *req = dpy->xcb->pending_requests;
		xcb_generic_event_t *event = dpy->xcb->next_event;
		unsigned long event_sequence = dpy->last_request_read;
		widen(&event_sequence, event->full_sequence);
		if(!req || XLIB_SEQUENCE_COMPARE(event_sequence, <, req->sequence)
		        || (event->response_type != X_Error && event_sequence == req->sequence))
		{
			assert(XLIB_SEQUENCE_COMPARE(event_sequence, <=, dpy->request));
			dpy->last_request_read = event_sequence;
			dpy->xcb->next_event = NULL;
			return (xcb_generic_reply_t *) event;
		}
	}
	return NULL;
}

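/* Return the next ready response (event, reply, or error) in sequence
 * order without blocking, or NULL if nothing can be processed right
 * now. */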
static xcb_generic_reply_t *poll_for_response(Display *dpy)
{
	void *response;
	xcb_generic_error_t *error;
	PendingRequest *req;
	while(!(response = poll_for_event(dpy)) &&
	      (req = dpy->xcb->pending_requests) &&
	      !req->reply_waiter &&
	      xcb_poll_for_reply(dpy->xcb->connection, req->sequence, &response, &error))
	{
		assert(XLIB_SEQUENCE_COMPARE(req->sequence, <=, dpy->request));
		dpy->last_request_read = req->sequence;
		if(response)
			break;
		dequeue_pending_request(dpy, req);
		if(error)
			return (xcb_generic_reply_t *) error;
	}
	return response;
}

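/* Dispatch a response obtained from XCB: feed replies to the async
 * handlers, route errors through handle_error, and enqueue events on
 * the Xlib event queue.  The response is freed afterwards. */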
static void handle_response(Display *dpy, xcb_generic_reply_t *response, Bool in_XReply)
{
	_XAsyncHandler *async, *next;
	switch(response->response_type)
	{
	case X_Reply:
		for(async = dpy->async_handlers; async; async = next)
		{
			next = async->next;
			if(async->handler(dpy, (xReply *) response, (char *) response, sizeof(xReply) + (response->length << 2), async->data))
				break;
		}
		break;

	case X_Error:
		handle_error(dpy, (xError *) response, in_XReply);
		break;

	default: /* event */
		/* GenericEvents may be > 32 bytes. In this case, the
		 * event struct is followed by the additional bytes.
		 * The xcb_generic_event_t struct uses 4 extra bytes
		 * for its full_sequence field, so we need to shift the
		 * trailing data so that it follows the first 32 bytes. */
		if(response->response_type == GenericEvent && ((xcb_ge_event_t *) response)->length)
		{
			xcb_ge_event_t *event = (xcb_ge_event_t *) response;
			memmove(&event->full_sequence, &event[1], event->length * 4);
		}
		_XEnq(dpy, (xEvent *) response);
		break;
	}
	free(response);
}

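/* Process everything XCB already has buffered (flushing first if mode
 * is QueuedAfterFlush) and return the number of events now queued. */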
int _XEventsQueued(Display *dpy, int mode)
{
	xcb_generic_reply_t *response;
	if(dpy->flags & XlibDisplayIOError)
		return 0;
	if(dpy->xcb->event_owner != XlibOwnsEventQueue)
		return 0;

	if(mode == QueuedAfterFlush)
		_XSend(dpy, NULL, 0);
	else
		check_internal_connections(dpy);

	/* If another thread is blocked waiting for events, then we must
	 * let that thread pick up the next event. Since it blocked, we
	 * can reasonably claim there are no new events right now. */
	if(!dpy->xcb->event_waiter)
	{
		while((response = poll_for_response(dpy)))
			handle_response(dpy, response, False);
		if(xcb_connection_has_error(dpy->xcb->connection))
			_XIOError(dpy);
	}
	return dpy->qlen;
}

/* _XReadEvents - Flush the output queue, then read as many events as
 * possible (but at least 1) and enqueue them.
 */
void _XReadEvents(Display *dpy)
{
	xcb_generic_reply_t *response;
	unsigned long serial;

	if(dpy->flags & XlibDisplayIOError)
		return;
	_XSend(dpy, NULL, 0);
	if(dpy->xcb->event_owner != XlibOwnsEventQueue)
		return;
	check_internal_connections(dpy);

	serial = dpy->next_event_serial_num;
	while(serial == dpy->next_event_serial_num || dpy->qlen == 0)
	{
		if(dpy->xcb->event_waiter)
		{
			ConditionWait(dpy, dpy->xcb->event_notify);
			/* Maybe the other thread got us an event. */
			continue;
		}

		if(!dpy->xcb->next_event)
		{
			xcb_generic_event_t *event;
			dpy->xcb->event_waiter = 1;
			UnlockDisplay(dpy);
			event = xcb_wait_for_event(dpy->xcb->connection);
			/* It appears that classic Xlib respected user
			 * locks when waking up after waiting for
			 * events. However, if this thread did not have
			 * any user locks, and another thread takes a
			 * user lock and tries to read events, then we'd
			 * deadlock. So we'll choose to let the thread
			 * that got in first consume events, despite the
			 * later thread's user locks. */
			InternalLockDisplay(dpy, /* ignore user locks */ 1);
			dpy->xcb->event_waiter = 0;
			ConditionBroadcast(dpy, dpy->xcb->event_notify);
			if(!event)
				_XIOError(dpy);
			dpy->xcb->next_event = event;
		}

		/* We've established most of the conditions for
		 * poll_for_response to return non-NULL. The exceptions
		 * are connection shutdown, and finding that another
		 * thread is waiting for the next reply we'd like to
		 * process. */

		response = poll_for_response(dpy);
		if(response)
			handle_response(dpy, response, False);
		else if(dpy->xcb->pending_requests->reply_waiter)
		{ /* need braces around ConditionWait */
			ConditionWait(dpy, dpy->xcb->reply_notify);
		}
		else
			_XIOError(dpy);
	}

	/* The preceding loop established that there is no
	 * event_waiter--unless we just called ConditionWait because of
	 * a reply_waiter, in which case another thread may have become
	 * the event_waiter while we slept unlocked. */
	if(!dpy->xcb->event_waiter)
		while((response = poll_for_response(dpy)))
			handle_response(dpy, response, False);
	if(xcb_connection_has_error(dpy->xcb->connection))
		_XIOError(dpy);
}

/*
 * _XSend - Flush the buffer and send the client data.  32-bit
 * word-aligned transmission is used; if size is not 0 mod 4, extra pad
 * bytes are transmitted.
 *
 * Note that the connection must not be read from once the data currently
 * in the buffer has been written.
 */
void _XSend(Display *dpy, const char *data, long size)
{
	static const xReq dummy_request;
	static char const pad[3];
	struct iovec vec[3];
	uint64_t requests;
	_XExtension *ext;
	xcb_connection_t *c = dpy->xcb->connection;
	if(dpy->flags & XlibDisplayIOError)
		return;

	if(dpy->bufptr == dpy->buffer && !size)
		return;

	/* Iff we asked XCB to set aside errors, we must pick those up
	 * eventually.  Iff there are async handlers, we may have just
	 * issued requests that will generate replies.  In either case,
	 * we need to remember to check later. */
	if(dpy->xcb->event_owner != XlibOwnsEventQueue || dpy->async_handlers)
	{
		uint64_t sequence;
		for(sequence = dpy->xcb->last_flushed + 1; sequence <= dpy->request; ++sequence)
			append_pending_request(dpy, sequence);
	}
	requests = dpy->request - dpy->xcb->last_flushed;
	dpy->xcb->last_flushed = dpy->request;

	vec[0].iov_base = dpy->buffer;
	vec[0].iov_len = dpy->bufptr - dpy->buffer;
	vec[1].iov_base = (caddr_t) data;
	vec[1].iov_len = size;
	vec[2].iov_base = (caddr_t) pad;
	vec[2].iov_len = -size & 3;

	for(ext = dpy->flushes; ext; ext = ext->next_flush)
	{
		int i;
		for(i = 0; i < 3; ++i)
			if(vec[i].iov_len)
				ext->before_flush(dpy, &ext->codes, vec[i].iov_base, vec[i].iov_len);
	}

	if(xcb_writev(c, vec, 3, requests) < 0)
		_XIOError(dpy);
	dpy->bufptr = dpy->buffer;
	dpy->last_req = (char *) &dummy_request;

	check_internal_connections(dpy);

	_XSetSeqSyncFunction(dpy);
}

/*
 * _XFlush - Flush the X request buffer.  If the buffer is empty, no
 * action is taken.
 */
void _XFlush(Display *dpy)
{
	require_socket(dpy);
	_XSend(dpy, NULL, 0);

	_XEventsQueued(dpy, QueuedAfterReading);
}

static const XID inval_id = ~0UL;

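/* Replenish the cached resource ID if _XAllocID has handed out the
 * previous one. */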
void _XIDHandler(Display *dpy)
{
	if (dpy->xcb->next_xid == inval_id)
		_XAllocIDs(dpy, &dpy->xcb->next_xid, 1);
}

/* _XAllocID - resource ID allocation routine. */
XID _XAllocID(Display *dpy)
{
	XID ret = dpy->xcb->next_xid;
	assert (ret != inval_id);
	dpy->xcb->next_xid = inval_id;
	_XSetPrivSyncFunction(dpy);
	return ret;
}

/* _XAllocIDs - multiple resource ID allocation routine. */
void _XAllocIDs(Display *dpy, XID *ids, int count)
{
	int i;
#ifdef XTHREADS
	if (dpy->lock)
		(*dpy->lock->user_lock_display)(dpy);
	UnlockDisplay(dpy);
#endif
	for (i = 0; i < count; i++)
		ids[i] = xcb_generate_id(dpy->xcb->connection);
#ifdef XTHREADS
	InternalLockDisplay(dpy, /* don't skip user locks */ 0);
	if (dpy->lock)
		(*dpy->lock->user_unlock_display)(dpy);
#endif
}

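/* Free the stored reply buffer once it has been fully consumed, or
 * unconditionally if force is True. */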
static void _XFreeReplyData(Display *dpy, Bool force)
{
	if(!force && dpy->xcb->reply_consumed < dpy->xcb->reply_length)
		return;
	free(dpy->xcb->reply_data);
	dpy->xcb->reply_data = NULL;
}

/*
 * _XReply - Wait for a reply packet and copy its contents into the
 * specified rep.
 * extra: number of 32-bit words expected after the reply
 * discard: whether to discard any data that follows those "extra" words
 */
Status _XReply(Display *dpy, xReply *rep, int extra, Bool discard)
{
	xcb_generic_error_t *error;
	xcb_connection_t *c = dpy->xcb->connection;
	char *reply;
	PendingRequest *current;

	assert(!dpy->xcb->reply_data);

	if(dpy->flags & XlibDisplayIOError)
		return 0;

	_XSend(dpy, NULL, 0);
	if(dpy->xcb->pending_requests_tail && dpy->xcb->pending_requests_tail->sequence == dpy->request)
		current = dpy->xcb->pending_requests_tail;
	else
		current = append_pending_request(dpy, dpy->request);
	/* Don't let any other thread get this reply. */
	current->reply_waiter = 1;

	while(1)
	{
		PendingRequest *req = dpy->xcb->pending_requests;
		xcb_generic_reply_t *response;

		if(req != current && req->reply_waiter)
		{
			ConditionWait(dpy, dpy->xcb->reply_notify);
			/* Another thread got this reply. */
			continue;
		}
		req->reply_waiter = 1;
		UnlockDisplay(dpy);
		response = xcb_wait_for_reply(c, req->sequence, &error);
		/* Any user locks on another thread must have been taken
		 * while we slept in xcb_wait_for_reply. Classic Xlib
		 * ignored those user locks in this case, so we do too. */
		InternalLockDisplay(dpy, /* ignore user locks */ 1);

		/* We have the response we're looking for. Now, before
		 * letting anyone else process this sequence number, we
		 * need to process any events that should have come
		 * earlier. */

		if(dpy->xcb->event_owner == XlibOwnsEventQueue)
		{
			xcb_generic_reply_t *event;
			/* If some thread is already waiting for events,
			 * it will get the first one. That thread must
			 * process that event before we can continue. */
			/* FIXME: That event might be after this reply,
			 * and might never even come--or there might be
			 * multiple threads trying to get events. */
			while(dpy->xcb->event_waiter)
			{ /* need braces around ConditionWait */
				ConditionWait(dpy, dpy->xcb->event_notify);
			}
			while((event = poll_for_event(dpy)))
				handle_response(dpy, event, True);
		}

		req->reply_waiter = 0;
		ConditionBroadcast(dpy, dpy->xcb->reply_notify);
		assert(XLIB_SEQUENCE_COMPARE(req->sequence, <=, dpy->request));
		dpy->last_request_read = req->sequence;
		if(!response)
			dequeue_pending_request(dpy, req);

		if(req == current)
		{
			reply = (char *) response;
			break;
		}

		if(error)
			handle_response(dpy, (xcb_generic_reply_t *) error, True);
		else if(response)
			handle_response(dpy, response, True);
	}
	check_internal_connections(dpy);

	if(dpy->xcb->next_event && dpy->xcb->next_event->response_type == X_Error)
	{
		xcb_generic_event_t *event = dpy->xcb->next_event;
		unsigned long event_sequence = dpy->last_request_read;
		widen(&event_sequence, event->full_sequence);
		if(event_sequence == dpy->last_request_read)
		{
			error = (xcb_generic_error_t *) event;
			dpy->xcb->next_event = NULL;
		}
	}

	if(error)
	{
		int ret_code;

		/* Xlib is evil and assumes that even errors will be
		 * copied into rep. */
		memcpy(rep, error, 32);

		/* do not die on "no such font", "can't allocate",
		   "can't grab" failures */
		switch(error->error_code)
		{
			case BadName:
				switch(error->major_code)
				{
					case X_LookupColor:
					case X_AllocNamedColor:
						free(error);
						return 0;
				}
				break;
			case BadFont:
				if(error->major_code == X_QueryFont) {
					free(error);
					return 0;
				}
				break;
			case BadAlloc:
			case BadAccess:
				free(error);
				return 0;
		}

		ret_code = handle_error(dpy, (xError *) error, True);
		free(error);
		return ret_code;
	}

	/* It's not an error, but we don't have a reply, so it's an I/O
	 * error. */
	if(!reply)
	{
		_XIOError(dpy);
		return 0;
	}

	/* There's no error and we have a reply. */
	dpy->xcb->reply_data = reply;
	dpy->xcb->reply_consumed = sizeof(xReply) + (extra * 4);
	dpy->xcb->reply_length = sizeof(xReply);
	if(dpy->xcb->reply_data[0] == 1)
		dpy->xcb->reply_length += (((xcb_generic_reply_t *) dpy->xcb->reply_data)->length * 4);

	/* Xlib may ask for more data than the reply actually contains;
	 * give the caller what we can anyway. */
	if(dpy->xcb->reply_length < dpy->xcb->reply_consumed)
		dpy->xcb->reply_consumed = dpy->xcb->reply_length;

	memcpy(rep, dpy->xcb->reply_data, dpy->xcb->reply_consumed);
	_XFreeReplyData(dpy, discard);
	return 1;
}

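/* _XRead - Copy the next "size" bytes of the current reply into "data". */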
int _XRead(Display *dpy, char *data, long size)
{
	assert(size >= 0);
	if(size == 0)
		return 0;
	assert(dpy->xcb->reply_data != NULL);
	assert(dpy->xcb->reply_consumed + size <= dpy->xcb->reply_length);
	memcpy(data, dpy->xcb->reply_data + dpy->xcb->reply_consumed, size);
	dpy->xcb->reply_consumed += size;
	_XFreeReplyData(dpy, False);
	return 0;
}

/*
 * _XReadPad - Read bytes from the current reply, taking padding into
 * account: if the number of bytes is not 0 mod 4, additional pad bytes
 * are skipped.
 */
void _XReadPad(Display *dpy, char *data, long size)
{
	_XRead(dpy, data, size);
	dpy->xcb->reply_consumed += -size & 3;
	_XFreeReplyData(dpy, False);
}

/* Read and discard "n" 8-bit bytes of data */
void _XEatData(Display *dpy, unsigned long n)
{
	dpy->xcb->reply_consumed += n;
	_XFreeReplyData(dpy, False);
}