xcb_io.c revision 2e9c7c8c
1/* Copyright (C) 2003-2006 Jamey Sharp, Josh Triplett
2 * This file is licensed under the MIT license. See the file COPYING. */
3
4#include "Xlibint.h"
5#include "locking.h"
6#include "Xprivate.h"
7#include "Xxcbint.h"
8#include <xcb/xcbext.h>
9
10#include <assert.h>
11#include <inttypes.h>
12#include <stdint.h>
13#include <stdlib.h>
14#include <string.h>
15
static void return_socket(void *closure)
{
	/* Callback registered with xcb_take_socket (see require_socket):
	 * XCB invokes it when it needs the write side of the connection
	 * back.  Flush any buffered Xlib requests, then set
	 * bufmax == buffer to mark "socket not held" so the next Xlib
	 * request re-acquires it via require_socket. */
	Display *dpy = closure;
	LockDisplay(dpy);
	_XSend(dpy, NULL, 0);
	dpy->bufmax = dpy->buffer;
	UnlockDisplay(dpy);
}
24
/* (Re)acquire the write side of the X connection from XCB if Xlib does
 * not already hold it.  "Not held" is marked by bufmax == buffer (set
 * in return_socket); on re-acquisition, resynchronize Xlib's request
 * sequence accounting with the count of requests XCB has sent. */
static void require_socket(Display *dpy)
{
	if(dpy->bufmax == dpy->buffer)
	{
		uint64_t sent;
		int flags = 0;
		/* if we don't own the event queue, we have to ask XCB
		 * to set our errors aside for us. */
		if(dpy->xcb->event_owner != XlibOwnsEventQueue)
			flags = XCB_REQUEST_CHECKED;
		if(!xcb_take_socket(dpy->xcb->connection, return_socket, dpy,
		                    flags, &sent))
			_XIOError(dpy);
		/* Xlib uses unsigned long for sequence numbers.  XCB
		 * uses 64-bit internally, but currently exposes an
		 * unsigned int API.  If these differ, Xlib cannot track
		 * the full 64-bit sequence number if 32-bit wrap
		 * happens while Xlib does not own the socket.  A
		 * complete fix would be to make XCB's public API use
		 * 64-bit sequence numbers. */
		assert(!(sizeof(unsigned long) > sizeof(unsigned int)
		         && dpy->xcb->event_owner == XlibOwnsEventQueue
		         && (sent - dpy->last_request_read >= (UINT64_C(1) << 32))));
		/* Adopt XCB's sent-request count: everything up to 'sent'
		 * went out while XCB held the socket. */
		dpy->xcb->last_flushed = dpy->request = sent;
		/* Restore the real buffer limit now that the socket is ours. */
		dpy->bufmax = dpy->xcb->real_bufmax;
	}
}
52
53/* Call internal connection callbacks for any fds that are currently
54 * ready to read. This function will not block unless one of the
55 * callbacks blocks.
56 *
57 * This code borrowed from _XWaitForReadable. Inverse call tree:
58 * _XRead
59 *  _XWaitForWritable
60 *   _XFlush
61 *   _XSend
62 *  _XEventsQueued
63 *  _XReadEvents
64 *  _XRead[0-9]+
65 *   _XAllocIDs
66 *  _XReply
67 *  _XEatData
68 * _XReadPad
69 */
static void check_internal_connections(Display *dpy)
{
	struct _XConnectionInfo *ilist;
	fd_set r_mask;
	struct timeval tv;
	int result;
	int highest_fd = -1;

	/* Bail out if we are already inside internal-connection
	 * processing (XlibDisplayProcConni) or there are no internal
	 * connections registered. */
	if(dpy->flags & XlibDisplayProcConni || !dpy->im_fd_info)
		return;

	FD_ZERO(&r_mask);
	for(ilist = dpy->im_fd_info; ilist; ilist = ilist->next)
	{
		assert(ilist->fd >= 0);
		/* NOTE(review): FD_SET is undefined for fds >= FD_SETSIZE;
		 * this assumes internal-connection fds stay below that
		 * limit — confirm. */
		FD_SET(ilist->fd, &r_mask);
		if(ilist->fd > highest_fd)
			highest_fd = ilist->fd;
	}
	assert(highest_fd >= 0);

	/* Zero timeout: probe readiness without blocking. */
	tv.tv_sec = 0;
	tv.tv_usec = 0;
	result = select(highest_fd + 1, &r_mask, NULL, NULL, &tv);

	if(result == -1)
	{
		/* A signal interrupted the poll; nothing is ready. */
		if(errno == EINTR)
			return;
		_XIOError(dpy);
	}

	/* Dispatch each ready fd; 'result' counts how many are left. */
	for(ilist = dpy->im_fd_info; result && ilist; ilist = ilist->next)
		if(FD_ISSET(ilist->fd, &r_mask))
		{
			_XProcessInternalConnection(dpy, ilist);
			--result;
		}
}
109
110static void call_handlers(Display *dpy, xcb_generic_reply_t *buf)
111{
112	_XAsyncHandler *async, *next;
113	for(async = dpy->async_handlers; async; async = next)
114	{
115		next = async->next;
116		if(async->handler(dpy, (xReply *) buf, (char *) buf, sizeof(xReply) + (buf->length << 2), async->data))
117			return;
118	}
119	if(buf->response_type == 0) /* unhandled error */
120	    _XError(dpy, (xError *) buf);
121}
122
123static xcb_generic_event_t * wait_or_poll_for_event(Display *dpy, int wait)
124{
125	xcb_connection_t *c = dpy->xcb->connection;
126	xcb_generic_event_t *event;
127	if(wait)
128	{
129		if(dpy->xcb->event_waiter)
130		{
131			ConditionWait(dpy, dpy->xcb->event_notify);
132			event = xcb_poll_for_event(c);
133		}
134		else
135		{
136			dpy->xcb->event_waiter = 1;
137			UnlockDisplay(dpy);
138			event = xcb_wait_for_event(c);
139			LockDisplay(dpy);
140			dpy->xcb->event_waiter = 0;
141			ConditionBroadcast(dpy, dpy->xcb->event_notify);
142		}
143	}
144	else
145		event = xcb_poll_for_event(c);
146	return event;
147}
148
/* Reconstruct a full-width (unsigned long) sequence number from the
 * 32-bit value XCB reports.  The high bits are carried over from the
 * previous wide value; if the reconstructed number went backwards, a
 * 32-bit wrap occurred and we advance to the next 2^32 epoch.  The
 * epoch increment is written as two 16-bit shifts so the expression
 * remains valid (and a harmless no-op) when unsigned long is only
 * 32 bits wide. */
static void widen(unsigned long *wide, unsigned int narrow)
{
	unsigned long epoch = *wide & ~0xFFFFFFFFUL;
	unsigned long candidate = epoch | narrow;
	if(candidate < *wide)
		candidate += (unsigned long) 1 << 16 << 16;
	*wide = candidate;
}
158
/* Drain and dispatch responses (events, errors, replies) from XCB in
 * sequence order.
 *
 * wait_for_first_event: if set, block until at least one event has
 *   been enqueued on Xlib's event queue.
 * current_error / current_request: set when called from _XReply for
 *   request 'current_request'.  An error belonging to that request is
 *   stored in *current_error instead of being dispatched, and the loop
 *   stops once that request is reached. */
static void process_responses(Display *dpy, int wait_for_first_event, xcb_generic_error_t **current_error, unsigned long current_request)
{
	void *reply;
	xcb_generic_event_t *event = dpy->xcb->next_event;
	xcb_generic_error_t *error;
	xcb_connection_t *c = dpy->xcb->connection;
	if(!event && dpy->xcb->event_owner == XlibOwnsEventQueue)
		event = wait_or_poll_for_event(dpy, wait_for_first_event);

	require_socket(dpy);

	while(1)
	{
		PendingRequest *req = dpy->xcb->pending_requests;
		/* XCB reports 32-bit sequence numbers; widen relative to
		 * the last one Xlib saw. */
		unsigned long event_sequence = dpy->last_request_read;
		if(event)
			widen(&event_sequence, event->full_sequence);
		/* No pending request may predate the request we were
		 * asked to stop at. */
		assert(!(req && current_request && !XLIB_SEQUENCE_COMPARE(req->sequence, <=, current_request)));
		/* Deliver the event first when it is no newer than the
		 * oldest request still awaiting a response. */
		if(event && (!req || XLIB_SEQUENCE_COMPARE(event_sequence, <=, req->sequence)))
		{
			dpy->last_request_read = event_sequence;
			if(event->response_type != X_Error)
			{
				/* GenericEvents may be > 32 bytes. In this
				 * case, the event struct is trailed by the
				 * additional bytes. the xcb_generic_event_t
				 * struct uses 4 bytes for internal numbering,
				 * so we need to shift the trailing data to be
				 * after the first 32 bytes.  */
                                if (event->response_type == GenericEvent &&
                                        ((xcb_ge_event_t*)event)->length)
				{
					memmove(&event->full_sequence,
                                                &event[1],
						((xcb_ge_event_t*)event)->length * 4);
				}
				_XEnq(dpy, (xEvent *) event);
				/* An event is queued now, so later waits in
				 * this call may poll instead of blocking. */
				wait_for_first_event = 0;
			}
			else if(current_error && event_sequence == current_request)
			{
				/* This can only occur when called from
				 * _XReply, which doesn't need a new event. */
				*current_error = (xcb_generic_error_t *) event;
				event = NULL;
				break;
			}
			else
				_XError(dpy, (xError *) event);
			free(event);
			event = wait_or_poll_for_event(dpy, wait_for_first_event);
		}
		else if(req && req->sequence == current_request)
		{
			/* The caller's own request is next: leave its
			 * response for _XReply to collect. */
			break;
		}
		else if(req && xcb_poll_for_reply(dpy->xcb->connection, req->sequence, &reply, &error))
		{
			uint64_t sequence = req->sequence;
			/* A NULL reply means the request is complete:
			 * retire it from the pending list and fall back to
			 * any error XCB handed us. */
			if(!reply)
			{
				dpy->xcb->pending_requests = req->next;
				if(!dpy->xcb->pending_requests)
					dpy->xcb->pending_requests_tail = &dpy->xcb->pending_requests;
				free(req);
				reply = error;
			}
			if(reply)
			{
				dpy->last_request_read = sequence;
				/* Offer replies/errors to async handlers. */
				call_handlers(dpy, reply);
				free(reply);
			}
		}
		else
			break;
	}

	/* Stash an undelivered event for the next call. */
	dpy->xcb->next_event = event;

	if(xcb_connection_has_error(c))
		_XIOError(dpy);

	assert(XLIB_SEQUENCE_COMPARE(dpy->last_request_read, <=, dpy->request));
}
244
245int _XEventsQueued(Display *dpy, int mode)
246{
247	if(dpy->flags & XlibDisplayIOError)
248		return 0;
249	if(dpy->xcb->event_owner != XlibOwnsEventQueue)
250		return 0;
251
252	if(mode == QueuedAfterFlush)
253		_XSend(dpy, NULL, 0);
254	else
255		check_internal_connections(dpy);
256	process_responses(dpy, 0, NULL, 0);
257	return dpy->qlen;
258}
259
260/* _XReadEvents - Flush the output queue,
261 * then read as many events as possible (but at least 1) and enqueue them
262 */
263void _XReadEvents(Display *dpy)
264{
265	if(dpy->flags & XlibDisplayIOError)
266		return;
267	_XSend(dpy, NULL, 0);
268	if(dpy->xcb->event_owner != XlibOwnsEventQueue)
269		return;
270	check_internal_connections(dpy);
271	do {
272		process_responses(dpy, 1, NULL, 0);
273	} while (dpy->qlen == 0);
274}
275
276/*
277 * _XSend - Flush the buffer and send the client data. 32 bit word aligned
278 * transmission is used, if size is not 0 mod 4, extra bytes are transmitted.
279 *
280 * Note that the connection must not be read from once the data currently
281 * in the buffer has been written.
282 */
void _XSend(Display *dpy, const char *data, long size)
{
	static const xReq dummy_request;
	static char const pad[3];
	struct iovec vec[3];
	uint64_t requests;
	_XExtension *ext;
	xcb_connection_t *c = dpy->xcb->connection;
	if(dpy->flags & XlibDisplayIOError)
		return;

	/* Nothing buffered and no extra data: nothing to do. */
	if(dpy->bufptr == dpy->buffer && !size)
		return;

	/* iff we asked XCB to set aside errors, we must pick those up
	 * eventually. iff there are async handlers, we may have just
	 * issued requests that will generate replies. in either case,
	 * we need to remember to check later. */
	if(dpy->xcb->event_owner != XlibOwnsEventQueue || dpy->async_handlers)
	{
		uint64_t sequence;
		/* Queue one PendingRequest per request issued since the
		 * last flush. */
		for(sequence = dpy->xcb->last_flushed; sequence < dpy->request; ++sequence)
		{
			PendingRequest *req = malloc(sizeof(PendingRequest));
			/* NOTE(review): assert() vanishes under NDEBUG,
			 * leaving a NULL dereference on malloc failure. */
			assert(req);
			req->next = NULL;
			req->sequence = sequence;
			*dpy->xcb->pending_requests_tail = req;
			dpy->xcb->pending_requests_tail = &req->next;
		}
	}
	requests = dpy->request - dpy->xcb->last_flushed;
	dpy->xcb->last_flushed = dpy->request;

	/* Gather write: Xlib's buffered requests, the caller's extra
	 * data, then 0-3 pad bytes to reach 32-bit alignment. */
	vec[0].iov_base = dpy->buffer;
	vec[0].iov_len = dpy->bufptr - dpy->buffer;
	vec[1].iov_base = (caddr_t) data;
	vec[1].iov_len = size;
	vec[2].iov_base = (caddr_t) pad;
	vec[2].iov_len = -size & 3;

	/* Let registered extensions inspect the outgoing data. */
	for(ext = dpy->flushes; ext; ext = ext->next_flush)
	{
		int i;
		for(i = 0; i < 3; ++i)
			if(vec[i].iov_len)
				ext->before_flush(dpy, &ext->codes, vec[i].iov_base, vec[i].iov_len);
	}

	if(xcb_writev(c, vec, 3, requests) < 0)
		_XIOError(dpy);
	/* Buffer is drained; reset it and the last-request pointer. */
	dpy->bufptr = dpy->buffer;
	dpy->last_req = (char *) &dummy_request;

	check_internal_connections(dpy);

	_XSetSeqSyncFunction(dpy);
}
341
342/*
343 * _XFlush - Flush the X request buffer.  If the buffer is empty, no
344 * action is taken.
345 */
void _XFlush(Display *dpy)
{
	/* Make sure Xlib owns the socket before flushing, push the
	 * buffered requests out, then pick up any responses that have
	 * already arrived. */
	require_socket(dpy);
	_XSend(dpy, NULL, 0);

	_XEventsQueued(dpy, QueuedAfterReading);
}
353
/* Sentinel stored in dpy->xcb->next_xid marking the cached XID as
 * consumed and awaiting refill by _XIDHandler. */
static const XID inval_id = ~0UL;
355
/* Refill the cached XID (dpy->xcb->next_xid) after _XAllocID consumed
 * it.  Installed via _XSetPrivSyncFunction in _XAllocID; always
 * returns 0. */
int _XIDHandler(Display *dpy)
{
	XID next;

	/* Cache still valid: nothing to do. */
	if (dpy->xcb->next_xid != inval_id)
	    return 0;

	/* NOTE(review): xcb_generate_id runs before LockDisplay —
	 * presumably safe because XCB serializes internally; confirm. */
	next = xcb_generate_id(dpy->xcb->connection);
	LockDisplay(dpy);
	dpy->xcb->next_xid = next;
#ifdef XTHREADS
	/* Release the user-level display lock taken in _XAllocID. */
	if (dpy->lock)
		(*dpy->lock->user_unlock_display)(dpy);
#endif
	UnlockDisplay(dpy);
	return 0;
}
373
374/* _XAllocID - resource ID allocation routine. */
/* _XAllocID - resource ID allocation routine. */
XID _XAllocID(Display *dpy)
{
	/* Hand out the cached XID; the assert guards against calling
	 * twice without an intervening refill. */
	XID ret = dpy->xcb->next_xid;
	assert (ret != inval_id);
#ifdef XTHREADS
	/* Take the user-level display lock; it is released again once
	 * the cache is refilled (see _XIDHandler). */
	if (dpy->lock)
		(*dpy->lock->user_lock_display)(dpy);
#endif
	/* Mark the cache empty and arrange for a refill at the next
	 * sync point. */
	dpy->xcb->next_xid = inval_id;
	_XSetPrivSyncFunction(dpy);
	return ret;
}
387
388/* _XAllocIDs - multiple resource ID allocation routine. */
void _XAllocIDs(Display *dpy, XID *ids, int count)
{
	/* Fill ids[0..count-1] with fresh resource IDs straight from
	 * XCB, temporarily dropping the display lock around the calls
	 * (the user-level lock is held across the gap). */
	int i;
#ifdef XTHREADS
	if (dpy->lock)
		(*dpy->lock->user_lock_display)(dpy);
	UnlockDisplay(dpy);
#endif
	for (i = 0; i < count; i++)
		ids[i] = xcb_generate_id(dpy->xcb->connection);
#ifdef XTHREADS
	LockDisplay(dpy);
	if (dpy->lock)
		(*dpy->lock->user_unlock_display)(dpy);
#endif
}
405
406static void _XFreeReplyData(Display *dpy, Bool force)
407{
408	if(!force && dpy->xcb->reply_consumed < dpy->xcb->reply_length)
409		return;
410	free(dpy->xcb->reply_data);
411	dpy->xcb->reply_data = NULL;
412}
413
/* Find or create the PendingRequest node for the display's current
 * request (dpy->request).  The pending_requests list is sorted by
 * sequence number: walk to the insertion point and splice in a new
 * node only when one for this sequence is not already present. */
static PendingRequest * insert_pending_request(Display *dpy)
{
	PendingRequest **cur = &dpy->xcb->pending_requests;
	while(*cur && XLIB_SEQUENCE_COMPARE((*cur)->sequence, <, dpy->request))
		cur = &((*cur)->next);
	if(!*cur || (*cur)->sequence != dpy->request)
	{
		PendingRequest *node = malloc(sizeof(PendingRequest));
		/* NOTE(review): assert() vanishes under NDEBUG; malloc
		 * failure would then dereference NULL below. */
		assert(node);
		node->next = *cur;
		node->sequence = dpy->request;
		/* Appending at the end: keep the tail pointer valid. */
		if(cur == dpy->xcb->pending_requests_tail)
			dpy->xcb->pending_requests_tail = &(node->next);
		*cur = node;
	}
	return *cur;
}
431
432/*
433 * _XReply - Wait for a reply packet and copy its contents into the
434 * specified rep.
435 * extra: number of 32-bit words expected after the reply
436 * discard: should I discard data following "extra" words?
437 */
Status _XReply(Display *dpy, xReply *rep, int extra, Bool discard)
{
	xcb_generic_error_t *error;
	xcb_connection_t *c = dpy->xcb->connection;
	char *reply;
	PendingRequest *current;

	/* Any previous reply must be fully consumed or discarded
	 * before waiting on a new one. */
	assert(!dpy->xcb->reply_data);

	if(dpy->flags & XlibDisplayIOError)
		return 0;

	_XSend(dpy, NULL, 0);
	/* Track the request we are waiting on in the pending list. */
	current = insert_pending_request(dpy);
	/* FIXME: drop the Display lock while waiting?
	 * Complicates process_responses. */
	reply = xcb_wait_for_reply(c, current->sequence, &error);

	check_internal_connections(dpy);
	/* Dispatch everything that arrived before our reply; an error
	 * belonging to our request comes back via &error instead of
	 * being dispatched. */
	process_responses(dpy, 0, &error, current->sequence);

	if(error)
	{
		_XExtension *ext;
		xError *err = (xError *) error;
		int ret_code;

		dpy->last_request_read = error->full_sequence;

		/* Xlib is evil and assumes that even errors will be
		 * copied into rep. */
		memcpy(rep, error, 32);

		/* do not die on "no such font", "can't allocate",
		   "can't grab" failures */
		switch(err->errorCode)
		{
			case BadName:
				switch(err->majorCode)
				{
					case X_LookupColor:
					case X_AllocNamedColor:
						free(error);
						return 0;
				}
				break;
			case BadFont:
				if(err->majorCode == X_QueryFont) {
					free(error);
					return 0;
				}
				break;
			case BadAlloc:
			case BadAccess:
				free(error);
				return 0;
		}

		/*
		 * we better see if there is an extension who may
		 * want to suppress the error.
		 */
		for(ext = dpy->ext_procs; ext; ext = ext->next)
			if(ext->error && ext->error(dpy, err, &ext->codes, &ret_code)) {
				free(error);
				return ret_code;
			}

		_XError(dpy, err);
		free(error);
		return 0;
	}

	/* it's not an error, but we don't have a reply, so it's an I/O
	 * error. */
	if(!reply)
	{
		_XIOError(dpy);
		return 0;
	}

	dpy->last_request_read = current->sequence;

	/* there's no error and we have a reply. */
	dpy->xcb->reply_data = reply;
	/* Bytes the caller consumes immediately: the fixed reply header
	 * plus 'extra' 32-bit words. */
	dpy->xcb->reply_consumed = sizeof(xReply) + (extra * 4);
	/* Total bytes XCB buffered: the header, plus — for genuine
	 * replies (first byte == 1) — the reply's length field counted
	 * in 32-bit words. */
	dpy->xcb->reply_length = sizeof(xReply);
	if(dpy->xcb->reply_data[0] == 1)
		dpy->xcb->reply_length += (((xcb_generic_reply_t *) dpy->xcb->reply_data)->length * 4);

	/* error: Xlib asks too much. give them what we can anyway. */
	if(dpy->xcb->reply_length < dpy->xcb->reply_consumed)
		dpy->xcb->reply_consumed = dpy->xcb->reply_length;

	memcpy(rep, dpy->xcb->reply_data, dpy->xcb->reply_consumed);
	/* Free now if discarding or fully consumed; otherwise the rest
	 * is handed out via _XRead/_XReadPad/_XEatData. */
	_XFreeReplyData(dpy, discard);
	return 1;
}
536
537int _XRead(Display *dpy, char *data, long size)
538{
539	assert(size >= 0);
540	if(size == 0)
541		return 0;
542	assert(dpy->xcb->reply_data != NULL);
543	assert(dpy->xcb->reply_consumed + size <= dpy->xcb->reply_length);
544	memcpy(data, dpy->xcb->reply_data + dpy->xcb->reply_consumed, size);
545	dpy->xcb->reply_consumed += size;
546	_XFreeReplyData(dpy, False);
547	return 0;
548}
549
550/*
551 * _XReadPad - Read bytes from the socket taking into account incomplete
552 * reads.  If the number of bytes is not 0 mod 4, read additional pad
553 * bytes.
554 */
void _XReadPad(Display *dpy, char *data, long size)
{
	_XRead(dpy, data, size);
	/* Skip the 0-3 pad bytes that round the data up to a 32-bit
	 * boundary, then free the buffer if it is now exhausted. */
	dpy->xcb->reply_consumed += -size & 3;
	_XFreeReplyData(dpy, False);
}
561
562/* Read and discard "n" 8-bit bytes of data */
void _XEatData(Display *dpy, unsigned long n)
{
	/* With XCB buffering whole replies, discarding is just
	 * advancing the consumed counter; free the buffer if that
	 * exhausts it. */
	dpy->xcb->reply_consumed += n;
	_XFreeReplyData(dpy, False);
}
568