xcb_in.c revision 602e473d
1/* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * Except as contained in this notice, the names of the authors or their
21 * institutions shall not be used in advertising or otherwise to promote the
22 * sale, use or other dealings in this Software without prior written
23 * authorization from the authors.
24 */
25
26/* Stuff that reads stuff from the server. */
27
28#include <assert.h>
29#include <string.h>
30#include <stdlib.h>
31#include <unistd.h>
32#include <stdio.h>
33#include <errno.h>
34
35#include "xcb.h"
36#include "xcbext.h"
37#include "xcbint.h"
38#if USE_POLL
39#include <poll.h>
40#else
41#include <sys/select.h>
42#endif
43
/* Response-type codes from the first byte of an X11 wire packet. */
#define XCB_ERROR 0
#define XCB_REPLY 1
/* GenericEvent: the only event whose wire size may exceed 32 bytes. */
#define XCB_XGE_EVENT 35

/* FIFO node for events waiting to be handed out by
 * xcb_wait_for_event/xcb_poll_for_event. */
struct event_list {
    xcb_generic_event_t *event;
    struct event_list *next;
};

/* Singly-linked list of reply buffers belonging to one request. */
struct reply_list {
    void *reply;
    struct reply_list *next;
};

/* Bookkeeping for a request range whose responses need special handling
 * (server workarounds, checked errors, or replies to be discarded). */
typedef struct pending_reply {
    uint64_t first_request;  /* 64-bit sequence range covered by this record */
    uint64_t last_request;
    enum workarounds workaround;
    int flags;               /* XCB_REQUEST_* flags */
    struct pending_reply *next;
} pending_reply;

/* One thread blocked in xcb_wait_for_reply; list is kept sorted by
 * request sequence number. */
typedef struct reader_list {
    unsigned int request;    /* 32-bit sequence the reader is waiting on */
    pthread_cond_t *data;    /* signalled when that response arrives */
    struct reader_list *next;
} reader_list;
71
72static void wake_up_next_reader(xcb_connection_t *c)
73{
74    int pthreadret;
75    if(c->in.readers)
76        pthreadret = pthread_cond_signal(c->in.readers->data);
77    else
78        pthreadret = pthread_cond_signal(&c->in.event_cond);
79    assert(pthreadret == 0);
80}
81
/* Consume one complete X11 packet (reply, event, or error) from the
 * buffered input and route it to the right consumer: the per-request
 * reply lists (waking a blocked reader), or the event queue.  Returns 1
 * if a packet was consumed, 0 if the buffer does not yet hold a full
 * packet or an allocation failed (the latter shuts the connection down).
 * Caller must hold the connection's iolock. */
static int read_packet(xcb_connection_t *c)
{
    xcb_generic_reply_t genrep;
    int length = 32;
    int eventlength = 0; /* length after first 32 bytes for GenericEvents */
    void *buf;
    pending_reply *pend = 0;
    struct event_list *event;

    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)
        return 0;

    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));

    /* Compute 32-bit sequence number of this packet. */
    /* KeymapNotify carries no sequence number, so it cannot advance the
     * sequence bookkeeping. */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
    {
        uint64_t lastread = c->in.request_read;
        /* Widen the 16-bit wire sequence to 64 bits relative to the last
         * sequence read, compensating for wraparound. */
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;

        if(c->in.request_read != lastread)
        {
            /* Responses for a new request are starting: archive any replies
             * accumulated for the previous request into the replies map. */
            if(c->in.current_reply)
            {
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
                c->in.current_reply = 0;
                c->in.current_reply_tail = &c->in.current_reply;
            }
            c->in.request_completed = c->in.request_read - 1;
        }

        /* Drop pending_reply records for requests that can no longer
         * produce responses. */
        while(c->in.pending_replies &&
              c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
	      XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
        {
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            if(!oldpend->next)
                c->in.pending_replies_tail = &c->in.pending_replies;
            free(oldpend);
        }

        /* An error is always the final response to its request. */
        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.request_read;
    }

    /* Find the pending_reply record covering this reply/error, if any. */
    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
    {
        pend = c->in.pending_replies;
        if(pend &&
           !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
    }

    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
    {
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
        {
            /* Buggy server reports the wrong length; recompute it from
             * fields inside the reply itself. */
            uint32_t *p = (uint32_t *) c->in.queue;
            genrep.length = p[2] * p[3] * 2;
        }
        length += genrep.length * 4;
    }

    /* XGE events may have sizes > 32 */
    if (genrep.response_type == XCB_XGE_EVENT)
    {
        eventlength = ((xcb_ge_event_t*)&genrep)->length * 4;
    }

    /* Non-replies get an extra uint32_t so full_sequence can be stored
     * past the 32-byte wire packet. */
    buf = malloc(length + eventlength +
            (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t)));
    if(!buf)
    {
        _xcb_conn_shutdown(c);
        return 0;
    }

    if(_xcb_in_read_block(c, buf, length) <= 0)
    {
        free(buf);
        return 0;
    }

    /* pull in XGE event data if available, append after event struct */
    if (eventlength)
    {
        if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
        {
            free(buf);
            return 0;
        }
    }

    /* The caller asked (via xcb_discard_reply or request flags) for this
     * response to be thrown away. */
    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
    {
        free(buf);
        return 1;
    }

    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;

    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
       (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
    {
        reader_list *reader;
        struct reply_list *cur = malloc(sizeof(struct reply_list));
        if(!cur)
        {
            _xcb_conn_shutdown(c);
            free(buf);
            return 0;
        }
        /* Append to the current request's reply list... */
        cur->reply = buf;
        cur->next = 0;
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        /* ...and wake the reader waiting on this sequence number, if any
         * (the readers list is sorted, so we can stop early). */
        for(reader = c->in.readers;
	    reader &&
	    XCB_SEQUENCE_COMPARE_32(reader->request, <=, c->in.request_read);
	    reader = reader->next)
	{
            if(XCB_SEQUENCE_COMPARE_32(reader->request, ==, c->in.request_read))
            {
                pthread_cond_signal(reader->data);
                break;
            }
	}
        return 1;
    }

    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
    if(!event)
    {
        _xcb_conn_shutdown(c);
        free(buf);
        return 0;
    }
    event->event = buf;
    event->next = 0;
    *c->in.events_tail = event;
    c->in.events_tail = &event->next;
    pthread_cond_signal(&c->in.event_cond);
    return 1; /* I have something for you... */
}
239
240static xcb_generic_event_t *get_event(xcb_connection_t *c)
241{
242    struct event_list *cur = c->in.events;
243    xcb_generic_event_t *ret;
244    if(!c->in.events)
245        return 0;
246    ret = cur->event;
247    c->in.events = cur->next;
248    if(!cur->next)
249        c->in.events_tail = &c->in.events;
250    free(cur);
251    return ret;
252}
253
254static void free_reply_list(struct reply_list *head)
255{
256    while(head)
257    {
258        struct reply_list *cur = head;
259        head = cur->next;
260        free(cur->reply);
261        free(cur);
262    }
263}
264
/* Read exactly len bytes from fd into buf, blocking (via poll/select) as
 * needed when the descriptor is non-blocking.  Returns len on success, or
 * the failing read/poll/select result (<= 0) on EOF or error. */
static int read_block(const int fd, void *buf, const ssize_t len)
{
    int done = 0;   /* bytes transferred so far */

    while(done < len)
    {
        int n = read(fd, (char *) buf + done, len - done);
        if(n > 0)
            done += n;
        else if(n < 0 && errno == EAGAIN)
        {
            /* No data available yet on a non-blocking fd: wait until the
             * descriptor becomes readable, retrying interrupted waits. */
#if USE_POLL
            struct pollfd pfd = { .fd = fd, .events = POLLIN, .revents = 0 };
            do
                n = poll(&pfd, 1, -1);
            while(n == -1 && errno == EINTR);
#else
            fd_set readable;
            FD_ZERO(&readable);
            FD_SET(fd, &readable);
            do
                n = select(fd + 1, &readable, 0, 0, 0);
            while(n == -1 && errno == EINTR);
#endif
        }
        /* EOF, hard error, or a failed wait: report it to the caller. */
        if(n <= 0)
            return n;
    }
    return len;
}
297
/* Non-blocking check for the response to `request`.  Returns 0 when the
 * caller must keep waiting (more responses may still arrive).  Returns 1
 * when the question is settled: *reply (and *error, if requested) are
 * filled in — both possibly null, meaning the request completed without
 * further data.  Caller must hold the iolock. */
static int poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
{
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!request)
        head = 0;
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
    {
        head = _xcb_map_remove(c->in.replies, request);
        /* More than one reply remains: put the rest back in the map. */
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    }
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
    {
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        if(!head->next)
            c->in.current_reply_tail = &c->in.current_reply;
    }
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_completed))
        head = 0;
    /* We may have more replies on the way for this request: block until we're
     * sure. */
    else
        return 0;

    if(error)
        *error = 0;
    *reply = 0;

    if(head)
    {
        /* Errors go to *error when the caller asked for them, otherwise
         * they are silently freed; real replies go to *reply. */
        if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        {
            if(error)
                *error = head->reply;
            else
                free(head->reply);
        }
        else
            *reply = head->reply;

        free(head);
    }

    return 1;
}
352
353/* Public interface */
354
/* Block until the response (reply or error) for `request` has arrived,
 * returning the reply or 0.  If `e` is non-null, *e receives any error
 * (otherwise errors are freed internally by poll_for_reply).  The
 * returned buffers are heap-allocated and not freed here, so ownership
 * passes to the caller. */
void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
{
    uint64_t widened_request;
    void *ret = 0;
    if(e)
        *e = 0;
    if(c->has_error)
        return 0;

    pthread_mutex_lock(&c->iolock);

    /* Reconstruct the full 64-bit sequence number from the 32-bit cookie,
     * relative to the newest request issued. */
    widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
    if(widened_request > c->out.request)
        widened_request -= UINT64_C(1) << 32;

    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, widened_request))
    {
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        reader_list reader;
        reader_list **prev_reader;

        /* Insert ourselves into the readers list, which is kept sorted by
         * request sequence number. */
        for(prev_reader = &c->in.readers;
	    *prev_reader &&
	    XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
	    prev_reader = &(*prev_reader)->next)
	{
            /* empty */;
	}
        reader.request = request;
        reader.data = &cond;
        reader.next = *prev_reader;
        *prev_reader = &reader;

        /* Sleep until our response is settled or the connection dies. */
        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
                break;

        /* Unlink ourselves from the readers list again. */
        for(prev_reader = &c->in.readers;
	    *prev_reader &&
	    XCB_SEQUENCE_COMPARE_32((*prev_reader)->request, <=, request);
	    prev_reader = &(*prev_reader)->next)
	{
            if(*prev_reader == &reader)
            {
                *prev_reader = (*prev_reader)->next;
                break;
            }
	}
        pthread_cond_destroy(&cond);
    }

    wake_up_next_reader(c);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}
411
412static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq)
413{
414    pending_reply *pend;
415    pend = malloc(sizeof(*pend));
416    if(!pend)
417    {
418        _xcb_conn_shutdown(c);
419        return;
420    }
421
422    pend->first_request = seq;
423    pend->last_request = seq;
424    pend->workaround = 0;
425    pend->flags = XCB_REQUEST_DISCARD_REPLY;
426    pend->next = *prev_next;
427    *prev_next = pend;
428
429    if(!pend->next)
430        c->in.pending_replies_tail = &pend->next;
431}
432
/* Core of xcb_discard_reply: throw away any responses already received
 * for `request`, and arrange for any still-outstanding ones to be freed
 * as they arrive.  Caller must hold the iolock. */
static void discard_reply(xcb_connection_t *c, unsigned int request)
{
    pending_reply *pend = 0;
    pending_reply **prev_pend;
    uint64_t widened_request;

    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    if(XCB_SEQUENCE_COMPARE_32(request, <, c->in.request_read))
    {
        struct reply_list *head;
        head = _xcb_map_remove(c->in.replies, request);
        while (head)
        {
            struct reply_list *next = head->next;
            free(head->reply);
            free(head);
            head = next;
        }
        return;
    }

    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. Free it, and mark the pend to free any further
     * replies. */
    if(XCB_SEQUENCE_COMPARE_32(request, ==, c->in.request_read) && c->in.current_reply)
    {
        struct reply_list *head;
        head = c->in.current_reply;
        c->in.current_reply = NULL;
        c->in.current_reply_tail = &c->in.current_reply;
        while (head)
        {
            struct reply_list *next = head->next;
            free(head->reply);
            free(head);
            head = next;
        }

        /* Same pend-matching test as in read_packet: does the head of the
         * pending list cover the request currently being read? */
        pend = c->in.pending_replies;
        if(pend &&
            !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
        if(pend)
            pend->flags |= XCB_REQUEST_DISCARD_REPLY;
        else
            insert_pending_discard(c, &c->in.pending_replies, c->in.request_read);

        return;
    }

    /* Walk the list of pending requests. Mark the first match for deletion. */
    for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
    {
        if(XCB_SEQUENCE_COMPARE_32((*prev_pend)->first_request, >, request))
            break;

        if(XCB_SEQUENCE_COMPARE_32((*prev_pend)->first_request, ==, request))
        {
            /* Pending reply found. Mark for discard: */
            (*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;
            return;
        }
    }

    /* Pending reply not found (likely due to _unchecked request). Create one: */
    /* Widen the 32-bit sequence to 64 bits relative to the newest request,
     * exactly as xcb_wait_for_reply does. */
    widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
    if(widened_request > c->out.request)
        widened_request -= UINT64_C(1) << 32;

    insert_pending_discard(c, prev_pend, widened_request);
}
507
508void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
509{
510    if(c->has_error)
511        return;
512
513    /* If an error occurred when issuing the request, fail immediately. */
514    if(!sequence)
515        return;
516
517    pthread_mutex_lock(&c->iolock);
518    discard_reply(c, sequence);
519    pthread_mutex_unlock(&c->iolock);
520}
521
522int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
523{
524    int ret;
525    if(c->has_error)
526    {
527        *reply = 0;
528        if(error)
529            *error = 0;
530        return 1; /* would not block */
531    }
532    assert(reply != 0);
533    pthread_mutex_lock(&c->iolock);
534    ret = poll_for_reply(c, request, reply, error);
535    pthread_mutex_unlock(&c->iolock);
536    return ret;
537}
538
539xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
540{
541    xcb_generic_event_t *ret;
542    if(c->has_error)
543        return 0;
544    pthread_mutex_lock(&c->iolock);
545    /* get_event returns 0 on empty list. */
546    while(!(ret = get_event(c)))
547        if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
548            break;
549
550    wake_up_next_reader(c);
551    pthread_mutex_unlock(&c->iolock);
552    return ret;
553}
554
555xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
556{
557    xcb_generic_event_t *ret = 0;
558    if(!c->has_error)
559    {
560        pthread_mutex_lock(&c->iolock);
561        /* FIXME: follow X meets Z architecture changes. */
562        ret = get_event(c);
563        if(!ret && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
564            ret = get_event(c);
565        pthread_mutex_unlock(&c->iolock);
566    }
567    return ret;
568}
569
/* Wait for completion of the void (checked) request identified by
 * `cookie`, returning its error or 0 on success.  Caller frees the
 * returned error. */
xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
{
    /* FIXME: this could hold the lock to avoid syncing unnecessarily, but
     * that would require factoring the locking out of xcb_get_input_focus,
     * xcb_get_input_focus_reply, and xcb_wait_for_reply. */
    xcb_generic_error_t *ret;
    void *reply;
    if(c->has_error)
        return 0;
    /* If the server has not yet been driven past this request, issue a
     * GetInputFocus round trip so that any error for it is forced out. */
    if(XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_expected)
       && XCB_SEQUENCE_COMPARE_32(cookie.sequence,>,c->in.request_completed))
    {
        free(xcb_get_input_focus_reply(c, xcb_get_input_focus(c), &ret));
        assert(!ret);
    }
    /* A void request has no reply, so any response must be the error. */
    reply = xcb_wait_for_reply(c, cookie.sequence, &ret);
    assert(!reply);
    return ret;
}
589
590/* Private interface */
591
/* Initialize the input side of a connection.  Returns 1 on success, 0 if
 * the condition variable or the replies map cannot be created.
 * NOTE(review): several fields (request_expected, current_reply, events,
 * readers, pending_replies) are never assigned here — presumably the
 * enclosing structure is zero-filled by the caller; confirm at the call
 * site before relying on their initial values. */
int _xcb_in_init(_xcb_in *in)
{
    if(pthread_cond_init(&in->event_cond, 0))
        return 0;
    in->reading = 0;

    in->queue_len = 0;

    in->request_read = 0;
    in->request_completed = 0;

    /* Map from sequence number to that request's archived reply list. */
    in->replies = _xcb_map_new();
    if(!in->replies)
        return 0;

    /* Empty-list invariant: each tail pointer points at its list head. */
    in->current_reply_tail = &in->current_reply;
    in->events_tail = &in->events;
    in->pending_replies_tail = &in->pending_replies;

    return 1;
}
613
614void _xcb_in_destroy(_xcb_in *in)
615{
616    pthread_cond_destroy(&in->event_cond);
617    free_reply_list(in->current_reply);
618    _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
619    while(in->events)
620    {
621        struct event_list *e = in->events;
622        in->events = e->next;
623        free(e->event);
624        free(e);
625    }
626    while(in->pending_replies)
627    {
628        pending_reply *pend = in->pending_replies;
629        in->pending_replies = pend->next;
630        free(pend);
631    }
632}
633
634int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
635{
636    pending_reply *pend = malloc(sizeof(pending_reply));
637    assert(workaround != WORKAROUND_NONE || flags != 0);
638    if(!pend)
639    {
640        _xcb_conn_shutdown(c);
641        return 0;
642    }
643    pend->first_request = pend->last_request = request;
644    pend->workaround = workaround;
645    pend->flags = flags;
646    pend->next = 0;
647    *c->in.pending_replies_tail = pend;
648    c->in.pending_replies_tail = &pend->next;
649    return 1;
650}
651
/* Close off the open-ended pending_reply left by an external socket
 * owner: its range is capped at the newest request issued and its
 * WORKAROUND_EXTERNAL_SOCKET_OWNER marker cleared.
 * NOTE(review): presumably called when the socket is handed back to XCB;
 * confirm against the xcb_take_socket/return_socket path. */
void _xcb_in_replies_done(xcb_connection_t *c)
{
    struct pending_reply *pend;
    /* Non-empty list: the tail pointer addresses the last node's `next`
     * field, so container_of recovers the last node itself. */
    if (c->in.pending_replies_tail != &c->in.pending_replies)
    {
        pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
        if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
        {
            pend->last_request = c->out.request;
            pend->workaround = WORKAROUND_NONE;
        }
    }
}
665
666int _xcb_in_read(xcb_connection_t *c)
667{
668    int n = read(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len);
669    if(n > 0)
670        c->in.queue_len += n;
671    while(read_packet(c))
672        /* empty */;
673    if((n > 0) || (n < 0 && errno == EAGAIN))
674        return 1;
675    _xcb_conn_shutdown(c);
676    return 0;
677}
678
/* Read exactly `len` bytes into `buf`, consuming already-buffered queue
 * bytes first and then blocking on the socket for the remainder.  Returns
 * `len` on success; on failure shuts down the connection and returns the
 * underlying read_block result (<= 0). */
int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
{
    /* Take whatever is already buffered, up to len bytes. */
    int done = c->in.queue_len;
    if(len < done)
        done = len;

    memcpy(buf, c->in.queue, done);
    c->in.queue_len -= done;
    /* Shift any leftover buffered bytes to the front of the queue. */
    memmove(c->in.queue, c->in.queue + done, c->in.queue_len);

    /* Fetch the remainder straight from the file descriptor. */
    if(len > done)
    {
        int ret = read_block(c->fd, (char *) buf + done, len - done);
        if(ret <= 0)
        {
            _xcb_conn_shutdown(c);
            return ret;
        }
    }

    return len;
}
701