1/* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * Except as contained in this notice, the names of the authors or their
21 * institutions shall not be used in advertising or otherwise to promote the
22 * sale, use or other dealings in this Software without prior written
23 * authorization from the authors.
24 */
25
26/* Stuff that reads stuff from the server. */
27
28#ifdef HAVE_CONFIG_H
29#include "config.h"
30#endif
31
32#include <assert.h>
33#include <string.h>
34#include <stdlib.h>
35#include <stdio.h>
36#include <errno.h>
37
38#if USE_POLL
39#include <poll.h>
40#endif
41#ifndef _WIN32
42#include <unistd.h>
43#include <sys/select.h>
44#include <sys/socket.h>
45#endif
46
47#ifdef _WIN32
48#include "xcb_windefs.h"
49#endif /* _WIN32 */
50
51#include "xcb.h"
52#include "xcbext.h"
53#include "xcbint.h"
54
55#define XCB_ERROR 0
56#define XCB_REPLY 1
57#define XCB_XGE_EVENT 35
58
/* Node in a singly-linked queue of events waiting to be handed to the
 * application. */
struct event_list {
    xcb_generic_event_t *event;
    struct event_list *next;
};

/* A registered "special event" sink: XGE events matching (extension,
 * eid) are diverted into this private queue instead of the main event
 * queue. */
struct xcb_special_event {

    struct xcb_special_event *next;

    /* Match XGE events for the specific extension and event ID (the
     * first 32 bit word after evtype)
     */
    uint8_t     extension;
    uint32_t    eid;
    uint32_t    *stamp;     /* if non-NULL, incremented per matched event */

    /* Private event queue; new events are appended at *events_tail. */
    struct event_list   *events;
    struct event_list   **events_tail;

    /* Signalled whenever a matching event is enqueued. */
    pthread_cond_t special_event_cond;
};

/* Node in a queue of replies (or checked errors) for one request. */
struct reply_list {
    void *reply;
    struct reply_list *next;
};

/* Record describing a range of requests whose responses need
 * non-default handling (workarounds, checked errors, discarded or
 * fd-carrying replies).  Kept in a list sorted by sequence number. */
typedef struct pending_reply {
    uint64_t first_request;
    uint64_t last_request;
    enum workarounds workaround;
    int flags;
    struct pending_reply *next;
} pending_reply;

/* A thread blocked waiting for the response to `request`; the list is
 * kept sorted by sequence number. */
typedef struct reader_list {
    uint64_t request;
    pthread_cond_t *data;   /* condition to signal when its response (or proof of none) arrives */
    struct reader_list *next;
} reader_list;

/* A thread blocked in xcb_wait_for_special_event for `se`. */
typedef struct special_list {
    xcb_special_event_t *se;
    struct special_list *next;
} special_list;
104
105static void remove_finished_readers(reader_list **prev_reader, uint64_t completed)
106{
107    while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, completed))
108    {
109        /* If you don't have what you're looking for now, you never
110         * will. Wake up and leave me alone. */
111        pthread_cond_signal((*prev_reader)->data);
112        *prev_reader = (*prev_reader)->next;
113    }
114}
115
#if HAVE_SENDMSG
/* Pop `nfd` received file descriptors from the connection's fd queue
 * into `fds`.  Returns 1 on success, 0 if fewer than nfd descriptors
 * are currently queued. */
static int read_fds(xcb_connection_t *c, int *fds, int nfd)
{
    int available = c->in.in_fd.nfd - c->in.in_fd.ifd;

    if (available < nfd)
        return 0;
    memcpy(fds, &c->in.in_fd.fd[c->in.in_fd.ifd], nfd * sizeof (int));
    c->in.in_fd.ifd += nfd;
    return 1;
}
#endif
129
/* Fixed 32-byte head of an XGE (Generic) event, exposing the extension
 * opcode, event type, and event ID fields used to match registered
 * special events. */
typedef struct xcb_ge_special_event_t {
    uint8_t  response_type; /**<  */
    uint8_t  extension; /**<  */
    uint16_t sequence; /**<  */
    uint32_t length; /**<  */
    uint16_t evtype; /**<  */
    uint8_t  pad0[2]; /**< */
    uint32_t eid; /**< */
    uint8_t  pad1[16]; /**<  */
} xcb_ge_special_event_t;
140
141static int event_special(xcb_connection_t *c,
142                         struct event_list *event)
143{
144    struct xcb_special_event *special_event;
145    struct xcb_ge_special_event_t *ges = (void *) event->event;
146
147    /* Special events are always XGE events */
148    if ((ges->response_type & 0x7f) != XCB_XGE_EVENT)
149        return 0;
150
151    for (special_event = c->in.special_events;
152         special_event;
153         special_event = special_event->next)
154    {
155        if (ges->extension == special_event->extension &&
156            ges->eid == special_event->eid)
157        {
158            *special_event->events_tail = event;
159            special_event->events_tail = &event->next;
160            if (special_event->stamp)
161                ++(*special_event->stamp);
162            pthread_cond_signal(&special_event->special_event_cond);
163            return 1;
164        }
165    }
166
167    return 0;
168}
169
/* Parse one complete response (reply, error, or event) from the head
 * of the input queue and route it to the right consumer.  Returns 1 if
 * a packet was consumed, 0 if more data is needed or the connection
 * was shut down. */
static int read_packet(xcb_connection_t *c)
{
    xcb_generic_reply_t genrep;
    uint64_t length = 32;
    uint64_t eventlength = 0; /* length after first 32 bytes for GenericEvents */
    int nfd = 0;         /* Number of file descriptors attached to the reply */
    uint64_t bufsize;
    void *buf;
    pending_reply *pend = 0;
    struct event_list *event;

    /* Wait for there to be enough data for us to read a whole packet */
    if(c->in.queue_len < length)
        return 0;

    /* Get the response type, length, and sequence number. */
    memcpy(&genrep, c->in.queue, sizeof(genrep));

    /* Compute 32-bit sequence number of this packet.  KeymapNotify is
     * the one packet that carries no sequence number, so it must not
     * touch the sequence bookkeeping. */
    if((genrep.response_type & 0x7f) != XCB_KEYMAP_NOTIFY)
    {
        uint64_t lastread = c->in.request_read;
        /* Splice the 16-bit wire sequence into the widened counter and
         * correct for 16-bit wraparound. */
        c->in.request_read = (lastread & UINT64_C(0xffffffffffff0000)) | genrep.sequence;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, <, lastread))
            c->in.request_read += 0x10000;
        if(XCB_SEQUENCE_COMPARE(c->in.request_read, >, c->in.request_expected))
            c->in.request_expected = c->in.request_read;

        /* A new sequence number proves the previous request is done:
         * archive its accumulated reply list and mark it completed. */
        if(c->in.request_read != lastread)
        {
            if(c->in.current_reply)
            {
                _xcb_map_put(c->in.replies, lastread, c->in.current_reply);
                c->in.current_reply = 0;
                c->in.current_reply_tail = &c->in.current_reply;
            }
            c->in.request_completed = c->in.request_read - 1;
        }

        /* Drop pending_reply records for requests that can no longer
         * receive responses. */
        while(c->in.pending_replies &&
              c->in.pending_replies->workaround != WORKAROUND_EXTERNAL_SOCKET_OWNER &&
              XCB_SEQUENCE_COMPARE (c->in.pending_replies->last_request, <=, c->in.request_completed))
        {
            pending_reply *oldpend = c->in.pending_replies;
            c->in.pending_replies = oldpend->next;
            if(!oldpend->next)
                c->in.pending_replies_tail = &c->in.pending_replies;
            free(oldpend);
        }

        /* An error terminates its request; nothing more can follow it. */
        if(genrep.response_type == XCB_ERROR)
            c->in.request_completed = c->in.request_read;

        remove_finished_readers(&c->in.readers, c->in.request_completed);
    }

    /* Find the pending_reply record covering this sequence, if any. */
    if(genrep.response_type == XCB_ERROR || genrep.response_type == XCB_REPLY)
    {
        pend = c->in.pending_replies;
        if(pend &&
           !(XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->in.request_read) &&
             (pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER ||
              XCB_SEQUENCE_COMPARE(c->in.request_read, <=, pend->last_request))))
            pend = 0;
    }

    /* For reply packets, check that the entire packet is available. */
    if(genrep.response_type == XCB_REPLY)
    {
        if(pend && pend->workaround == WORKAROUND_GLX_GET_FB_CONFIGS_BUG)
        {
            /* Recompute the length the buggy server should have sent,
             * guarding against 32-bit overflow. */
            uint32_t *p = (uint32_t *) c->in.queue;
            uint64_t new_length = ((uint64_t)p[2]) * ((uint64_t)p[3]);
            if(new_length >= (UINT32_MAX / UINT32_C(16)))
            {
                _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
                return 0;
            }
            genrep.length = (uint32_t)(new_length * UINT64_C(2));
        }
        length += genrep.length * UINT64_C(4);

        /* XXX a bit of a hack -- we "know" that all FD replys place
         * the number of fds in the pad0 byte */
        if (pend && pend->flags & XCB_REQUEST_REPLY_FDS)
            nfd = genrep.pad0;
    }

    /* XGE events may have sizes > 32 */
    if ((genrep.response_type & 0x7f) == XCB_XGE_EVENT)
        eventlength = genrep.length * UINT64_C(4);

    /* Non-reply packets get an extra uint32_t for full_sequence. */
    bufsize = length + eventlength + nfd * sizeof(int)  +
        (genrep.response_type == XCB_REPLY ? 0 : sizeof(uint32_t));
    if (bufsize < INT32_MAX)
        buf = malloc((size_t) bufsize);
    else
        buf = NULL;
    if(!buf)
    {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        return 0;
    }

    if(_xcb_in_read_block(c, buf, length) <= 0)
    {
        free(buf);
        return 0;
    }

    /* pull in XGE event data if available, append after event struct */
    if (eventlength)
    {
        if(_xcb_in_read_block(c, &((xcb_generic_event_t*)buf)[1], eventlength) <= 0)
        {
            free(buf);
            return 0;
        }
    }

#if HAVE_SENDMSG
    if (nfd)
    {
        if (!read_fds(c, (int *) &((char *) buf)[length], nfd))
        {
            free(buf);
            return 0;
        }
    }
#endif

    /* The caller asked to discard this response: drop it now. */
    if(pend && (pend->flags & XCB_REQUEST_DISCARD_REPLY))
    {
        free(buf);
        return 1;
    }

    if(genrep.response_type != XCB_REPLY)
        ((xcb_generic_event_t *) buf)->full_sequence = c->in.request_read;

    /* reply, or checked error */
    if( genrep.response_type == XCB_REPLY ||
       (genrep.response_type == XCB_ERROR && pend && (pend->flags & XCB_REQUEST_CHECKED)))
    {
        struct reply_list *cur = malloc(sizeof(struct reply_list));
        if(!cur)
        {
            _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
            free(buf);
            return 0;
        }
        cur->reply = buf;
        cur->next = 0;
        *c->in.current_reply_tail = cur;
        c->in.current_reply_tail = &cur->next;
        /* Wake the reader waiting on exactly this request, if any. */
        if(c->in.readers && c->in.readers->request == c->in.request_read)
            pthread_cond_signal(c->in.readers->data);
        return 1;
    }

    /* event, or unchecked error */
    event = malloc(sizeof(struct event_list));
    if(!event)
    {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
        free(buf);
        return 0;
    }
    event->event = buf;
    event->next = 0;

    /* Route to a registered special-event queue or the main queue. */
    if (!event_special(c, event)) {
        *c->in.events_tail = event;
        c->in.events_tail = &event->next;
        pthread_cond_signal(&c->in.event_cond);
    }
    return 1; /* I have something for you... */
}
348
349static xcb_generic_event_t *get_event(xcb_connection_t *c)
350{
351    struct event_list *cur = c->in.events;
352    xcb_generic_event_t *ret;
353    if(!c->in.events)
354        return 0;
355    ret = cur->event;
356    c->in.events = cur->next;
357    if(!cur->next)
358        c->in.events_tail = &c->in.events;
359    free(cur);
360    return ret;
361}
362
363static void free_reply_list(struct reply_list *head)
364{
365    while(head)
366    {
367        struct reply_list *cur = head;
368        head = cur->next;
369        free(cur->reply);
370        free(cur);
371    }
372}
373
/* Read exactly `len` bytes from `fd` into `buf`, waiting via poll or
 * select whenever the nonblocking socket has nothing to read.
 * Returns len on success, or the failing recv/poll/select result
 * (<= 0) on error or EOF. */
static int read_block(const int fd, void *buf, const intptr_t len)
{
    int done = 0;
    while(done < len)
    {
        int ret = recv(fd, ((char *) buf) + done, len - done, 0);
        if(ret > 0)
            done += ret;
#ifndef _WIN32
        if(ret < 0 && errno == EAGAIN)
#else
        if(ret == SOCKET_ERROR && WSAGetLastError() == WSAEWOULDBLOCK)
#endif /* !_Win32 */
        {
#if USE_POLL
            struct pollfd pfd;
            pfd.fd = fd;
            pfd.events = POLLIN;
            pfd.revents = 0;
            /* Retry the wait if a signal interrupts it. */
            do {
                ret = poll(&pfd, 1, -1);
            } while (ret == -1 && errno == EINTR);
#else
            fd_set fds;
            FD_ZERO(&fds);
            FD_SET(fd, &fds);

            /* Initializing errno here makes sure that for Win32 this loop will execute only once */
            errno = 0;
            do {
                ret = select(fd + 1, &fds, 0, 0, 0);
            } while (ret == -1 && errno == EINTR);
#endif /* USE_POLL */
        }
        if(ret <= 0)
            return ret;
    }
    return len;
}
413
/* Check, without blocking, whether the response to `request` is
 * already known.  Returns 1 when the caller need not wait any longer
 * (with *reply, and *error if provided, filled in — possibly to NULL),
 * or 0 when the response may still be on the way.  Called with the
 * iolock held. */
static int poll_for_reply(xcb_connection_t *c, uint64_t request, void **reply, xcb_generic_error_t **error)
{
    struct reply_list *head;

    /* If an error occurred when issuing the request, fail immediately. */
    if(!request)
        head = 0;
    /* We've read requests past the one we want, so if it has replies we have
     * them all and they're in the replies map. */
    else if(XCB_SEQUENCE_COMPARE(request, <, c->in.request_read))
    {
        head = _xcb_map_remove(c->in.replies, request);
        /* Put any remaining replies for this request back in the map. */
        if(head && head->next)
            _xcb_map_put(c->in.replies, request, head->next);
    }
    /* We're currently processing the responses to the request we want, and we
     * have a reply ready to return. So just return it without blocking. */
    else if(request == c->in.request_read && c->in.current_reply)
    {
        head = c->in.current_reply;
        c->in.current_reply = head->next;
        if(!head->next)
            c->in.current_reply_tail = &c->in.current_reply;
    }
    /* We know this request can't have any more replies, and we've already
     * established it doesn't have a reply now. Don't bother blocking. */
    else if(request == c->in.request_completed)
        head = 0;
    /* We may have more replies on the way for this request: block until we're
     * sure. */
    else
        return 0;

    if(error)
        *error = 0;
    *reply = 0;

    if(head)
    {
        /* Errors are handed back through *error; without an error
         * out-parameter they are silently freed. */
        if(((xcb_generic_reply_t *) head->reply)->response_type == XCB_ERROR)
        {
            if(error)
                *error = head->reply;
            else
                free(head->reply);
        }
        else
            *reply = head->reply;

        free(head);
    }

    return 1;
}
468
469static void insert_reader(reader_list **prev_reader, reader_list *reader, uint64_t request, pthread_cond_t *cond)
470{
471    while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, request))
472        prev_reader = &(*prev_reader)->next;
473    reader->request = request;
474    reader->data = cond;
475    reader->next = *prev_reader;
476    *prev_reader = reader;
477}
478
479static void remove_reader(reader_list **prev_reader, reader_list *reader)
480{
481    while(*prev_reader && XCB_SEQUENCE_COMPARE((*prev_reader)->request, <=, reader->request))
482        if(*prev_reader == reader)
483        {
484            *prev_reader = (*prev_reader)->next;
485            break;
486        }
487}
488
489static void insert_special(special_list **prev_special, special_list *special, xcb_special_event_t *se)
490{
491    special->se = se;
492    special->next = *prev_special;
493    *prev_special = special;
494}
495
496static void remove_special(special_list **prev_special, special_list *special)
497{
498    while(*prev_special)
499    {
500        if(*prev_special == special)
501        {
502            *prev_special = (*prev_special)->next;
503            break;
504        }
505        prev_special = &(*prev_special)->next;
506    }
507}
508
/* Block until the response to `request` is known.  Returns the reply,
 * or NULL (with *e set if the request drew a checked error).  Called
 * with the iolock held. */
static void *wait_for_reply(xcb_connection_t *c, uint64_t request, xcb_generic_error_t **e)
{
    void *ret = 0;

    /* If this request has not been written yet, write it. */
    if(c->out.return_socket || _xcb_out_flush_to(c, request))
    {
        pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
        reader_list reader;

        /* Register in the sorted readers list so the reading thread
         * knows to wake us when our response (or proof that none is
         * coming) arrives. */
        insert_reader(&c->in.readers, &reader, request, &cond);

        while(!poll_for_reply(c, request, &ret, e))
            if(!_xcb_conn_wait(c, &cond, 0, 0))
                break;

        remove_reader(&c->in.readers, &reader);
        pthread_cond_destroy(&cond);
    }

    /* Pass any pending wakeup on to the next waiter. */
    _xcb_in_wake_up_next_reader(c);
    return ret;
}
532
533static uint64_t widen(xcb_connection_t *c, unsigned int request)
534{
535    uint64_t widened_request = (c->out.request & UINT64_C(0xffffffff00000000)) | request;
536    if(widened_request > c->out.request)
537        widened_request -= UINT64_C(1) << 32;
538    return widened_request;
539}
540
541/* Public interface */
542
543void *xcb_wait_for_reply(xcb_connection_t *c, unsigned int request, xcb_generic_error_t **e)
544{
545    void *ret;
546    if(e)
547        *e = 0;
548    if(c->has_error)
549        return 0;
550
551    pthread_mutex_lock(&c->iolock);
552    ret = wait_for_reply(c, widen(c, request), e);
553    pthread_mutex_unlock(&c->iolock);
554    return ret;
555}
556
557void *xcb_wait_for_reply64(xcb_connection_t *c, uint64_t request, xcb_generic_error_t **e)
558{
559    void *ret;
560    if(e)
561        *e = 0;
562    if(c->has_error)
563        return 0;
564
565    pthread_mutex_lock(&c->iolock);
566    ret = wait_for_reply(c, request, e);
567    pthread_mutex_unlock(&c->iolock);
568    return ret;
569}
570
571int *xcb_get_reply_fds(xcb_connection_t *c, void *reply, size_t reply_size)
572{
573    return (int *) (&((char *) reply)[reply_size]);
574}
575
576static void insert_pending_discard(xcb_connection_t *c, pending_reply **prev_next, uint64_t seq)
577{
578    pending_reply *pend;
579    pend = malloc(sizeof(*pend));
580    if(!pend)
581    {
582        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
583        return;
584    }
585
586    pend->first_request = seq;
587    pend->last_request = seq;
588    pend->workaround = 0;
589    pend->flags = XCB_REQUEST_DISCARD_REPLY;
590    pend->next = *prev_next;
591    *prev_next = pend;
592
593    if(!pend->next)
594        c->in.pending_replies_tail = &pend->next;
595}
596
/* Arrange for every response to `request` to be thrown away: free any
 * responses already received, and otherwise mark (or create) a
 * pending_reply so read_packet discards the response when it arrives.
 * Called with the iolock held. */
static void discard_reply(xcb_connection_t *c, uint64_t request)
{
    void *reply;
    pending_reply **prev_pend;

    /* Free any replies or errors that we've already read. Stop if
     * xcb_wait_for_reply would block or we've run out of replies. */
    while(poll_for_reply(c, request, &reply, 0) && reply)
        free(reply);

    /* If we've proven there are no more responses coming, we're done. */
    if(XCB_SEQUENCE_COMPARE(request, <=, c->in.request_completed))
        return;

    /* Walk the list of pending requests. Mark the first match for deletion. */
    for(prev_pend = &c->in.pending_replies; *prev_pend; prev_pend = &(*prev_pend)->next)
    {
        /* List is sorted by sequence number; stop once past `request`. */
        if(XCB_SEQUENCE_COMPARE((*prev_pend)->first_request, >, request))
            break;

        if((*prev_pend)->first_request == request)
        {
            /* Pending reply found. Mark for discard: */
            (*prev_pend)->flags |= XCB_REQUEST_DISCARD_REPLY;
            return;
        }
    }

    /* Pending reply not found (likely due to _unchecked request). Create one: */
    insert_pending_discard(c, prev_pend, request);
}
628
629void xcb_discard_reply(xcb_connection_t *c, unsigned int sequence)
630{
631    if(c->has_error)
632        return;
633
634    /* If an error occurred when issuing the request, fail immediately. */
635    if(!sequence)
636        return;
637
638    pthread_mutex_lock(&c->iolock);
639    discard_reply(c, widen(c, sequence));
640    pthread_mutex_unlock(&c->iolock);
641}
642
643void xcb_discard_reply64(xcb_connection_t *c, uint64_t sequence)
644{
645    if(c->has_error)
646        return;
647
648    /* If an error occurred when issuing the request, fail immediately. */
649    if(!sequence)
650        return;
651
652    pthread_mutex_lock(&c->iolock);
653    discard_reply(c, sequence);
654    pthread_mutex_unlock(&c->iolock);
655}
656
657int xcb_poll_for_reply(xcb_connection_t *c, unsigned int request, void **reply, xcb_generic_error_t **error)
658{
659    int ret;
660    if(c->has_error)
661    {
662        *reply = 0;
663        if(error)
664            *error = 0;
665        return 1; /* would not block */
666    }
667    assert(reply != 0);
668    pthread_mutex_lock(&c->iolock);
669    ret = poll_for_reply(c, widen(c, request), reply, error);
670    if(!ret && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
671        ret = poll_for_reply(c, widen(c, request), reply, error);
672    pthread_mutex_unlock(&c->iolock);
673    return ret;
674}
675
676int xcb_poll_for_reply64(xcb_connection_t *c, uint64_t request, void **reply, xcb_generic_error_t **error)
677{
678    int ret;
679    if(c->has_error)
680    {
681        *reply = 0;
682        if(error)
683            *error = 0;
684        return 1; /* would not block */
685    }
686    assert(reply != 0);
687    pthread_mutex_lock(&c->iolock);
688    ret = poll_for_reply(c, request, reply, error);
689    if(!ret && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
690        ret = poll_for_reply(c, request, reply, error);
691    pthread_mutex_unlock(&c->iolock);
692    return ret;
693}
694
695xcb_generic_event_t *xcb_wait_for_event(xcb_connection_t *c)
696{
697    xcb_generic_event_t *ret;
698    if(c->has_error)
699        return 0;
700    pthread_mutex_lock(&c->iolock);
701    /* get_event returns 0 on empty list. */
702    while(!(ret = get_event(c)))
703        if(!_xcb_conn_wait(c, &c->in.event_cond, 0, 0))
704            break;
705
706    _xcb_in_wake_up_next_reader(c);
707    pthread_mutex_unlock(&c->iolock);
708    return ret;
709}
710
711static xcb_generic_event_t *poll_for_next_event(xcb_connection_t *c, int queued)
712{
713    xcb_generic_event_t *ret = 0;
714    if(!c->has_error)
715    {
716        pthread_mutex_lock(&c->iolock);
717        /* FIXME: follow X meets Z architecture changes. */
718        ret = get_event(c);
719        if(!ret && !queued && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
720            ret = get_event(c);
721        pthread_mutex_unlock(&c->iolock);
722    }
723    return ret;
724}
725
/* Public: return the next event, reading from the server if none is
 * queued; never blocks.  Returns NULL when no event is available. */
xcb_generic_event_t *xcb_poll_for_event(xcb_connection_t *c)
{
    return poll_for_next_event(c, 0);
}
730
/* Public: return the next already-queued event without touching the
 * socket; never blocks.  Returns NULL when the queue is empty. */
xcb_generic_event_t *xcb_poll_for_queued_event(xcb_connection_t *c)
{
    return poll_for_next_event(c, 1);
}
735
/* Public: block until the outcome of a checked void request is known.
 * Returns the error if the request failed, NULL on success. */
xcb_generic_error_t *xcb_request_check(xcb_connection_t *c, xcb_void_cookie_t cookie)
{
    uint64_t request;
    xcb_generic_error_t *ret = 0;
    void *reply;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    request = widen(c, cookie.sequence);
    /* If the request's fate is not yet known, make sure the server
     * will tell us: send a sync when no later request would provoke a
     * response, and flush anything not yet written to the socket. */
    if (XCB_SEQUENCE_COMPARE(request, >, c->in.request_completed))
    {
        if(XCB_SEQUENCE_COMPARE(request, >=, c->in.request_expected))
        {
            _xcb_out_send_sync(c);
        }
        if (XCB_SEQUENCE_COMPARE(request, >=, c->out.request_expected_written))
        {
            _xcb_out_flush_to(c, c->out.request);
        }
    }
    /* A void request has no reply, so wait_for_reply can only yield an
     * error (through ret) or nothing at all. */
    reply = wait_for_reply(c, request, &ret);
    assert(!reply);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}
761
762static xcb_generic_event_t *get_special_event(xcb_connection_t *c,
763                                              xcb_special_event_t *se)
764{
765    xcb_generic_event_t *event = NULL;
766    struct event_list *events;
767
768    if ((events = se->events) != NULL) {
769        event = events->event;
770        if (!(se->events = events->next))
771            se->events_tail = &se->events;
772        free (events);
773    }
774    return event;
775}
776
777xcb_generic_event_t *xcb_poll_for_special_event(xcb_connection_t *c,
778                                                xcb_special_event_t *se)
779{
780    xcb_generic_event_t *event;
781
782    if(c->has_error)
783        return 0;
784    pthread_mutex_lock(&c->iolock);
785    event = get_special_event(c, se);
786    if(!event && c->in.reading == 0 && _xcb_in_read(c)) /* _xcb_in_read shuts down the connection on error */
787        event = get_special_event(c, se);
788    pthread_mutex_unlock(&c->iolock);
789    return event;
790}
791
792xcb_generic_event_t *xcb_wait_for_special_event(xcb_connection_t *c,
793                                                xcb_special_event_t *se)
794{
795    special_list special;
796    xcb_generic_event_t *event;
797
798    if(c->has_error)
799        return 0;
800    pthread_mutex_lock(&c->iolock);
801
802    insert_special(&c->in.special_waiters, &special, se);
803
804    /* get_special_event returns 0 on empty list. */
805    while(!(event = get_special_event(c, se)))
806        if(!_xcb_conn_wait(c, &se->special_event_cond, 0, 0))
807            break;
808
809    remove_special(&c->in.special_waiters, &special);
810
811    _xcb_in_wake_up_next_reader(c);
812    pthread_mutex_unlock(&c->iolock);
813    return event;
814}
815
/* Public: register interest in XGE events of extension `ext` with
 * event ID `eid`.  Matching events are diverted to a private queue
 * accessed through the returned xcb_special_event_t.  `stamp`, if
 * non-NULL, is incremented each time a matching event is queued.
 * Returns NULL on error, if the extension is not present, or if an
 * identical registration already exists. */
xcb_special_event_t *
xcb_register_for_special_xge(xcb_connection_t *c,
                             xcb_extension_t *ext,
                             uint32_t eid,
                             uint32_t *stamp)
{
    xcb_special_event_t *se;
    const xcb_query_extension_reply_t   *ext_reply;

    if(c->has_error)
        return NULL;
    ext_reply = xcb_get_extension_data(c, ext);
    if (!ext_reply)
        return NULL;
    pthread_mutex_lock(&c->iolock);
    /* Refuse duplicate registrations for the same (extension, eid). */
    for (se = c->in.special_events; se; se = se->next) {
        if (se->extension == ext_reply->major_opcode &&
            se->eid == eid) {
            pthread_mutex_unlock(&c->iolock);
            return NULL;
        }
    }
    se = calloc(1, sizeof(xcb_special_event_t));
    if (!se) {
        pthread_mutex_unlock(&c->iolock);
        return NULL;
    }

    se->extension = ext_reply->major_opcode;
    se->eid = eid;

    /* Empty private queue: tail pointer refers to the head. */
    se->events = NULL;
    se->events_tail = &se->events;
    se->stamp = stamp;

    pthread_cond_init(&se->special_event_cond, 0);

    se->next = c->in.special_events;
    c->in.special_events = se;
    pthread_mutex_unlock(&c->iolock);
    return se;
}
858
859void
860xcb_unregister_for_special_event(xcb_connection_t *c,
861                                 xcb_special_event_t *se)
862{
863    xcb_special_event_t *s, **prev;
864    struct event_list   *events, *next;
865
866    if (!se)
867        return;
868
869    if (c->has_error)
870        return;
871
872    pthread_mutex_lock(&c->iolock);
873
874    for (prev = &c->in.special_events; (s = *prev) != NULL; prev = &(s->next)) {
875        if (s == se) {
876            *prev = se->next;
877            for (events = se->events; events; events = next) {
878                next = events->next;
879                free (events->event);
880                free (events);
881            }
882            pthread_cond_destroy(&se->special_event_cond);
883            free (se);
884            break;
885        }
886    }
887    pthread_mutex_unlock(&c->iolock);
888}
889
890/* Private interface */
891
892int _xcb_in_init(_xcb_in *in)
893{
894    if(pthread_cond_init(&in->event_cond, 0))
895        return 0;
896    in->reading = 0;
897
898    in->queue_len = 0;
899
900    in->request_read = 0;
901    in->request_completed = 0;
902
903    in->replies = _xcb_map_new();
904    if(!in->replies)
905        return 0;
906
907    in->current_reply_tail = &in->current_reply;
908    in->events_tail = &in->events;
909    in->pending_replies_tail = &in->pending_replies;
910
911    return 1;
912}
913
914void _xcb_in_destroy(_xcb_in *in)
915{
916    pthread_cond_destroy(&in->event_cond);
917    free_reply_list(in->current_reply);
918    _xcb_map_delete(in->replies, (void (*)(void *)) free_reply_list);
919    while(in->events)
920    {
921        struct event_list *e = in->events;
922        in->events = e->next;
923        free(e->event);
924        free(e);
925    }
926    while(in->pending_replies)
927    {
928        pending_reply *pend = in->pending_replies;
929        in->pending_replies = pend->next;
930        free(pend);
931    }
932}
933
934void _xcb_in_wake_up_next_reader(xcb_connection_t *c)
935{
936    int pthreadret;
937    if(c->in.readers)
938        pthreadret = pthread_cond_signal(c->in.readers->data);
939    else if(c->in.special_waiters)
940        pthreadret = pthread_cond_signal(&c->in.special_waiters->se->special_event_cond);
941    else
942        pthreadret = pthread_cond_signal(&c->in.event_cond);
943    assert(pthreadret == 0);
944}
945
946int _xcb_in_expect_reply(xcb_connection_t *c, uint64_t request, enum workarounds workaround, int flags)
947{
948    pending_reply *pend = malloc(sizeof(pending_reply));
949    assert(workaround != WORKAROUND_NONE || flags != 0);
950    if(!pend)
951    {
952        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_MEM_INSUFFICIENT);
953        return 0;
954    }
955    pend->first_request = pend->last_request = request;
956    pend->workaround = workaround;
957    pend->flags = flags;
958    pend->next = 0;
959    *c->in.pending_replies_tail = pend;
960    c->in.pending_replies_tail = &pend->next;
961    return 1;
962}
963
/* Called when an external socket owner returns the socket: finalize
 * the placeholder pending_reply created when the socket was taken,
 * either closing its request range or removing it entirely. */
void _xcb_in_replies_done(xcb_connection_t *c)
{
    struct pending_reply *pend;
    if (c->in.pending_replies_tail != &c->in.pending_replies)
    {
        /* Only the most recently appended record can be the external
         * owner's placeholder. */
        pend = container_of(c->in.pending_replies_tail, struct pending_reply, next);
        if(pend->workaround == WORKAROUND_EXTERNAL_SOCKET_OWNER)
        {
            if (XCB_SEQUENCE_COMPARE(pend->first_request, <=, c->out.request)) {
                /* Requests were sent: close the range at the newest one. */
                pend->last_request = c->out.request;
                pend->workaround = WORKAROUND_NONE;
            } else {
                /* The socket was taken, but no requests were actually sent
                 * so just discard the pending_reply that was created.
                 */
                struct pending_reply **prev_next = &c->in.pending_replies;
                while (*prev_next != pend)
                    prev_next = &(*prev_next)->next;
                *prev_next = NULL;
                c->in.pending_replies_tail = prev_next;
                free(pend);
            }
        }
    }
}
989
/* Perform one nonblocking read from the server socket (collecting any
 * passed file descriptors when built with HAVE_SENDMSG), then parse as
 * many complete packets as possible.  Returns 1 while the connection
 * is usable, 0 after shutting it down on error. */
int _xcb_in_read(xcb_connection_t *c)
{
    int n;

#if HAVE_SENDMSG
    /* Read into the free tail of the input queue, with ancillary space
     * for SCM_RIGHTS file descriptors. */
    struct iovec    iov = {
        .iov_base = c->in.queue + c->in.queue_len,
        .iov_len = sizeof(c->in.queue) - c->in.queue_len,
    };
    union {
        struct cmsghdr cmsghdr;
        char buf[CMSG_SPACE(XCB_MAX_PASS_FD * sizeof(int))];
    } cmsgbuf;
    struct msghdr msg = {
        .msg_name = NULL,
        .msg_namelen = 0,
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = cmsgbuf.buf,
        .msg_controllen = CMSG_SPACE(sizeof(int) * (XCB_MAX_PASS_FD - c->in.in_fd.nfd)),
    };
    n = recvmsg(c->fd, &msg, 0);

    /* Check for truncation errors. Only MSG_CTRUNC is
     * probably possible here, which would indicate that
     * the sender tried to transmit more than XCB_MAX_PASS_FD
     * file descriptors.
     */
    if (msg.msg_flags & (MSG_TRUNC|MSG_CTRUNC)) {
        _xcb_conn_shutdown(c, XCB_CONN_CLOSED_FDPASSING_FAILED);
        return 0;
    }
#else
    n = recv(c->fd, c->in.queue + c->in.queue_len, sizeof(c->in.queue) - c->in.queue_len, 0);
#endif
    if(n > 0) {
#if HAVE_SENDMSG
        struct cmsghdr *hdr;

        /* Append any received file descriptors to the fd queue. */
        if (msg.msg_controllen >= sizeof (struct cmsghdr)) {
            for (hdr = CMSG_FIRSTHDR(&msg); hdr; hdr = CMSG_NXTHDR(&msg, hdr)) {
                if (hdr->cmsg_level == SOL_SOCKET && hdr->cmsg_type == SCM_RIGHTS) {
                    int nfd = (hdr->cmsg_len - CMSG_LEN(0)) / sizeof (int);
                    memcpy(&c->in.in_fd.fd[c->in.in_fd.nfd], CMSG_DATA(hdr), nfd * sizeof (int));
                    c->in.in_fd.nfd += nfd;
                }
            }
        }
#endif
        c->in.total_read += n;
        c->in.queue_len += n;
    }
    /* Parse everything that arrived. */
    while(read_packet(c))
        /* empty */;
#if HAVE_SENDMSG
    if (c->in.in_fd.nfd) {
        /* Compact the fd queue, discarding consumed descriptors. */
        c->in.in_fd.nfd -= c->in.in_fd.ifd;
        memmove(&c->in.in_fd.fd[0],
                &c->in.in_fd.fd[c->in.in_fd.ifd],
                c->in.in_fd.nfd * sizeof (int));
        c->in.in_fd.ifd = 0;

        /* If we have any left-over file descriptors after emptying
         * the input buffer, then the server sent some that we weren't
         * expecting.  Close them and mark the connection as broken;
         */
        if (c->in.queue_len == 0 && c->in.in_fd.nfd != 0) {
            int i;
            for (i = 0; i < c->in.in_fd.nfd; i++)
                close(c->in.in_fd.fd[i]);
            _xcb_conn_shutdown(c, XCB_CONN_CLOSED_FDPASSING_FAILED);
            return 0;
        }
    }
#endif
    /* EAGAIN/EINTR (or WSAEWOULDBLOCK) just mean "nothing to read". */
#ifndef _WIN32
    if((n > 0) || (n < 0 && (errno == EAGAIN || errno == EINTR)))
#else
    if((n > 0) || (n < 0 && WSAGetLastError() == WSAEWOULDBLOCK))
#endif /* !_WIN32 */
        return 1;
    _xcb_conn_shutdown(c, XCB_CONN_ERROR);
    return 0;
}
1074
1075int _xcb_in_read_block(xcb_connection_t *c, void *buf, int len)
1076{
1077    int done = c->in.queue_len;
1078    if(len < done)
1079        done = len;
1080
1081    memcpy(buf, c->in.queue, done);
1082    c->in.queue_len -= done;
1083    memmove(c->in.queue, c->in.queue + done, c->in.queue_len);
1084
1085    if(len > done)
1086    {
1087        int ret = read_block(c->fd, (char *) buf + done, len - done);
1088        if(ret <= 0)
1089        {
1090            _xcb_conn_shutdown(c, XCB_CONN_ERROR);
1091            return ret;
1092        }
1093    }
1094
1095    return len;
1096}
1097