1/* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
2 *
3 * Permission is hereby granted, free of charge, to any person obtaining a
4 * copy of this software and associated documentation files (the "Software"),
5 * to deal in the Software without restriction, including without limitation
6 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7 * and/or sell copies of the Software, and to permit persons to whom the
8 * Software is furnished to do so, subject to the following conditions:
9 *
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
17 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
18 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
19 *
20 * Except as contained in this notice, the names of the authors or their
21 * institutions shall not be used in advertising or otherwise to promote the
22 * sale, use or other dealings in this Software without prior written
23 * authorization from the authors.
24 */
25
26/* Stuff that sends stuff to the server. */
27
28#ifdef HAVE_CONFIG_H
29#include "config.h"
30#endif
31
32#include <assert.h>
33#include <stdlib.h>
34#ifdef _WIN32
35#include <io.h>
36#else
37#include <unistd.h>
38#endif
39#include <string.h>
40
41#include "xcb.h"
42#include "xcbext.h"
43#include "xcbint.h"
44#include "bigreq.h"
45
/* Queue or transmit one request.  Advances the sequence counter and,
 * when a reply or special handling is expected, registers that with the
 * input side before any bytes move.  Caller must hold c->iolock.
 *
 * As many whole iovecs as fit are copied into the small output buffer
 * (c->out.queue); only if something does not fit is the buffered data
 * plus the remaining iovecs handed to _xcb_out_send immediately.
 * NOTE: on that overflow path this backs the vector pointer up by one
 * slot (--vector) and writes the buffered data there, so callers must
 * pass a vector with at least one spare slot in front of the data. */
static inline void send_request(xcb_connection_t *c, int isvoid, enum workarounds workaround, int flags, struct iovec *vector, int count)
{
    if(c->has_error)
        return;

    /* Allocate the next sequence number to this request. */
    ++c->out.request;
    if(!isvoid)
        c->in.request_expected = c->out.request;
    if(workaround != WORKAROUND_NONE || flags != 0)
        _xcb_in_expect_reply(c, c->out.request, workaround, flags);

    /* Copy whole iovecs into the output buffer while they fit. */
    while(count && c->out.queue_len + vector[0].iov_len <= sizeof(c->out.queue))
    {
        memcpy(c->out.queue + c->out.queue_len, vector[0].iov_base, vector[0].iov_len);
        c->out.queue_len += vector[0].iov_len;
        vector[0].iov_base = (char *) vector[0].iov_base + vector[0].iov_len;
        vector[0].iov_len = 0;
        ++vector, --count;
    }
    if(!count)
        return; /* everything buffered; an eventual flush will send it */

    /* Did not all fit: prepend the buffered bytes (using the spare slot
     * in front of the caller's vector) and send everything now. */
    --vector, ++count;
    vector[0].iov_base = c->out.queue;
    vector[0].iov_len = c->out.queue_len;
    c->out.queue_len = 0;
    _xcb_out_send(c, vector, count);
}
74
/* Emit a core GetInputFocus request whose reply will be discarded.
 * Used as a sequence-number sync point (see the wrap-handling loop in
 * xcb_send_request_with_fds64).  Caller must hold c->iolock. */
static void send_sync(xcb_connection_t *c)
{
    /* Hand-built GetInputFocus request: major opcode 43, length 1 word. */
    static const union {
        struct {
            uint8_t major;
            uint8_t pad;
            uint16_t len;
        } fields;
        uint32_t packet;
    } sync_req = { { /* GetInputFocus */ 43, 0, 1 } };
    struct iovec vector[2];
    /* vector[0] is deliberately left unused: send_request may back the
     * pointer up one slot to prepend already-buffered output data. */
    vector[1].iov_base = (char *) &sync_req;
    vector[1].iov_len = sizeof(sync_req);
    send_request(c, 0, WORKAROUND_NONE, XCB_REQUEST_DISCARD_REPLY, vector + 1, 1);
}
90
/* Reclaim the write side of the socket from an external owner (see
 * xcb_take_socket).  Caller must hold c->iolock; the lock is dropped
 * around the owner's return_socket callback, and out.socket_moving
 * serializes concurrent reclaim attempts across that unlocked window. */
static void get_socket_back(xcb_connection_t *c)
{
    /* If another thread is already running the callback, wait for it. */
    while(c->out.return_socket && c->out.socket_moving)
        pthread_cond_wait(&c->out.socket_cond, &c->iolock);
    if(!c->out.return_socket)
        return; /* XCB already owns the socket */

    /* Run the owner's hand-back callback without holding the lock. */
    c->out.socket_moving = 1;
    pthread_mutex_unlock(&c->iolock);
    c->out.return_socket(c->out.socket_closure);
    pthread_mutex_lock(&c->iolock);
    c->out.socket_moving = 0;

    pthread_cond_broadcast(&c->out.socket_cond);
    c->out.return_socket = 0;
    c->out.socket_closure = 0;
    /* Let the input side discard any pending_reply registered for
     * requests the external owner never actually sent. */
    _xcb_in_replies_done(c);
}
109
/* Block until this thread may safely append a request to out.queue:
 * XCB must own the socket and no other thread may be mid-write.
 * Caller must hold c->iolock; it may be dropped while waiting. */
static void prepare_socket_request(xcb_connection_t *c)
{
    /* We're about to append data to out.queue, so we need to
     * atomically test for an external socket owner *and* some other
     * thread currently writing.
     *
     * If we have an external socket owner, we have to get the socket back
     * before we can use it again.
     *
     * If some other thread is writing to the socket, we assume it's
     * writing from out.queue, and so we can't stick data there.
     *
     * We satisfy this condition by first calling get_socket_back
     * (which may drop the lock, but will return when XCB owns the
     * socket again) and then checking for another writing thread and
     * escaping the loop if we're ready to go.
     */
    for (;;) {
        if(c->has_error)
            return;
        get_socket_back(c);
        if (!c->out.writing)
            break;
        pthread_cond_wait(&c->out.cond, &c->iolock);
    }
}
136
137/* Public interface */
138
139void xcb_prefetch_maximum_request_length(xcb_connection_t *c)
140{
141    if(c->has_error)
142        return;
143    pthread_mutex_lock(&c->out.reqlenlock);
144    if(c->out.maximum_request_length_tag == LAZY_NONE)
145    {
146        const xcb_query_extension_reply_t *ext;
147        ext = xcb_get_extension_data(c, &xcb_big_requests_id);
148        if(ext && ext->present)
149        {
150            c->out.maximum_request_length_tag = LAZY_COOKIE;
151            c->out.maximum_request_length.cookie = xcb_big_requests_enable(c);
152        }
153        else
154        {
155            c->out.maximum_request_length_tag = LAZY_FORCED;
156            c->out.maximum_request_length.value = c->setup->maximum_request_length;
157        }
158    }
159    pthread_mutex_unlock(&c->out.reqlenlock);
160}
161
162uint32_t xcb_get_maximum_request_length(xcb_connection_t *c)
163{
164    if(c->has_error)
165        return 0;
166    xcb_prefetch_maximum_request_length(c);
167    pthread_mutex_lock(&c->out.reqlenlock);
168    if(c->out.maximum_request_length_tag == LAZY_COOKIE)
169    {
170        xcb_big_requests_enable_reply_t *r = xcb_big_requests_enable_reply(c, c->out.maximum_request_length.cookie, 0);
171        c->out.maximum_request_length_tag = LAZY_FORCED;
172        if(r)
173        {
174            c->out.maximum_request_length.value = r->maximum_request_length;
175            free(r);
176        }
177        else
178            c->out.maximum_request_length.value = c->setup->maximum_request_length;
179    }
180    pthread_mutex_unlock(&c->out.reqlenlock);
181    return c->out.maximum_request_length.value;
182}
183
/* Close every descriptor in the array; used to discard fds that could
 * not be (or will never be) handed to the X server. */
static void close_fds(int *fds, unsigned int num_fds)
{
    unsigned int i = 0;
    while (i < num_fds)
        close(fds[i++]);
}
189
/* Queue num_fds descriptors for transmission with upcoming request(s).
 * Ownership of the fds transfers here: any descriptor that cannot be
 * queued (no sendmsg support, or connection error) is closed.
 * Caller must hold c->iolock. */
static void send_fds(xcb_connection_t *c, int *fds, unsigned int num_fds)
{
#if HAVE_SENDMSG
    /* Calling _xcb_out_flush_to() can drop the iolock and wait on a condition
     * variable if another thread is currently writing (c->out.writing > 0).
     * This call waits for writers to be done and thus _xcb_out_flush_to() will
     * do the work itself (in which case we are a writer and
     * prepare_socket_request() will wait for us to be done if another threads
     * tries to send fds, too). Thanks to this, we can atomically write out FDs.
     */
    prepare_socket_request(c);

    while (num_fds > 0) {
        /* The pending-fd array is full: flush until a slot frees up,
         * synthesizing a request to carry the fds if necessary. */
        while (c->out.out_fd.nfd == XCB_MAX_PASS_FD && !c->has_error) {
            /* XXX: if c->out.writing > 0, this releases the iolock and
             * potentially allows other threads to interfere with their own fds.
             */
            _xcb_out_flush_to(c, c->out.request);

            if (c->out.out_fd.nfd == XCB_MAX_PASS_FD) {
                /* We need some request to send FDs with */
                _xcb_out_send_sync(c);
            }
        }
        if (c->has_error)
            break;

        /* Queue one fd for the next sendmsg(). */
        c->out.out_fd.fd[c->out.out_fd.nfd++] = fds[0];
        fds++;
        num_fds--;
    }
#endif
    /* Without sendmsg support nothing was queued; after an error some
     * fds may remain.  Close whatever is left so nothing leaks. */
    close_fds(fds, num_fds);
}
224
/* Assemble and queue one protocol request, passing any fds with it.
 * Returns the request's 64-bit sequence number, or 0 on failure (in
 * which case the fds are closed).  Unless XCB_REQUEST_RAW is set, the
 * opcode and length fields of the first iovec are filled in here, and
 * a BIG-REQUESTS length prefix is spliced in front when the request
 * exceeds the core length limit.  Because of that splice (--vector
 * below) plus send_request's own one-slot back-up, `vector` must have
 * two spare iovec slots in front of the request data. */
uint64_t xcb_send_request_with_fds64(xcb_connection_t *c, int flags, struct iovec *vector,
                const xcb_protocol_request_t *req, unsigned int num_fds, int *fds)
{
    uint64_t request;
    uint32_t prefix[2]; /* BIG-REQUESTS prefix: first word + 32-bit length */
    int veclen = req->count;
    enum workarounds workaround = WORKAROUND_NONE;

    if(c->has_error) {
        close_fds(fds, num_fds);
        return 0;
    }

    assert(c != 0);
    assert(vector != 0);
    assert(req->count > 0);

    if(!(flags & XCB_REQUEST_RAW))
    {
        static const char pad[3];
        unsigned int i;
        uint16_t shortlen = 0;
        size_t longlen = 0;
        assert(vector[0].iov_len >= 4);
        /* set the major opcode, and the minor opcode for extensions */
        if(req->ext)
        {
            const xcb_query_extension_reply_t *extension = xcb_get_extension_data(c, req->ext);
            if(!(extension && extension->present))
            {
                close_fds(fds, num_fds);
                _xcb_conn_shutdown(c, XCB_CONN_CLOSED_EXT_NOTSUPPORTED);
                return 0;
            }
            ((uint8_t *) vector[0].iov_base)[0] = extension->major_opcode;
            ((uint8_t *) vector[0].iov_base)[1] = req->opcode;
        }
        else
            ((uint8_t *) vector[0].iov_base)[0] = req->opcode;

        /* put together the length field, possibly using BIGREQUESTS */
        for(i = 0; i < req->count; ++i)
        {
            longlen += vector[i].iov_len;
            /* A NULL iov_base means "padding": point it at static zeros. */
            if(!vector[i].iov_base)
            {
                vector[i].iov_base = (char *) pad;
                assert(vector[i].iov_len <= sizeof(pad));
            }
        }
        assert((longlen & 3) == 0);
        longlen >>= 2; /* protocol length fields count 4-byte units */

        if(longlen <= c->setup->maximum_request_length)
        {
            /* we don't need BIGREQUESTS. */
            shortlen = longlen;
            longlen = 0;
        }
        else if(longlen > xcb_get_maximum_request_length(c))
        {
            close_fds(fds, num_fds);
            _xcb_conn_shutdown(c, XCB_CONN_CLOSED_REQ_LEN_EXCEED);
            return 0; /* server can't take this; maybe need BIGREQUESTS? */
        }

        /* set the length field. */
        ((uint16_t *) vector[0].iov_base)[1] = shortlen;
        if(!shortlen)
        {
            /* BIG-REQUESTS form: shortlen stays 0 and a 32-bit length
             * (+1 for the extra word itself) follows the first 4 bytes,
             * spliced in via the spare iovec slot in front. */
            prefix[0] = ((uint32_t *) vector[0].iov_base)[0];
            prefix[1] = ++longlen;
            vector[0].iov_base = (uint32_t *) vector[0].iov_base + 1;
            vector[0].iov_len -= sizeof(uint32_t);
            --vector, ++veclen;
            vector[0].iov_base = prefix;
            vector[0].iov_len = sizeof(prefix);
        }
    }
    flags &= ~XCB_REQUEST_RAW;

    /* do we need to work around the X server bug described in glx.xml? */
    /* XXX: GetFBConfigs won't use BIG-REQUESTS in any sane
     * configuration, but that should be handled here anyway. */
    if(req->ext && !req->isvoid && !strcmp(req->ext->name, "GLX") &&
            ((req->opcode == 17 && ((uint32_t *) vector[0].iov_base)[1] == 0x10004) ||
             req->opcode == 21))
        workaround = WORKAROUND_GLX_GET_FB_CONFIGS_BUG;

    /* get a sequence number and arrange for delivery. */
    pthread_mutex_lock(&c->iolock);

    /* send FDs before establishing a good request number, because this might
     * call send_sync(), too
     */
    send_fds(c, fds, num_fds);

    prepare_socket_request(c);

    /* send GetInputFocus (sync_req) when 64k-2 requests have been sent without
     * a reply.
     * Also send sync_req (could use NoOp) at 32-bit wrap to avoid having
     * applications see sequence 0 as that is used to indicate
     * an error in sending the request
     */

    while ((req->isvoid && c->out.request == c->in.request_expected + (1 << 16) - 2) ||
           (unsigned int) (c->out.request + 1) == 0)
    {
        send_sync(c);
        prepare_socket_request(c);
    }

    send_request(c, req->isvoid, workaround, flags, vector, veclen);
    request = c->has_error ? 0 : c->out.request;
    pthread_mutex_unlock(&c->iolock);
    return request;
}
343
344/* request number are actually uint64_t internally but keep API compat with unsigned int */
345unsigned int xcb_send_request_with_fds(xcb_connection_t *c, int flags, struct iovec *vector,
346        const xcb_protocol_request_t *req, unsigned int num_fds, int *fds)
347{
348    return xcb_send_request_with_fds64(c, flags, vector, req, num_fds, fds);
349}
350
/* Queue a request with no accompanying file descriptors; returns the
 * 64-bit sequence number (0 on failure). */
uint64_t xcb_send_request64(xcb_connection_t *c, int flags, struct iovec *vector, const xcb_protocol_request_t *req)
{
    return xcb_send_request_with_fds64(c, flags, vector, req, 0, NULL);
}
355
356/* request number are actually uint64_t internally but keep API compat with unsigned int */
357unsigned int xcb_send_request(xcb_connection_t *c, int flags, struct iovec *vector, const xcb_protocol_request_t *req)
358{
359    return xcb_send_request64(c, flags, vector, req);
360}
361
362void
363xcb_send_fd(xcb_connection_t *c, int fd)
364{
365    int fds[1] = { fd };
366
367    if (c->has_error) {
368        close(fd);
369        return;
370    }
371    pthread_mutex_lock(&c->iolock);
372    send_fds(c, &fds[0], 1);
373    pthread_mutex_unlock(&c->iolock);
374}
375
/* Transfer ownership of the write side of the socket to an external
 * caller (e.g. Xlib).  XCB first flushes everything it queued itself;
 * return_socket(closure) will later be invoked when XCB needs the
 * socket back.  If flags is nonzero, reply handling is pre-arranged for
 * the requests the new owner will send (see comment below).  On success
 * stores the last sequence number XCB sent in *sent and returns
 * nonzero; returns 0 on error. */
int xcb_take_socket(xcb_connection_t *c, void (*return_socket)(void *closure), void *closure, int flags, uint64_t *sent)
{
    int ret;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    /* Reclaim the socket from any previous external owner first. */
    get_socket_back(c);

    /* _xcb_out_flush may drop the iolock allowing other threads to
     * write requests, so keep flushing until we're done
     */
    do
        ret = _xcb_out_flush_to(c, c->out.request);
    while (ret && c->out.request != c->out.request_written);
    if(ret)
    {
        c->out.return_socket = return_socket;
        c->out.socket_closure = closure;
        if(flags) {
            /* c->out.request + 1 will be the first request sent by the external
             * socket owner. If the socket is returned before this request is sent
             * it will be detected in _xcb_in_replies_done and this pending_reply
             * will be discarded.
             */
            _xcb_in_expect_reply(c, c->out.request + 1, WORKAROUND_EXTERNAL_SOCKET_OWNER, flags);
        }
        assert(c->out.request == c->out.request_written);
        *sent = c->out.request;
    }
    pthread_mutex_unlock(&c->iolock);
    return ret;
}
408
409int xcb_writev(xcb_connection_t *c, struct iovec *vector, int count, uint64_t requests)
410{
411    int ret;
412    if(c->has_error)
413        return 0;
414    pthread_mutex_lock(&c->iolock);
415    c->out.request += requests;
416    ret = _xcb_out_send(c, vector, count);
417    pthread_mutex_unlock(&c->iolock);
418    return ret;
419}
420
421int xcb_flush(xcb_connection_t *c)
422{
423    int ret;
424    if(c->has_error)
425        return 0;
426    pthread_mutex_lock(&c->iolock);
427    ret = _xcb_out_flush_to(c, c->out.request);
428    pthread_mutex_unlock(&c->iolock);
429    return ret;
430}
431
432/* Private interface */
433
434int _xcb_out_init(_xcb_out *out)
435{
436    if(pthread_cond_init(&out->socket_cond, 0))
437        return 0;
438    out->return_socket = 0;
439    out->socket_closure = 0;
440    out->socket_moving = 0;
441
442    if(pthread_cond_init(&out->cond, 0))
443        return 0;
444    out->writing = 0;
445
446    out->queue_len = 0;
447
448    out->request = 0;
449    out->request_written = 0;
450    out->request_expected_written = 0;
451
452    if(pthread_mutex_init(&out->reqlenlock, 0))
453        return 0;
454    out->maximum_request_length_tag = LAZY_NONE;
455
456    return 1;
457}
458
459void _xcb_out_destroy(_xcb_out *out)
460{
461    pthread_mutex_destroy(&out->reqlenlock);
462    pthread_cond_destroy(&out->cond);
463    pthread_cond_destroy(&out->socket_cond);
464}
465
/* Write the given iovecs (and any queued fds) to the server, looping in
 * _xcb_conn_wait until everything is out or the connection fails.
 * Caller must hold c->iolock.  Returns 0 on connection failure,
 * nonzero otherwise. */
int _xcb_out_send(xcb_connection_t *c, struct iovec *vector, int count)
{
    int ret = 1;
    while(ret && count)
        ret = _xcb_conn_wait(c, &c->out.cond, &vector, &count);
    /* Everything up to the current sequence number is now on the wire;
     * wake anyone waiting for the write to finish or for replies. */
    c->out.request_written = c->out.request;
    c->out.request_expected_written = c->in.request_expected;
    pthread_cond_broadcast(&c->out.cond);
    _xcb_in_wake_up_next_reader(c);
    return ret;
}
477
/* Inject a sync request (GetInputFocus) into the output stream, first
 * making sure this thread may write (socket owned, no other writer).
 * Used e.g. to create a request for queued fds to ride along with.
 * Caller must hold c->iolock. */
void _xcb_out_send_sync(xcb_connection_t *c)
{
    prepare_socket_request(c);
    send_sync(c);
}
483
/* Ensure all requests up to and including `request` have been handed to
 * the OS.  Caller must hold c->iolock; the lock may be dropped while
 * waiting for another writing thread.  Returns 0 on connection
 * failure, nonzero otherwise. */
int _xcb_out_flush_to(xcb_connection_t *c, uint64_t request)
{
    assert(XCB_SEQUENCE_COMPARE(request, <=, c->out.request));
    if(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request))
        return 1; /* already on the wire */
    if(c->out.queue_len)
    {
        /* The data is still buffered locally: send it ourselves. */
        struct iovec vec;
        vec.iov_base = c->out.queue;
        vec.iov_len = c->out.queue_len;
        c->out.queue_len = 0;
        return _xcb_out_send(c, &vec, 1);
    }
    /* Nothing buffered but not yet written: another thread must be
     * mid-write; wait for that writer to finish. */
    while(c->out.writing)
        pthread_cond_wait(&c->out.cond, &c->iolock);
    assert(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request));
    return 1;
}
502