xcb_out.c revision 21298544
/* Copyright (C) 2001-2004 Bart Massey and Jamey Sharp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Except as contained in this notice, the names of the authors or their
 * institutions shall not be used in advertising or otherwise to promote the
 * sale, use or other dealings in this Software without prior written
 * authorization from the authors.
 */

/* Stuff that sends stuff to the server. */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>

#include "xcb.h"
#include "xcbext.h"
#include "xcbint.h"
#include "bigreq.h"

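/* Append one request to the output queue, assigning it the next sequence
 * number.  As much of the iovec as fits is copied into the small output
 * buffer; anything left over is handed to _xcb_out_send along with the
 * buffered bytes.  Must be called with the iolock held. */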
static inline void send_request(xcb_connection_t *c, int isvoid, enum workarounds workaround, int flags, struct iovec *vector, int count)
{
    if(c->has_error)
        return;

    ++c->out.request;
    if(!isvoid)
        c->in.request_expected = c->out.request;
    if(workaround != WORKAROUND_NONE || flags != 0)
        _xcb_in_expect_reply(c, c->out.request, workaround, flags);

    while(count && c->out.queue_len + vector[0].iov_len <= sizeof(c->out.queue))
    {
        memcpy(c->out.queue + c->out.queue_len, vector[0].iov_base, vector[0].iov_len);
        c->out.queue_len += vector[0].iov_len;
        vector[0].iov_base = (char *) vector[0].iov_base + vector[0].iov_len;
        vector[0].iov_len = 0;
        ++vector, --count;
    }
    if(!count)
        return;

    --vector, ++count;
    vector[0].iov_base = c->out.queue;
    vector[0].iov_len = c->out.queue_len;
    c->out.queue_len = 0;
    _xcb_out_send(c, vector, count);
}

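/* Queue a GetInputFocus request whose reply will be discarded, forcing the
 * server to emit a reply so sequence numbers can be kept in sync.
 * vector[0] is left unused because send_request may back up one slot to
 * prepend its buffered output. */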
static void send_sync(xcb_connection_t *c)
{
    static const union {
        struct {
            uint8_t major;
            uint8_t pad;
            uint16_t len;
        } fields;
        uint32_t packet;
    } sync_req = { { /* GetInputFocus */ 43, 0, 1 } };
    struct iovec vector[2];
    vector[1].iov_base = (char *) &sync_req;
    vector[1].iov_len = sizeof(sync_req);
    send_request(c, 0, WORKAROUND_NONE, XCB_REQUEST_DISCARD_REPLY, vector + 1, 1);
}

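/* If an external owner has taken the socket via xcb_take_socket, ask for it
 * back through the owner's return_socket callback.  The iolock is dropped
 * around the callback, and socket_moving ensures only one thread does this
 * at a time.  Must be called with the iolock held. */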
static void get_socket_back(xcb_connection_t *c)
{
    while(c->out.return_socket && c->out.socket_moving)
        pthread_cond_wait(&c->out.socket_cond, &c->iolock);
    if(!c->out.return_socket)
        return;

    c->out.socket_moving = 1;
    pthread_mutex_unlock(&c->iolock);
    c->out.return_socket(c->out.socket_closure);
    pthread_mutex_lock(&c->iolock);
    c->out.socket_moving = 0;

    pthread_cond_broadcast(&c->out.socket_cond);
    c->out.return_socket = 0;
    c->out.socket_closure = 0;
    _xcb_in_replies_done(c);
}

/* Public interface */

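/* Start discovering the maximum request length without blocking: if the
 * BIG-REQUESTS extension is present, issue a BigRequestsEnable request and
 * remember the cookie; otherwise fall back to the core limit from the
 * connection setup. */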
void xcb_prefetch_maximum_request_length(xcb_connection_t *c)
{
    if(c->has_error)
        return;
    pthread_mutex_lock(&c->out.reqlenlock);
    if(c->out.maximum_request_length_tag == LAZY_NONE)
    {
        const xcb_query_extension_reply_t *ext;
        ext = xcb_get_extension_data(c, &xcb_big_requests_id);
        if(ext && ext->present)
        {
            c->out.maximum_request_length_tag = LAZY_COOKIE;
            c->out.maximum_request_length.cookie = xcb_big_requests_enable(c);
        }
        else
        {
            c->out.maximum_request_length_tag = LAZY_FORCED;
            c->out.maximum_request_length.value = c->setup->maximum_request_length;
        }
    }
    pthread_mutex_unlock(&c->out.reqlenlock);
}

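/* Return the maximum request length in 4-byte units, forcing the reply to
 * any BigRequestsEnable request issued by the prefetch above.  Returns 0 if
 * the connection is in an error state. */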
uint32_t xcb_get_maximum_request_length(xcb_connection_t *c)
{
    if(c->has_error)
        return 0;
    xcb_prefetch_maximum_request_length(c);
    pthread_mutex_lock(&c->out.reqlenlock);
    if(c->out.maximum_request_length_tag == LAZY_COOKIE)
    {
        xcb_big_requests_enable_reply_t *r = xcb_big_requests_enable_reply(c, c->out.maximum_request_length.cookie, 0);
        c->out.maximum_request_length_tag = LAZY_FORCED;
        if(r)
        {
            c->out.maximum_request_length.value = r->maximum_request_length;
            free(r);
        }
        else
            c->out.maximum_request_length.value = c->setup->maximum_request_length;
    }
    pthread_mutex_unlock(&c->out.reqlenlock);
    return c->out.maximum_request_length.value;
}

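/* Assemble and queue one request: fill in the opcode and length fields
 * (unless XCB_REQUEST_RAW), switch to the BIG-REQUESTS encoding when the
 * request exceeds the core length limit, apply the GLX GetFBConfigs
 * workaround if needed, and assign a sequence number.  Returns that
 * sequence number, or 0 on failure. */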
unsigned int xcb_send_request(xcb_connection_t *c, int flags, struct iovec *vector, const xcb_protocol_request_t *req)
{
    uint64_t request;
    uint32_t prefix[2];
    int veclen = req->count;
    enum workarounds workaround = WORKAROUND_NONE;

    if(c->has_error)
        return 0;

    assert(c != 0);
    assert(vector != 0);
    assert(req->count > 0);

    if(!(flags & XCB_REQUEST_RAW))
    {
        static const char pad[3];
        unsigned int i;
        uint16_t shortlen = 0;
        size_t longlen = 0;
        assert(vector[0].iov_len >= 4);
        /* set the major opcode, and the minor opcode for extensions */
        if(req->ext)
        {
            const xcb_query_extension_reply_t *extension = xcb_get_extension_data(c, req->ext);
            if(!(extension && extension->present))
            {
                _xcb_conn_shutdown(c, XCB_CONN_CLOSED_EXT_NOTSUPPORTED);
                return 0;
            }
            ((uint8_t *) vector[0].iov_base)[0] = extension->major_opcode;
            ((uint8_t *) vector[0].iov_base)[1] = req->opcode;
        }
        else
            ((uint8_t *) vector[0].iov_base)[0] = req->opcode;

        /* put together the length field, possibly using BIGREQUESTS */
        for(i = 0; i < req->count; ++i)
        {
            longlen += vector[i].iov_len;
            if(!vector[i].iov_base)
            {
                vector[i].iov_base = (char *) pad;
                assert(vector[i].iov_len <= sizeof(pad));
            }
        }
        assert((longlen & 3) == 0);
        longlen >>= 2;

        if(longlen <= c->setup->maximum_request_length)
        {
            /* we don't need BIGREQUESTS. */
            shortlen = longlen;
            longlen = 0;
        }
        else if(longlen > xcb_get_maximum_request_length(c))
        {
            _xcb_conn_shutdown(c, XCB_CONN_CLOSED_REQ_LEN_EXCEED);
            return 0; /* server can't take this; maybe need BIGREQUESTS? */
        }

        /* set the length field. */
        ((uint16_t *) vector[0].iov_base)[1] = shortlen;
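        /* BIG-REQUESTS encoding: the core 16-bit length stays 0 and the
         * real length, in 4-byte units including the extra length word,
         * follows the first 4 bytes of the request as a 32-bit value. */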
        if(!shortlen)
        {
            prefix[0] = ((uint32_t *) vector[0].iov_base)[0];
            prefix[1] = ++longlen;
            vector[0].iov_base = (uint32_t *) vector[0].iov_base + 1;
            vector[0].iov_len -= sizeof(uint32_t);
            --vector, ++veclen;
            vector[0].iov_base = prefix;
            vector[0].iov_len = sizeof(prefix);
        }
    }
    flags &= ~XCB_REQUEST_RAW;

    /* do we need to work around the X server bug described in glx.xml? */
    /* XXX: GetFBConfigs won't use BIG-REQUESTS in any sane
     * configuration, but that should be handled here anyway. */
    if(req->ext && !req->isvoid && !strcmp(req->ext->name, "GLX") &&
            ((req->opcode == 17 && ((uint32_t *) vector[0].iov_base)[1] == 0x10004) ||
             req->opcode == 21))
        workaround = WORKAROUND_GLX_GET_FB_CONFIGS_BUG;

    /* get a sequence number and arrange for delivery. */
    pthread_mutex_lock(&c->iolock);
    /* wait for other writing threads to get out of my way. */
    while(c->out.writing)
        pthread_cond_wait(&c->out.cond, &c->iolock);
    get_socket_back(c);

    /* send GetInputFocus (sync_req) when 64k-2 requests have been sent without
     * a reply. */
    if(req->isvoid && c->out.request == c->in.request_expected + (1 << 16) - 2)
        send_sync(c);
    /* Also send sync_req (could use NoOp) at 32-bit wrap to avoid having
     * applications see sequence 0 as that is used to indicate
     * an error in sending the request */
    if((unsigned int) (c->out.request + 1) == 0)
        send_sync(c);

    /* The above send_sync calls could drop the I/O lock, but this
     * thread will still exclude any other thread that tries to write,
     * so the sequence number postconditions still hold. */
    send_request(c, req->isvoid, workaround, flags, vector, veclen);
    request = c->has_error ? 0 : c->out.request;
    pthread_mutex_unlock(&c->iolock);
    return request;
}

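/* Hand ownership of the write side of the socket to the caller: flush
 * everything queued so far, record the return_socket callback, and report
 * the last sequence number used in *sent.  If flags are given, replies to
 * the owner's requests are tracked via WORKAROUND_EXTERNAL_SOCKET_OWNER.
 * Returns 1 on success, 0 on failure. */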
int xcb_take_socket(xcb_connection_t *c, void (*return_socket)(void *closure), void *closure, int flags, uint64_t *sent)
{
    int ret;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    get_socket_back(c);

    /* _xcb_out_flush_to may drop the iolock, allowing other threads to
     * write requests, so keep flushing until we're done.
     */
    do
        ret = _xcb_out_flush_to(c, c->out.request);
    while(ret && c->out.request != c->out.request_written);
    if(ret)
    {
        c->out.return_socket = return_socket;
        c->out.socket_closure = closure;
        if(flags)
            _xcb_in_expect_reply(c, c->out.request, WORKAROUND_EXTERNAL_SOCKET_OWNER, flags);
        assert(c->out.request == c->out.request_written);
        *sent = c->out.request;
    }
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

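/* Send the given iovec directly, advancing the sequence counter by
 * `requests'.  Returns nonzero on success. */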
int xcb_writev(xcb_connection_t *c, struct iovec *vector, int count, uint64_t requests)
{
    int ret;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    c->out.request += requests;
    ret = _xcb_out_send(c, vector, count);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

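/* Flush all buffered output to the server.  Returns nonzero on success,
 * 0 if the connection is in an error state or the write fails. */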
int xcb_flush(xcb_connection_t *c)
{
    int ret;
    if(c->has_error)
        return 0;
    pthread_mutex_lock(&c->iolock);
    ret = _xcb_out_flush_to(c, c->out.request);
    pthread_mutex_unlock(&c->iolock);
    return ret;
}

/* Private interface */

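/* Initialize the output side of a connection.  Returns 1 on success, 0 if
 * any of the pthread primitives cannot be initialized. */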
int _xcb_out_init(_xcb_out *out)
{
    if(pthread_cond_init(&out->socket_cond, 0))
        return 0;
    out->return_socket = 0;
    out->socket_closure = 0;
    out->socket_moving = 0;

    if(pthread_cond_init(&out->cond, 0))
        return 0;
    out->writing = 0;

    out->queue_len = 0;

    out->request = 0;
    out->request_written = 0;

    if(pthread_mutex_init(&out->reqlenlock, 0))
        return 0;
    out->maximum_request_length_tag = LAZY_NONE;

    return 1;
}

void _xcb_out_destroy(_xcb_out *out)
{
    pthread_cond_destroy(&out->cond);
    pthread_mutex_destroy(&out->reqlenlock);
}

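/* Write the given iovec, waiting in _xcb_conn_wait until it has all been
 * handed to the kernel or an error occurs, then mark every assigned
 * sequence number as written and wake up waiting writers and readers.
 * Must be called with the iolock held. */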
int _xcb_out_send(xcb_connection_t *c, struct iovec *vector, int count)
{
    int ret = 1;
    while(ret && count)
        ret = _xcb_conn_wait(c, &c->out.cond, &vector, &count);
    c->out.request_written = c->out.request;
    pthread_cond_broadcast(&c->out.cond);
    _xcb_in_wake_up_next_reader(c);
    return ret;
}

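/* Wait for other writers, reclaim the socket if an external owner holds it,
 * then queue a sync request.  Must be called with the iolock held. */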
void _xcb_out_send_sync(xcb_connection_t *c)
{
    /* wait for other writing threads to get out of my way. */
    while(c->out.writing)
        pthread_cond_wait(&c->out.cond, &c->iolock);
    get_socket_back(c);
    send_sync(c);
}

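/* Make sure everything up to and including `request' has been written:
 * return immediately if it already has, otherwise send the buffered queue,
 * or wait for the thread currently writing to finish.  Must be called with
 * the iolock held. */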
int _xcb_out_flush_to(xcb_connection_t *c, uint64_t request)
{
    assert(XCB_SEQUENCE_COMPARE(request, <=, c->out.request));
    if(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request))
        return 1;
    if(c->out.queue_len)
    {
        struct iovec vec;
        vec.iov_base = c->out.queue;
        vec.iov_len = c->out.queue_len;
        c->out.queue_len = 0;
        return _xcb_out_send(c, &vec, 1);
    }
    while(c->out.writing)
        pthread_cond_wait(&c->out.cond, &c->iolock);
    assert(XCB_SEQUENCE_COMPARE(c->out.request_written, >=, request));
    return 1;
}