/* xcb_io.c — revision b4ee4795 */
/* Copyright (C) 2003-2006 Jamey Sharp, Josh Triplett
 * This file is licensed under the MIT license. See the file COPYING. */

/* Xlib-on-XCB transport layer: implements Xlib's internal I/O entry
 * points (_XSend, _XFlush, _XReply, _XReadEvents, ...) on top of an
 * xcb_connection_t shared with XCB.  Unless noted otherwise, every
 * function here expects to be called with the Display lock held. */

#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include "Xlibint.h"
#include "locking.h"
#include "Xprivate.h"
#include "Xxcbint.h"
#include <xcb/xcbext.h>

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

/* Callback handed to xcb_take_socket(): XCB invokes it when it needs
 * the write side of the connection back.  Flushes Xlib's request
 * buffer and sets bufmax == buffer, which require_socket() below uses
 * as the "we do not own the socket" marker. */
static void return_socket(void *closure)
{
	Display *dpy = closure;
	LockDisplay(dpy);
	_XSend(dpy, NULL, 0);
	dpy->bufmax = dpy->buffer;
	UnlockDisplay(dpy);
}

/* Ensure Xlib owns the connection's write side, reclaiming it from XCB
 * via xcb_take_socket() if necessary.  On success, resynchronizes
 * Xlib's request sequence counter with the number of requests XCB has
 * already sent ("sent"). */
static void require_socket(Display *dpy)
{
	/* bufmax == buffer means the socket currently belongs to XCB
	 * (see return_socket above). */
	if(dpy->bufmax == dpy->buffer)
	{
		uint64_t sent;
		int flags = 0;
		/* if we don't own the event queue, we have to ask XCB
		 * to set our errors aside for us. */
		if(dpy->xcb->event_owner != XlibOwnsEventQueue)
			flags = XCB_REQUEST_CHECKED;
		if(!xcb_take_socket(dpy->xcb->connection, return_socket, dpy,
		                    flags, &sent))
			_XIOError(dpy);
		/* Xlib uses unsigned long for sequence numbers.  XCB
		 * uses 64-bit internally, but currently exposes an
		 * unsigned int API.  If these differ, Xlib cannot track
		 * the full 64-bit sequence number if 32-bit wrap
		 * happens while Xlib does not own the socket.  A
		 * complete fix would be to make XCB's public API use
		 * 64-bit sequence numbers. */
		assert(!(sizeof(unsigned long) > sizeof(unsigned int)
		         && dpy->xcb->event_owner == XlibOwnsEventQueue
		         && (sent - dpy->last_request_read >= (UINT64_C(1) << 32))));
		dpy->xcb->last_flushed = dpy->request = sent;
		dpy->bufmax = dpy->xcb->real_bufmax;
	}
}

/* Call internal connection callbacks for any fds that are currently
 * ready to read.  This function will not block unless one of the
 * callbacks blocks.
 *
 * This code borrowed from _XWaitForReadable.  Inverse call tree:
 *  _XRead
 *   _XWaitForWritable
 *    _XFlush
 *    _XSend
 *   _XEventsQueued
 *   _XReadEvents
 *   _XRead[0-9]+
 *    _XAllocIDs
 *   _XReply
 *   _XEatData
 *  _XReadPad
 */
static void check_internal_connections(Display *dpy)
{
	struct _XConnectionInfo *ilist;
	fd_set r_mask;
	struct timeval tv;
	int result;
	int highest_fd = -1;

	/* Skip when already processing an internal connection (avoids
	 * reentry) or when there are no internal connections at all. */
	if(dpy->flags & XlibDisplayProcConni || !dpy->im_fd_info)
		return;

	FD_ZERO(&r_mask);
	for(ilist = dpy->im_fd_info; ilist; ilist = ilist->next)
	{
		assert(ilist->fd >= 0);
		FD_SET(ilist->fd, &r_mask);
		if(ilist->fd > highest_fd)
			highest_fd = ilist->fd;
	}
	assert(highest_fd >= 0);

	/* Zero timeout: poll, never block in select() itself.
	 * NOTE(review): errno is used below; <errno.h> is presumably
	 * pulled in via Xlibint.h — confirm. */
	tv.tv_sec = 0;
	tv.tv_usec = 0;
	result = select(highest_fd + 1, &r_mask, NULL, NULL, &tv);

	if(result == -1)
	{
		if(errno == EINTR)
			return;
		_XIOError(dpy);
	}

	/* "result" counts ready fds; stop early once all are handled. */
	for(ilist = dpy->im_fd_info; result && ilist; ilist = ilist->next)
		if(FD_ISSET(ilist->fd, &r_mask))
		{
			_XProcessInternalConnection(dpy, ilist);
			--result;
		}
}

/* Offer a reply or error to each registered async handler in turn; the
 * first handler that returns nonzero consumes it.  An error (response
 * type 0) that no handler claims is raised through _XError. */
static void call_handlers(Display *dpy, xcb_generic_reply_t *buf)
{
	_XAsyncHandler *async, *next;
	/* Handlers may unregister themselves; fetch "next" first. */
	for(async = dpy->async_handlers; async; async = next)
	{
		next = async->next;
		if(async->handler(dpy, (xReply *) buf, (char *) buf, sizeof(xReply) + (buf->length << 2), async->data))
			return;
	}
	if(buf->response_type == 0) /* unhandled error */
		_XError(dpy, (xError *) buf);
}

/* Fetch one event from XCB.  If wait is zero, poll without blocking.
 * If wait is nonzero and another thread is already blocked in
 * xcb_wait_for_event (event_waiter set), sleep on the event_notify
 * condition until that thread finishes, then just poll; otherwise
 * become the waiter ourselves, dropping the Display lock around the
 * blocking call and broadcasting to any queued waiters afterwards.
 * May return NULL (nothing available, or connection error). */
static xcb_generic_event_t * wait_or_poll_for_event(Display *dpy, int wait)
{
	xcb_connection_t *c = dpy->xcb->connection;
	xcb_generic_event_t *event;
	if(wait)
	{
		if(dpy->xcb->event_waiter)
		{
			ConditionWait(dpy, dpy->xcb->event_notify);
			event = xcb_poll_for_event(c);
		}
		else
		{
			dpy->xcb->event_waiter = 1;
			UnlockDisplay(dpy);
			event = xcb_wait_for_event(c);
			LockDisplay(dpy);
			dpy->xcb->event_waiter = 0;
			ConditionBroadcast(dpy, dpy->xcb->event_notify);
		}
	}
	else
		event = xcb_poll_for_event(c);
	return event;
}

/* Widen a 32-bit sequence number into a native-word-size (unsigned long)
 * sequence number.  Treating the comparison as a 1 and shifting it avoids a
 * conditional branch, and shifting by 16 twice avoids a compiler warning when
 * sizeof(unsigned long) == 4. */
static void widen(unsigned long *wide, unsigned int narrow)
{
	unsigned long new = (*wide & ~0xFFFFFFFFUL) | narrow;
	*wide = new + ((unsigned long) (new < *wide) << 16 << 16);
}

/* Core demultiplexer: drain events, replies, and errors from XCB in
 * sequence-number order, keeping dpy->last_request_read monotonic.
 *
 * wait_for_first_event: block until at least one event has been
 *   enqueued via _XEnq (used by _XReadEvents).
 * current_error/current_request: when called from _XReply for request
 *   "current_request", an error belonging to exactly that request is
 *   stored through *current_error instead of being dispatched, and the
 *   loop stops at that request's PendingRequest node. */
static void process_responses(Display *dpy, int wait_for_first_event, xcb_generic_error_t **current_error, unsigned long current_request)
{
	void *reply;
	xcb_generic_event_t *event = dpy->xcb->next_event;
	xcb_generic_error_t *error;
	xcb_connection_t *c = dpy->xcb->connection;
	if(!event && dpy->xcb->event_owner == XlibOwnsEventQueue)
		event = wait_or_poll_for_event(dpy, wait_for_first_event);

	require_socket(dpy);

	while(1)
	{
		PendingRequest *req = dpy->xcb->pending_requests;
		unsigned long event_sequence = dpy->last_request_read;
		if(event)
			widen(&event_sequence, event->full_sequence);
		/* Pending requests at or before current_request must have
		 * been consumed before we were called. */
		assert(!(req && current_request && !XLIB_SEQUENCE_COMPARE(req->sequence, <=, current_request)));
		/* Deliver the event first if it precedes (or ties) the
		 * oldest pending reply, preserving wire order. */
		if(event && (!req || XLIB_SEQUENCE_COMPARE(event_sequence, <=, req->sequence)))
		{
			dpy->last_request_read = event_sequence;
			if(event->response_type != X_Error)
			{
				/* GenericEvents may be > 32 bytes.  In this
				 * case, the event struct is trailed by the
				 * additional bytes.  The xcb_generic_event_t
				 * struct uses 4 bytes for internal numbering,
				 * so we need to shift the trailing data to be
				 * after the first 32 bytes. */
				if (event->response_type == GenericEvent &&
				    ((xcb_ge_event_t*)event)->length)
				{
					memmove(&event->full_sequence,
					        &event[1],
					        ((xcb_ge_event_t*)event)->length * 4);
				}
				_XEnq(dpy, (xEvent *) event);
				wait_for_first_event = 0;
			}
			else if(current_error && event_sequence == current_request)
			{
				/* This can only occur when called from
				 * _XReply, which doesn't need a new event. */
				*current_error = (xcb_generic_error_t *) event;
				event = NULL;
				break;
			}
			else
				_XError(dpy, (xError *) event);
			free(event);
			event = wait_or_poll_for_event(dpy, wait_for_first_event);
		}
		else if(req && req->sequence == current_request)
		{
			/* _XReply itself will collect this reply. */
			break;
		}
		else if(req && xcb_poll_for_reply(dpy->xcb->connection, req->sequence, &reply, &error))
		{
			uint64_t sequence = req->sequence;
			/* reply == NULL means the request is done: either it
			 * errored (error != NULL) or it has no more replies;
			 * pop and free its PendingRequest node. */
			if(!reply)
			{
				dpy->xcb->pending_requests = req->next;
				if(!dpy->xcb->pending_requests)
					dpy->xcb->pending_requests_tail = &dpy->xcb->pending_requests;
				free(req);
				reply = error;
			}
			if(reply)
			{
				dpy->last_request_read = sequence;
				call_handlers(dpy, reply);
				free(reply);
			}
		}
		else
			break;
	}

	/* Stash any undelivered event for the next call. */
	dpy->xcb->next_event = event;

	if(xcb_connection_has_error(c))
		_XIOError(dpy);

	assert(XLIB_SEQUENCE_COMPARE(dpy->last_request_read, <=, dpy->request));
}

/* Return the number of queued events, optionally flushing first
 * (QueuedAfterFlush).  Returns 0 when the Display is in I/O-error
 * state or when XCB, not Xlib, owns the event queue. */
int _XEventsQueued(Display *dpy, int mode)
{
	if(dpy->flags & XlibDisplayIOError)
		return 0;
	if(dpy->xcb->event_owner != XlibOwnsEventQueue)
		return 0;

	if(mode == QueuedAfterFlush)
		_XSend(dpy, NULL, 0);
	else
		check_internal_connections(dpy);
	process_responses(dpy, 0, NULL, 0);
	return dpy->qlen;
}

/* _XReadEvents - Flush the output queue,
 * then read as many events as possible (but at least 1) and enqueue them
 */
void _XReadEvents(Display *dpy)
{
	if(dpy->flags & XlibDisplayIOError)
		return;
	_XSend(dpy, NULL, 0);
	if(dpy->xcb->event_owner != XlibOwnsEventQueue)
		return;
	check_internal_connections(dpy);
	/* process_responses can return with qlen still 0 (e.g. only
	 * replies/errors arrived); loop until a real event is queued. */
	do {
		process_responses(dpy, 1, NULL, 0);
	} while (dpy->qlen == 0);
}

/*
 * _XSend - Flush the buffer and send the client data. 32 bit word aligned
 * transmission is used, if size is not 0 mod 4, extra bytes are transmitted.
 *
 * Note that the connection must not be read from once the data currently
 * in the buffer has been written.
 */
void _XSend(Display *dpy, const char *data, long size)
{
	static const xReq dummy_request;
	static char const pad[3];
	struct iovec vec[3];
	uint64_t requests;
	_XExtension *ext;
	xcb_connection_t *c = dpy->xcb->connection;
	if(dpy->flags & XlibDisplayIOError)
		return;

	if(dpy->bufptr == dpy->buffer && !size)
		return;

	/* iff we asked XCB to set aside errors, we must pick those up
	 * eventually.  iff there are async handlers, we may have just
	 * issued requests that will generate replies.  in either case,
	 * we need to remember to check later. */
	if(dpy->xcb->event_owner != XlibOwnsEventQueue || dpy->async_handlers)
	{
		uint64_t sequence;
		/* Queue a PendingRequest node for every request issued
		 * since the last flush. */
		for(sequence = dpy->xcb->last_flushed; sequence < dpy->request; ++sequence)
		{
			PendingRequest *req = malloc(sizeof(PendingRequest));
			assert(req);
			req->next = NULL;
			req->sequence = sequence;
			*dpy->xcb->pending_requests_tail = req;
			dpy->xcb->pending_requests_tail = &req->next;
		}
	}
	requests = dpy->request - dpy->xcb->last_flushed;
	dpy->xcb->last_flushed = dpy->request;

	/* Gather: buffered requests, caller data, then 0-3 pad bytes to
	 * round the total up to a 32-bit boundary (-size & 3). */
	vec[0].iov_base = dpy->buffer;
	vec[0].iov_len = dpy->bufptr - dpy->buffer;
	vec[1].iov_base = (caddr_t) data;
	vec[1].iov_len = size;
	vec[2].iov_base = (caddr_t) pad;
	vec[2].iov_len = -size & 3;

	/* Let extensions inspect outgoing data before it hits the wire. */
	for(ext = dpy->flushes; ext; ext = ext->next_flush)
	{
		int i;
		for(i = 0; i < 3; ++i)
			if(vec[i].iov_len)
				ext->before_flush(dpy, &ext->codes, vec[i].iov_base, vec[i].iov_len);
	}

	if(xcb_writev(c, vec, 3, requests) < 0)
		_XIOError(dpy);
	dpy->bufptr = dpy->buffer;
	/* Reset last_req so no one appends to an already-sent request. */
	dpy->last_req = (char *) &dummy_request;

	check_internal_connections(dpy);

	_XSetSeqSyncFunction(dpy);
}

/*
 * _XFlush - Flush the X request buffer.  If the buffer is empty, no
 * action is taken.
 */
void _XFlush(Display *dpy)
{
	require_socket(dpy);
	_XSend(dpy, NULL, 0);

	_XEventsQueued(dpy, QueuedAfterReading);
}

/* Sentinel marking "no XID cached"; see _XIDHandler/_XAllocID. */
static const XID inval_id = ~0UL;

/* Sync handler that refills the cached XID after _XAllocID consumed it.
 * Called without the Display lock; takes it only to store the new id. */
int _XIDHandler(Display *dpy)
{
	XID next;

	if (dpy->xcb->next_xid != inval_id)
		return 0;

	next = xcb_generate_id(dpy->xcb->connection);
	LockDisplay(dpy);
	dpy->xcb->next_xid = next;
#ifdef XTHREADS
	if (dpy->lock)
		(*dpy->lock->user_unlock_display)(dpy);
#endif
	UnlockDisplay(dpy);
	return 0;
}

/* _XAllocID - resource ID allocation routine.
 * Hands out the cached XID and arranges (via _XSetPrivSyncFunction)
 * for _XIDHandler to fetch a replacement before the next request. */
XID _XAllocID(Display *dpy)
{
	XID ret = dpy->xcb->next_xid;
	assert (ret != inval_id);
#ifdef XTHREADS
	if (dpy->lock)
		(*dpy->lock->user_lock_display)(dpy);
#endif
	dpy->xcb->next_xid = inval_id;
	_XSetPrivSyncFunction(dpy);
	return ret;
}

/* _XAllocIDs - multiple resource ID allocation routine.
 * Drops the Display lock around xcb_generate_id, which may block. */
void _XAllocIDs(Display *dpy, XID *ids, int count)
{
	int i;
#ifdef XTHREADS
	if (dpy->lock)
		(*dpy->lock->user_lock_display)(dpy);
	UnlockDisplay(dpy);
#endif
	for (i = 0; i < count; i++)
		ids[i] = xcb_generate_id(dpy->xcb->connection);
#ifdef XTHREADS
	LockDisplay(dpy);
	if (dpy->lock)
		(*dpy->lock->user_unlock_display)(dpy);
#endif
}

/* Release the reply buffer held in dpy->xcb once it has been fully
 * consumed, or unconditionally when force is True (discard). */
static void _XFreeReplyData(Display *dpy, Bool force)
{
	if(!force && dpy->xcb->reply_consumed < dpy->xcb->reply_length)
		return;
	free(dpy->xcb->reply_data);
	dpy->xcb->reply_data = NULL;
}

/* Insert a PendingRequest node for the current request (dpy->request)
 * into the sorted pending_requests list, unless one already exists.
 * Returns the node for that sequence number. */
static PendingRequest * insert_pending_request(Display *dpy)
{
	PendingRequest **cur = &dpy->xcb->pending_requests;
	while(*cur && XLIB_SEQUENCE_COMPARE((*cur)->sequence, <, dpy->request))
		cur = &((*cur)->next);
	if(!*cur || (*cur)->sequence != dpy->request)
	{
		PendingRequest *node = malloc(sizeof(PendingRequest));
		assert(node);
		node->next = *cur;
		node->sequence = dpy->request;
		if(cur == dpy->xcb->pending_requests_tail)
			dpy->xcb->pending_requests_tail = &(node->next);
		*cur = node;
	}
	return *cur;
}

/*
 * _XReply - Wait for a reply packet and copy its contents into the
 * specified rep.
 * extra: number of 32-bit words expected after the reply
 * discard: should I discard data following "extra" words?
 */
Status _XReply(Display *dpy, xReply *rep, int extra, Bool discard)
{
	xcb_generic_error_t *error;
	xcb_connection_t *c = dpy->xcb->connection;
	char *reply;
	PendingRequest *current;

	assert(!dpy->xcb->reply_data);

	if(dpy->flags & XlibDisplayIOError)
		return 0;

	_XSend(dpy, NULL, 0);
	current = insert_pending_request(dpy);
	/* FIXME: drop the Display lock while waiting?
	 * Complicates process_responses. */
	reply = xcb_wait_for_reply(c, current->sequence, &error);

	check_internal_connections(dpy);
	/* Deliver everything sequenced before our reply; if our request
	 * errored, process_responses hands the error back via &error. */
	process_responses(dpy, 0, &error, current->sequence);

	if(error)
	{
		_XExtension *ext;
		xError *err = (xError *) error;
		int ret_code;

		dpy->last_request_read = error->full_sequence;

		/* Xlib is evil and assumes that even errors will be
		 * copied into rep. */
		memcpy(rep, error, 32);

		/* do not die on "no such font", "can't allocate",
		   "can't grab" failures */
		switch(err->errorCode)
		{
			case BadName:
				switch(err->majorCode)
				{
					case X_LookupColor:
					case X_AllocNamedColor:
						free(error);
						return 0;
				}
				break;
			case BadFont:
				if(err->majorCode == X_QueryFont) {
					free(error);
					return 0;
				}
				break;
			case BadAlloc:
			case BadAccess:
				free(error);
				return 0;
		}

		/*
		 * we better see if there is an extension who may
		 * want to suppress the error.
		 */
		for(ext = dpy->ext_procs; ext; ext = ext->next)
			if(ext->error && ext->error(dpy, err, &ext->codes, &ret_code)) {
				free(error);
				return ret_code;
			}

		_XError(dpy, err);
		free(error);
		return 0;
	}

	/* it's not an error, but we don't have a reply, so it's an I/O
	 * error. */
	if(!reply)
	{
		_XIOError(dpy);
		return 0;
	}

	dpy->last_request_read = current->sequence;

	/* there's no error and we have a reply.  Stash it so _XRead /
	 * _XReadPad / _XEatData can consume the trailing data. */
	dpy->xcb->reply_data = reply;
	dpy->xcb->reply_consumed = sizeof(xReply) + (extra * 4);
	dpy->xcb->reply_length = sizeof(xReply);
	/* response_type 1 == X_Reply: length field counts extra 32-bit
	 * words beyond the fixed 32-byte header. */
	if(dpy->xcb->reply_data[0] == 1)
		dpy->xcb->reply_length += (((xcb_generic_reply_t *) dpy->xcb->reply_data)->length * 4);

	/* error: Xlib asks too much.  give them what we can anyway. */
	if(dpy->xcb->reply_length < dpy->xcb->reply_consumed)
		dpy->xcb->reply_consumed = dpy->xcb->reply_length;

	memcpy(rep, dpy->xcb->reply_data, dpy->xcb->reply_consumed);
	_XFreeReplyData(dpy, discard);
	return 1;
}

/* Copy "size" bytes of the stashed reply into data, advancing the
 * consumed counter; frees the reply once fully consumed. */
int _XRead(Display *dpy, char *data, long size)
{
	assert(size >= 0);
	if(size == 0)
		return 0;
	assert(dpy->xcb->reply_data != NULL);
	assert(dpy->xcb->reply_consumed + size <= dpy->xcb->reply_length);
	memcpy(data, dpy->xcb->reply_data + dpy->xcb->reply_consumed, size);
	dpy->xcb->reply_consumed += size;
	_XFreeReplyData(dpy, False);
	return 0;
}

/*
 * _XReadPad - Read bytes from the socket taking into account incomplete
 * reads.  If the number of bytes is not 0 mod 4, read additional pad
 * bytes.
 */
void _XReadPad(Display *dpy, char *data, long size)
{
	_XRead(dpy, data, size);
	/* Skip the 0-3 pad bytes (-size & 3) after the payload. */
	dpy->xcb->reply_consumed += -size & 3;
	_XFreeReplyData(dpy, False);
}

/* Read and discard "n" 8-bit bytes of data */
void _XEatData(Display *dpy, unsigned long n)
{
	dpy->xcb->reply_consumed += n;
	_XFreeReplyData(dpy, False);
}