/* libevent: bufferevent_async.c, revision 1.2.4.2 */
      1 /*	$NetBSD: bufferevent_async.c,v 1.2.4.2 2014/12/25 02:28:14 snj Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2009-2012 Niels Provos and Nick Mathewson
      5  *
      6  * All rights reserved.
      7  *
      8  * Redistribution and use in source and binary forms, with or without
      9  * modification, are permitted provided that the following conditions
     10  * are met:
     11  * 1. Redistributions of source code must retain the above copyright
     12  *    notice, this list of conditions and the following disclaimer.
     13  * 2. Redistributions in binary form must reproduce the above copyright
     14  *    notice, this list of conditions and the following disclaimer in the
     15  *    documentation and/or other materials provided with the distribution.
     16  * 3. The name of the author may not be used to endorse or promote products
     17  *    derived from this software without specific prior written permission.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     20  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     21  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     22  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     23  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     24  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     28  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     29  */
     30 
     31 #include "event2/event-config.h"
     32 #include "evconfig-private.h"
     33 
     34 #ifdef EVENT__HAVE_SYS_TIME_H
     35 #include <sys/time.h>
     36 #endif
     37 
     38 #include <errno.h>
     39 #include <stdio.h>
     40 #include <stdlib.h>
     41 #include <string.h>
     42 #ifdef EVENT__HAVE_STDARG_H
     43 #include <stdarg.h>
     44 #endif
     45 #ifdef EVENT__HAVE_UNISTD_H
     46 #include <unistd.h>
     47 #endif
     48 
     49 #ifdef _WIN32
     50 #include <winsock2.h>
     51 #include <ws2tcpip.h>
     52 #endif
     53 
     54 #include <sys/queue.h>
     55 
     56 #include "event2/util.h"
     57 #include "event2/bufferevent.h"
     58 #include "event2/buffer.h"
     59 #include "event2/bufferevent_struct.h"
     60 #include "event2/event.h"
     61 #include "event2/util.h"
     62 #include "event-internal.h"
     63 #include "log-internal.h"
     64 #include "mm-internal.h"
     65 #include "bufferevent-internal.h"
     66 #include "util-internal.h"
     67 #include "iocp-internal.h"
     68 
     69 #ifndef SO_UPDATE_CONNECT_CONTEXT
     70 /* Mingw is sometimes missing this */
     71 #define SO_UPDATE_CONNECT_CONTEXT 0x7010
     72 #endif
     73 
     74 /* prototypes */
     75 static int be_async_enable(struct bufferevent *, short);
     76 static int be_async_disable(struct bufferevent *, short);
     77 static void be_async_destruct(struct bufferevent *);
     78 static int be_async_flush(struct bufferevent *, short, enum bufferevent_flush_mode);
     79 static int be_async_ctrl(struct bufferevent *, enum bufferevent_ctrl_op, union bufferevent_ctrl_data *);
     80 
/* Private state for an IOCP-based bufferevent.  The generic
 * "struct bufferevent" is embedded (inside bufferevent_private), so the
 * upcast helpers below can recover this structure from either the public
 * bufferevent or from one of the overlapped members. */
struct bufferevent_async {
	struct bufferevent_private bev;
	/* One overlapped structure per async operation we can have in
	 * flight: a pending ConnectEx, a pending read, a pending write. */
	struct event_overlapped connect_overlapped;
	struct event_overlapped read_overlapped;
	struct event_overlapped write_overlapped;
	/* Byte counts requested for the outstanding read/write; zero when
	 * no operation of that kind is in progress. */
	size_t read_in_progress;
	size_t write_in_progress;
	unsigned ok : 1;          /* Cleared on error; stops launching new IO. */
	unsigned read_added : 1;  /* A virtual event is registered for reads. */
	unsigned write_added : 1; /* A virtual event is registered for writes. */
};
     92 
/* Method table for IOCP-backed ("socket_async") bufferevents; the
 * generic bufferevent code dispatches through bev->be_ops. */
const struct bufferevent_ops bufferevent_ops_async = {
	"socket_async",
	evutil_offsetof(struct bufferevent_async, bev.bev),
	be_async_enable,
	be_async_disable,
	NULL, /* Unlink */
	be_async_destruct,
	bufferevent_generic_adj_timeouts_,
	be_async_flush,
	be_async_ctrl,
};
    104 
    105 static inline struct bufferevent_async *
    106 upcast(struct bufferevent *bev)
    107 {
    108 	struct bufferevent_async *bev_a;
    109 	if (bev->be_ops != &bufferevent_ops_async)
    110 		return NULL;
    111 	bev_a = EVUTIL_UPCAST(bev, struct bufferevent_async, bev.bev);
    112 	return bev_a;
    113 }
    114 
    115 static inline struct bufferevent_async *
    116 upcast_connect(struct event_overlapped *eo)
    117 {
    118 	struct bufferevent_async *bev_a;
    119 	bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, connect_overlapped);
    120 	EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
    121 	return bev_a;
    122 }
    123 
    124 static inline struct bufferevent_async *
    125 upcast_read(struct event_overlapped *eo)
    126 {
    127 	struct bufferevent_async *bev_a;
    128 	bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, read_overlapped);
    129 	EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
    130 	return bev_a;
    131 }
    132 
    133 static inline struct bufferevent_async *
    134 upcast_write(struct event_overlapped *eo)
    135 {
    136 	struct bufferevent_async *bev_a;
    137 	bev_a = EVUTIL_UPCAST(eo, struct bufferevent_async, write_overlapped);
    138 	EVUTIL_ASSERT(BEV_IS_ASYNC(&bev_a->bev.bev));
    139 	return bev_a;
    140 }
    141 
    142 static void
    143 bev_async_del_write(struct bufferevent_async *beva)
    144 {
    145 	struct bufferevent *bev = &beva->bev.bev;
    146 
    147 	if (beva->write_added) {
    148 		beva->write_added = 0;
    149 		event_base_del_virtual_(bev->ev_base);
    150 	}
    151 }
    152 
    153 static void
    154 bev_async_del_read(struct bufferevent_async *beva)
    155 {
    156 	struct bufferevent *bev = &beva->bev.bev;
    157 
    158 	if (beva->read_added) {
    159 		beva->read_added = 0;
    160 		event_base_del_virtual_(bev->ev_base);
    161 	}
    162 }
    163 
    164 static void
    165 bev_async_add_write(struct bufferevent_async *beva)
    166 {
    167 	struct bufferevent *bev = &beva->bev.bev;
    168 
    169 	if (!beva->write_added) {
    170 		beva->write_added = 1;
    171 		event_base_add_virtual_(bev->ev_base);
    172 	}
    173 }
    174 
    175 static void
    176 bev_async_add_read(struct bufferevent_async *beva)
    177 {
    178 	struct bufferevent *bev = &beva->bev.bev;
    179 
    180 	if (!beva->read_added) {
    181 		beva->read_added = 1;
    182 		event_base_add_virtual_(bev->ev_base);
    183 	}
    184 }
    185 
/* Launch an overlapped write if none is outstanding, writing is enabled,
 * and there is output data to send.  Called with the bufferevent lock
 * held (see be_async_outbuf_callback / write_complete). */
static void
bev_async_consider_writing(struct bufferevent_async *beva)
{
	size_t at_most;
	int limit;
	struct bufferevent *bev = &beva->bev.bev;

	/* Don't write if there's a write in progress, or we do not
	 * want to write, or when there's nothing left to write. */
	if (beva->write_in_progress || beva->bev.connecting)
		return;
	if (!beva->ok || !(bev->enabled&EV_WRITE) ||
	    !evbuffer_get_length(bev->output)) {
		bev_async_del_write(beva);
		return;
	}

	at_most = evbuffer_get_length(bev->output);

	/* This is safe so long as bufferevent_get_write_max never returns
	 * more than INT_MAX.  That's true for now. XXXX */
	limit = (int)bufferevent_get_write_max_(&beva->bev);
	if (at_most >= (size_t)limit && limit >= 0)
		at_most = limit;

	if (beva->bev.write_suspended) {
		bev_async_del_write(beva);
		return;
	}

	/*  XXXX doesn't respect low-water mark very well. */
	/* This reference is balanced by the decref in write_complete()
	 * on success, or immediately below on launch failure. */
	bufferevent_incref_(bev);
	if (evbuffer_launch_write_(bev->output, at_most,
	    &beva->write_overlapped)) {
		bufferevent_decref_(bev);
		beva->ok = 0;
		bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR, 0);
	} else {
		/* Reserve rate-limit budget now; write_complete refunds
		 * any unwritten portion. */
		beva->write_in_progress = at_most;
		bufferevent_decrement_write_buckets_(&beva->bev, at_most);
		bev_async_add_write(beva);
	}
}
    229 
/* Launch an overlapped read if none is outstanding, reading is enabled,
 * and the input buffer is below the high watermark.  Called with the
 * bufferevent lock held (see be_async_inbuf_callback / read_complete). */
static void
bev_async_consider_reading(struct bufferevent_async *beva)
{
	size_t cur_size;
	size_t read_high;
	size_t at_most;
	int limit;
	struct bufferevent *bev = &beva->bev.bev;

	/* Don't read if there is a read in progress, or we do not
	 * want to read. */
	if (beva->read_in_progress || beva->bev.connecting)
		return;
	if (!beva->ok || !(bev->enabled&EV_READ)) {
		bev_async_del_read(beva);
		return;
	}

	/* Don't read if we're full */
	cur_size = evbuffer_get_length(bev->input);
	read_high = bev->wm_read.high;
	if (read_high) {
		if (cur_size >= read_high) {
			bev_async_del_read(beva);
			return;
		}
		at_most = read_high - cur_size;
	} else {
		at_most = 16384; /* FIXME totally magic. */
	}

	/* XXXX This over-commits. */
	/* XXXX see also the note above on the cast of
	 * bufferevent_get_write_max_() */
	limit = (int)bufferevent_get_read_max_(&beva->bev);
	if (at_most >= (size_t)limit && limit >= 0)
		at_most = limit;

	if (beva->bev.read_suspended) {
		bev_async_del_read(beva);
		return;
	}

	/* This reference is balanced by the decref in read_complete()
	 * on success, or immediately below on launch failure. */
	bufferevent_incref_(bev);
	if (evbuffer_launch_read_(bev->input, at_most, &beva->read_overlapped)) {
		beva->ok = 0;
		bufferevent_run_eventcb_(bev, BEV_EVENT_ERROR, 0);
		bufferevent_decref_(bev);
	} else {
		/* Reserve rate-limit budget now; read_complete refunds
		 * any unread portion. */
		beva->read_in_progress = at_most;
		bufferevent_decrement_read_buckets_(&beva->bev, at_most);
		bev_async_add_read(beva);
	}

	return;
}
    285 
    286 static void
    287 be_async_outbuf_callback(struct evbuffer *buf,
    288     const struct evbuffer_cb_info *cbinfo,
    289     void *arg)
    290 {
    291 	struct bufferevent *bev = arg;
    292 	struct bufferevent_async *bev_async = upcast(bev);
    293 
    294 	/* If we added data to the outbuf and were not writing before,
    295 	 * we may want to write now. */
    296 
    297 	bufferevent_incref_and_lock_(bev);
    298 
    299 	if (cbinfo->n_added)
    300 		bev_async_consider_writing(bev_async);
    301 
    302 	bufferevent_decref_and_unlock_(bev);
    303 }
    304 
    305 static void
    306 be_async_inbuf_callback(struct evbuffer *buf,
    307     const struct evbuffer_cb_info *cbinfo,
    308     void *arg)
    309 {
    310 	struct bufferevent *bev = arg;
    311 	struct bufferevent_async *bev_async = upcast(bev);
    312 
    313 	/* If we drained data from the inbuf and were not reading before,
    314 	 * we may want to read now */
    315 
    316 	bufferevent_incref_and_lock_(bev);
    317 
    318 	if (cbinfo->n_deleted)
    319 		bev_async_consider_reading(bev_async);
    320 
    321 	bufferevent_decref_and_unlock_(bev);
    322 }
    323 
    324 static int
    325 be_async_enable(struct bufferevent *buf, short what)
    326 {
    327 	struct bufferevent_async *bev_async = upcast(buf);
    328 
    329 	if (!bev_async->ok)
    330 		return -1;
    331 
    332 	if (bev_async->bev.connecting) {
    333 		/* Don't launch anything during connection attempts. */
    334 		return 0;
    335 	}
    336 
    337 	if (what & EV_READ)
    338 		BEV_RESET_GENERIC_READ_TIMEOUT(buf);
    339 	if (what & EV_WRITE)
    340 		BEV_RESET_GENERIC_WRITE_TIMEOUT(buf);
    341 
    342 	/* If we newly enable reading or writing, and we aren't reading or
    343 	   writing already, consider launching a new read or write. */
    344 
    345 	if (what & EV_READ)
    346 		bev_async_consider_reading(bev_async);
    347 	if (what & EV_WRITE)
    348 		bev_async_consider_writing(bev_async);
    349 	return 0;
    350 }
    351 
    352 static int
    353 be_async_disable(struct bufferevent *bev, short what)
    354 {
    355 	struct bufferevent_async *bev_async = upcast(bev);
    356 	/* XXXX If we disable reading or writing, we may want to consider
    357 	 * canceling any in-progress read or write operation, though it might
    358 	 * not work. */
    359 
    360 	if (what & EV_READ) {
    361 		BEV_DEL_GENERIC_READ_TIMEOUT(bev);
    362 		bev_async_del_read(bev_async);
    363 	}
    364 	if (what & EV_WRITE) {
    365 		BEV_DEL_GENERIC_WRITE_TIMEOUT(bev);
    366 		bev_async_del_write(bev_async);
    367 	}
    368 
    369 	return 0;
    370 }
    371 
    372 static void
    373 be_async_destruct(struct bufferevent *bev)
    374 {
    375 	struct bufferevent_async *bev_async = upcast(bev);
    376 	struct bufferevent_private *bev_p = BEV_UPCAST(bev);
    377 	evutil_socket_t fd;
    378 
    379 	EVUTIL_ASSERT(!upcast(bev)->write_in_progress &&
    380 			!upcast(bev)->read_in_progress);
    381 
    382 	bev_async_del_read(bev_async);
    383 	bev_async_del_write(bev_async);
    384 
    385 	fd = evbuffer_overlapped_get_fd_(bev->input);
    386 	if (bev_p->options & BEV_OPT_CLOSE_ON_FREE) {
    387 		/* XXXX possible double-close */
    388 		evutil_closesocket(fd);
    389 	}
    390 }
    391 
/* GetQueuedCompletionStatus doesn't reliably yield WSA error codes, so
 * we use WSAGetOverlappedResult to translate. */
static void
bev_async_set_wsa_error(struct bufferevent *bev, struct event_overlapped *eo)
{
	DWORD bytes, flags;
	evutil_socket_t fd;

	fd = evbuffer_overlapped_get_fd_(bev->input);
	/* Called purely for its side effect: on a failed operation this
	 * sets the thread's WSA last-error to the operation's real error
	 * code.  The return value and output parameters are ignored. */
	WSAGetOverlappedResult(fd, &eo->overlapped, &bytes, FALSE, &flags);
}
    403 
/* Flush hook for async bufferevents: not implemented; always reports
 * success (0) without doing anything. */
static int
be_async_flush(struct bufferevent *bev, short what,
    enum bufferevent_flush_mode mode)
{
	return 0;
}
    410 
/* IOCP completion callback for a ConnectEx launched by
 * bufferevent_async_connect_().  "ok" is false if the connect failed. */
static void
connect_complete(struct event_overlapped *eo, ev_uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	struct bufferevent_async *bev_a = upcast_connect(eo);
	struct bufferevent *bev = &bev_a->bev.bev;
	evutil_socket_t sock;

	BEV_LOCK(bev);

	EVUTIL_ASSERT(bev_a->bev.connecting);
	bev_a->bev.connecting = 0;
	sock = evbuffer_overlapped_get_fd_(bev_a->bev.bev.input);
	/* Finish setting up the ConnectEx-ed socket. XXXX Handle error? */
	setsockopt(sock, SOL_SOCKET, SO_UPDATE_CONNECT_CONTEXT, NULL, 0);

	if (ok)
		bufferevent_async_set_connected_(bev);
	else
		bev_async_set_wsa_error(bev, eo);

	bufferevent_run_eventcb_(bev,
			ok? BEV_EVENT_CONNECTED : BEV_EVENT_ERROR, 0);

	/* Balance the virtual event added when the connect was launched. */
	event_base_del_virtual_(bev->ev_base);

	/* Drop the reference taken in bufferevent_async_connect_(). */
	bufferevent_decref_and_unlock_(bev);
}
    439 
/* IOCP completion callback for an overlapped read launched by
 * bev_async_consider_reading().  "nbytes" is the number of bytes
 * actually transferred; "ok" is false on error. */
static void
read_complete(struct event_overlapped *eo, ev_uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	struct bufferevent_async *bev_a = upcast_read(eo);
	struct bufferevent *bev = &bev_a->bev.bev;
	short what = BEV_EVENT_READING;
	ev_ssize_t amount_unread;
	BEV_LOCK(bev);
	EVUTIL_ASSERT(bev_a->read_in_progress);

	amount_unread = bev_a->read_in_progress - nbytes;
	evbuffer_commit_read_(bev->input, nbytes);
	bev_a->read_in_progress = 0;
	/* Refund the rate-limit budget we reserved but didn't use:
	 * decrementing by a negative amount adds it back. */
	if (amount_unread)
		bufferevent_decrement_read_buckets_(&bev_a->bev, -amount_unread);

	if (!ok)
		bev_async_set_wsa_error(bev, eo);

	if (bev_a->ok) {
		if (ok && nbytes) {
			/* Successful non-empty read: deliver the data and
			 * maybe launch the next read. */
			BEV_RESET_GENERIC_READ_TIMEOUT(bev);
			bufferevent_trigger_nolock_(bev, EV_READ, 0);
			bev_async_consider_reading(bev_a);
		} else if (!ok) {
			what |= BEV_EVENT_ERROR;
			bev_a->ok = 0;
			bufferevent_run_eventcb_(bev, what, 0);
		} else if (!nbytes) {
			/* A zero-byte completion with ok set means EOF. */
			what |= BEV_EVENT_EOF;
			bev_a->ok = 0;
			bufferevent_run_eventcb_(bev, what, 0);
		}
	}

	/* Drop the reference taken when the read was launched. */
	bufferevent_decref_and_unlock_(bev);
}
    478 
/* IOCP completion callback for an overlapped write launched by
 * bev_async_consider_writing().  "nbytes" is the number of bytes
 * actually transferred; "ok" is false on error. */
static void
write_complete(struct event_overlapped *eo, ev_uintptr_t key,
    ev_ssize_t nbytes, int ok)
{
	struct bufferevent_async *bev_a = upcast_write(eo);
	struct bufferevent *bev = &bev_a->bev.bev;
	short what = BEV_EVENT_WRITING;
	ev_ssize_t amount_unwritten;

	BEV_LOCK(bev);
	EVUTIL_ASSERT(bev_a->write_in_progress);

	amount_unwritten = bev_a->write_in_progress - nbytes;
	evbuffer_commit_write_(bev->output, nbytes);
	bev_a->write_in_progress = 0;

	/* Refund the rate-limit budget we reserved but didn't use:
	 * decrementing by a negative amount adds it back. */
	if (amount_unwritten)
		bufferevent_decrement_write_buckets_(&bev_a->bev,
		                                     -amount_unwritten);


	if (!ok)
		bev_async_set_wsa_error(bev, eo);

	if (bev_a->ok) {
		if (ok && nbytes) {
			/* Successful non-empty write: notify the user and
			 * maybe launch the next write. */
			BEV_RESET_GENERIC_WRITE_TIMEOUT(bev);
			bufferevent_trigger_nolock_(bev, EV_WRITE, 0);
			bev_async_consider_writing(bev_a);
		} else if (!ok) {
			what |= BEV_EVENT_ERROR;
			bev_a->ok = 0;
			bufferevent_run_eventcb_(bev, what, 0);
		} else if (!nbytes) {
			/* Zero bytes written with ok set: treated as EOF. */
			what |= BEV_EVENT_EOF;
			bev_a->ok = 0;
			bufferevent_run_eventcb_(bev, what, 0);
		}
	}

	/* Drop the reference taken when the write was launched. */
	bufferevent_decref_and_unlock_(bev);
}
    521 
/* Construct a new IOCP-backed bufferevent on "base" for socket "fd"
 * (fd may be negative to attach a socket later via BEV_CTRL_SET_FD).
 * Returns NULL if the base has no IOCP port, if the socket cannot be
 * associated with the port, or on allocation failure.
 * BEV_OPT_THREADSAFE is forced on. */
struct bufferevent *
bufferevent_async_new_(struct event_base *base,
    evutil_socket_t fd, int options)
{
	struct bufferevent_async *bev_a;
	struct bufferevent *bev;
	struct event_iocp_port *iocp;

	options |= BEV_OPT_THREADSAFE;

	if (!(iocp = event_base_get_iocp_(base)))
		return NULL;

	if (fd >= 0 && event_iocp_port_associate_(iocp, fd, 1)<0) {
		int err = GetLastError();
		/* We may have already associated this fd with a port.
		 * Let's hope it's this port, and that the error code
		 * for doing this never changes. */
		if (err != ERROR_INVALID_PARAMETER)
			return NULL;
	}

	if (!(bev_a = mm_calloc(1, sizeof(struct bufferevent_async))))
		return NULL;

	bev = &bev_a->bev.bev;
	/* Both buffers are overlapped evbuffers wrapping the same socket. */
	if (!(bev->input = evbuffer_overlapped_new_(fd))) {
		mm_free(bev_a);
		return NULL;
	}
	if (!(bev->output = evbuffer_overlapped_new_(fd))) {
		evbuffer_free(bev->input);
		mm_free(bev_a);
		return NULL;
	}

	if (bufferevent_init_common_(&bev_a->bev, base, &bufferevent_ops_async,
		options)<0)
		goto err;

	evbuffer_add_cb(bev->input, be_async_inbuf_callback, bev);
	evbuffer_add_cb(bev->output, be_async_outbuf_callback, bev);

	event_overlapped_init_(&bev_a->connect_overlapped, connect_complete);
	event_overlapped_init_(&bev_a->read_overlapped, read_complete);
	event_overlapped_init_(&bev_a->write_overlapped, write_complete);

	/* With no socket yet, stay !ok until bufferevent_async_set_connected_
	 * (or a successful connect) marks us usable. */
	bev_a->ok = fd >= 0;
	if (bev_a->ok)
		bufferevent_init_generic_timeout_cbs_(bev);

	return bev;
err:
	bufferevent_free(&bev_a->bev.bev);
	return NULL;
}
    578 
    579 void
    580 bufferevent_async_set_connected_(struct bufferevent *bev)
    581 {
    582 	struct bufferevent_async *bev_async = upcast(bev);
    583 	bev_async->ok = 1;
    584 	bufferevent_init_generic_timeout_cbs_(bev);
    585 	/* Now's a good time to consider reading/writing */
    586 	be_async_enable(bev, bev->enabled);
    587 }
    588 
    589 int
    590 bufferevent_async_can_connect_(struct bufferevent *bev)
    591 {
    592 	const struct win32_extension_fns *ext =
    593 	    event_get_win32_extension_fns_();
    594 
    595 	if (BEV_IS_ASYNC(bev) &&
    596 	    event_base_get_iocp_(bev->ev_base) &&
    597 	    ext && ext->ConnectEx)
    598 		return 1;
    599 
    600 	return 0;
    601 }
    602 
/* Start an asynchronous ConnectEx on "fd" toward "sa".  Returns 0 if
 * the connect was launched (completion is reported via
 * connect_complete), -1 on immediate failure.  Only AF_INET and
 * AF_INET6 destinations are supported; for other families the caller
 * must bind() beforehand and this returns -1. */
int
bufferevent_async_connect_(struct bufferevent *bev, evutil_socket_t fd,
	const struct sockaddr *sa, int socklen)
{
	BOOL rc;
	struct bufferevent_async *bev_async = upcast(bev);
	struct sockaddr_storage ss;
	const struct win32_extension_fns *ext =
	    event_get_win32_extension_fns_();

	EVUTIL_ASSERT(ext && ext->ConnectEx && fd >= 0 && sa != NULL);

	/* ConnectEx() requires that the socket be bound to an address
	 * with bind() before using, otherwise it will fail. We attempt
	 * to issue a bind() here, taking into account that the error
	 * code is set to WSAEINVAL when the socket is already bound. */
	memset(&ss, 0, sizeof(ss));
	if (sa->sa_family == AF_INET) {
		struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = INADDR_ANY;
	} else if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
		sin6->sin6_family = AF_INET6;
		sin6->sin6_addr = in6addr_any;
	} else {
		/* Well, the user will have to bind() */
		return -1;
	}
	if (bind(fd, (struct sockaddr *)&ss, sizeof(ss)) < 0 &&
	    WSAGetLastError() != WSAEINVAL)
		return -1;

	/* Keep the base looping and the bufferevent alive until
	 * connect_complete() balances both of these. */
	event_base_add_virtual_(bev->ev_base);
	bufferevent_incref_(bev);
	rc = ext->ConnectEx(fd, sa, socklen, NULL, 0, NULL,
			    &bev_async->connect_overlapped.overlapped);
	if (rc || WSAGetLastError() == ERROR_IO_PENDING)
		return 0;

	/* Immediate failure: roll back the virtual event and reference. */
	event_base_del_virtual_(bev->ev_base);
	bufferevent_decref_(bev);

	return -1;
}
    648 
    649 static int
    650 be_async_ctrl(struct bufferevent *bev, enum bufferevent_ctrl_op op,
    651     union bufferevent_ctrl_data *data)
    652 {
    653 	switch (op) {
    654 	case BEV_CTRL_GET_FD:
    655 		data->fd = evbuffer_overlapped_get_fd_(bev->input);
    656 		return 0;
    657 	case BEV_CTRL_SET_FD: {
    658 		struct event_iocp_port *iocp;
    659 
    660 		if (data->fd == evbuffer_overlapped_get_fd_(bev->input))
    661 			return 0;
    662 		if (!(iocp = event_base_get_iocp_(bev->ev_base)))
    663 			return -1;
    664 		if (event_iocp_port_associate_(iocp, data->fd, 1) < 0)
    665 			return -1;
    666 		evbuffer_overlapped_set_fd_(bev->input, data->fd);
    667 		evbuffer_overlapped_set_fd_(bev->output, data->fd);
    668 		return 0;
    669 	}
    670 	case BEV_CTRL_CANCEL_ALL: {
    671 		struct bufferevent_async *bev_a = upcast(bev);
    672 		evutil_socket_t fd = evbuffer_overlapped_get_fd_(bev->input);
    673 		if (fd != (evutil_socket_t)INVALID_SOCKET &&
    674 		    (bev_a->bev.options & BEV_OPT_CLOSE_ON_FREE)) {
    675 			closesocket(fd);
    676 		}
    677 		bev_a->ok = 0;
    678 		return 0;
    679 	}
    680 	case BEV_CTRL_GET_UNDERLYING:
    681 	default:
    682 		return -1;
    683 	}
    684 }
    685 
    686 
    687