main.c revision 1.9 1 /* $NetBSD: main.c,v 1.9 2013/12/29 03:26:07 christos Exp $ */
2
3 #include <config.h>
4
5 #include <event2/util.h>
6 #include <event2/event.h>
7
8 #include "ntp_workimpl.h"
9 #ifdef WORK_THREAD
10 # include <event2/thread.h>
11 #endif
12
13 #include "main.h"
14 #include "ntp_libopts.h"
15 #include "kod_management.h"
16 #include "networking.h"
17 #include "utilities.h"
18 #include "log.h"
19 #include "libntp.h"
20
21
/* Run-state flags and query bookkeeping (single-threaded event loop). */
int shutting_down;			/* event loop exit has been requested */
int time_derived;			/* a usable server time was obtained */
int time_adjusted;			/* the local clock was actually set/slewed */
int n_pending_dns = 0;			/* outstanding async name lookups */
int n_pending_ntp = 0;			/* outstanding NTP queries awaiting reply */
int ai_fam_pref = AF_UNSPEC;		/* address family forced by -4/-6, if any */
int ntpver = 4;				/* NTP version for requests (--ntpversion) */
double steplimit = -1;			/* seconds; ms->s converted in sntp_main() */
SOCKET sock4 = -1;			/* Socket for IPv4 */
SOCKET sock6 = -1;			/* Socket for IPv6 */
/*
** BCAST *must* listen on port 123 (by default), so we can only
** use the UCST sockets (above) if they too are using port 123
*/
SOCKET bsock4 = -1;			/* Broadcast Socket for IPv4 */
SOCKET bsock6 = -1;			/* Broadcast Socket for IPv6 */
struct event_base *base;		/* the single libevent event base */
struct event *ev_sock4;			/* read/timeout event on sock4 */
struct event *ev_sock6;			/* read/timeout event on sock6 */
struct event *ev_worker_timeout;	/* intres idle timer (intres_timeout_req) */
struct event *ev_xmt_timer;		/* one-shot timer driving xmt_timer_cb */
43
/*
** Per-lookup context handed to the async DNS resolver and shared by
** every packet sent to the resulting addresses.  Allocated in
** handle_lookup() with the looked-up name stored immediately after
** the struct.
*/
struct dns_ctx {
	const char *	name;		/* server name as given on the command line */
	int		flags;		/* CTX_* bits below */
#define CTX_BCST	0x0001		/* broadcast listen-only query */
#define CTX_UCST	0x0002		/* unicast query */
#define CTX_xCST	0x0003		/* mask covering either cast mode */
#define CTX_CONC	0x0004		/* concurrent (-c) lookup */
#define CTX_unused	0xfffd
	int		key_id;		/* authentication key ID, or -1 for none */
	struct timeval	timeout;	/* per-query timeout (copy of response_tv) */
	struct key *	key;		/* resolved key, NULL when unauthenticated */
};
56
/*
** One NTP request sent (or awaiting send) to a single server address.
** Kept on fam_listheads[] so replies can be matched by source address
** in sock_cb().
*/
typedef struct sent_pkt_tag sent_pkt;
struct sent_pkt_tag {
	sent_pkt *		link;	/* next in the per-family list */
	struct dns_ctx *	dctx;	/* owning lookup context (shared) */
	sockaddr_u		addr;	/* server address the packet went to */
	time_t			stime;	/* send time, Unix epoch; 0 = not yet sent */
	int			done;	/* reply received or query timed out */
	struct pkt		x_pkt;	/* copy of the transmitted packet */
};
66
/*
** Transmit-queue entry: schedules one sent_pkt for transmission at
** time 'sched' on socket 'sock' (see queue_xmt() / xmt_timer_cb()).
*/
typedef struct xmt_ctx_tag xmt_ctx;
struct xmt_ctx_tag {
	xmt_ctx *		link;	/* next in xmt_q, kept sorted by sched */
	SOCKET			sock;	/* socket to transmit on (sock4/sock6) */
	time_t			sched;	/* earliest transmit time, Unix epoch */
	sent_pkt *		spkt;	/* the packet to transmit */
};
74
struct timeval	gap;			/* pacing gap between sends (--gap, usec) */
xmt_ctx *	xmt_q;			/* pending transmits, sorted by sched */
struct key *	keys = NULL;		/* keys loaded from --keyfile */
int		response_timeout;	/* --timeout value, seconds */
struct timeval	response_tv;		/* response_timeout as a timeval */
struct timeval	start_tv;		/* program start, for the overall deadline */
/* check the timeout at least once per second */
struct timeval	wakeup_tv = { 0, 888888 };

/* per-family lists of sent packets awaiting replies */
sent_pkt *	fam_listheads[2];
#define v4_pkts_list	(fam_listheads[0])
#define v6_pkts_list	(fam_listheads[1])

/* receive buffer, viewed either as an NTP packet or as raw bytes */
static union {
	struct pkt	pkt;
	char		buf[1500];
} rbuf;

#define r_pkt  rbuf.pkt

#ifdef HAVE_DROPROOT
int droproot;			/* intres imports these */
int root_dropped;
#endif
u_long current_time;		/* libntp/authkeys.c */
100
/* Forward declarations for the event callbacks and helpers below. */
void open_sockets(void);
void handle_lookup(const char *name, int flags);
void sntp_addremove_fd(int fd, int is_pipe, int remove_it);
void worker_timeout(evutil_socket_t, short, void *);
void worker_resp_cb(evutil_socket_t, short, void *);
void sntp_name_resolved(int, int, void *, const char *, const char *,
			const struct addrinfo *,
			const struct addrinfo *);
void queue_xmt(SOCKET sock, struct dns_ctx *dctx, sent_pkt *spkt,
	       u_int xmt_delay);
void xmt_timer_cb(evutil_socket_t, short, void *ptr);
void xmt(xmt_ctx *xctx);
int  check_kod(const struct addrinfo *ai);
void timeout_query(sent_pkt *);
void timeout_queries(void);
void sock_cb(evutil_socket_t, short, void *);
void check_exit_conditions(void);
void sntp_libevent_log_cb(int, const char *);
void set_li_vn_mode(struct pkt *spkt, char leap, char version, char mode);
int  set_time(double offset);
void dec_pending_ntp(const char *, sockaddr_u *);
int  libevent_version_ok(void);
int  gettimeofday_cached(struct event_base *b, struct timeval *tv);
124
125
/*
 * The actual main function.
 *
 * Processes options, initializes libevent (with pthreads support under
 * WORK_THREAD), opens the UDP sockets, queues an async DNS lookup for
 * every requested server, then runs the event loop until
 * check_exit_conditions() or timeout_queries() stops it.
 *
 * Returns 0 on success; 1 when a step/slew (-S/-s) was requested but
 * no adjustment happened; -1 on libevent setup failure; exits early on
 * usage or version errors.
 */
int
sntp_main (
	int		argc,
	char **		argv,
	const char *	sntpVersion
	)
{
	int			i;
	int			exitcode;
	int			optct;
	struct event_config *	evcfg;

	/* Initialize logging system - sets up progname */
	sntp_init_logging(argv[0]);

	if (!libevent_version_ok())
		exit(EX_SOFTWARE);

	init_lib();
	init_auth();

	optct = ntpOptionProcess(&sntpOptions, argc, argv);
	argc -= optct;		/* remaining arguments are server names */
	argv += optct;

	debug = OPT_VALUE_SET_DEBUG_LEVEL;

	TRACE(2, ("init_lib() done, %s%s\n",
		  (ipv4_works)
		      ? "ipv4_works "
		      : "",
		  (ipv6_works)
		      ? "ipv6_works "
		      : ""));
	ntpver = OPT_VALUE_NTPVERSION;
	steplimit = OPT_VALUE_STEPLIMIT / 1e3;		/* ms -> seconds */
	gap.tv_usec = max(0, OPT_VALUE_GAP * 1000);	/* ms -> usec */
	gap.tv_usec = min(gap.tv_usec, 999999);		/* keep tv_usec legal */

	if (HAVE_OPT(LOGFILE))
		open_logfile(OPT_ARG(LOGFILE));

	msyslog(LOG_INFO, "%s", sntpVersion);

	if (0 == argc && !HAVE_OPT(BROADCAST) && !HAVE_OPT(CONCURRENT)) {
		printf("%s: Must supply at least one of -b hostname, -c hostname, or hostname.\n",
		       progname);
		exit(EX_USAGE);
	}

	/*
	** Eventually, we probably want:
	** - separate bcst and ucst timeouts (why?)
	** - multiple --timeout values in the commandline
	*/

	response_timeout = OPT_VALUE_TIMEOUT;
	response_tv.tv_sec = response_timeout;
	response_tv.tv_usec = 0;

	/* IPv6 available? */
	if (isc_net_probeipv6() != ISC_R_SUCCESS) {
		ai_fam_pref = AF_INET;
		TRACE(1, ("No ipv6 support available, forcing ipv4\n"));
	} else {
		/* Check for options -4 and -6 */
		if (HAVE_OPT(IPV4))
			ai_fam_pref = AF_INET;
		else if (HAVE_OPT(IPV6))
			ai_fam_pref = AF_INET6;
	}

	/* TODO: Parse config file if declared */

	/*
	** Init the KOD system.
	** For embedded systems with no writable filesystem,
	** -K /dev/null can be used to disable KoD storage.
	*/
	kod_init_kod_db(OPT_ARG(KOD), FALSE);

	// HMS: Should we use arg-defalt for this too?
	if (HAVE_OPT(KEYFILE))
		auth_init(OPT_ARG(KEYFILE), &keys);

	/*
	** Considering employing a variable that prevents functions of doing
	** anything until everything is initialized properly
	**
	** HMS: What exactly does the above mean?
	*/
	event_set_log_callback(&sntp_libevent_log_cb);
	if (debug > 0)
		event_enable_debug_mode();
#ifdef WORK_THREAD
	evthread_use_pthreads();
	/* we use libevent from main thread only, locks should be academic */
	if (debug > 0)
		evthread_enable_lock_debuging();
#endif
	evcfg = event_config_new();
	if (NULL == evcfg) {
		printf("%s: event_config_new() failed!\n", progname);
		return -1;
	}
#ifndef HAVE_SOCKETPAIR
	/* intres signals us via a pipe: need fd (not just socket) events */
	event_config_require_features(evcfg, EV_FEATURE_FDS);
#endif
	/* all libevent calls are from main thread */
	/* event_config_set_flag(evcfg, EVENT_BASE_FLAG_NOLOCK); */
	base = event_base_new_with_config(evcfg);
	event_config_free(evcfg);
	if (NULL == base) {
		printf("%s: event_base_new() failed!\n", progname);
		return -1;
	}

	/* wire into intres resolver */
	worker_per_query = TRUE;
	addremove_io_fd = &sntp_addremove_fd;

	open_sockets();

	if (HAVE_OPT(BROADCAST)) {
		int		cn = STACKCT_OPT(  BROADCAST );
		const char **	cp = STACKLST_OPT( BROADCAST );

		while (cn-- > 0) {
			handle_lookup(*cp, CTX_BCST);
			cp++;
		}
	}

	if (HAVE_OPT(CONCURRENT)) {
		int		cn = STACKCT_OPT( CONCURRENT );
		const char **	cp = STACKLST_OPT( CONCURRENT );

		while (cn-- > 0) {
			handle_lookup(*cp, CTX_UCST | CTX_CONC);
			cp++;
		}
	}

	/* remaining command-line arguments are unicast servers */
	for (i = 0; i < argc; ++i)
		handle_lookup(argv[i], CTX_UCST);

	gettimeofday_cached(base, &start_tv);
	event_base_dispatch(base);	/* run until loopexit is requested */
	event_base_free(base);

	/* failure only if a set/slew was requested but never happened */
	if (!time_adjusted &&
	    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
		exitcode = 1;
	else
		exitcode = 0;

	return exitcode;
}
289
290
/*
** open sockets and make them non-blocking
**
** Creates the IPv4 (and, when ipv6_works, the IPv6) UDP socket, binds
** it to the wildcard address — port 123 with --usereservedport, else
** an ephemeral port — and registers a persistent read/timeout libevent
** event on it.  socket()/bind() failures are fatal (exit(1)).
**
** NOTE(review): one_fam_works is assigned but never consulted; the
** function returns void whether or not any family came up.
*/
void
open_sockets(
	void
	)
{
	sockaddr_u	name;
	int		one_fam_works;

	one_fam_works = FALSE;
	if (-1 == sock4) {
		sock4 = socket(PF_INET, SOCK_DGRAM, 0);
		if (-1 == sock4) {
			/* error getting a socket */
			msyslog(LOG_ERR, "open_sockets: socket(PF_INET) failed: %m");
			exit(1);
		}
		/* Make it non-blocking */
		make_socket_nonblocking(sock4);

		/* Let's try using a wildcard... */
		ZERO(name);
		AF(&name) = AF_INET;
		SET_ADDR4N(&name, INADDR_ANY);
		SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));

		if (-1 == bind(sock4, &name.sa,
			       SOCKLEN(&name))) {
			msyslog(LOG_ERR, "open_sockets: bind(sock4) failed: %m");
			exit(1);
		}

		/* Register an NTP callback for recv/timeout */
		ev_sock4 = event_new(base, sock4,
				     EV_TIMEOUT | EV_READ | EV_PERSIST,
				     &sock_cb, NULL);
		if (NULL == ev_sock4) {
			msyslog(LOG_ERR,
				"open_sockets: event_new(base, sock4) failed!");
		} else {
			one_fam_works = TRUE;
			/* wakeup_tv keeps sock_cb firing for timeout scans */
			event_add(ev_sock4, &wakeup_tv);
		}
	}

	/* We may not always have IPv6... */
	if (-1 == sock6 && ipv6_works) {
		sock6 = socket(PF_INET6, SOCK_DGRAM, 0);
		/* NOTE(review): the ipv6_works re-test below is redundant;
		   it was already required to enter this branch. */
		if (-1 == sock6 && ipv6_works) {
			/* error getting a socket */
			msyslog(LOG_ERR, "open_sockets: socket(PF_INET6) failed: %m");
			exit(1);
		}
		/* Make it non-blocking */
		make_socket_nonblocking(sock6);

		/* Let's try using a wildcard... */
		ZERO(name);
		AF(&name) = AF_INET6;
		SET_ADDR6N(&name, in6addr_any);
		SET_PORT(&name, (HAVE_OPT(USERESERVEDPORT) ? 123 : 0));

		if (-1 == bind(sock6, &name.sa,
			       SOCKLEN(&name))) {
			msyslog(LOG_ERR, "open_sockets: bind(sock6) failed: %m");
			exit(1);
		}
		/* Register an NTP callback for recv/timeout */
		ev_sock6 = event_new(base, sock6,
				     EV_TIMEOUT | EV_READ | EV_PERSIST,
				     &sock_cb, NULL);
		if (NULL == ev_sock6) {
			msyslog(LOG_ERR,
				"open_sockets: event_new(base, sock6) failed!");
		} else {
			one_fam_works = TRUE;
			event_add(ev_sock6, &wakeup_tv);
		}
	}

	return;
}
375
376
/*
** handle_lookup
**
** Kick off an asynchronous DNS lookup for one server name.  A dns_ctx
** is allocated with a copy of the name stored inline after it; the
** context is handed off to getaddrinfo_sometime(), whose callback
** (sntp_name_resolved) assumes ownership.
*/
void
handle_lookup(
	const char *name,
	int flags
	)
{
	struct addrinfo	hints;	/* Local copy is OK */
	struct dns_ctx *ctx;
	long		key_long;
	char *		stored_name;
	size_t		name_octets;

	TRACE(1, ("handle_lookup(%s,%#x)\n", name, flags));

	ZERO(hints);
	hints.ai_family = ai_fam_pref;
	hints.ai_flags = AI_CANONNAME | Z_AI_NUMERICSERV;
	/*
	** Without a socktype we would get one entry per transport
	** (TCP and UDP) for each address; restrict to UDP datagrams.
	*/
	hints.ai_socktype = SOCK_DGRAM;
	hints.ai_protocol = IPPROTO_UDP;

	/* one allocation: the context followed by a copy of the name */
	name_octets = strlen(name) + 1;
	ctx = emalloc_zero(sizeof(*ctx) + name_octets);
	stored_name = (char *)(ctx + 1);
	memcpy(stored_name, name, name_octets);
	ctx->name = stored_name;
	ctx->flags = flags;
	ctx->timeout = response_tv;

	/* The following should arguably be passed in... */
	if (ENABLED_OPT(AUTHENTICATION)
	    && atoint(OPT_ARG(AUTHENTICATION), &key_long)) {
		ctx->key_id = key_long;
		get_key(ctx->key_id, &ctx->key);
	} else {
		ctx->key_id = -1;
		ctx->key = NULL;
	}

	++n_pending_dns;
	getaddrinfo_sometime(name, "123", &hints, 0,
			     &sntp_name_resolved, ctx);
}
429
430
/*
** DNS Callback:
** - For each IP:
** - - open a socket
** - - increment n_pending_ntp
** - - send a request if this is a Unicast callback
** - - queue wait for response
** - decrement n_pending_dns
**
** rescode/gai_errno report resolver failure; context is the dns_ctx
** allocated in handle_lookup(); addr is the resolved address list.
*/
void
sntp_name_resolved(
	int			rescode,
	int			gai_errno,
	void *			context,
	const char *		name,
	const char *		service,
	const struct addrinfo *	hints,
	const struct addrinfo *	addr
	)
{
	struct dns_ctx *	dctx;
	sent_pkt *		spkt;
	const struct addrinfo *	ai;
	SOCKET			sock;
	u_int			xmt_delay_v4;	/* per-family stagger counters */
	u_int			xmt_delay_v6;
	u_int			xmt_delay;
	size_t			octets;

	xmt_delay_v4 = 0;
	xmt_delay_v6 = 0;
	dctx = context;
	if (rescode) {
#ifdef EAI_SYSTEM
		if (EAI_SYSTEM == rescode) {
			/* system-level failure: report via errno/%m */
			errno = gai_errno;
			mfprintf(stderr, "%s lookup error %m\n",
				 dctx->name);
		} else
#endif
			fprintf(stderr, "%s lookup error %s\n",
				dctx->name, gai_strerror(rescode));
	} else {
		TRACE(3, ("%s [%s]\n", dctx->name,
			  (addr->ai_canonname != NULL)
			      ? addr->ai_canonname
			      : ""));

		for (ai = addr; ai != NULL; ai = ai->ai_next) {

			/* skip servers with a Kiss-o'-Death on file */
			if (check_kod(ai))
				continue;

			switch (ai->ai_family) {

			case AF_INET:
				sock = sock4;
				xmt_delay = xmt_delay_v4;
				xmt_delay_v4++;
				break;

			case AF_INET6:
				if (!ipv6_works)
					continue;

				sock = sock6;
				xmt_delay = xmt_delay_v6;
				xmt_delay_v6++;
				break;

			default:
				msyslog(LOG_ERR, "sntp_name_resolved: unexpected ai_family: %d",
					ai->ai_family);
				exit(1);
				break;
			}

			/*
			** We're waiting for a response for either unicast
			** or broadcast, so...
			*/
			++n_pending_ntp;

			/* If this is for a unicast IP, queue a request */
			if (dctx->flags & CTX_UCST) {
				spkt = emalloc_zero(sizeof(*spkt));
				spkt->dctx = dctx;
				/* clamp the copy to our sockaddr_u storage */
				octets = min(ai->ai_addrlen, sizeof(spkt->addr));
				memcpy(&spkt->addr, ai->ai_addr, octets);
				/* queue_xmt() takes ownership of spkt */
				queue_xmt(sock, dctx, spkt, xmt_delay);
			}
		}
	}
	/* n_pending_dns really should be >0 here... */
	--n_pending_dns;
	check_exit_conditions();
}
528
529
530 /*
531 ** queue_xmt
532 */
533 void
534 queue_xmt(
535 SOCKET sock,
536 struct dns_ctx * dctx,
537 sent_pkt * spkt,
538 u_int xmt_delay
539 )
540 {
541 sockaddr_u * dest;
542 sent_pkt ** pkt_listp;
543 sent_pkt * match;
544 xmt_ctx * xctx;
545 struct timeval start_cb;
546 struct timeval delay;
547
548 dest = &spkt->addr;
549 if (IS_IPV6(dest))
550 pkt_listp = &v6_pkts_list;
551 else
552 pkt_listp = &v4_pkts_list;
553
554 /* reject attempts to add address already listed */
555 for (match = *pkt_listp; match != NULL; match = match->link) {
556 if (ADDR_PORT_EQ(&spkt->addr, &match->addr)) {
557 if (strcasecmp(spkt->dctx->name,
558 match->dctx->name))
559 printf("%s %s duplicate address from %s ignored.\n",
560 sptoa(&match->addr),
561 match->dctx->name,
562 spkt->dctx->name);
563 else
564 printf("%s %s, duplicate address ignored.\n",
565 sptoa(&match->addr),
566 match->dctx->name);
567 dec_pending_ntp(spkt->dctx->name, &spkt->addr);
568 free(spkt);
569 return;
570 }
571 }
572
573 LINK_SLIST(*pkt_listp, spkt, link);
574
575 xctx = emalloc_zero(sizeof(*xctx));
576 xctx->sock = sock;
577 xctx->spkt = spkt;
578 gettimeofday_cached(base, &start_cb);
579 xctx->sched = start_cb.tv_sec + (2 * xmt_delay);
580
581 LINK_SORT_SLIST(xmt_q, xctx, (xctx->sched < L_S_S_CUR()->sched),
582 link, xmt_ctx);
583 if (xmt_q == xctx) {
584 /*
585 * The new entry is the first scheduled. The timer is
586 * either not active or is set for the second xmt
587 * context in xmt_q.
588 */
589 if (NULL == ev_xmt_timer)
590 ev_xmt_timer = event_new(base, INVALID_SOCKET,
591 EV_TIMEOUT,
592 &xmt_timer_cb, NULL);
593 if (NULL == ev_xmt_timer) {
594 msyslog(LOG_ERR,
595 "queue_xmt: event_new(base, -1, EV_TIMEOUT) failed!");
596 exit(1);
597 }
598 ZERO(delay);
599 if (xctx->sched > start_cb.tv_sec)
600 delay.tv_sec = xctx->sched - start_cb.tv_sec;
601 event_add(ev_xmt_timer, &delay);
602 TRACE(2, ("queue_xmt: xmt timer for %u usec\n",
603 (u_int)delay.tv_usec));
604 }
605 }
606
607
/*
** xmt_timer_cb
**
** Fires when the head of xmt_q is (approximately) due.  Transmits at
** most one packet per invocation, then re-arms the timer: with 'gap'
** if the next entry is already due (paces back-to-back sends), else
** for the whole seconds remaining until it is due.
*/
void
xmt_timer_cb(
	evutil_socket_t	fd,
	short		what,
	void *		ctx
	)
{
	struct timeval	start_cb;
	struct timeval	delay;
	xmt_ctx *	x;

	UNUSED_ARG(fd);
	UNUSED_ARG(ctx);
	DEBUG_INSIST(EV_TIMEOUT == what);

	if (NULL == xmt_q || shutting_down)
		return;
	gettimeofday_cached(base, &start_cb);
	if (xmt_q->sched <= start_cb.tv_sec) {
		/* head is due: dequeue, transmit, free */
		UNLINK_HEAD_SLIST(x, xmt_q, link);
		TRACE(2, ("xmt_timer_cb: at .%6.6u -> %s\n",
			  (u_int)start_cb.tv_usec, stoa(&x->spkt->addr)));
		xmt(x);
		free(x);
		if (NULL == xmt_q)
			return;		/* queue drained; timer stays idle */
	}
	/* xmt_q now refers to the next pending entry */
	if (xmt_q->sched <= start_cb.tv_sec) {
		/* next is also due: pace it by the configured gap */
		event_add(ev_xmt_timer, &gap);
		TRACE(2, ("xmt_timer_cb: at .%6.6u gap %6.6u\n",
			  (u_int)start_cb.tv_usec,
			  (u_int)gap.tv_usec));
	} else {
		delay.tv_sec = xmt_q->sched - start_cb.tv_sec;
		delay.tv_usec = 0;
		event_add(ev_xmt_timer, &delay);
		TRACE(2, ("xmt_timer_cb: at .%6.6u next %ld seconds\n",
			  (u_int)start_cb.tv_usec,
			  (long)delay.tv_sec));
	}
}
652
653
654 /*
655 ** xmt()
656 */
657 void
658 xmt(
659 xmt_ctx * xctx
660 )
661 {
662 SOCKET sock = xctx->sock;
663 struct dns_ctx *dctx = xctx->spkt->dctx;
664 sent_pkt * spkt = xctx->spkt;
665 sockaddr_u * dst = &spkt->addr;
666 struct timeval tv_xmt;
667 struct pkt x_pkt;
668 size_t pkt_len;
669 int sent;
670
671 if (0 != gettimeofday(&tv_xmt, NULL)) {
672 msyslog(LOG_ERR,
673 "xmt: gettimeofday() failed: %m");
674 exit(1);
675 }
676 tv_xmt.tv_sec += JAN_1970;
677
678 pkt_len = generate_pkt(&x_pkt, &tv_xmt, dctx->key_id,
679 dctx->key);
680
681 sent = sendpkt(sock, dst, &x_pkt, pkt_len);
682 if (sent) {
683 /* Save the packet we sent... */
684 memcpy(&spkt->x_pkt, &x_pkt, min(sizeof(spkt->x_pkt),
685 pkt_len));
686 spkt->stime = tv_xmt.tv_sec - JAN_1970;
687
688 TRACE(2, ("xmt: %lx.%6.6u %s %s\n", (u_long)tv_xmt.tv_sec,
689 (u_int)tv_xmt.tv_usec, dctx->name, stoa(dst)));
690 } else {
691 dec_pending_ntp(dctx->name, dst);
692 }
693
694 return;
695 }
696
697
698 /*
699 * timeout_queries() -- give up on unrequited NTP queries
700 */
701 void
702 timeout_queries(void)
703 {
704 struct timeval start_cb;
705 u_int idx;
706 sent_pkt * head;
707 sent_pkt * spkt;
708 sent_pkt * spkt_next;
709 long age;
710 int didsomething = 0;
711
712 TRACE(3, ("timeout_queries: called to check %u items\n",
713 (unsigned)COUNTOF(fam_listheads)));
714
715 gettimeofday_cached(base, &start_cb);
716 for (idx = 0; idx < COUNTOF(fam_listheads); idx++) {
717 head = fam_listheads[idx];
718 for (spkt = head; spkt != NULL; spkt = spkt_next) {
719 char xcst;
720
721 didsomething = 1;
722 switch (spkt->dctx->flags & CTX_xCST) {
723 case CTX_BCST:
724 xcst = 'B';
725 break;
726
727 case CTX_UCST:
728 xcst = 'U';
729 break;
730
731 default:
732 INSIST(!"spkt->dctx->flags neither UCST nor BCST");
733 break;
734 }
735
736 spkt_next = spkt->link;
737 if (0 == spkt->stime || spkt->done)
738 continue;
739 age = start_cb.tv_sec - spkt->stime;
740 TRACE(3, ("%s %s %cCST age %ld\n",
741 stoa(&spkt->addr),
742 spkt->dctx->name, xcst, age));
743 if (age > response_timeout)
744 timeout_query(spkt);
745 }
746 }
747 // Do we care about didsomething?
748 TRACE(3, ("timeout_queries: didsomething is %d, age is %ld\n",
749 didsomething, (long) (start_cb.tv_sec - start_tv.tv_sec)));
750 if (start_cb.tv_sec - start_tv.tv_sec > response_timeout) {
751 TRACE(3, ("timeout_queries: bail!\n"));
752 event_base_loopexit(base, NULL);
753 shutting_down = TRUE;
754 }
755 }
756
757
758 void dec_pending_ntp(
759 const char * name,
760 sockaddr_u * server
761 )
762 {
763 if (n_pending_ntp > 0) {
764 --n_pending_ntp;
765 check_exit_conditions();
766 } else {
767 INSIST(0 == n_pending_ntp);
768 TRACE(1, ("n_pending_ntp was zero before decrement for %s\n",
769 hostnameaddr(name, server)));
770 }
771 }
772
773
774 void timeout_query(
775 sent_pkt * spkt
776 )
777 {
778 sockaddr_u * server;
779 char xcst;
780
781
782 switch (spkt->dctx->flags & CTX_xCST) {
783 case CTX_BCST:
784 xcst = 'B';
785 break;
786
787 case CTX_UCST:
788 xcst = 'U';
789 break;
790
791 default:
792 INSIST(!"spkt->dctx->flags neither UCST nor BCST");
793 break;
794 }
795 spkt->done = TRUE;
796 server = &spkt->addr;
797 msyslog(LOG_INFO, "%s no %cCST response after %d seconds",
798 hostnameaddr(spkt->dctx->name, server), xcst,
799 response_timeout);
800 dec_pending_ntp(spkt->dctx->name, server);
801 return;
802 }
803
804
/*
** check_kod() -- return nonzero when a Kiss-o'-Death entry is on file
** for this address, meaning the server must not be queried.
*/
int
check_kod(
	const struct addrinfo *	ai
	)
{
	struct kod_entry *	reason;
	char *			hostname;
	int			listed = 0;

	/* Is there a KoD on file for this address? */
	hostname = addrinfo_to_str(ai);
	TRACE(2, ("check_kod: checking <%s>\n", hostname));
	if (search_entry(hostname, &reason)) {
		printf("prior KoD for %s, skipping.\n",
			hostname);
		free(reason);
		listed = 1;
	}
	free(hostname);

	return listed;
}
831
832
/*
** Socket readable/timeout Callback:
** Read in the packet
** Unicast:
** - close socket
** - decrement n_pending_ntp
** - If packet is good, set the time and "exit"
** Broadcast:
** - If packet is good, set the time and "exit"
*/
void
sock_cb(
	evutil_socket_t	fd,
	short		what,
	void *		ptr
	)
{
	sockaddr_u	sender;
	sockaddr_u *	psau;
	sent_pkt **	p_pktlist;
	sent_pkt *	spkt;
	int		rpktl;
	int		rc;

	INSIST(sock4 == fd || sock6 == fd);

	TRACE(3, ("sock_cb: event on sock%s:%s%s%s%s\n",
		  (fd == sock6)
		      ? "6"
		      : "4",
		  (what & EV_TIMEOUT) ? " timeout" : "",
		  (what & EV_READ)    ? " read" : "",
		  (what & EV_WRITE)   ? " write" : "",
		  (what & EV_SIGNAL)  ? " signal" : ""));

	if (!(EV_READ & what)) {
		/* periodic wakeup (wakeup_tv): scan for timed-out queries */
		if (EV_TIMEOUT & what)
			timeout_queries();

		return;
	}

	/* Read in the packet */
	rpktl = recvdata(fd, &sender, &rbuf, sizeof(rbuf));
	if (rpktl < 0) {
		msyslog(LOG_DEBUG, "recvfrom error %m");
		return;
	}

	if (sock6 == fd)
		p_pktlist = &v6_pkts_list;
	else
		p_pktlist = &v4_pkts_list;

	/* match the sender against a request we actually sent */
	for (spkt = *p_pktlist; spkt != NULL; spkt = spkt->link) {
		psau = &spkt->addr;
		if (SOCK_EQ(&sender, psau))
			break;
	}
	if (NULL == spkt) {
		msyslog(LOG_WARNING,
			"Packet from unexpected source %s dropped",
			sptoa(&sender));
		return;
	}

	TRACE(1, ("sock_cb: %s %s\n", spkt->dctx->name,
		  sptoa(&sender)));

	/* validate the reply against the request we saved in xmt() */
	rpktl = process_pkt(&r_pkt, &sender, rpktl, MODE_SERVER,
			    &spkt->x_pkt, "sock_cb");

	TRACE(2, ("sock_cb: process_pkt returned %d\n", rpktl));

	/* If this is a Unicast packet, one down ... */
	if (!spkt->done && (CTX_UCST & spkt->dctx->flags)) {
		dec_pending_ntp(spkt->dctx->name, &spkt->addr);
		spkt->done = TRUE;
	}

	/* If the packet is good, set the time and we're all done */
	rc = handle_pkt(rpktl, &r_pkt, &spkt->addr, spkt->dctx->name);
	if (0 != rc)
		TRACE(1, ("sock_cb: handle_pkt() returned %d\n", rc));
	check_exit_conditions();
}
920
921
922 /*
923 * check_exit_conditions()
924 *
925 * If sntp has a reply, ask the event loop to stop after this round of
926 * callbacks, unless --wait was used.
927 */
928 void
929 check_exit_conditions(void)
930 {
931 if ((0 == n_pending_ntp && 0 == n_pending_dns) ||
932 (time_derived && !HAVE_OPT(WAIT))) {
933 event_base_loopexit(base, NULL);
934 shutting_down = TRUE;
935 } else {
936 TRACE(2, ("%d NTP and %d name queries pending\n",
937 n_pending_ntp, n_pending_dns));
938 }
939 }
940
941
/*
 * sntp_addremove_fd() is invoked by the intres blocking worker code
 * to read from a pipe, or to stop same.
 *
 * Locates the blocking_child owning fd (via resp_read_pipe); when
 * remove_it is set, tears down its read event, otherwise registers a
 * persistent EV_READ event dispatching worker_resp_cb().
 */
void sntp_addremove_fd(
	int	fd,
	int	is_pipe,
	int	remove_it
	)
{
	u_int		idx;
	blocking_child *c;
	struct event *	ev;

#ifdef HAVE_SOCKETPAIR
	if (is_pipe) {
		/* sntp only asks for EV_FEATURE_FDS without HAVE_SOCKETPAIR */
		msyslog(LOG_ERR, "fatal: pipes not supported on systems with socketpair()");
		exit(1);
	}
#endif

	c = NULL;
	for (idx = 0; idx < blocking_children_alloc; idx++) {
		c = blocking_children[idx];
		if (NULL == c)
			continue;
		if (fd == c->resp_read_pipe)
			break;
	}
	if (idx == blocking_children_alloc)
		return;		/* fd not owned by any child: nothing to do */

	if (remove_it) {
		/* tear down and free the child's read event */
		ev = c->resp_read_ctx;
		c->resp_read_ctx = NULL;
		event_del(ev);
		event_free(ev);

		return;
	}

	ev = event_new(base, fd, EV_READ | EV_PERSIST,
		       &worker_resp_cb, c);
	if (NULL == ev) {
		msyslog(LOG_ERR,
			"sntp_addremove_fd: event_new(base, fd) failed!");
		return;
	}
	c->resp_read_ctx = ev;
	event_add(ev, NULL);	/* no timeout: fires only on readability */
}
994
995
/* called by forked intres child to close open descriptors */
#ifdef WORK_FORK
/*
 * kill_asyncio() -- close all open sockets in the forked child.
 *
 * startfd is accepted for interface compatibility but unused here;
 * only the four known sockets are closed.
 *
 * Fixed: the bsock4/bsock6 branches previously closed and reset
 * sock4/sock6 again (copy-paste error), leaking the broadcast
 * sockets and never marking them invalid.
 */
void
kill_asyncio(
	int	startfd
	)
{
	if (INVALID_SOCKET != sock4) {
		closesocket(sock4);
		sock4 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != sock6) {
		closesocket(sock6);
		sock6 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != bsock4) {
		closesocket(bsock4);
		bsock4 = INVALID_SOCKET;
	}
	if (INVALID_SOCKET != bsock6) {
		closesocket(bsock6);
		bsock6 = INVALID_SOCKET;
	}
}
#endif
1021
1022
/*
 * worker_resp_cb() is invoked when resp_read_pipe is readable.
 * ctx is the blocking_child whose response pipe became readable;
 * the pending response is drained via process_blocking_resp().
 */
void
worker_resp_cb(
	evutil_socket_t	fd,
	short		what,
	void *		ctx	/* blocking_child * */
	)
{
	blocking_child *	c;

	DEBUG_INSIST(EV_READ & what);
	c = ctx;
	DEBUG_INSIST(fd == c->resp_read_pipe);
	process_blocking_resp(c);
}
1040
1041
1042 /*
1043 * intres_timeout_req(s) is invoked in the parent to schedule an idle
1044 * timeout to fire in s seconds, if not reset earlier by a call to
1045 * intres_timeout_req(0), which clears any pending timeout. When the
1046 * timeout expires, worker_idle_timer_fired() is invoked (again, in the
1047 * parent).
1048 *
1049 * sntp and ntpd each provide implementations adapted to their timers.
1050 */
1051 void
1052 intres_timeout_req(
1053 u_int seconds /* 0 cancels */
1054 )
1055 {
1056 struct timeval tv_to;
1057
1058 if (NULL == ev_worker_timeout) {
1059 ev_worker_timeout = event_new(base, -1,
1060 EV_TIMEOUT | EV_PERSIST,
1061 &worker_timeout, NULL);
1062 DEBUG_INSIST(NULL != ev_worker_timeout);
1063 } else {
1064 event_del(ev_worker_timeout);
1065 }
1066 if (0 == seconds)
1067 return;
1068 tv_to.tv_sec = seconds;
1069 tv_to.tv_usec = 0;
1070 event_add(ev_worker_timeout, &tv_to);
1071 }
1072
1073
/*
 * worker_timeout() -- intres idle-timeout expiry callback scheduled by
 * intres_timeout_req(); simply forwards to worker_idle_timer_fired().
 */
void
worker_timeout(
	evutil_socket_t	fd,
	short		what,
	void *		ctx
	)
{
	UNUSED_ARG(fd);
	UNUSED_ARG(ctx);

	DEBUG_REQUIRE(EV_TIMEOUT & what);
	worker_idle_timer_fired();
}
1087
1088
1089 void
1090 sntp_libevent_log_cb(
1091 int severity,
1092 const char * msg
1093 )
1094 {
1095 int level;
1096
1097 switch (severity) {
1098
1099 default:
1100 case _EVENT_LOG_DEBUG:
1101 level = LOG_DEBUG;
1102 break;
1103
1104 case _EVENT_LOG_MSG:
1105 level = LOG_NOTICE;
1106 break;
1107
1108 case _EVENT_LOG_WARN:
1109 level = LOG_WARNING;
1110 break;
1111
1112 case _EVENT_LOG_ERR:
1113 level = LOG_ERR;
1114 break;
1115 }
1116
1117 msyslog(level, "%s", msg);
1118 }
1119
1120
/*
** generate_pkt() -- fill in a client-mode (mode 3) NTP request.
**
** Stamps the transmit time (tv_xmt must already be NTP epoch), sets
** stratum / poll / li_vn_mode, and, when pkt_key is non-NULL, appends
** a MAC (key ID word plus digest) via make_mac().
**
** Returns the on-the-wire packet length in octets.
*/
int
generate_pkt (
	struct pkt *		x_pkt,
	const struct timeval *	tv_xmt,
	int			key_id,
	struct key *		pkt_key
	)
{
	l_fp	xmt_fp;
	int	pkt_len;
	int	mac_size;

	pkt_len = LEN_PKT_NOMAC;
	ZERO(*x_pkt);
	TVTOTS(tv_xmt, &xmt_fp);
	HTONL_FP(&xmt_fp, &x_pkt->xmt);
	x_pkt->stratum = STRATUM_TO_PKT(STRATUM_UNSPEC);
	x_pkt->ppoll = 8;
	/* FIXME! Modus broadcast + adr. check -> bdr. pkt */
	set_li_vn_mode(x_pkt, LEAP_NOTINSYNC, ntpver, 3);
	if (pkt_key != NULL) {
		x_pkt->exten[0] = htonl(key_id);
		mac_size = 20;	/* max room for MAC */
		mac_size = make_mac((char *)x_pkt, pkt_len, mac_size,
				    pkt_key, (char *)&x_pkt->exten[1]);
		if (mac_size > 0)
			pkt_len += mac_size + 4;  /* digest + key ID word */
	}
	return pkt_len;
}
1151
1152
/*
** handle_pkt() -- act on the result code from process_pkt().
**
** rpktl > 0 is a valid packet length; otherwise it is one of the
** SERVER_UNUSEABLE / PACKET_UNUSEABLE / SERVER_AUTH_FAIL / KOD codes.
** For a good packet: computes offset, precision, and synchronization
** distance, logs the result, and (unless SNTP_PRETEND_TIME is set or
** the clock was already adjusted) sets the time when -S/-s was given.
**
** Returns -1 for an unusable server; 0 / EX_OK / set_time()'s result
** on success; 1 for the remaining error codes.
*/
int
handle_pkt(
	int		rpktl,
	struct pkt *	rpkt,
	sockaddr_u *	host,
	const char *	hostname
	)
{
	char		disptxt[32];
	const char *	addrtxt;
	struct timeval	tv_dst;
	int		cnt;
	int		sw_case;
	int		digits;
	int		stratum;
	char *		ref;
	char *		ts_str;
	double		offset;
	double		precision;
	double		synch_distance;
	char *		p_SNTP_PRETEND_TIME;
	time_t		pretend_time;
#if SIZEOF_TIME_T == 8
	long long	ll;
#else
	long		l;
#endif

	ts_str = NULL;

	/* collapse all positive lengths onto one switch case */
	if (rpktl > 0)
		sw_case = 1;
	else
		sw_case = rpktl;

	switch (sw_case) {

	case SERVER_UNUSEABLE:
		return -1;
		break;

	case PACKET_UNUSEABLE:
		break;

	case SERVER_AUTH_FAIL:
		break;

	case KOD_DEMOBILIZE:
		/* Received a DENY or RESTR KOD packet */
		addrtxt = stoa(host);
		ref = (char *)&rpkt->refid;
		add_entry(addrtxt, ref);	/* remember the KoD on disk */
		msyslog(LOG_WARNING, "KOD code %c%c%c%c from %s %s",
			ref[0], ref[1], ref[2], ref[3], addrtxt, hostname);
		break;

	case KOD_RATE:
		/*
		** Hmm...
		** We should probably call add_entry() with an
		** expiration timestamp of several seconds in the future,
		** and back-off even more if we get more RATE responses.
		*/
		break;

	case 1:
		TRACE(3, ("handle_pkt: %d bytes from %s %s\n",
			  rpktl, stoa(host), hostname));

		gettimeofday_cached(base, &tv_dst);

		/* test hook: fixed destination time from the environment */
		p_SNTP_PRETEND_TIME = getenv("SNTP_PRETEND_TIME");
		if (p_SNTP_PRETEND_TIME) {
			pretend_time = 0;
#if SIZEOF_TIME_T == 4
			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%ld", &l))
				pretend_time = (time_t)l;
#elif SIZEOF_TIME_T == 8
			if (1 == sscanf(p_SNTP_PRETEND_TIME, "%lld", &ll))
				pretend_time = (time_t)ll;
#else
# include "GRONK: unexpected value for SIZEOF_TIME_T"
#endif
			if (0 != pretend_time)
				tv_dst.tv_sec = pretend_time;
		}

		offset_calculation(rpkt, rpktl, &tv_dst, &offset,
				   &precision, &synch_distance);
		time_derived = TRUE;

		/* significant fraction digits implied by the precision */
		for (digits = 0; (precision *= 10.) < 1.; ++digits)
			/* empty */ ;
		if (digits > 6)
			digits = 6;

		ts_str = tv_to_str(&tv_dst);
		stratum = rpkt->stratum;
		if (0 == stratum)
			stratum = 16;	/* 0 is shorthand for unspec (16) */

		if (synch_distance > 0.) {
			cnt = snprintf(disptxt, sizeof(disptxt),
				       " +/- %f", synch_distance);
			if ((size_t)cnt >= sizeof(disptxt))
				snprintf(disptxt, sizeof(disptxt),
					 "ERROR %d >= %d", cnt,
					 (int)sizeof(disptxt));
		} else {
			disptxt[0] = '\0';
		}

		msyslog(LOG_INFO, "%s %+.*f%s %s s%d%s", ts_str,
			digits, offset, disptxt,
			hostnameaddr(hostname, host), stratum,
			(time_adjusted)
			    ? " [excess]"
			    : "");
		free(ts_str);

		if (p_SNTP_PRETEND_TIME)
			return 0;

		if (!time_adjusted &&
		    (ENABLED_OPT(STEP) || ENABLED_OPT(SLEW)))
			return set_time(offset);

		return EX_OK;
	}

	return 1;
}
1285
1286
/*
** offset_calculation() -- standard NTP timestamp arithmetic.
**
** Derives the clock offset ((t21 + t34) / 2), the server's stated
** precision, and an estimated synchronization distance from the server
** reply rpkt and the local receive time tv_dst.  Results are returned
** through the out pointers; rpktl is used only for debug dumping.
*/
void
offset_calculation(
	struct pkt *	rpkt,
	int		rpktl,
	struct timeval *tv_dst,
	double *	offset,
	double *	precision,
	double *	synch_distance
	)
{
	l_fp	p_rec, p_xmt, p_ref, p_org, tmp, dst;
	u_fp	p_rdly, p_rdsp;
	double	t21, t34, delta;

	/* Convert timestamps from network to host byte order */
	p_rdly = NTOHS_FP(rpkt->rootdelay);
	p_rdsp = NTOHS_FP(rpkt->rootdisp);
	NTOHL_FP(&rpkt->reftime, &p_ref);
	NTOHL_FP(&rpkt->org, &p_org);
	NTOHL_FP(&rpkt->rec, &p_rec);
	NTOHL_FP(&rpkt->xmt, &p_xmt);

	*precision = LOGTOD(rpkt->precision);

	TRACE(3, ("offset_calculation: LOGTOD(rpkt->precision): %f\n", *precision));

	/* Compute offset etc. */
	tmp = p_rec;
	L_SUB(&tmp, &p_org);
	LFPTOD(&tmp, t21);		/* t21 = server recv - our xmt */
	TVTOTS(tv_dst, &dst);
	dst.l_ui += JAN_1970;		/* our receive time, NTP epoch */
	tmp = p_xmt;
	L_SUB(&tmp, &dst);
	LFPTOD(&tmp, t34);		/* t34 = server xmt - our recv */
	*offset = (t21 + t34) / 2.;
	delta = t21 - t34;		/* round-trip delay estimate */

	// synch_distance is:
	// (peer->delay + peer->rootdelay) / 2 + peer->disp
	// + peer->rootdisp + clock_phi * (current_time - peer->update)
	// + peer->jitter;
	//
	// and peer->delay = fabs(peer->offset - p_offset) * 2;
	// and peer->offset needs history, so we're left with
	// p_offset = (t21 + t34) / 2.;
	// peer->disp = 0; (we have no history to augment this)
	// clock_phi = 15e-6;
	// peer->jitter = LOGTOD(sys_precision); (we have no history to augment this)
	// and ntp_proto.c:set_sys_tick_precision() should get us sys_precision.
	//
	// so our answer seems to be:
	//
	// (fabs(t21 + t34) + peer->rootdelay) / 3.
	// + 0 (peer->disp)
	// + peer->rootdisp
	// + 15e-6 (clock_phi)
	// + LOGTOD(sys_precision)

	INSIST( FPTOD(p_rdly) >= 0. );
#if 1
	*synch_distance = (fabs(t21 + t34) + FPTOD(p_rdly)) / 3.
			  + 0.
			  + FPTOD(p_rdsp)
			  + 15e-6
			  + 0.	/* LOGTOD(sys_precision) when we can get it */
			  ;
	INSIST( *synch_distance >= 0. );
#else
	*synch_distance = (FPTOD(p_rdly) + FPTOD(p_rdsp))/2.0;
#endif

#ifdef DEBUG
	if (debug > 3) {
		printf("sntp rootdelay: %f\n", FPTOD(p_rdly));
		printf("sntp rootdisp: %f\n", FPTOD(p_rdsp));
		printf("sntp syncdist: %f\n", *synch_distance);

		pkt_output(rpkt, rpktl, stdout);

		printf("sntp offset_calculation: rpkt->reftime:\n");
		l_fp_output(&p_ref, stdout);
		printf("sntp offset_calculation: rpkt->org:\n");
		l_fp_output(&p_org, stdout);
		printf("sntp offset_calculation: rpkt->rec:\n");
		l_fp_output(&p_rec, stdout);
		printf("sntp offset_calculation: rpkt->xmt:\n");
		l_fp_output(&p_xmt, stdout);
	}
#endif

	TRACE(3, ("sntp offset_calculation:\trec - org t21: %.6f\n"
		  "\txmt - dst t34: %.6f\tdelta: %.6f\toffset: %.6f\n",
		  t21, t34, delta, *offset));

	return;
}
1384
1385
1386
1387 /* Compute the 8 bits for li_vn_mode */
1388 void
1389 set_li_vn_mode (
1390 struct pkt *spkt,
1391 char leap,
1392 char version,
1393 char mode
1394 )
1395 {
1396 if (leap > 3) {
1397 msyslog(LOG_DEBUG, "set_li_vn_mode: leap > 3, using max. 3");
1398 leap = 3;
1399 }
1400
1401 if ((unsigned char)version > 7) {
1402 msyslog(LOG_DEBUG, "set_li_vn_mode: version < 0 or > 7, using 4");
1403 version = 4;
1404 }
1405
1406 if (mode > 7) {
1407 msyslog(LOG_DEBUG, "set_li_vn_mode: mode > 7, using client mode 3");
1408 mode = 3;
1409 }
1410
1411 spkt->li_vn_mode = leap << 6;
1412 spkt->li_vn_mode |= version << 3;
1413 spkt->li_vn_mode |= mode;
1414 }
1415
1416
1417 /*
1418 ** set_time applies 'offset' to the local clock.
1419 */
1420 int
1421 set_time(
1422 double offset
1423 )
1424 {
1425 int rc;
1426
1427 if (time_adjusted)
1428 return EX_OK;
1429
1430 /*
1431 ** If we can step but we cannot slew, then step.
1432 ** If we can step or slew and and |offset| > steplimit, then step.
1433 */
1434 if (ENABLED_OPT(STEP) &&
1435 ( !ENABLED_OPT(SLEW)
1436 || (ENABLED_OPT(SLEW) && (fabs(offset) > steplimit))
1437 )) {
1438 rc = step_systime(offset);
1439
1440 /* If there was a problem, can we rely on errno? */
1441 if (1 == rc)
1442 time_adjusted = TRUE;
1443 return (time_adjusted)
1444 ? EX_OK
1445 : 1;
1446 /*
1447 ** In case of error, what should we use?
1448 ** EX_UNAVAILABLE?
1449 ** EX_OSERR?
1450 ** EX_NOPERM?
1451 */
1452 }
1453
1454 if (ENABLED_OPT(SLEW)) {
1455 rc = adj_systime(offset);
1456
1457 /* If there was a problem, can we rely on errno? */
1458 if (1 == rc)
1459 time_adjusted = TRUE;
1460 return (time_adjusted)
1461 ? EX_OK
1462 : 1;
1463 /*
1464 ** In case of error, what should we use?
1465 ** EX_UNAVAILABLE?
1466 ** EX_OSERR?
1467 ** EX_NOPERM?
1468 */
1469 }
1470
1471 return EX_SOFTWARE;
1472 }
1473
1474
1475 int
1476 libevent_version_ok(void)
1477 {
1478 ev_uint32_t v_compile_maj;
1479 ev_uint32_t v_run_maj;
1480
1481 v_compile_maj = LIBEVENT_VERSION_NUMBER & 0xffff0000;
1482 v_run_maj = event_get_version_number() & 0xffff0000;
1483 if (v_compile_maj != v_run_maj) {
1484 fprintf(stderr,
1485 "Incompatible libevent versions: have %s, built with %s\n",
1486 event_get_version(),
1487 LIBEVENT_VERSION);
1488 return 0;
1489 }
1490 return 1;
1491 }
1492
1493 /*
1494 * gettimeofday_cached()
1495 *
1496 * Clones the event_base_gettimeofday_cached() interface but ensures the
1497 * times are always on the gettimeofday() 1970 scale. Older libevent 2
1498 * sometimes used gettimeofday(), sometimes the since-system-start
1499 * clock_gettime(CLOCK_MONOTONIC), depending on the platform.
1500 *
1501 * It is not cleanly possible to tell which timescale older libevent is
1502 * using.
1503 *
1504 * The strategy involves 1 hour thresholds chosen to be far longer than
1505 * the duration of a round of libevent callbacks, which share a cached
1506 * start-of-round time. First compare the last cached time with the
1507 * current gettimeofday() time. If they are within one hour, libevent
1508 * is using the proper timescale so leave the offset 0. Otherwise,
1509 * compare libevent's cached time and the current time on the monotonic
1510 * scale. If they are within an hour, libevent is using the monotonic
1511 * scale so calculate the offset to add to such times to bring them to
1512 * gettimeofday()'s scale.
1513 */
int
gettimeofday_cached(
	struct event_base *	b,	/* libevent base whose cached time to read */
	struct timeval *	caller_tv	/* out: time on the 1970 scale */
	)
{
#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
	/* last base/raw-time pair seen, plus the corrected result for it */
	static struct event_base *	cached_b;
	static struct timeval		cached;
	static struct timeval		adj_cached;
	/* offset to add to libevent times; stays {0,0} on correct libevents */
	static struct timeval		offset;
	static int			offset_ready;	/* timescale probed once */
	struct timeval	latest;
	struct timeval	systemt;	/* current gettimeofday() */
	struct timespec	ts;
	struct timeval	mono;		/* current CLOCK_MONOTONIC as timeval */
	struct timeval	diff;
	int		cgt_rc;
	int		gtod_rc;

	event_base_gettimeofday_cached(b, &latest);
	/* fast path: same base and unchanged cached time, reuse last answer */
	if (b == cached_b &&
	    !memcmp(&latest, &cached, sizeof(latest))) {
		*caller_tv = adj_cached;
		return 0;
	}
	cached = latest;
	cached_b = b;
	/*
	 * One-time probe: decide which timescale this libevent reports
	 * (see the block comment above the function for the strategy).
	 */
	if (!offset_ready) {
		cgt_rc = clock_gettime(CLOCK_MONOTONIC, &ts);
		gtod_rc = gettimeofday(&systemt, NULL);
		if (0 != gtod_rc) {
			msyslog(LOG_ERR,
				"%s: gettimeofday() error %m",
				progname);
			exit(1);
		}
		diff = sub_tval(systemt, latest);
		if (debug > 1)
			printf("system minus cached %+ld.%06ld\n",
			       (long)diff.tv_sec, (long)diff.tv_usec);
		if (0 != cgt_rc || labs((long)diff.tv_sec) < 3600) {
			/*
			 * Either use_monotonic == 0, or this libevent
			 * has been repaired.  Leave offset at zero.
			 */
		} else {
			mono.tv_sec = ts.tv_sec;
			mono.tv_usec = ts.tv_nsec / 1000;
			diff = sub_tval(latest, mono);
			if (debug > 1)
				printf("cached minus monotonic %+ld.%06ld\n",
				       (long)diff.tv_sec, (long)diff.tv_usec);
			if (labs((long)diff.tv_sec) < 3600) {
				/* older libevent2 using monotonic */
				offset = sub_tval(systemt, mono);
				TRACE(1, ("%s: Offsetting libevent CLOCK_MONOTONIC times by %+ld.%06ld\n",
					 "gettimeofday_cached",
					 (long)offset.tv_sec,
					 (long)offset.tv_usec));
			}
		}
		offset_ready = TRUE;
	}
	/* apply the (possibly zero) correction and remember the result */
	adj_cached = add_tval(cached, offset);
	*caller_tv = adj_cached;

	return 0;
#else
	/* no monotonic clock available: libevent must be on the 1970 scale */
	return event_base_gettimeofday_cached(b, caller_tv);
#endif
}
1586
1587